content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
    """Return True when this script is running on a Windows platform."""
    return sys.platform[:3] == 'win'
def identity(x):
    """No-op path "normalizer" used on every platform except Cygwin."""
    return x
def cygpath(x):
    """Translate a POSIX-style path list *x* into Windows form using the
    Cygwin ``cygpath -wp`` utility (only used when sys.platform == 'cygwin').
    """
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # Fix: on Python 3 communicate() returns bytes, and splitting bytes with
    # a str separator raises TypeError.  Decode first, the same way
    # confvalue() does elsewhere in this script.
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
def init_storm_env():
    """Export every option from the [environment] section of
    <conf-dir>/storm_env.ini into os.environ.  Silently does nothing when
    the ini file is absent.
    """
    global CLUSTER_CONF_DIR
    ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
    if not os.path.isfile(ini_file):
        return
    config = configparser.ConfigParser()
    # Keep option names case-sensitive; environment variables are.
    config.optionxform = str
    config.read(ini_file)
    for option in config.options('environment'):
        os.environ[option] = config.get('environment', option)
def get_java_cmd():
    """Return the java executable to launch, honoring JAVA_HOME when set."""
    exe = 'java.exe' if is_windows() else 'java'
    return os.path.join(JAVA_HOME, 'bin', exe) if JAVA_HOME else exe
# Classpath normalizer: only Cygwin needs POSIX->Windows path translation.
normclasspath = cygpath if sys.platform == 'cygwin' else identity
# Storm installation root: two directory levels above this script (bin/storm.py).
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
# Cluster config dir: honor $STORM_CONF_DIR, otherwise <storm>/conf.
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
    CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
    CLUSTER_CONF_DIR = STORM_CONF_DIR
# Fall back to the cluster conf dir when the user has no ~/.storm/storm.yaml.
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
    USER_CONF_DIR = CLUSTER_CONF_DIR
# Library layout of a binary release.
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
# Apply storm_env.ini overrides before reading JAVA_HOME and friends below.
init_storm_env()
CONFIG_OPTS = []  # accumulated "-c key=value" overrides (filled in by option parsing)
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd();
# If JAVA_HOME is set it must actually contain bin/java.
if JAVA_HOME and not os.path.exists(JAVA_CMD):
    print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
    sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
# Dependency-resolution state for --jars/--artifacts options
# (populated by command-line parsing elsewhere in this file).
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
    """Serialize the accumulated -c overrides into one JVM system property
    (each option URL-quoted so commas/spaces survive the round trip)."""
    global CONFIG_OPTS
    quoted = [quote_plus(opt) for opt in CONFIG_OPTS]
    return "-Dstorm.options=" + ','.join(quoted)
# Refuse to run from a source checkout: the lib/ directory only exists in a
# binary release, and nothing below can work without it.
if not os.path.exists(STORM_LIB_DIR):
    print("******************************************")
    print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
    print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
    print("******************************************")
    sys.exit(1)
def get_jars_full(adir):
    """Return the full paths of all *.jar entries at *adir*.

    *adir* may be a directory (every contained jar is listed) or a single
    existing file (listed if it ends in .jar); anything else yields [].
    """
    if os.path.isdir(adir):
        candidates = os.listdir(adir)
    elif os.path.exists(adir):
        candidates = [adir]
    else:
        candidates = []
    return [os.path.join(adir, name) for name in candidates if name.endswith(".jar")]
def get_wildcard_dir(path):
    """Return a one-element classpath list for *path*.

    If *path* is a directory, append a "*" wildcard so the JVM includes all
    JARs inside it; if it is an existing file, return it as-is.  Fix: the
    original left ``ret`` unbound (UnboundLocalError) when *path* did not
    exist — now returns [] in that case.
    """
    ret = []
    if os.path.isdir(path):
        ret = [os.path.join(path, "*")]
    elif os.path.exists(path):
        ret = [path]
    return ret
def get_classpath(extrajars, daemon=True, client=False):
    """Build the JVM classpath string for a storm command.

    client=True swaps the full lib dir for the worker-only lib dir;
    daemon=True additionally pulls in extlib-daemon and the daemon-specific
    external classpath.
    """
    paths = get_wildcard_dir(STORM_DIR)
    lib_dir = STORM_WORKER_LIB_DIR if client else STORM_LIB_DIR
    paths += get_wildcard_dir(lib_dir)
    paths += get_wildcard_dir(os.path.join(STORM_DIR, "extlib"))
    if daemon:
        paths += get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon"))
    if STORM_EXT_CLASSPATH is not None:
        paths.append(STORM_EXT_CLASSPATH)
    if daemon and STORM_EXT_CLASSPATH_DAEMON is not None:
        paths.append(STORM_EXT_CLASSPATH_DAEMON)
    paths.extend(extrajars)
    return normclasspath(os.pathsep.join(paths))
def confvalue(name, extrapaths, daemon=True):
    """Query the JVM-side ConfigValue command for config key *name* and
    return its value as a string ("" when the key is not reported)."""
    global CONFFILE
    command = [
        JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
        "-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
    ]
    proc = sub.Popen(command, stdout=sub.PIPE)
    output, _ = proc.communicate()
    # Python 3 hands us bytes; normalize to str before splitting.
    if not isinstance(output, str):
        output = output.decode('utf-8')
    for line in output.split(os.linesep):
        tokens = line.split(" ")
        if tokens[0] == "VALUE:":
            return " ".join(tokens[1:])
    return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
    """Resolve maven *artifacts* (plus transitive deps) on demand via the
    storm-submit DependencyResolverMain tool.

    Returns a dict mapping artifact coordinates to local jar paths ({} when
    no artifacts were requested).

    Raises:
        RuntimeError: resolver exited non-zero, or printed non-JSON output.
    """
    if not artifacts:
        return {}
    print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
    if maven_local_repos_dir is not None:
        print("Local repository directory: %s" % maven_local_repos_dir)
    if proxy_url is not None:
        print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
    sys.stdout.flush()
    # storm-submit module doesn't rely on storm-core and relevant libs
    extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
    classpath = normclasspath(os.pathsep.join(extrajars))
    command = [
        JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
    ]
    command.extend(["--artifacts", ",".join(artifacts)])
    command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
    if maven_local_repos_dir is not None:
        command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
    # Proxy credentials only make sense when a proxy URL was given.
    if proxy_url is not None:
        command.extend(["--proxyUrl", proxy_url])
        if proxy_username is not None:
            command.extend(["--proxyUsername", proxy_username])
            command.extend(["--proxyPassword", proxy_password])
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    if p.returncode != 0:
        raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
    # Python 3: stdout arrives as bytes.
    if not isinstance(output, str):
        output = output.decode('utf-8')
    # For debug purpose, uncomment when you need to debug DependencyResolver
    # print("Resolved dependencies: %s" % output)
    try:
        return json.loads(output)
    except ValueError:
        # Fixed two defects: the original used a bare except, and passed
        # `output` as a second RuntimeError argument instead of
        # interpolating it into the message.
        raise RuntimeError("dependency handler returns non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
    """Syntax: [storm localconfvalue conf-name]
    Prints out the value for conf-name in the local Storm configs.
    The local Storm configs are the ones in ~/.storm/storm.yaml merged
    in with the configs in defaults.yaml.
    """
    value = confvalue(name, [USER_CONF_DIR])
    print(name + ": " + value)
def print_remoteconfvalue(name):
    """Syntax: [storm remoteconfvalue conf-name]
    Prints out the value for conf-name in the cluster's Storm configs.
    The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
    merged in with the configs in defaults.yaml.
    This command must be run on a cluster machine.
    """
    value = confvalue(name, [CLUSTER_CONF_DIR])
    print(name + ": " + value)
def parse_args(string):
    """Takes a string of whitespace-separated tokens and parses it into a list.
    Whitespace inside tokens may be quoted with single quotes, double quotes or
    backslash (similar to command-line arguments in bash).
    >>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
    ['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
    """
    # A token is a maximal run of bare chars, quoted strings, and escapes.
    token_re = re.compile(r'''((?:
        [^\s"'\\] |
        "(?: [^"\\] | \\.)*" |
        '(?: [^'\\] | \\.)*' |
        \\.
        )+)''', re.VERBOSE)
    unquote_double = re.compile(r'"((?:[^"\\]|\\.)*)"')
    unquote_single = re.compile(r"'((?:[^'\\]|\\.)*)'")
    unescape = re.compile(r'\\(.)')
    # Odd slots of split() hold the captured tokens; even slots the gaps.
    tokens = token_re.split(string)[1::2]
    tokens = [unquote_double.sub(r'\1', tok) for tok in tokens]
    tokens = [unquote_single.sub(r'\1', tok) for tok in tokens]
    return [unescape.sub(r'\1', tok) for tok in tokens]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
    """Launch the JVM class *klass* with storm's classpath and standard
    system properties.

    Parameters:
        klass: fully-qualified Java class to run.
        jvmtype: "-server" or "-client" JVM flag.
        jvmopts: extra JVM options inserted before the class name.
        extrajars: additional classpath entries.
        args: program arguments appended after the class name.
        fork: when True, run in a child process (os.spawnvp) and return its
              exit code; otherwise on non-Windows the current process is
              REPLACED via os.execvp and this function never returns.
        daemon: include daemon-only classpath entries.
        client: use the worker lib dir instead of the full lib dir.
        daemonName: value for the -Ddaemon.name property.

    Note: the mutable list defaults are never mutated here, so the shared
    default-argument pitfall does not apply.
    """
    global CONFFILE
    # Ask the cluster config for the log dir; fall back to <storm>/logs.
    storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
    if(storm_log_dir == None or storm_log_dir == "null"):
        storm_log_dir = os.path.join(STORM_DIR, "logs")
    all_args = [
        JAVA_CMD, jvmtype,
        "-Ddaemon.name=" + daemonName,
        get_config_opts(),
        "-Dstorm.home=" + STORM_DIR,
        "-Dstorm.log.dir=" + storm_log_dir,
        "-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
        "-Dstorm.conf.file=" + CONFFILE,
        "-cp", get_classpath(extrajars, daemon, client=client),
    ] + jvmopts + [klass] + list(args)
    print("Running: " + " ".join(all_args))
    sys.stdout.flush()
    exit_code = 0
    if fork:
        exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
    elif is_windows():
        # handling whitespaces in JAVA_CMD
        try:
            ret = sub.check_output(all_args, stderr=sub.STDOUT)
            print(ret)
        except sub.CalledProcessError as e:
            print(e.output)
            sys.exit(e.returncode)
    else:
        # Replaces the current python process; nothing after this runs.
        os.execvp(JAVA_CMD, all_args)
    return exit_code
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
    """Run *klass* from *jarfile* in client mode, resolving any --jars /
    --artifacts dependencies first and wiring them into the classpath and
    the storm.dependency.* system properties."""
    global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
    local_jars = DEP_JARS_OPTS
    artifact_to_file_jars = resolve_dependencies(
        DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS,
        DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL,
        DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
    extra_jars = [jarfile, USER_CONF_DIR, STORM_BIN_DIR]
    extra_jars.extend(local_jars)
    extra_jars.extend(artifact_to_file_jars.values())
    jvm_opts = (JAR_JVM_OPTS + extrajvmopts
                + ["-Dstorm.jar=" + jarfile]
                + ["-Dstorm.dependency.jars=" + ",".join(local_jars)]
                + ["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
    exec_storm_class(
        klass,
        jvmtype="-client",
        extrajars=extra_jars,
        args=args,
        daemon=False,
        jvmopts=jvm_opts)
def local(jarfile, klass, *args):
    """Syntax: [storm local topology-jar-path class ...]
    Runs the main method of class with the specified arguments but pointing to a local cluster
    The storm jars and configs in ~/.storm are put on the classpath.
    The process is configured so that StormSubmitter
    (http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
    and others will interact with a local cluster instead of the one configured by default.
    Most options should work just like with the storm jar command.
    local also adds in the option --local-ttl which sets the number of seconds the
    local cluster will run for before it shuts down.
    --java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
    --java-debug transport=dt_socket,address=localhost:8000
    will open up a debugging server on port 8000.
    """
    [ttl, debug_args, args] = parse_local_opts(args)
    jvm_extra = ["-Dstorm.local.sleeptime=" + ttl]
    if debug_args is not None:
        jvm_extra.append("-agentlib:jdwp=" + debug_args)
    run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args),
                   client=False, daemon=False, extrajvmopts=jvm_extra)
def jar(jarfile, klass, *args):
    """Syntax: [storm jar topology-jar-path class ...]
    Runs the main method of class with the specified arguments.
    The storm worker dependencies and configs in ~/.storm are put on the classpath.
    The process is configured so that StormSubmitter
    (http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
    will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which is not included to application jar, you can pass them to --jars option with comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    And when you want to ship maven artifacts and its transitive dependencies, you can pass them to --artifacts with comma-separated string.
    You can also exclude some dependencies like what you're doing in maven pom.
    Please add exclusion artifacts with '^' separated string after the artifact.
    For example, -artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load jedis and kafka-clients artifact and all of transitive dependencies but exclude slf4j-api from kafka.
    When you need to pull the artifacts from other than Maven Central, you can pass remote repositories to --artifactRepositories option with comma-separated string.
    Repository format is "<name>^<url>". '^' is taken as separator because URL allows various characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add JBoss and HDP repositories for dependency resolver.
    You can provide local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use specific directory. It might help when you don't have '.m2/repository' directory in home directory, because CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information to let dependency resolver utilizing proxy if needed. There're three parameters for proxy:
    --proxyUrl: URL representation of proxy ('http://host:port')
    --proxyUsername: username of proxy if it requires basic auth
    --proxyPassword: password of proxy if it requires basic auth
    Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included to classpath of both the process which runs the class, and also workers for that topology.
    If for some reason you need to have the full storm classpath, not just the one for the worker you may include the command line option `--storm-server-classpath`. Please be careful because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
    """
    [server_class_path, args] = parse_jar_opts(args)
    # --storm-server-classpath disables the worker-only (client) classpath.
    use_worker_classpath = not server_class_path
    run_client_jar(jarfile, klass, list(args), client=use_worker_classpath, daemon=False)
def sql(sql_file, topology_name):
    """Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
    Compiles the SQL statements into a Trident topology and submits it to Storm.
    If user activates explain mode, SQL Runner analyzes each query statement and shows query plan instead of submitting topology.
    --jars and --artifacts, and --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, --proxyPassword options available for jar are also applied to sql command.
    Please refer "help jar" to see how to use --jars and --artifacts, and --artifactRepositories, --proxyUrl, --proxyUsername, --proxyPassword options.
    You normally want to pass these options since you need to set data source to your sql which is an external storage in many cases.
    """
    global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
    local_jars = DEP_JARS_OPTS
    artifact_to_file_jars = resolve_dependencies(
        DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS,
        DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL,
        DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
    # storm-sql-runtime jars ship to workers through --jars, which needs
    # concrete file paths rather than classpath wildcards.
    local_jars.extend(get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime")))
    extrajars = [USER_CONF_DIR, STORM_BIN_DIR]
    extrajars.extend(local_jars)
    extrajars.extend(artifact_to_file_jars.values())
    # sql "core" jars are only needed to run StormSqlRunner itself,
    # not by the generated topology.
    extrajars.extend(get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core")))
    if topology_name == "--explain":
        runner_args = ["--file", sql_file, "--explain"]
    else:
        runner_args = ["--file", sql_file, "--topology", topology_name]
    exec_storm_class(
        "org.apache.storm.sql.StormSqlRunner",
        jvmtype="-client",
        extrajars=extrajars,
        args=runner_args,
        daemon=False,
        jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars),
                 "-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
    """Syntax: [storm kill topology-name [-w wait-time-secs]]
    Kills the topology with the name topology-name. Storm will
    first deactivate the topology's spouts for the duration of
    the topology's message timeout to allow all messages currently
    being processed to finish processing. Storm will then shutdown
    the workers and clean up their state. You can override the length
    of time Storm waits between deactivation and shutdown with the -w flag.
    """
    if not args:
        print_usage(command="kill")
        sys.exit(2)
    # Delegate to the JVM-side KillTopology command.
    classpath_extras = [USER_CONF_DIR, STORM_BIN_DIR]
    exec_storm_class(
        "org.apache.storm.command.KillTopology",
        jvmtype="-client",
        extrajars=classpath_extras,
        args=args)
def upload_credentials(*args):
    """Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
    Uploads a new set of credentials to a running topology
    """
    if not args:
        print_usage(command="upload-credentials")
        sys.exit(2)
    # Delegate to the JVM-side UploadCredentials command.
    classpath_extras = [USER_CONF_DIR, STORM_BIN_DIR]
    exec_storm_class(
        "org.apache.storm.command.UploadCredentials",
        jvmtype="-client",
        extrajars=classpath_extras,
        args=args)
def blobstore(*args):
    """Syntax: [storm blobstore cmd]
    list [KEY...] - lists blobs currently in the blob store
    cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents comes from a FILE
    or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents comes from
    a FILE or STDIN (requires write access).
    delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma
    separated list (requires admin access).
    replication --read KEY - Used to read the replication factor of the blob.
    replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
    replication factor of a blob.
    For example, the following would create a mytopo:data.tgz key using the data
    stored in data.tgz. User alice would have full access, bob would have
    read/write access and everyone else would have read access.
    storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
    """
    # All subcommand parsing happens JVM-side in the Blobstore command.
    classpath_extras = [USER_CONF_DIR, STORM_BIN_DIR]
    exec_storm_class(
        "org.apache.storm.command.Blobstore",
        jvmtype="-client",
        extrajars=classpath_extras,
        args=args)
def heartbeats(*args):
    """Syntax: [storm heartbeats [cmd]]
    list PATH - lists heartbeats nodes under PATH currently in the ClusterState.
    get PATH - Get the heartbeat data at PATH
    """
    # Delegate to the JVM-side Heartbeats command.
    classpath_extras = [USER_CONF_DIR, STORM_BIN_DIR]
    exec_storm_class(
        "org.apache.storm.command.Heartbeats",
        jvmtype="-client",
        extrajars=classpath_extras,
        args=args)
def activate(*args):
    """Syntax: [storm activate topology-name]
    Activates the specified topology's spouts.
    """
    if not args:
        print_usage(command="activate")
        sys.exit(2)
    # Delegate to the JVM-side Activate command.
    classpath_extras = [USER_CONF_DIR, STORM_BIN_DIR]
    exec_storm_class(
        "org.apache.storm.command.Activate",
        jvmtype="-client",
        extrajars=classpath_extras,
        args=args)
def set_log_level(*args):
    """
    Dynamically change topology log levels
    Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
    where log level is one of:
    ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
    and timeout is integer seconds.
    e.g.
    ./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
    Set the root logger's level to DEBUG for 30 seconds
    ./bin/storm set_log_level -l com.myapp=WARN topology-name
    Set the com.myapp logger's level to WARN for 30 seconds
    ./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
    Set the com.myapp logger's level to WARN indifinitely, and com.myOtherLogger
    to ERROR for 123 seconds
    ./bin/storm set_log_level -r com.myOtherLogger topology-name
    Clears settings, resetting back to the original level
    """
    # Delegate to the JVM-side SetLogLevel command.
    classpath_extras = [USER_CONF_DIR, STORM_BIN_DIR]
    exec_storm_class(
        "org.apache.storm.command.SetLogLevel",
        jvmtype="-client",
        extrajars=classpath_extras,
        args=args)
def listtopos(*args):
    """Syntax: [storm list]
    List the running topologies and their statuses.
    """
    # Delegate to the JVM-side ListTopologies command.
    classpath_extras = [USER_CONF_DIR, STORM_BIN_DIR]
    exec_storm_class(
        "org.apache.storm.command.ListTopologies",
        jvmtype="-client",
        extrajars=classpath_extras,
        args=args)
def deactivate(*args):
    """Syntax: [storm deactivate topology-name]
    Deactivates the specified topology's spouts.
    """
    if not args:
        print_usage(command="deactivate")
        sys.exit(2)
    # Delegate to the JVM-side Deactivate command.
    classpath_extras = [USER_CONF_DIR, STORM_BIN_DIR]
    exec_storm_class(
        "org.apache.storm.command.Deactivate",
        jvmtype="-client",
        extrajars=classpath_extras,
        args=args)
def rebalance(*args):
    """Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
    Sometimes you may wish to spread out the workers for a running topology.
    For example, let's say you have a 10 node cluster running
    4 workers per node, and then let's say you add another 10 nodes to
    the cluster. You may wish to have Storm spread out the workers for the
    running topology so that each node runs 2 workers. One way to do this
    is to kill the topology and resubmit it, but Storm provides a "rebalance"
    command that provides an easier way to do this.
    Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag) make requested adjustments to the topology
    and let the scheduler try to find a better scheduling based off of the
    new situation. The topology will then return to its previous state of activation
    (so a deactivated topology will still be deactivated and an activated
    topology will go back to being activated).
    Some of what you can change about a topology includes the number of requested workers (-n flag)
    The number of executors for a given component (-e flag) the resources each component is
    requesting as used by the resource aware scheduler (-r flag) and configs (-t flag).
    """
    if not args:
        print_usage(command="rebalance")
        sys.exit(2)
    # Delegate to the JVM-side Rebalance command.
    classpath_extras = [USER_CONF_DIR, STORM_BIN_DIR]
    exec_storm_class(
        "org.apache.storm.command.Rebalance",
        jvmtype="-client",
        extrajars=classpath_extras,
        args=args)
def get_errors(*args):
    """Syntax: [storm get-errors topology-name]
    Get the latest error from the running topology. The returned result contains
    the key value pairs for component-name and component-error for the components in error.
    The result is returned in json format.
    """
    if not args:
        print_usage(command="get-errors")
        sys.exit(2)
    exec_storm_class(
        "org.apache.storm.command.GetErrors",
        args=args,
        jvmtype="-client",
        # Consistency fix: use the STORM_BIN_DIR constant (defined as
        # os.path.join(STORM_DIR, "bin")) like the other commands do,
        # instead of recomputing the same path inline.
        extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def healthcheck(*args):
    """Syntax: [storm node-health-check]
    Run health checks on the local supervisor.
    """
    exec_storm_class(
        "org.apache.storm.command.HealthCheck",
        args=args,
        jvmtype="-client",
        # Consistency fix: use the STORM_BIN_DIR constant rather than
        # recomputing os.path.join(STORM_DIR, "bin") inline.
        extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def kill_workers(*args):
    """Syntax: [storm kill_workers]
    Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, then user needs
    to have admin rights on the node to be able to successfully kill all workers.
    """
    exec_storm_class(
        "org.apache.storm.command.KillWorkers",
        args=args,
        jvmtype="-client",
        # Consistency fix: use the STORM_BIN_DIR constant rather than
        # recomputing os.path.join(STORM_DIR, "bin") inline.
        extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def admin(*args):
    """Syntax: [storm admin cmd [options]]
    The storm admin command provides access to several operations that can help
    an administrator debug or fix a cluster.
    remove_corrupt_topologies - This command should be run on a nimbus node as
    the same user nimbus runs as. It will go directly to zookeeper + blobstore
    and find topologies that appear to be corrupted because of missing blobs.
    It will kill those topologies.
    zk_cli [options] - This command will launch a zookeeper cli pointing to the
    storm zookeeper instance logged in as the nimbus user. It should be run on
    a nimbus server as the user nimbus runs as.
    -s --server <connection string>: Set the connection string to use,
    defaults to storm connection string.
    -t --time-out <timeout>: Set the timeout to use, defaults to storm
    zookeeper timeout.
    -w --write: Allow for writes, defaults to read only, we don't want to
    cause problems.
    -n --no-root: Don't include the storm root on the default connection string.
    -j --jaas <jaas_file>: Include a jaas file that should be used when
    authenticating with ZK defaults to the
    java.security.auth.login.config conf.
    creds topology_id - Print the credential keys for a topology.
    """
    exec_storm_class(
        "org.apache.storm.command.AdminCommands",
        args=args,
        jvmtype="-client",
        # Consistency fix: use the STORM_BIN_DIR constant rather than
        # recomputing os.path.join(STORM_DIR, "bin") inline.
        extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def shell(resourcesdir, command, *args):
    """Syntax: [storm shell resourcesdir command args]
    Archives resources to jar and uploads jar to Nimbus, and executes following arguments on "local". Useful for non JVM languages.
    eg: `storm shell resources/ python topology.py arg1 arg2`
    """
    # Random suffix avoids collisions when several shells run from one CWD.
    tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
    # Fix: build the jar with an argument list instead of os.system() string
    # interpolation, so paths containing whitespace or shell metacharacters
    # are handled safely.
    sub.call(["jar", "cf", tmpjarpath, resourcesdir])
    runnerargs = [tmpjarpath, command]
    runnerargs.extend(args)
    exec_storm_class(
        "org.apache.storm.command.shell_submission",
        args=runnerargs,
        jvmtype="-client",
        extrajars=[USER_CONF_DIR],
        fork=True)
    # Fix: os.remove is portable, unlike shelling out to "rm".
    os.remove(tmpjarpath)
def repl():
    """Syntax: [storm repl]
    Opens up a Clojure REPL with the storm jars and configuration
    on the classpath. Useful for debugging.
    """
    exec_storm_class("clojure.main", jvmtype="-client", extrajars=[CLUSTER_CONF_DIR])
def get_log4j2_conf_dir():
    """Resolve the directory holding the log4j2 cluster configuration:
    the storm.log4j2.conf.dir config if set (relative values are anchored
    at the storm install dir), otherwise the bundled log4j2/ directory."""
    conf_dir = confvalue("storm.log4j2.conf.dir", [CLUSTER_CONF_DIR])
    if conf_dir is None or conf_dir == "null":
        return STORM_LOG4J2_CONF_DIR
    if not os.path.isabs(conf_dir):
        return os.path.join(STORM_DIR, conf_dir)
    return conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
    """Syntax: [storm nimbus]
    Launches the nimbus daemon. This command should be run under
    supervision with a tool like daemontools or monit.
    See Setting up a Storm cluster for more information.
    (http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
    """
    cppaths = [CLUSTER_CONF_DIR]
    # Configured childopts plus standard logging/serialization properties.
    jvmopts = parse_args(confvalue("nimbus.childopts", cppaths))
    jvmopts.append("-Djava.deserialization.disabled=true")
    jvmopts.append("-Dlogfile.name=nimbus.log")
    jvmopts.append("-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"))
    exec_storm_class(
        klass,
        jvmtype="-server",
        daemonName="nimbus",
        extrajars=cppaths,
        jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
    """Syntax: [storm pacemaker]
    Launches the Pacemaker daemon. This command should be run under
    supervision with a tool like daemontools or monit.
    See Setting up a Storm cluster for more information.
    (http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
    """
    cppaths = [CLUSTER_CONF_DIR]
    # Configured childopts plus standard logging/serialization properties.
    jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths))
    jvmopts.append("-Djava.deserialization.disabled=true")
    jvmopts.append("-Dlogfile.name=pacemaker.log")
    jvmopts.append("-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"))
    exec_storm_class(
        klass,
        jvmtype="-server",
        daemonName="pacemaker",
        extrajars=cppaths,
        jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
    """Syntax: [storm supervisor]
    Launches the supervisor daemon. This command should be run
    under supervision with a tool like daemontools or monit.
    See Setting up a Storm cluster for more information.
    (http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
    """
    cppaths = [CLUSTER_CONF_DIR]
    # Configured childopts plus standard logging/serialization properties.
    jvmopts = parse_args(confvalue("supervisor.childopts", cppaths))
    jvmopts.append("-Djava.deserialization.disabled=true")
    jvmopts.append("-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE)
    jvmopts.append("-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"))
    exec_storm_class(
        klass,
        jvmtype="-server",
        daemonName="supervisor",
        extrajars=cppaths,
        jvmopts=jvmopts)
def ui():
    """Syntax: [storm ui]
    Launches the UI daemon. The UI provides a web interface for a Storm
    cluster and shows detailed stats about running topologies. This command
    should be run under supervision with a tool like daemontools or monit.
    See Setting up a Storm cluster for more information.
    (http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
    """
    cppaths = [CLUSTER_CONF_DIR]
    jvmopts = parse_args(confvalue("ui.childopts", cppaths))
    jvmopts += [
        "-Djava.deserialization.disabled=true",
        "-Dlogfile.name=ui.log",
        "-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
    ]
    # Webapp jars plus the cluster config on the classpath.
    classpath_extras = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
    classpath_extras.append(CLUSTER_CONF_DIR)
    exec_storm_class(
        "org.apache.storm.daemon.ui.UIServer",
        jvmtype="-server",
        daemonName="ui",
        jvmopts=jvmopts,
        extrajars=classpath_extras)
def logviewer():
    """Syntax: [storm logviewer]
    Launches the log viewer daemon. It provides a web interface for viewing
    storm log files. This command should be run under supervision with a
    tool like daemontools or monit.
    See Setting up a Storm cluster for more information.
    (http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
    """
    cppaths = [CLUSTER_CONF_DIR]
    jvmopts = parse_args(confvalue("logviewer.childopts", cppaths))
    jvmopts += [
        "-Djava.deserialization.disabled=true",
        "-Dlogfile.name=logviewer.log",
        "-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
    ]
    # Webapp jars plus the cluster config on the classpath.
    classpath_extras = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
    classpath_extras.append(CLUSTER_CONF_DIR)
    exec_storm_class(
        "org.apache.storm.daemon.logviewer.LogviewerServer",
        jvmtype="-server",
        daemonName="logviewer",
        jvmopts=jvmopts,
        extrajars=classpath_extras)
def drpcclient(*args):
    """Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
    Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name all of the arguments are treated
    as arguments to the function. If no function is given the arguments must
    be pairs of function argument.
    The server and port are picked from the configs.
    """
    if not args:
        print_usage(command="drpc-client")
        sys.exit(2)
    # Delegate to the JVM-side BasicDrpcClient command.
    classpath_extras = [USER_CONF_DIR, STORM_BIN_DIR]
    exec_storm_class(
        "org.apache.storm.command.BasicDrpcClient",
        jvmtype="-client",
        extrajars=classpath_extras,
        args=args)
def drpc():
    """Syntax: [storm drpc]
    Launches a DRPC daemon. This command should be run under supervision
    with a tool like daemontools or monit.
    See Distributed RPC for more information.
    (http://storm.apache.org/documentation/Distributed-RPC)
    """
    # Daemon JVM flags: configured childopts plus the fixed logging/safety flags.
    log4j_conf = os.path.join(get_log4j2_conf_dir(), "cluster.xml")
    jvm_options = parse_args(confvalue("drpc.childopts", [CLUSTER_CONF_DIR]))
    jvm_options += [
        "-Djava.deserialization.disabled=true",
        "-Dlogfile.name=drpc.log",
        "-Dlog4j.configurationFile=" + log4j_conf,
    ]
    # The daemon classpath is the webapp jars plus the cluster conf directory.
    classpath_extras = get_wildcard_dir(STORM_WEBAPP_LIB_DIR) + [CLUSTER_CONF_DIR]
    exec_storm_class(
        "org.apache.storm.daemon.drpc.DRPCServer",
        jvmtype="-server",
        daemonName="drpc",
        jvmopts=jvm_options,
        extrajars=classpath_extras)
def dev_zookeeper():
    """Syntax: [storm dev-zookeeper]
    Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
    "storm.zookeeper.port" as its port. This is only intended for development/testing, the
    Zookeeper instance launched is not configured to be used in production.
    """
    jvmopts = [
        "-Dlogfile.name=dev-zookeeper.log",
        "-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
    ]
    # Removed the unused `cppaths` local; CLUSTER_CONF_DIR is passed directly below.
    exec_storm_class(
        "org.apache.storm.command.DevZookeeper",
        jvmtype="-server",
        daemonName="dev_zookeeper",
        jvmopts=jvmopts,
        extrajars=[CLUSTER_CONF_DIR])
def version():
    """Syntax: [storm version]
    Prints the version number of this Storm release.
    """
    # Removed the unused `cppaths` local; CLUSTER_CONF_DIR is passed directly below.
    exec_storm_class(
        "org.apache.storm.utils.VersionInfo",
        jvmtype="-client",
        extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
    """Syntax: [storm classpath]
    Prints the classpath used by the storm client when running commands.
    """
    client_classpath = get_classpath([], client=True)
    print(client_classpath)
def print_server_classpath():
    """Syntax: [storm server_classpath]
    Prints the classpath used by the storm servers when running commands.
    """
    server_classpath = get_classpath([], daemon=True)
    print(server_classpath)
def monitor(*args):
    """Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
    Monitor given topology's throughput interactively.
    One can specify poll-interval, component-id, stream-id, watch-item[emitted | transferred]
    By default,
    poll-interval is 4 seconds;
    all component-ids will be list;
    stream-id is 'default';
    watch-item is 'emitted';
    """
    # Thin wrapper: forward all CLI arguments to the Java-side Monitor command.
    client_extrajars = [USER_CONF_DIR, STORM_BIN_DIR]
    exec_storm_class(
        "org.apache.storm.command.Monitor",
        jvmtype="-client",
        args=args,
        extrajars=client_extrajars)
def print_commands():
    """Print the sorted list of client commands and documentation pointers."""
    command_index = "\n\t".join(sorted(COMMANDS.keys()))
    print("Commands:\n\t" + command_index)
    print("\nHelp: \n\thelp \n\thelp <command>")
    print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
    print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
    """Print one help message or list of available commands"""
    # `is not None` is the idiomatic identity check (was `command != None`).
    if command is not None:
        if command in COMMANDS:
            # A command's help text is its handler's docstring.
            print(COMMANDS[command].__doc__ or
                  "No documentation provided for <%s>" % command)
        else:
            print("<%s> is not a valid command" % command)
    else:
        print_commands()
def unknown_command(*args):
    """Report an unrecognized sub-command, show usage, and exit with 254."""
    attempted = ' '.join(sys.argv[1:])
    print("Unknown command: [storm %s]" % attempted)
    print_usage()
    sys.exit(254)
# Dispatch table mapping each storm CLI sub-command name to its handler
# function. main() looks the first positional argument up here, falling back
# to unknown_command for anything unrecognized; print_usage() prints each
# handler's docstring as its help text.
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
            "drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
            "remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
            "activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
            "list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
            "upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
            "get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
            "node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
    """Accumulate -c key=value override strings onto the global CONFIG_OPTS list."""
    global CONFIG_OPTS
    # extend() is a no-op for an empty list, so the previous length guard and
    # element-by-element append loop were unnecessary.
    CONFIG_OPTS.extend(config_list)
def parse_local_opts(args):
    """Split --local-ttl and --java-debug out of *args*.

    Returns (ttl, debug_args, remaining_args). ttl defaults to "20" and
    debug_args to None when the corresponding option is absent.
    """
    tokens = list(args)
    remaining = []
    ttl = "20"
    debug_args = None
    i = 0
    while i < len(tokens):
        token = tokens[i]
        if token == "--local-ttl":
            i += 1
            ttl = tokens[i]
        elif token == "--java-debug":
            i += 1
            debug_args = tokens[i]
        else:
            remaining.append(token)
        i += 1
    return ttl, debug_args, remaining
def parse_jar_opts(args):
    """Strip --storm-server-classpath out of *args*.

    Returns (use_server_classpath, remaining_args); the flag is True when the
    option appeared anywhere in the argument list.
    """
    flag_token = "--storm-server-classpath"
    use_server_classpath = flag_token in args
    remaining = [token for token in args if token != flag_token]
    return use_server_classpath, remaining
def parse_config_opts(args):
    """Pull the global storm options out of *args*.

    Consumes -c/--config and the dependency-resolver options, returning a
    9-tuple of the collected values plus the untouched remaining arguments.
    May set the global CONFFILE as a side effect of --config.
    """
    pending = list(reversed(args))
    config_list, args_list = [], []
    jars_list, artifacts_list, artifact_repositories_list = [], [], []
    maven_local_repository_dir = None
    proxy_url = proxy_username = proxy_password = None
    # Options whose (comma-separated) value extends one of the lists above.
    list_targets = {
        "--jars": jars_list,
        "--artifacts": artifacts_list,
        "--artifactRepositories": artifact_repositories_list,
    }
    while pending:
        token = pending.pop()
        if token == "-c":
            config_list.append(pending.pop())
        elif token == "--config":
            global CONFFILE
            CONFFILE = pending.pop()
        elif token in list_targets:
            list_targets[token].extend(pending.pop().split(','))
        elif token == "--mavenLocalRepositoryDirectory":
            maven_local_repository_dir = pending.pop()
        elif token == "--proxyUrl":
            proxy_url = pending.pop()
        elif token == "--proxyUsername":
            proxy_username = pending.pop()
        elif token == "--proxyPassword":
            proxy_password = pending.pop()
        else:
            args_list.append(token)
    return (config_list, jars_list, artifacts_list, artifact_repositories_list,
            maven_local_repository_dir, proxy_url, proxy_username,
            proxy_password, args_list)
def main():
    """CLI entry point: parse the global options, then dispatch the first
    positional argument to its command handler."""
    if len(sys.argv) < 2:
        print_usage()
        sys.exit(-1)
    global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
        DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
        DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
    # parse_config_opts strips the global flags; everything left over is the
    # sub-command name followed by its own arguments.
    (config_list, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS,
     DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY,
     DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD,
     remaining) = parse_config_opts(sys.argv[1:])
    parse_config(config_list)
    command_name = remaining[0]
    handler = COMMANDS.get(command_name, unknown_command)
    handler(*remaining[1:])
# Standard script guard: dispatch to main() only when run directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
| bin/storm.py | 43,055 | Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents comes from a FILE
or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma separated list.
update [-f FILE] KEY - update the contents of a blob. Contents comes from
a FILE or STDIN (requires write access).
delete KEY - delete an entry from the blob store (requires write access).
set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma
separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
If a -f argument is supplied to set the function name all of the arguments are treated
as arguments to the function. If no function is given the arguments must
be pairs of function argument.
The server and port are picked from the configs.
Syntax: [storm get-errors topology-name]
Get the latest error from the running topology. The returned result contains
the key value pairs for component-name and component-error for the components in error.
The result is returned in json format.
Syntax: [storm node-health-check]
Run health checks on the local supervisor.
Syntax: [storm heartbeats [cmd]]
list PATH - lists heartbeats nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
And when you want to ship maven artifacts and its transitive dependencies, you can pass them to --artifacts with comma-separated string.
You can also exclude some dependencies like what you're doing in maven pom.
Please add exclusion artifacts with '^' separated string after the artifact.
For example, -artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load jedis and kafka-clients artifact and all of transitive dependencies but exclude slf4j-api from kafka.
When you need to pull the artifacts from other than Maven Central, you can pass remote repositories to --artifactRepositories option with comma-separated string.
Repository format is "<name>^<url>". '^' is taken as separator because URL allows various characters.
For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add JBoss and HDP repositories for dependency resolver.
You can provide local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use specific directory. It might help when you don't have '.m2/repository' directory in home directory, because CWD is sometimes non-deterministic (fragile).
You can also provide proxy information to let dependency resolver utilizing proxy if needed. There're three parameters for proxy:
--proxyUrl: URL representation of proxy ('http://host:port')
--proxyUsername: username of proxy if it requires basic auth
--proxyPassword: password of proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
When you pass jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included to classpath of both the process which runs the class, and also workers for that topology.
If for some reason you need to have the full storm classpath, not just the one for the worker you may include the command line option `--storm-server-classpath`. Please be careful because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shutdown
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
on a supervisor node. If the cluster is running in secure mode, then user needs
to have admin rights on the node to be able to successfully kill all workers.
Syntax: [storm list]
List the running topologies and their statuses.
Syntax: [storm local topology-jar-path class ...]
Runs the main method of class with the specified arguments but pointing to a local cluster
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor given topology's throughput interactively.
One can specify poll-interval, component-id, stream-id, watch-item[emitted | transferred]
By default,
poll-interval is 4 seconds;
all component-ids will be list;
stream-id is 'default';
watch-item is 'emitted';
Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f'f' "g"g" "i""i" 'j''j' k" "k l' l' mm n\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n
']
Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
Print all client commands and link to documentation
Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
Print one help message or list of available commands
Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
message timeout (overridable with the -w flag) make requested adjustments to the topology
and let the scheduler try to find a better scheduling based off of the
new situation. The topology will then return to its previous state of activation
(so a deactivated topology will still be deactivated and an activated
topology will go back to being activated).
Some of what you can change about a topology includes the number of requested workers (-n flag)
The number of executors for a given component (-e flag) the resources each component is
requesting as used by the resource aware scheduler (-r flag) and configs (-t flag).
Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
Set the com.myapp logger's level to WARN for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
    Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
Syntax: [storm shell resourcesdir command args]
Archives resources to jar and uploads jar to Nimbus, and executes following arguments on "local". Useful for non JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2`
Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
If user activates explain mode, SQL Runner analyzes each query statement and shows query plan instead of submitting topology.
--jars and --artifacts, and --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, --proxyPassword options available for jar are also applied to sql command.
Please refer "help jar" to see how to use --jars and --artifacts, and --artifactRepositories, --proxyUrl, --proxyUsername, --proxyPassword options.
You normally want to pass these options since you need to set data source to your sql which is an external storage in many cases.
Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
Syntax: [storm version]
Prints the version number of this Storm release.
!/usr/bin/env python Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. python 3 python 2 python 3 python 2 If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory. python 3 storm-submit module doesn't rely on storm-core and relevant libs python 3 For debug purpose, uncomment when you need to debug DependencyResolver print("Resolved dependencies: %s" % output) handling whitespaces in JAVA_CMD include storm-sql-runtime jar(s) to local jar list --jars doesn't support wildcard so it should call get_jars_full include this for running StormSqlRunner, but not for generated topology | 16,766 | en | 0.776383 |
"""
The pyinspirehep is A python wrapper for Inspirehep API.
"""
from pyinspirehep.client import Client | pyinspirehep/__init__.py | 104 | The pyinspirehep is A python wrapper for Inspirehep API. | 56 | en | 0.40504 |
# twitter_app/iris_classifier.py
import os
import pickle
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
# Default location of the pickled model: ../models/latest_model.pkl,
# resolved relative to this source file rather than the working directory.
MODEL_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "models", "latest_model.pkl")
def train_and_save_model(model_filepath=None):
    """Train a LogisticRegression classifier on the iris dataset and pickle it.

    model_filepath: destination path for the pickled model; defaults to
        MODEL_FILEPATH when None (backward-compatible with the old
        zero-argument call).
    Returns the fitted classifier.
    """
    if model_filepath is None:
        model_filepath = MODEL_FILEPATH
    print("TRAINING THE MODEL...")
    # X is the (150, 4) feature matrix, y the (150,) label vector.
    X, y = load_iris(return_X_y=True)
    classifier = LogisticRegression()  # for example
    classifier.fit(X, y)
    print("SAVING THE MODEL...")
    with open(model_filepath, "wb") as model_file:
        pickle.dump(classifier, model_file)
    return classifier
def load_model(model_filepath=None):
    """Unpickle and return the model stored at model_filepath.

    model_filepath: path of the pickled model; defaults to MODEL_FILEPATH
        when None (backward-compatible with the old zero-argument call).
    """
    if model_filepath is None:
        model_filepath = MODEL_FILEPATH
    print("LOADING THE MODEL...")
    with open(model_filepath, "rb") as model_file:
        saved_model = pickle.load(model_file)
    return saved_model
if __name__ == "__main__":
#train_and_save_model()
clf = load_model()
print("CLASSIFIER:", clf)
X, y = load_iris(return_X_y=True) # just to have some data to use when predicting
inputs = X[:2, :]
print(type(inputs), inputs)
result = clf.predict(inputs)
print("RESULT:", result) | twitter_app/iris_classifier.py | 1,191 | twitter_app/iris_classifier.pyprint(type(X), X.shape) > <class 'numpy.ndarray'> (150, 4)print(type(y), y.shape) > <class 'numpy.ndarray'> (150,) for exampletrain_and_save_model() just to have some data to use when predicting | 224 | en | 0.395596 |
"""BOM data 'collector' that downloads the observation data."""
import asyncio
import datetime
import aiohttp
import logging
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
# Minimum interval enforced by @Throttle between API refreshes.
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(minutes=10)
# Base endpoint of the BOM weather API; the relative paths below are
# formatted with the location's geohash.
BASE_URL = "https://api.weather.bom.gov.au"
DAILY_FORECASTS_URL = "/v1/locations/{}/forecasts/daily"
LOCATIONS_URL = "/v1/locations/{}"
# Maps the API's icon_descriptor strings to Material Design Icon names.
MDI_ICON_MAP = {
    "clear": "mdi:weather-night",
    "cloudy": "mdi:weather-cloudy",
    "cyclone": "mdi:weather-hurricane",
    "dust": "mdi:weather-hazy",
    "dusty": "mdi:weather-hazy",
    "fog": "mdi:weather-fog",
    "frost": "mdi:snowflake-melt",
    "haze": "mdi:weather-hazy",
    "hazy": "mdi:weather-hazy",
    "heavy_shower": "mdi:weather-pouring",
    "heavy_showers": "mdi:weather-pouring",
    "light_rain": "mdi:weather-partly-rainy",
    "light_shower": "mdi:weather-light-showers",
    "light_showers": "mdi:weather-light-showers",
    "mostly_sunny": "mdi:weather-sunny",
    "partly_cloudy": "mdi:weather-partly-cloudy",
    "rain": "mdi:weather-pouring",
    "shower": "mdi:weather-rainy",
    "showers": "mdi:weather-rainy",
    "snow": "mdi:weather-snowy",
    "storm": "mdi:weather-lightning-rainy",
    "storms": "mdi:weather-lightning-rainy",
    "sunny": "mdi:weather-sunny",
    "tropical_cyclone": "mdi:weather-hurricane",
    "wind": "mdi:weather-windy",
    "windy": "mdi:weather-windy",
    # A forecast may carry no icon descriptor at all.
    None: None,
}
# Observations endpoint (kept as an absolute URL, unlike the paths above).
OBSERVATIONS_URL = "https://api.weather.bom.gov.au/v1/locations/{}/observations"
# Maps the API's UV category codes to human-readable labels.
UV_MAP = {
    "extreme": "Extreme",
    "veryhigh": "Very High",
    "high": "High",
    "moderate": "Moderate",
    "low": "Low",
    None: None,
}
class Collector:
    """Data collector for BOM integration.

    Fetches and caches observations and daily-forecast JSON for a single
    location (identified by the geohash of the supplied coordinates).
    """
    def __init__(self, latitude, longitude):
        """Init BOM data collector."""
        self.observations_data = None
        self.daily_forecasts_data = None
        # All API endpoints are addressed by the 6-character geohash.
        self.geohash = self.geohash_encode(latitude, longitude)
        _LOGGER.debug(f"geohash: {self.geohash}")
    async def get_location_name(self):
        """Get JSON location name from BOM API endpoint.

        Sets self.location_name and returns True on HTTP 200; implicitly
        returns None on any other response.
        """
        url = BASE_URL + LOCATIONS_URL.format(self.geohash)
        async with aiohttp.ClientSession() as session:
            response = await session.get(url)
            if response is not None and response.status == 200:
                locations_data = await response.json()
                self.location_name = locations_data["data"]["name"]
                return True
    async def get_observations_data(self):
        """Get JSON observations data from BOM API endpoint."""
        url = OBSERVATIONS_URL.format(self.geohash)
        async with aiohttp.ClientSession() as session:
            response = await session.get(url)
            if response is not None and response.status == 200:
                self.observations_data = await response.json()
                # Flatten immediately so consumers always see the flat keys.
                await self.format_observations_data()
    async def format_observations_data(self):
        """Flatten out wind and gust data."""
        flattened = {}
        wind = self.observations_data["data"]["wind"]
        flattened["wind_speed_kilometre"] = wind["speed_kilometre"]
        flattened["wind_speed_knot"] = wind["speed_knot"]
        flattened["wind_direction"] = wind["direction"]
        # Gust may be null in the payload; keep the keys present with None
        # values so downstream lookups never KeyError.
        if self.observations_data["data"]["gust"] is not None:
            gust = self.observations_data["data"]["gust"]
            flattened["gust_speed_kilometre"] = gust["speed_kilometre"]
            flattened["gust_speed_knot"] = gust["speed_knot"]
        else:
            flattened["gust_speed_kilometre"] = None
            flattened["gust_speed_knot"] = None
        self.observations_data["data"].update(flattened)
    async def get_daily_forecasts_data(self):
        """Get JSON daily forecasts data from BOM API endpoint."""
        url = BASE_URL + DAILY_FORECASTS_URL.format(self.geohash)
        async with aiohttp.ClientSession() as session:
            response = await session.get(url)
            if response is not None and response.status == 200:
                self.daily_forecasts_data = await response.json()
                await self.format_forecast_data()
    async def format_forecast_data(self):
        """Flatten out forecast data."""
        flattened = {}
        days = len(self.daily_forecasts_data["data"])
        for day in range(0, days):
            # Every key below is reassigned on each iteration, so reusing the
            # same `flattened` dict across days does not leak values.
            icon = self.daily_forecasts_data["data"][day]["icon_descriptor"]
            flattened["mdi_icon"] = MDI_ICON_MAP[icon]
            uv = self.daily_forecasts_data["data"][day]["uv"]
            flattened["uv_category"] = UV_MAP[uv["category"]]
            flattened["uv_max_index"] = uv["max_index"]
            flattened["uv_start_time"] = uv["start_time"]
            flattened["uv_end_time"] = uv["end_time"]
            rain = self.daily_forecasts_data["data"][day]["rain"]
            flattened["rain_chance"] = rain["chance"]
            flattened["rain_amount_min"] = rain["amount"]["min"]
            # When rain amount max is None, set as rain amount min
            if rain["amount"]["max"] is None:
                flattened["rain_amount_max"] = flattened["rain_amount_min"]
                flattened["rain_amount_range"] = rain["amount"]["min"]
            else:
                flattened["rain_amount_max"] = rain["amount"]["max"]
                flattened["rain_amount_range"] = "{} to {}".format(
                    rain["amount"]["min"],
                    rain["amount"]["max"],
                )
            self.daily_forecasts_data["data"][day].update(flattened)
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    async def async_update(self):
        """Refresh the data on the collector object.

        Throttled: runs at most once per MIN_TIME_BETWEEN_UPDATES.
        """
        await self.get_observations_data()
        await self.get_daily_forecasts_data()
    def geohash_encode(self, latitude, longitude, precision=6):
        """Encode (latitude, longitude) as a geohash of `precision` characters.

        Standard geohash bisection: alternately halve the longitude and
        latitude intervals, packing 5 bits per base-32 output character.
        """
        base32 = '0123456789bcdefghjkmnpqrstuvwxyz'
        lat_interval = (-90.0, 90.0)
        lon_interval = (-180.0, 180.0)
        geohash = []
        bits = [16, 8, 4, 2, 1]
        bit = 0
        ch = 0
        even = True
        while len(geohash) < precision:
            if even:
                mid = (lon_interval[0] + lon_interval[1]) / 2
                if longitude > mid:
                    ch |= bits[bit]
                    lon_interval = (mid, lon_interval[1])
                else:
                    lon_interval = (lon_interval[0], mid)
            else:
                mid = (lat_interval[0] + lat_interval[1]) / 2
                if latitude > mid:
                    ch |= bits[bit]
                    lat_interval = (mid, lat_interval[1])
                else:
                    lat_interval = (lat_interval[0], mid)
            even = not even
            if bit < 4:
                bit += 1
            else:
                geohash += base32[ch]
                bit = 0
                ch = 0
        return ''.join(geohash)
| custom_components/bureau_of_meteorology/PyBoM/collector.py | 6,890 | Data collector for BOM integration.
Init BOM data collector.
BOM data 'collector' that downloads the observation data.
When rain amount max is None, set as rain amount min | 173 | en | 0.721828 |
# SPDX-License-Identifier: MIT
# Copyright (c) 2018-2022 Amano Team
import os
import shutil
import tempfile
from PIL import Image
from pyrogram import Client, filters
from pyrogram.enums import MessageEntityType
from pyrogram.errors import PeerIdInvalid, StickersetInvalid
from pyrogram.raw.functions.messages import GetStickerSet, SendMedia
from pyrogram.raw.functions.stickers import AddStickerToSet, CreateStickerSet
from pyrogram.raw.types import (
DocumentAttributeFilename,
InputDocument,
InputMediaUploadedDocument,
InputStickerSetItem,
InputStickerSetShortName,
)
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, Message
from eduu.config import LOG_CHAT, PREFIXES
from eduu.utils import EMOJI_PATTERN, http
from eduu.utils.localization import use_chat_lang
@Client.on_message(filters.command(["kang", "kibe", "steal"], PREFIXES))
@use_chat_lang()
async def kang_sticker(c: Client, m: Message, strings):
    """Copy ("kang") a sticker, photo, image document or image URL into the
    invoking user's sticker pack.

    The media is first uploaded to LOG_CHAT to obtain a document with a
    reusable file_reference, then added to the user's existing pack (when it
    has free space) or to a newly created pack.
    """
    prog_msg = await m.reply_text(strings("kanging_sticker_msg"))
    bot_username = c.me.username
    sticker_emoji = "🤔"  # default emoji when none is supplied or attached
    packnum = 0
    packname_found = False
    resize = False
    animated = False
    reply = m.reply_to_message
    user = await c.resolve_peer(m.from_user.username or m.from_user.id)
    if reply and reply.media:
        # Source media comes from the replied-to message.
        if reply.photo:
            resize = True
        elif reply.document:
            if "image" in reply.document.mime_type:
                # mime_type: image/webp
                resize = True
            elif "tgsticker" in reply.document.mime_type:
                # mime_type: application/x-tgsticker
                animated = True
        elif reply.sticker:
            if not reply.sticker.file_name:
                return await prog_msg.edit_text(strings("err_sticker_no_file_name"))
            if reply.sticker.emoji:
                sticker_emoji = reply.sticker.emoji
            animated = reply.sticker.is_animated
            if not reply.sticker.file_name.endswith(".tgs"):
                resize = True
        else:
            return await prog_msg.edit_text(strings("invalid_media_string"))
        pack_prefix = "anim" if animated else "a"
        packname = f"{pack_prefix}_{m.from_user.id}_by_{bot_username}"
        if len(m.command) > 1:
            if m.command[1].isdigit() and int(m.command[1]) > 0:
                # provide pack number to kang in desired pack
                # NOTE(review): m.command.pop(1) returns a str, so packnum is
                # a str from here on; the `packnum += 1` below would raise
                # TypeError for these users — confirm and fix upstream.
                packnum = m.command.pop(1)
                packname = f"{pack_prefix}{packnum}_{m.from_user.id}_by_{bot_username}"
            if len(m.command) > 1:
                # matches all valid emojis in input
                sticker_emoji = (
                    "".join(set(EMOJI_PATTERN.findall("".join(m.command[1:]))))
                    or sticker_emoji
                )
        filename = await c.download_media(m.reply_to_message)
        if not filename:
            # Failed to download
            await prog_msg.delete()
            return
    elif m.entities and len(m.entities) > 1:
        # No media reply: the first argument should be an image URL.
        packname = f"a_{m.from_user.id}_by_{bot_username}"
        pack_prefix = "a"
        # searching if image_url is given
        img_url = None
        filename = "sticker.png"
        for y in m.entities:
            if y.type == MessageEntityType.URL:
                img_url = m.text[y.offset : (y.offset + y.length)]
                break
        if not img_url:
            await prog_msg.delete()
            return
        try:
            r = await http.get(img_url)
            if r.status_code == 200:
                with open(filename, mode="wb") as f:
                    f.write(r.read())
        except Exception as r_e:
            return await prog_msg.edit_text(f"{r_e.__class__.__name__} : {r_e}")
        if len(m.command) > 2:
            # m.command[1] is image_url
            if m.command[2].isdigit() and int(m.command[2]) > 0:
                packnum = m.command.pop(2)
                packname = f"a{packnum}_{m.from_user.id}_by_{bot_username}"
            if len(m.command) > 2:
                sticker_emoji = (
                    "".join(set(EMOJI_PATTERN.findall("".join(m.command[2:]))))
                    or sticker_emoji
                )
        resize = True
    else:
        return await prog_msg.delete()
    try:
        if resize:
            filename = resize_image(filename)
        # Telegram caps packs at 50 animated / 120 static stickers.
        max_stickers = 50 if animated else 120
        # Walk pack numbers until one with free space is found; a missing
        # pack (StickersetInvalid) means we must create a new one.
        while not packname_found:
            try:
                stickerset = await c.invoke(
                    GetStickerSet(
                        stickerset=InputStickerSetShortName(short_name=packname),
                        hash=0,
                    )
                )
                if stickerset.set.count >= max_stickers:
                    packnum += 1
                    # NOTE(review): this retry name uses `{pack_prefix}_{packnum}_…`
                    # while the initial names above use `{pack_prefix}{packnum}_…`
                    # (no underscore) — the two schemes never match; confirm.
                    packname = (
                        f"{pack_prefix}_{packnum}_{m.from_user.id}_by_{bot_username}"
                    )
                else:
                    packname_found = True
            except StickersetInvalid:
                break
        # Upload through LOG_CHAT to obtain a document with a file_reference
        # that can be attached to a sticker set.
        file = await c.save_file(filename)
        media = await c.invoke(
            SendMedia(
                peer=(await c.resolve_peer(LOG_CHAT)),
                media=InputMediaUploadedDocument(
                    file=file,
                    mime_type=c.guess_mime_type(filename),
                    attributes=[DocumentAttributeFilename(file_name=filename)],
                ),
                message=f"#Sticker kang by UserID -> {m.from_user.id}",
                random_id=c.rnd_id(),
            )
        )
        stkr_file = media.updates[-1].message.media.document
        if packname_found:
            # Existing pack with free space: just append the sticker.
            await prog_msg.edit_text(strings("use_existing_pack"))
            await c.invoke(
                AddStickerToSet(
                    stickerset=InputStickerSetShortName(short_name=packname),
                    sticker=InputStickerSetItem(
                        document=InputDocument(
                            id=stkr_file.id,
                            access_hash=stkr_file.access_hash,
                            file_reference=stkr_file.file_reference,
                        ),
                        emoji=sticker_emoji,
                    ),
                )
            )
        else:
            # No usable pack found: create a fresh one owned by the user.
            await prog_msg.edit_text(strings("create_new_pack_string"))
            u_name = m.from_user.username
            if u_name:
                u_name = f"@{u_name}"
            else:
                u_name = str(m.from_user.id)
            stkr_title = f"{u_name}'s "
            if animated:
                stkr_title += "Anim. "
            stkr_title += "EduuPack"
            if packnum != 0:
                stkr_title += f" v{packnum}"
            try:
                await c.invoke(
                    CreateStickerSet(
                        user_id=user,
                        title=stkr_title,
                        short_name=packname,
                        stickers=[
                            InputStickerSetItem(
                                document=InputDocument(
                                    id=stkr_file.id,
                                    access_hash=stkr_file.access_hash,
                                    file_reference=stkr_file.file_reference,
                                ),
                                emoji=sticker_emoji,
                            )
                        ],
                        animated=animated,
                    )
                )
            except PeerIdInvalid:
                # The bot cannot message the user yet; ask them to /start it.
                return await prog_msg.edit_text(
                    strings("cant_create_sticker_pack_string"),
                    reply_markup=InlineKeyboardMarkup(
                        [
                            [
                                InlineKeyboardButton(
                                    "/start", url=f"https://t.me/{bot_username}?start"
                                )
                            ]
                        ]
                    ),
                )
    except Exception as all_e:
        await prog_msg.edit_text(f"{all_e.__class__.__name__} : {all_e}")
    else:
        markup = InlineKeyboardMarkup(
            [
                [
                    InlineKeyboardButton(
                        strings("view_sticker_pack_btn"),
                        url=f"t.me/addstickers/{packname}",
                    )
                ]
            ]
        )
        kanged_success_msg = strings("sticker_kanged_string")
        await prog_msg.edit_text(
            kanged_success_msg.format(sticker_emoji=sticker_emoji), reply_markup=markup
        )
    # Cleanup
    try:
        os.remove(filename)
    except OSError:
        pass
def resize_image(filename: str) -> str:
    """Scale an image so its longest side is 512 px and save it as PNG.

    Telegram requires static stickers to have one side of exactly 512 px.
    Returns the path of the PNG file; the source file is deleted when the
    conversion produced a different path.
    """
    image = Image.open(filename)
    factor = 512 / max(image.width, image.height)
    target_size = (int(image.width * factor), int(image.height * factor))
    image = image.resize(target_size, Image.NEAREST)
    directory, basename = os.path.split(filename)
    # not hardcoding png_image as "sticker.png"
    png_path = os.path.join(directory, f"{basename.split('.', 1)[0]}.png")
    image.save(png_path, "PNG")
    if png_path != filename:
        os.remove(filename)
    return png_path
@Client.on_message(filters.command("stickerid", PREFIXES) & filters.reply)
@use_chat_lang()
async def getstickerid(c: Client, m: Message, strings):
    """Reply with the file_id of the replied-to sticker, if any."""
    sticker = m.reply_to_message.sticker
    if not sticker:
        return
    reply_text = strings("get_sticker_id_string").format(stickerid=sticker.file_id)
    await m.reply_text(reply_text)
@Client.on_message(filters.command("getsticker", PREFIXES) & filters.reply)
@use_chat_lang()
async def getstickeraspng(c: Client, m: Message, strings):
    """Send the replied-to (static) sticker back to the user as a document."""
    sticker = m.reply_to_message.sticker
    if sticker:
        if sticker.is_animated:
            # Animated stickers (.tgs) cannot be converted here.
            await m.reply_text(strings("animated_not_supported"))
        elif not sticker.is_animated:
            # NOTE(review): this elif is always True when reached; a plain
            # `else` would be equivalent.
            with tempfile.TemporaryDirectory() as tempdir:
                path = os.path.join(tempdir, "getsticker")
                sticker_file = await c.download_media(
                    message=m.reply_to_message,
                    file_name=f"{path}/{sticker.set_name}.png",
                )
                await m.reply_to_message.reply_document(
                    document=sticker_file,
                    caption=strings("sticker_info").format(
                        emoji=sticker.emoji, id=sticker.file_id
                    ),
                )
            # NOTE(review): redundant — TemporaryDirectory already removed
            # the directory when the `with` block exited.
            shutil.rmtree(tempdir, ignore_errors=True)
    else:
        await m.reply_text(strings("not_sticker"))
| eduu/plugins/stickers.py | 10,593 | SPDX-License-Identifier: MIT Copyright (c) 2018-2022 Amano Team mime_type: image/webp mime_type: application/x-tgsticker provide pack number to kang in desired pack matches all valid emojis in input Failed to download searching if image_url is given m.command[1] is image_url Cleanup not hardcoding png_image as "sticker.png" | 325 | en | 0.555265 |
# coding: utf-8
import secrets

import arrow
from flask import current_app, request, g
from itsdangerous import TimedJSONWebSignatureSerializer as JWT

from actor_libs.errors import AuthFailed
from app.models import Application, User
__all__ = ['basic_auth', 'token_auth']
def basic_auth(username, password) -> bool:
    """ HTTP basic authorization.

    Looks up an enabled application/user pair by ``username`` (the appID),
    verifies expiry and the application token, then stores the request
    identity on ``flask.g``.

    :param username: appID from the Authorization header (untrusted input)
    :param password: appToken from the Authorization header (untrusted input)
    :raises AuthFailed: unknown appID, expired application, or bad token
    :return: True when authentication succeeds
    """
    query_result = Application.query \
        .join(User, User.id == Application.userIntID) \
        .with_entities(Application, User) \
        .filter(Application.appStatus == 1, User.enable == 1,
                Application.appID == username).first()
    if not query_result:
        raise AuthFailed(field='appID')
    application, user = query_result
    # Verify that app is available
    date_now = arrow.now().naive
    if application.expiredAt and date_now > application.expiredAt:
        raise AuthFailed(field='expiredAt')
    # Constant-time comparison so token prefixes cannot be probed via timing.
    stored_token = (application.appToken or '').encode('utf-8')
    supplied_token = (password or '').encode('utf-8')
    if not secrets.compare_digest(stored_token, supplied_token):
        raise AuthFailed(field='appToken')
    g.user_id: int = user.id
    g.tenant_uid: str = user.tenantID
    g.role_id: int = application.roleIntID
    g.app_uid: str = application.appID
    user.lastRequestTime = date_now  # Update user active time
    user.update()
    return True
def token_auth(token) -> bool:
    """ HTTP bearer token authorization.

    Decodes the signed JWT, validates the claims it must carry, loads the
    matching user and stores the request identity on ``flask.g``.

    :param token: bearer token from the Authorization header (untrusted)
    :raises AuthFailed: malformed/expired token or no matching user
    :return: True when authentication succeeds
    """
    jwt = JWT(current_app.config['SECRET_KEY'])
    try:
        data = jwt.loads(token)
    except Exception:
        raise AuthFailed(field='token')
    if data.get('consumer_id'):
        # todo consumer user auth ?
        ...
    else:
        # Normal user
        # BUG FIX: the original check `('user_id' or 'role_id') not in data`
        # evaluated to `'user_id' not in data`, so a token missing role_id
        # was accepted. Both claims are required.
        if 'user_id' not in data or 'role_id' not in data:
            raise AuthFailed(field='token')
        if data['role_id'] != 1 and not data.get('tenant_uid'):
            raise AuthFailed(field='token')
        # Use .get() so an admin token (role 1) without tenant_uid raises
        # AuthFailed via the "no user" path instead of an unhandled KeyError.
        user = User.query \
            .filter(User.roleIntID == data['role_id'], User.id == data['user_id'],
                    User.tenantID == data.get('tenant_uid')).first()
        if not user:
            raise AuthFailed(field='token')
        g.user_id: int = user.id
        g.tenant_uid: str = user.tenantID
        g.role_id: int = user.roleIntID
        g.app_uid: str = None
        g.user_auth_type: int = user.userAuthType
        user.lastRequestTime = arrow.now().naive
        user.update()
    return True
| server/actor_libs/auth/base.py | 2,277 | HTTP basic authorization
HTTP bearer token authorization
coding: utf-8 Verify that app is available Update user active time todo consumer user auth ? Normal user | 165 | en | 0.764575 |
# *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import testUtils
class TestRegistryStatus(testUtils.AbstractTest):
    # =========================================================================
    # Service Status
    # =========================================================================
    def testStatus(self):
        """The us-region status report carries valid traffic-light values."""
        status = self.appClient.status.serviceStatus()
        traffic_light = ["green", "orange", "red"]
        assert status.region == "us"
        assert status.dashboard in traffic_light
        assert status.messaging in traffic_light
        assert status.thirdParty in traffic_light
| test/test_api_status.py | 1,039 | ***************************************************************************** Copyright (c) 2019 IBM Corporation and other Contributors. All rights reserved. This program and the accompanying materials are made available under the terms of the Eclipse Public License v1.0 which accompanies this distribution, and is available at http://www.eclipse.org/legal/epl-v10.html ***************************************************************************** ========================================================================= Service Status ========================================================================= | 611 | en | 0.64008 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../useis'))
# -- Project information -----------------------------------------------------
project = 'useis'
copyright = '2021, Jean-Philippe Mercier'
author = 'Jean-Philippe Mercier'
# The full version, including alpha/beta/rc tags
# BUG FIX: the release string previously contained literal double quotes
# ('"0.5.0"', likely a templating artifact) which would render as "0.5.0"
# in the built documentation.
release = '0.5.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'rinoh.frontend.sphinx',
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
    'sphinx.ext.coverage'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
"""Amazon Neptune Module."""
import logging
import re
from typing import Any
import pandas as pd
from gremlin_python.process.graph_traversal import GraphTraversalSource, __
from gremlin_python.process.translator import Translator
from gremlin_python.process.traversal import Cardinality, T
from gremlin_python.structure.graph import Graph
from awswrangler import exceptions
from awswrangler.neptune.client import NeptuneClient
_logger: logging.Logger = logging.getLogger(__name__)
def execute_gremlin(client: NeptuneClient, query: str) -> pd.DataFrame:
    """Run a Gremlin traversal and return its results as a pandas DataFrame.

    Parameters
    ----------
    client : NeptuneClient
        Neptune client used to submit the traversal.
    query : str
        Gremlin traversal string to execute.

    Returns
    -------
    pandas.DataFrame
        One row per traversal result.

    Examples
    --------
    Run a Gremlin Query

    >>> import awswrangler as wr
    >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
    >>> df = wr.neptune.execute_gremlin(client, "g.V().limit(1)")
    """
    records = client.read_gremlin(query)
    return pd.DataFrame.from_records(records)
def execute_opencypher(client: NeptuneClient, query: str) -> pd.DataFrame:
    """Run an openCypher query and return its results as a pandas DataFrame.

    Parameters
    ----------
    client : NeptuneClient
        Neptune client used to submit the query.
    query : str
        openCypher query string to execute.

    Returns
    -------
    pandas.DataFrame
        Query results, one column per returned binding.

    Examples
    --------
    Run an openCypher query

    >>> import awswrangler as wr
    >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
    >>> resp = wr.neptune.execute_opencypher(client, "MATCH (n) RETURN n LIMIT 1")
    """
    response = client.read_opencypher(query)
    return pd.DataFrame.from_dict(response)
def execute_sparql(client: NeptuneClient, query: str) -> pd.DataFrame:
    """Return results of a SPARQL query as pandas dataframe.

    Parameters
    ----------
    client : NeptuneClient
        instance of the neptune client to use
    query : str
        The SPARQL traversal to execute

    Returns
    -------
    Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
        Results as Pandas DataFrame

    Examples
    --------
    Run a SPARQL query

    >>> import awswrangler as wr
    >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
    >>> df = wr.neptune.execute_sparql(client, "PREFIX foaf: <http://xmlns.com/foaf/0.1/>
    SELECT ?name
    WHERE {
            ?person foaf:name ?name .
    """
    data = client.read_sparql(query)
    if "results" in data and "bindings" in data["results"]:
        df = pd.DataFrame(data["results"]["bindings"])
        # BUG FIX: DataFrame.applymap returns a *new* frame; the original code
        # discarded its result, leaving raw binding dicts in every cell.
        # Unwrap each SPARQL binding ({'type': ..., 'value': ...}) to its value,
        # leaving non-dict cells (e.g. NaN for unbound variables) untouched.
        df = df.applymap(lambda x: x["value"] if isinstance(x, dict) and "value" in x else x)
    else:
        df = pd.DataFrame(data)
    return df
def to_property_graph(
    client: NeptuneClient, df: pd.DataFrame, batch_size: int = 50, use_header_cardinality: bool = True
) -> bool:
    """Write records stored in a DataFrame into Amazon Neptune.
    If writing to a property graph then DataFrames for vertices and edges must be written separately.
    DataFrames for vertices must have a ~label column with the label and a ~id column for the vertex id.
    If the ~id column does not exist, the specified id does not exists, or is empty then a new vertex will be added.
    If no ~label column exists an exception will be thrown.
    DataFrames for edges must have a ~id, ~label, ~to, and ~from column. If the ~id column does not exist
    the specified id does not exists, or is empty then a new edge will be added. If no ~label, ~to, or ~from column
    exists an exception will be thrown.
    If you would like to save data using `single` cardinality then you can postfix (single) to the column header and
    set use_header_cardinality=True (default). e.g. A column named `name(single)` will save the `name` property
    as single
    cardinality. You can disable this by setting by setting `use_header_cardinality=False`.
    Parameters
    ----------
    client : NeptuneClient
        instance of the neptune client to use
    df : pandas.DataFrame
        Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
    batch_size: int
        The number of rows to save at a time. Default 50
    use_header_cardinality: bool
        If True, then the header cardinality will be used to save the data. Default True
    Returns
    -------
    bool
        True if records were written
    Examples
    --------
    Writing to Amazon Neptune
    >>> import awswrangler as wr
    >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
    >>> wr.neptune.gremlin.to_property_graph(
    ...     df=df
    ... )
    """
    # check if ~id and ~label column exist and if not throw error
    g = Graph().traversal()
    is_edge_df = False
    is_update_df = True
    # A frame with ~id but no ~label is treated as a property *update*;
    # ~label plus ~to/~from marks an edge frame, ~label alone a vertex frame.
    if "~id" in df.columns:
        if "~label" in df.columns:
            is_update_df = False
            if "~to" in df.columns and "~from" in df.columns:
                is_edge_df = True
    else:
        raise exceptions.InvalidArgumentValue(
            "Dataframe must contain at least a ~id and a ~label column to be saved to Amazon Neptune"
        )
    # Loop through items in the DF
    for (index, row) in df.iterrows():
        # build up a query
        # NOTE(review): the update path passes the pandas Series while the
        # insert paths pass dicts; _set_properties iterates .items() so both
        # shapes work — confirm this asymmetry is intentional.
        if is_update_df:
            g = _build_gremlin_update(g, row, use_header_cardinality)
        elif is_edge_df:
            g = _build_gremlin_insert_edges(g, row.to_dict(), use_header_cardinality)
        else:
            g = _build_gremlin_insert_vertices(g, row.to_dict(), use_header_cardinality)
        # run the query
        # Flush the accumulated traversal every batch_size rows, then start a
        # fresh traversal; the final (possibly partial) batch is flushed below.
        if index > 0 and index % batch_size == 0:
            res = _run_gremlin_insert(client, g)
            if res:
                g = Graph().traversal()
    return _run_gremlin_insert(client, g)
def to_rdf_graph(
    client: NeptuneClient,
    df: pd.DataFrame,
    batch_size: int = 50,
    subject_column: str = "s",
    predicate_column: str = "p",
    object_column: str = "o",
    graph_column: str = "g",
) -> bool:
    """Write records stored in a DataFrame into Amazon Neptune.
    The DataFrame must consist of triples with column names for the subject, predicate, and object specified.
    If you want to add data into a named graph then you will also need the graph column.
    Parameters
    ----------
    client (NeptuneClient) :
        instance of the neptune client to use
    df (pandas.DataFrame) :
        Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
    subject_column (str, optional) :
        The column name in the dataframe for the subject. Defaults to 's'
    predicate_column (str, optional) :
        The column name in the dataframe for the predicate. Defaults to 'p'
    object_column (str, optional) :
        The column name in the dataframe for the object. Defaults to 'o'
    graph_column (str, optional) :
        The column name in the dataframe for the graph if sending across quads. Defaults to 'g'
    Returns
    -------
    bool
        True if records were written
    Examples
    --------
    Writing to Amazon Neptune
    >>> import awswrangler as wr
    >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
    >>> wr.neptune.gremlin.to_rdf_graph(
    ...     df=df
    ... )
    """
    is_quads = False
    if pd.Series([subject_column, object_column, predicate_column]).isin(df.columns).all():
        if graph_column in df.columns:
            is_quads = True
    else:
        raise exceptions.InvalidArgumentValue(
            """Dataframe must contain at least the subject, predicate, and object columns defined or the defaults
            (s, p, o) to be saved to Amazon Neptune"""
        )
    query = ""
    # Loop through items in the DF
    # NOTE(review): cell values are interpolated into the SPARQL text without
    # any escaping — a value containing '>' or '}' breaks the statement, and
    # this is injectable when the DataFrame content is untrusted.
    for (index, row) in df.iterrows():
        # build up a query
        if is_quads:
            insert = f"""INSERT DATA {{ GRAPH <{row[graph_column]}> {{<{row[subject_column]}>
            <{str(row[predicate_column])}> <{row[object_column]}> . }} }}; """
            query = query + insert
        else:
            insert = f"""INSERT DATA {{ <{row[subject_column]}> <{str(row[predicate_column])}>
            <{row[object_column]}> . }}; """
            query = query + insert
        # run the query
        # Flush the accumulated INSERT statements every batch_size rows; the
        # final (possibly partial) batch is flushed below.
        if index > 0 and index % batch_size == 0:
            res = client.write_sparql(query)
            if res:
                query = ""
    return client.write_sparql(query)
def connect(host: str, port: int, iam_enabled: bool = False, **kwargs: Any) -> NeptuneClient:
    """Create a connection to a Neptune cluster.

    Parameters
    ----------
    host : str
        Cluster endpoint to connect to.
    port : int
        Port of the endpoint.
    iam_enabled : bool, optional
        True when IAM authentication is enabled on the cluster. Defaults to False.

    Returns
    -------
    NeptuneClient
        A client bound to the given endpoint; extra keyword arguments are
        forwarded to the NeptuneClient constructor.
    """
    return NeptuneClient(host, port, iam_enabled, **kwargs)
def _get_column_name(column: str) -> str:
if "(single)" in column.lower():
return re.compile(r"\(single\)", re.IGNORECASE).sub("", column)
return column
def _set_properties(g: GraphTraversalSource, use_header_cardinality: bool, row: Any) -> GraphTraversalSource:
    """Append one Gremlin ``property`` step per non-reserved column of ``row``."""
    for (column, value) in row.items():
        # Reserved Neptune columns describe the element itself, not properties.
        if column not in ["~id", "~label", "~to", "~from"]:
            # If the column header is specifying the cardinality then use it
            if use_header_cardinality:
                # NOTE(review): `.find(...) > 0` deliberately(?) skips a header
                # that *starts* with "(single)" (find() == 0) — confirm intent.
                if column.lower().find("(single)") > 0 and pd.notna(value):
                    g = g.property(Cardinality.single, _get_column_name(column), value)
                else:
                    g = _expand_properties(g, _get_column_name(column), value)
            else:
                # If not using header cardinality then use the default of set
                g = _expand_properties(g, column, value)
    return g
def _expand_properties(g: GraphTraversalSource, column: str, value: Any) -> GraphTraversalSource:
    """Append ``set``-cardinality property steps for ``value`` (lists fan out)."""
    # If this is a list then expand it out into multiple property calls
    if isinstance(value, list) and len(value) > 0:
        for item in value:
            g = g.property(Cardinality.set_, column, item)
    # NOTE(review): an empty list falls through to pd.notna([]), whose
    # truthiness depends on the installed numpy version — confirm that empty
    # lists are expected to be skipped here.
    elif pd.notna(value):
        g = g.property(Cardinality.set_, column, value)
    return g
def _build_gremlin_update(g: GraphTraversalSource, row: Any, use_header_cardinality: bool) -> GraphTraversalSource:
    """Extend the traversal with a property update for the vertex in ``row``."""
    traversal = g.V(str(row["~id"]))
    return _set_properties(traversal, use_header_cardinality, row)
def _build_gremlin_insert_vertices(
    g: GraphTraversalSource, row: Any, use_header_cardinality: bool = False
) -> GraphTraversalSource:
    """Append an upsert ("get or create") step for the vertex in ``row``.
    The fold/coalesce/unfold idiom yields the existing vertex when one with
    the given id exists, otherwise adds a new vertex with that id and label.
    """
    g = g.V(str(row["~id"])).fold().coalesce(__.unfold(), __.addV(row["~label"]).property(T.id, str(row["~id"])))
    g = _set_properties(g, use_header_cardinality, row)
    return g
def _build_gremlin_insert_edges(
    g: GraphTraversalSource, row: pd.Series, use_header_cardinality: bool
) -> GraphTraversalSource:
    """Append an upsert step for the edge described by ``row``.
    Both endpoints are upserted first (created with a generic "Vertex" label
    when missing) so the edge insert can never reference a missing vertex.
    """
    g = (
        g.V(str(row["~from"]))
        .fold()
        .coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {"~id": row["~from"], "~label": "Vertex"}))
        .addE(row["~label"])
        .property(T.id, str(row["~id"]))
        .to(
            __.V(str(row["~to"]))
            .fold()
            .coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {"~id": row["~to"], "~label": "Vertex"}))
        )
    )
    g = _set_properties(g, use_header_cardinality, row)
    return g
def _run_gremlin_insert(client: NeptuneClient, g: GraphTraversalSource) -> bool:
    """Translate the accumulated traversal to Gremlin text and submit it."""
    translator = Translator("g")
    s = translator.translate(g.bytecode)
    s = s.replace("Cardinality.", "")  # hack to fix parser error for set cardinality
    _logger.debug(s)
    res = client.write_gremlin(s)
    return res
def flatten_nested_df(
    df: pd.DataFrame, include_prefix: bool = True, seperator: str = "_", recursive: bool = True
) -> pd.DataFrame:
    """Flatten dict and list columns of the input data frame.

    Dict columns are expanded horizontally into one column per key; list
    columns are exploded vertically into one row per element.

    Parameters
    ----------
    df : pd.DataFrame
        The input data frame.
    include_prefix : bool, optional
        When True, prefix expanded column names with the original column
        name. Defaults to True.
    seperator : str, optional
        Separator used between field names when a dictionary is exploded.
        Defaults to "_".
    recursive : bool, optional
        When True, keep flattening until no dict or list columns remain.
        Defaults to True.

    Returns
    -------
    pd.DataFrame: The flattened data frame
    """
    if seperator is None:
        seperator = "_"
    df = df.reset_index()

    def _columns_of(frame: pd.DataFrame, kind: type) -> list:
        # Columns whose every cell is an instance of `kind`.
        mask = (frame.applymap(type) == kind).all()
        return mask[mask].index.tolist()

    list_columns = _columns_of(df, list)
    dict_columns = _columns_of(df, dict)
    if list_columns or dict_columns:
        new_columns = []
        for col in dict_columns:
            # expand dictionaries horizontally
            prefix = f"{col}{seperator}" if include_prefix else f"{seperator}"
            expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(prefix)
            expanded.index = df.index
            df = pd.concat([df, expanded], axis=1).drop(columns=[col])
            new_columns.extend(expanded.columns)
        for col in list_columns:
            # explode lists vertically (join realigns on the shared index)
            df = df.drop(columns=[col]).join(df[col].explode().to_frame())
            new_columns.append(col)
        # check whether the freshly created columns still need flattening
        list_columns = _columns_of(df[new_columns], list)
        dict_columns = _columns_of(df[new_columns], dict)
        if recursive and (list_columns or dict_columns):
            df = flatten_nested_df(df, include_prefix=include_prefix, seperator=seperator, recursive=recursive)
    return df
| awswrangler/neptune/neptune.py | 14,445 | Create a connection to a Neptune cluster.
Parameters
----------
host : str
The host endpoint to connect to
port : int
The port endpoint to connect to
iam_enabled : bool, optional
True if IAM is enabled on the cluster. Defaults to False.
Returns
-------
NeptuneClient
[description]
Return results of a Gremlin traversal as pandas dataframe.
Parameters
----------
client : neptune.Client
instance of the neptune client to use
traversal : str
The gremlin traversal to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run a Gremlin Query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> df = wr.neptune.execute_gremlin(client, "g.V().limit(1)")
Return results of a openCypher traversal as pandas dataframe.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
query : str
The openCypher query to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run an openCypher query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> resp = wr.neptune.execute_opencypher(client, "MATCH (n) RETURN n LIMIT 1")
Return results of a SPARQL query as pandas dataframe.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
query : str
The SPARQL traversal to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run a SPARQL query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> df = wr.neptune.execute_sparql(client, "PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE {
?person foaf:name ?name .
Flatten the lists and dictionaries of the input data frame.
Parameters
----------
df : pd.DataFrame
The input data frame
include_prefix : bool, optional
If True, then it will prefix the new column name with the original column name.
Defaults to True.
seperator : str, optional
The seperator to use between field names when a dictionary is exploded.
Defaults to "_".
recursive : bool, optional
If True, then this will recurse the fields in the data frame. Defaults to True.
Returns
-------
pd.DataFrame: The flattened data frame
Write records stored in a DataFrame into Amazon Neptune.
If writing to a property graph then DataFrames for vertices and edges must be written separately.
DataFrames for vertices must have a ~label column with the label and a ~id column for the vertex id.
If the ~id column does not exist, the specified id does not exists, or is empty then a new vertex will be added.
If no ~label column exists an exception will be thrown.
DataFrames for edges must have a ~id, ~label, ~to, and ~from column. If the ~id column does not exist
the specified id does not exists, or is empty then a new edge will be added. If no ~label, ~to, or ~from column
exists an exception will be thrown.
If you would like to save data using `single` cardinality then you can postfix (single) to the column header and
set use_header_cardinality=True (default). e.g. A column named `name(single)` will save the `name` property
as single
cardinality. You can disable this by setting by setting `use_header_cardinality=False`.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
df : pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
batch_size: int
The number of rows to save at a time. Default 50
use_header_cardinality: bool
If True, then the header cardinality will be used to save the data. Default True
Returns
-------
bool
True if records were written
Examples
--------
Writing to Amazon Neptune
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> wr.neptune.gremlin.to_property_graph(
... df=df
... )
Write records stored in a DataFrame into Amazon Neptune.
The DataFrame must consist of triples with column names for the subject, predicate, and object specified.
If you want to add data into a named graph then you will also need the graph column.
Parameters
----------
client (NeptuneClient) :
instance of the neptune client to use
df (pandas.DataFrame) :
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
subject_column (str, optional) :
The column name in the dataframe for the subject. Defaults to 's'
predicate_column (str, optional) :
The column name in the dataframe for the predicate. Defaults to 'p'
object_column (str, optional) :
The column name in the dataframe for the object. Defaults to 'o'
graph_column (str, optional) :
The column name in the dataframe for the graph if sending across quads. Defaults to 'g'
Returns
-------
bool
True if records were written
Examples
--------
Writing to Amazon Neptune
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> wr.neptune.gremlin.to_rdf_graph(
... df=df
... )
Amazon Neptune Module.
check if ~id and ~label column exist and if not throw error Loop through items in the DF build up a query run the query Loop through items in the DF build up a query run the query If the column header is specifying the cardinality then use it If not using header cardinality then use the default of set If this is a list then expand it out into multiple property calls hack to fix parser error for set cardinality search for list and map expand dictionaries horizontally check if there are still dict o list fields to flatten | 5,887 | en | 0.568817 |
# The MIT License (MIT)
#
# Copyright (c) 2019 Paul Sajna for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_imageload.gif`
====================================================
Load pixel values (indices or colors) into one or more bitmaps and colors into a palette from a GIF file.
* Author(s): Paul Sajna
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_ImageLoad.git"
bitmaps = []
def load(f):
    """Parse a GIF image from the open binary stream *f*.

    Walks the file sequentially: signature/version, logical screen
    descriptor, global color table, then the block stream (extensions,
    image descriptors, trailer).  Each image block is LZW-decompressed into
    a flat list of palette indices and appended to the local ``bitmaps``.

    NOTE(review): the local ``bitmaps``/``palette`` shadow module state and
    nothing is returned, so decoded frames are currently inaccessible to
    callers — confirm the intended API.

    :param f: binary file-like object positioned at the start of a GIF file.
    :raises RuntimeError: on an unknown GIF version or unexpected separator.
    :raises NotImplementedError: when no global color table is present or an
        unsupported extension is encountered.
    """
    bitmaps = []
    palette = []
    table = []
    # Skip the 3-byte "GIF" signature; the following 3 bytes are the version.
    f.seek(3)
    version = f.read(3)
    if (version != b'89a') and (version != b'87a'):
        raise RuntimeError("Invalid GIF version")
    # Logical Screen Descriptor: canvas size, then the packed GCT byte.
    width = int.from_bytes(f.read(2), 'little')
    height = int.from_bytes(f.read(2), 'little')
    gct_header = int.from_bytes(f.read(1), 'little')
    # Bit 7 set means a Global Color Table (GCT) follows the descriptor.
    if (gct_header & 0b10000000) != 0b10000000:
        raise NotImplementedError("Only gifs with a global color table are supported")
    #if (gct_header & 0b0111000 >> 3) + 1 != 8:
    #raise NotImplementedError("Only 8-bit color is supported")
    # The low 3 bits encode the GCT size: 2^(N+1) entries.
    gct_size = 2 ** ((gct_header & 0b00000111) + 1)
    bg_color_index = int.from_bytes(f.read(1), 'little')
    f.seek(1, 1) # seek one byte relative to the current position (skip a byte)
    # Read the GCT: one 3-byte RGB triple per palette entry.
    for i in range(gct_size):
        color = f.read(3)
        palette.append(color)
    # Block stream: every block starts with a 1-byte separator.
    while True:
        separator = f.read(1)
        if separator:
            separator = int.from_bytes(separator, 'little')
            if separator == 0x21:
                # Extension
                label = int.from_bytes(f.read(1), 'little')
                if label == 0xf9:
                    # Graphic Control Extension
                    print("Graphic Control Extension")
                    f.seek(1,1)
                    packed = int.from_bytes(f.read(1), 'little')
                    # delay in seconds between frames
                    delay = int.from_bytes(f.read(2), 'little') / 100
                    # We only care about the transparency flag for now
                    if packed & 1 == 1:
                        transparency_index = int.from_bytes(f.read(1), 'little')
                    else:
                        f.seek(1,1)
                    # Skip the block terminator byte.
                    f.seek(1,1)
                elif label == 0xff:
                    # Application Extension
                    print("Application Extension")
                    f.seek(1,1)
                    application = f.read(8)
                    if application == b'NETSCAPE':
                        # Skip ahead to the 2-byte loop count.
                        f.seek(5,1)
                        loop_count = int.from_bytes(f.read(2), 'little')
                        f.seek(1,1)
                    else:
                        raise NotImplementedError("Unimplemented application extension: "
                                                  + ''.join([chr(b) for b in application]))
                elif label == 0xfe:
                    # Comment Extension
                    comment = b''
                    # Comments are read byte-by-byte up to the NUL terminator.
                    while not comment.endswith(b'\0'):
                        byte = f.read(1)
                        comment += byte
                    comment = ''.join([chr(b) for b in comment])
                    print(comment)
                else:
                    raise NotImplementedError("Unimplemented extension: " + hex(label))
            elif separator == 0x2c:
                # Image Descriptor
                print("Image Descriptor")
                image_start_x = int.from_bytes(f.read(2), 'little')
                image_start_y = int.from_bytes(f.read(2), 'little')
                image_width = int.from_bytes(f.read(2), 'little')
                image_height = int.from_bytes(f.read(2), 'little')
                # Ignore the packed fields for now
                f.seek(1,1)
                # Image Data
                print("Image Data")
                lzw_code_size = int.from_bytes(f.read(1), 'little')
                # Concatenate the data sub-blocks: each is prefixed by its
                # size, and a zero-length block terminates the sequence.
                compressed = bytearray()
                while True:
                    block_size = int.from_bytes(f.read(1), 'little')
                    if block_size == 0:
                        break
                    compressed += f.read(block_size)
                bitmap = decompress(compressed, lzw_code_size)
                bitmaps.append(bitmap)
            elif separator == 0x3b:
                # Trailer
                break
            else:
                raise RuntimeError("Got an unexpected separator: " + hex(separator))
        # NOTE(review): an empty read (truncated file without a trailer)
        # falls through and loops forever — consider raising on EOF.
def decompress(block, min_code_size):
    """LZW-decode a GIF image data *block* into a flat list of palette indices.

    Implements the GIF flavour of LZW: codes start at ``min_code_size + 1``
    bits and widen as the string table fills; a clear code rebuilds the root
    table and the end-of-information code stops decoding.

    :param block: concatenated image data sub-blocks (bytes/bytearray).
    :param min_code_size: LZW minimum code size byte from the stream.
    :return: flat list of palette indices, one per pixel.
    """
    # The two codes following the root entries are reserved.
    clear_code = 1 << min_code_size
    eoi_code = clear_code + 1
    cur_code_size = min_code_size + 1
    bit_offset = 0
    code_stream = []  # NOTE(review): never used — candidate for removal.
    index_stream = []
    table = []  # Populated on the first (mandatory) clear code.
    prev_code = None
    nextcode = clear_code + 2
    # Stops before the final byte so multi-byte code reads in fetch_bits
    # cannot run past the end of the buffer.
    while bit_offset < 8*(len(block)-1):
        # Widen the code once the next table index would need an extra bit.
        if nextcode == (1 << cur_code_size):
            cur_code_size += 1
        code = fetch_bits(block, cur_code_size, bit_offset)
        #print(code, prev_code)
        bit_offset += cur_code_size
        if code == clear_code:
            # print(table)
            # print(len(table))
            # Rebuild the root table: one entry per palette index, plus the
            # two reserved codes.
            table = [[i] for i in range(1 << min_code_size)]
            table.append([clear_code])
            table.append([eoi_code])
            # print(table)
            nextcode = clear_code + 2
            prev_code = None
            print("table reset")
            continue
        elif code == eoi_code:
            print("stop")
            break
        elif code < len(table):
            # Known code: emit its string; extend the table with the previous
            # string plus the first index of this one.
            index_stream.append(table[code])
            k = [table[code][0]]
            if prev_code is not None:
                table.append(table[prev_code] + k)
                nextcode +=1
        elif prev_code is None:
            raise ValueError("First code after a reset must be in the table")
        else:
            # Code not yet in the table: by LZW construction it must be the
            # previous string plus its own first index.
            k = [table[prev_code][0]]
            index_stream.append(table[prev_code] + k)
            table.append(table[prev_code] + k)
            nextcode +=1
        prev_code = code
    #nextcode = len(table)
    # Table entries are short lists; flatten to one index per pixel.
    index_stream = flatten(index_stream)
    #print(index_stream)
    return index_stream
def fetch_bits(bytearr, nbits, bit_offset):
    """Return *nbits* bits from *bytearr* starting at *bit_offset*, LSB first.

    GIF packs LZW codes least-significant-bit first, so a code may straddle
    byte boundaries.  This reads exactly the bytes the requested span covers,
    which fixes two defects of the previous fixed two-byte read:

    * codes wider than ``16 - (bit_offset % 8)`` bits lost their high bits
      (GIF codes grow up to 12 bits and can start at any bit phase, i.e.
      spans of up to 19 bits);
    * fetching from the last byte of the buffer indexed one byte past the end.

    :param bytearr: bytes-like buffer of packed codes.
    :param nbits: number of bits to extract.
    :param bit_offset: absolute bit position of the code's first (lowest) bit.
    :return: the extracted code as an int.
    """
    byte_offset = bit_offset // 8
    rem = bit_offset % 8
    # Number of whole bytes covered by bits [rem, rem + nbits).
    nbytes = (rem + nbits + 7) // 8
    window = int.from_bytes(bytearr[byte_offset:byte_offset + nbytes], 'little')
    return (window >> rem) & ((1 << nbits) - 1)
def flatten(items, seqtypes=(list, tuple)):
    """Flatten nested lists/tuples inside *items* in place and return it.

    Walks the list with a cursor; whenever the current slot holds a nested
    sequence it is spliced into place and the same slot is examined again,
    so arbitrarily deep nesting collapses to a single flat list.
    """
    pos = 0
    while pos < len(items):
        if isinstance(items[pos], seqtypes):
            # Splice the nested sequence in and re-check the same position.
            items[pos:pos + 1] = items[pos]
        else:
            pos += 1
    return items
| adafruit_imageload/gif/__init__.py | 7,552 | `adafruit_imageload.gif`
====================================================
Load pixel values (indices or colors) into one or more bitmaps and colors into a palette from a GIF file.
* Author(s): Paul Sajna
The MIT License (MIT) Copyright (c) 2019 Paul Sajna for Adafruit Industries LLC Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.if (gct_header & 0b0111000 >> 3) + 1 != 8:raise NotImplementedError("Only 8-bit color is supported") seek one byte relative to the current position (skip a byte) Extension Graphic Control Extension delay in seconds between frames We only care about the transparency flag for now Application Extension Comment Extension Image Descriptor Ignore the packed fields for now Image Data Trailerprint(code, prev_code) print(table) print(len(table)) print(table)nextcode = len(table)print(index_stream) | 1,815 | en | 0.78473 |
import math
import ctypes
import pyglet
pyglet.options["shadow_window"] = False
pyglet.options["debug_gl"] = False
import pyglet.gl as gl
import matrix
import shader
import camera
import block_type
import texture_manager
class Window(pyglet.window.Window):
    """Main application window: owns the block types, the GL buffers for a
    single block mesh, the shader, and a free-flying camera."""

    def __init__(self, **args):
        super().__init__(**args)

        # create blocks
        # 16x16-pixel tiles stored in a 256-layer array texture.
        self.texture_manager = texture_manager.Texture_manager(16, 16, 256)

        self.cobblestone = block_type.Block_type(self.texture_manager, "cobblestone", {"all": "cobblestone"})
        self.grass = block_type.Block_type(self.texture_manager, "grass", {"top": "grass", "bottom": "dirt", "sides": "grass_side"})
        self.dirt = block_type.Block_type(self.texture_manager, "dirt", {"all": "dirt"})
        self.stone = block_type.Block_type(self.texture_manager, "stone", {"all": "stone"})
        self.sand = block_type.Block_type(self.texture_manager, "sand", {"all": "sand"})
        self.planks = block_type.Block_type(self.texture_manager, "planks", {"all": "planks"})
        self.log = block_type.Block_type(self.texture_manager, "log", {"top": "log_top", "bottom": "log_top", "sides": "log_side"})

        self.texture_manager.generate_mipmaps()

        # create vertex array object
        self.vao = gl.GLuint(0)
        gl.glGenVertexArrays(1, ctypes.byref(self.vao))
        gl.glBindVertexArray(self.vao)

        # create vertex position vbo (attribute 0: 3 floats per vertex)
        # Only the grass block's mesh is uploaded and drawn here.
        self.vertex_position_vbo = gl.GLuint(0)
        gl.glGenBuffers(1, ctypes.byref(self.vertex_position_vbo))
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertex_position_vbo)

        gl.glBufferData(
            gl.GL_ARRAY_BUFFER,
            ctypes.sizeof(gl.GLfloat * len(self.grass.vertex_positions)),
            (gl.GLfloat * len(self.grass.vertex_positions)) (*self.grass.vertex_positions),
            gl.GL_STATIC_DRAW)

        gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, gl.GL_FALSE, 0, 0)
        gl.glEnableVertexAttribArray(0)

        # create tex coord vbo (attribute 1: 3 floats per vertex)
        # NOTE(review): 3 components per tex coord — presumably (u, v, array
        # layer); confirm against block_type's tex_coords layout.
        self.tex_coord_vbo = gl.GLuint(0)
        gl.glGenBuffers(1, ctypes.byref(self.tex_coord_vbo))
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.tex_coord_vbo)

        gl.glBufferData(
            gl.GL_ARRAY_BUFFER,
            ctypes.sizeof(gl.GLfloat * len(self.grass.tex_coords)),
            (gl.GLfloat * len(self.grass.tex_coords)) (*self.grass.tex_coords),
            gl.GL_STATIC_DRAW)

        gl.glVertexAttribPointer(1, 3, gl.GL_FLOAT, gl.GL_FALSE, 0, 0)
        gl.glEnableVertexAttribArray(1)

        # create shading value vbo (attribute 2: one float per vertex)
        self.shading_value_vbo = gl.GLuint(0)
        gl.glGenBuffers(1, ctypes.byref(self.shading_value_vbo))
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.shading_value_vbo)

        gl.glBufferData(
            gl.GL_ARRAY_BUFFER,
            ctypes.sizeof(gl.GLfloat * len(self.grass.shading_values)),
            (gl.GLfloat * len(self.grass.shading_values)) (*self.grass.shading_values),
            gl.GL_STATIC_DRAW)

        gl.glVertexAttribPointer(2, 1, gl.GL_FLOAT, gl.GL_FALSE, 0, 0)
        gl.glEnableVertexAttribArray(2)

        # create index buffer object
        # NOTE(review): unlike the buffers above this passes self.ibo without
        # ctypes.byref; ctypes converts instances to pointers implicitly, but
        # the style is inconsistent — confirm and unify.
        self.ibo = gl.GLuint(0)
        gl.glGenBuffers(1, self.ibo)
        gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ibo)

        gl.glBufferData(
            gl.GL_ELEMENT_ARRAY_BUFFER,
            ctypes.sizeof(gl.GLuint * len(self.grass.indices)),
            (gl.GLuint * len(self.grass.indices)) (*self.grass.indices),
            gl.GL_STATIC_DRAW)

        # create shader
        self.shader = shader.Shader("vert.glsl", "frag.glsl")
        self.shader_sampler_location = self.shader.find_uniform(b"texture_array_sampler")
        self.shader.use()

        # pyglet stuff
        # Fixed 60 Hz update tick driving camera movement.
        pyglet.clock.schedule_interval(self.update, 1.0 / 60)
        self.mouse_captured = False

        # camera stuff
        self.camera = camera.Camera(self.shader, self.width, self.height)

    def update(self, delta_time):
        """Per-tick update: zero the movement input while the mouse is free,
        then advance the camera by *delta_time* seconds."""
        if not self.mouse_captured:
            self.camera.input = [0, 0, 0]

        self.camera.update_camera(delta_time)

    def on_draw(self):
        """Render one frame of the single (grass) block mesh."""
        self.camera.update_matrices()

        # bind textures
        gl.glActiveTexture(gl.GL_TEXTURE0)
        gl.glBindTexture(gl.GL_TEXTURE_2D_ARRAY, self.texture_manager.texture_array)
        gl.glUniform1i(self.shader_sampler_location, 0)

        # draw stuff
        gl.glEnable(gl.GL_DEPTH_TEST)
        gl.glClearColor(0.0, 0.0, 0.0, 1.0)
        self.clear()

        gl.glDrawElements(
            gl.GL_TRIANGLES,
            len(self.grass.indices),
            gl.GL_UNSIGNED_INT,
            None)

    # input functions
    def on_resize(self, width, height):
        """Keep the GL viewport and the camera's aspect data in sync with the
        window size."""
        print(f"Resize {width} * {height}")
        gl.glViewport(0, 0, width, height)

        self.camera.width = width
        self.camera.height = height

    def on_mouse_press(self, x, y, button, modifiers):
        """Any click toggles exclusive (captured) mouse mode."""
        self.mouse_captured = not self.mouse_captured
        self.set_exclusive_mouse(self.mouse_captured)

    def on_mouse_motion(self, x, y, delta_x, delta_y):
        """Rotate the camera while the mouse is captured."""
        if self.mouse_captured:
            sensitivity = 0.004

            self.camera.rotation[0] -= delta_x * sensitivity
            self.camera.rotation[1] += delta_y * sensitivity

            # Clamp pitch to straight up/down (+-tau/4 radians).
            self.camera.rotation[1] = max(-math.tau / 4, min(math.tau / 4, self.camera.rotation[1]))

    def on_key_press(self, key, modifiers):
        """Accumulate movement input on x/y/z while the mouse is captured."""
        if not self.mouse_captured:
            return

        if key == pyglet.window.key.D: self.camera.input[0] += 1
        elif key == pyglet.window.key.A: self.camera.input[0] -= 1
        elif key == pyglet.window.key.W: self.camera.input[2] += 1
        elif key == pyglet.window.key.S: self.camera.input[2] -= 1
        elif key == pyglet.window.key.SPACE : self.camera.input[1] += 1
        elif key == pyglet.window.key.LSHIFT: self.camera.input[1] -= 1

    def on_key_release(self, key, modifiers):
        """Undo the input contribution added by the matching key press."""
        if not self.mouse_captured:
            return

        if key == pyglet.window.key.D: self.camera.input[0] -= 1
        elif key == pyglet.window.key.A: self.camera.input[0] += 1
        elif key == pyglet.window.key.W: self.camera.input[2] -= 1
        elif key == pyglet.window.key.S: self.camera.input[2] += 1
        elif key == pyglet.window.key.SPACE : self.camera.input[1] -= 1
        elif key == pyglet.window.key.LSHIFT: self.camera.input[1] += 1
class Game:
    """Application bootstrap: builds the GL config, opens the main window and
    runs the pyglet event loop."""

    def __init__(self):
        window_settings = dict(
            width = 800,
            height = 600,
            caption = "Minecraft clone",
            resizable = True,
            vsync = False,
        )
        self.config = gl.Config(major_version = 3, depth_size = 16)
        self.window = Window(config = self.config, **window_settings)

    def run(self):
        """Enter the pyglet event loop (blocks until the window closes)."""
        pyglet.app.run()
if __name__ == "__main__":
    # Start the game only when executed as a script, not when imported.
    game = Game()
    game.run()
| episode-7/main.py | 5,965 | create blocks create vertex array object create vertex position vbo create tex coord vbo create shading value vbo create index buffer object create shader pyglet stuff camera stuff bind textures draw stuff input functions | 221 | en | 0.15747 |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
import sys
import tempfile
import unittest
from glob import glob
import nibabel as nib
import numpy as np
import torch
import monai
from monai.data import create_test_image_2d
from monai.engines import GanTrainer
from monai.engines.utils import GanKeys as Keys
from monai.handlers import CheckpointSaver, StatsHandler, TensorBoardStatsHandler
from monai.networks import normal_init
from monai.networks.nets import Discriminator, Generator
from monai.transforms import AsChannelFirstd, Compose, LoadImaged, RandFlipd, ScaleIntensityd, ToTensord
from monai.utils import set_determinism
from tests.utils import DistTestCase, TimedCall, skip_if_quick
def run_training_test(root_dir, device="cuda:0"):
    """Run a short GAN training over the synthetic Nifti images in *root_dir*.

    Args:
        root_dir: directory containing ``img*.nii.gz`` inputs; also used as
            the output directory for TensorBoard logs and checkpoints.
        device: device string both networks are moved to.

    Returns:
        The engine state of the completed ``GanTrainer`` run.
    """
    real_images = sorted(glob(os.path.join(root_dir, "img*.nii.gz")))
    # NOTE(review): zip() wraps each path in a 1-tuple, so every "reals"
    # value is a tuple of one filename — confirm LoadImaged expects this.
    train_files = [{"reals": img} for img in zip(real_images)]

    # prepare real data
    train_transforms = Compose(
        [
            LoadImaged(keys=["reals"]),
            AsChannelFirstd(keys=["reals"]),
            ScaleIntensityd(keys=["reals"]),
            RandFlipd(keys=["reals"], prob=0.5),
            ToTensord(keys=["reals"]),
        ]
    )
    # cache_rate=0.5: half of the transformed samples are kept in memory.
    train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=0.5)
    train_loader = monai.data.DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4)

    learning_rate = 2e-4
    betas = (0.5, 0.999)
    real_label = 1
    fake_label = 0

    # create discriminator
    disc_net = Discriminator(
        in_shape=(1, 64, 64), channels=(8, 16, 32, 64, 1), strides=(2, 2, 2, 2, 1), num_res_units=1, kernel_size=5
    ).to(device)
    disc_net.apply(normal_init)
    disc_opt = torch.optim.Adam(disc_net.parameters(), learning_rate, betas=betas)
    disc_loss_criterion = torch.nn.BCELoss()

    def discriminator_loss(gen_images, real_images):
        """BCE discriminator loss: mean of the real-vs-1 and fake-vs-0 terms."""
        real = real_images.new_full((real_images.shape[0], 1), real_label)
        gen = gen_images.new_full((gen_images.shape[0], 1), fake_label)
        realloss = disc_loss_criterion(disc_net(real_images), real)
        # detach() keeps discriminator updates from backpropagating into the
        # generator.
        genloss = disc_loss_criterion(disc_net(gen_images.detach()), gen)
        return torch.div(torch.add(realloss, genloss), 2)

    # create generator
    latent_size = 64
    gen_net = Generator(
        latent_shape=latent_size, start_shape=(latent_size, 8, 8), channels=[32, 16, 8, 1], strides=[2, 2, 2, 1]
    )
    gen_net.apply(normal_init)
    # Sigmoid constrains outputs to (0, 1) — presumably to match the
    # ScaleIntensityd-normalized real images.
    gen_net.conv.add_module("activation", torch.nn.Sigmoid())
    gen_net = gen_net.to(device)
    gen_opt = torch.optim.Adam(gen_net.parameters(), learning_rate, betas=betas)
    gen_loss_criterion = torch.nn.BCELoss()

    def generator_loss(gen_images):
        """BCE generator loss: reward fooling the discriminator into 'real'."""
        output = disc_net(gen_images)
        cats = output.new_full(output.shape, real_label)
        return gen_loss_criterion(output, cats)

    key_train_metric = None

    train_handlers = [
        StatsHandler(
            name="training_loss", output_transform=lambda x: {Keys.GLOSS: x[Keys.GLOSS], Keys.DLOSS: x[Keys.DLOSS]}
        ),
        TensorBoardStatsHandler(
            log_dir=root_dir,
            tag_name="training_loss",
            output_transform=lambda x: {Keys.GLOSS: x[Keys.GLOSS], Keys.DLOSS: x[Keys.DLOSS]},
        ),
        CheckpointSaver(
            save_dir=root_dir, save_dict={"g_net": gen_net, "d_net": disc_net}, save_interval=2, epoch_level=True
        ),
    ]

    # d_train_steps=2: the discriminator is stepped twice per generator step.
    disc_train_steps = 2
    num_epochs = 5

    trainer = GanTrainer(
        device,
        num_epochs,
        train_loader,
        gen_net,
        gen_opt,
        generator_loss,
        disc_net,
        disc_opt,
        discriminator_loss,
        d_train_steps=disc_train_steps,
        latent_shape=latent_size,
        key_train_metric=key_train_metric,
        train_handlers=train_handlers,
    )
    trainer.run()

    return trainer.state
@skip_if_quick
class IntegrationWorkflowsGAN(DistTestCase):
    """End-to-end test of GanTrainer on generated 2D Nifti images."""

    def setUp(self):
        """Create 40 synthetic 64x64 images in a temp dir and fix all seeds."""
        set_determinism(seed=0)

        self.data_dir = tempfile.mkdtemp()
        for i in range(40):
            im, _ = create_test_image_2d(64, 64, num_objs=3, rad_max=14, num_seg_classes=1, channel_dim=-1)
            n = nib.Nifti1Image(im, np.eye(4))
            nib.save(n, os.path.join(self.data_dir, f"img{i:d}.nii.gz"))

        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0")
        monai.config.print_config()
        logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    def tearDown(self):
        """Undo determinism and remove the generated data directory."""
        set_determinism(seed=None)
        shutil.rmtree(self.data_dir)

    @TimedCall(seconds=200, daemon=False)
    def test_training(self):
        torch.manual_seed(0)
        finish_state = run_training_test(self.data_dir, device=self.device)

        # assert GAN training finished
        # 40 images / batch size 2 = 20 iterations per epoch, x 5 epochs = 100.
        self.assertEqual(finish_state.iteration, 100)
        self.assertEqual(finish_state.epoch, 5)
if __name__ == "__main__":
    # Allow running this integration test directly as a script.
    unittest.main()
| tests/test_integration_workflows_gan.py | 5,498 | Copyright (c) MONAI Consortium Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. prepare real data create discriminator create generator assert GAN training finished | 637 | en | 0.83967 |
import logging
import urllib.request
from datetime import datetime
from multiprocessing import Manager, Value
from multiprocessing.pool import ThreadPool
class EntryPoint:
    """Downloads a fixed set of jQuery builds and compares how long summing
    their sizes takes serially versus on a thread pool."""

    Log = logging.getLogger(__name__)

    def __init__(self):
        # Shared integer retained from an earlier locking demonstration; no
        # current code path mutates it.
        self.__total_size = Value('i', 0)
        # Manager-backed dict so worker threads can publish their results.
        self.__sizes_by_file = Manager().dict()

    def main(self):
        """Fetch every URL twice: serially first, then via a thread pool,
        logging the totals and elapsed times for each run."""
        urls = ['https://code.jquery.com/jquery-git.js',
                'https://code.jquery.com/jquery-3.1.0.js',
                'https://code.jquery.com/jquery-3.0.0.js',
                'https://code.jquery.com/jquery-2.2.0.js',
                'https://code.jquery.com/jquery-2.1.0.js',
                'https://code.jquery.com/jquery-2.0.0.js',
                'https://code.jquery.com/jquery-1.12.0.js',
                'https://code.jquery.com/jquery-1.11.0.js',
                'https://code.jquery.com/jquery-1.10.0.js',
                'https://code.jquery.com/jquery-1.9.0.js',
                'https://code.jquery.com/jquery-1.7.0.js',
                'https://code.jquery.com/jquery-1.6.js',
                'https://code.jquery.com/jquery-1.5.js',
                'https://code.jquery.com/jquery-1.4.js',
                'https://code.jquery.com/jquery-1.3.js',
                'https://code.jquery.com/jquery-1.2.js',
                'https://code.jquery.com/jquery-1.1.js',
                'https://code.jquery.com/jquery-1.0.js']
        self.__compute_serially(urls)
        self.__compute_with_threadpool(urls)

    def __compute_serially(self, urls):
        """Fetch the URLs one at a time; log total size and elapsed time."""
        start_time = datetime.utcnow()
        sizes_by_file = {}
        for url in urls:
            sizes_by_file[url] = self.__get_size_of_file(url)
        self.Log.info('Total size of all {0} URLs: {1}'.format(len(urls), sum(sizes_by_file.values())))
        time_diff = datetime.utcnow() - start_time
        # total_seconds() covers the whole span; timedelta.seconds alone
        # silently discards the days component.
        self.Log.info("Serial version took: {0}".format(self.get_timespan(int(time_diff.total_seconds()))))

    def __compute_with_threadpool(self, urls):
        """Fetch the URLs on 8 worker threads; log total size and elapsed time."""
        start_time = datetime.utcnow()
        # Context manager terminates the pool's worker threads when the work
        # is done (previously the pool was leaked).
        with ThreadPool(processes=8) as pool:
            pool.map(self.__get_size_of_file_in_parallel, urls)
        self.Log.info('Total size of all {0} URLs: {1}'.format(len(urls), sum(self.__sizes_by_file.values())))
        time_diff = datetime.utcnow() - start_time
        self.Log.info("Threadpool version took: {0}".format(self.get_timespan(int(time_diff.total_seconds()))))

    def __get_size_of_file_in_parallel(self, url):
        """Worker callback: record the size of *url* in the shared dict."""
        self.__sizes_by_file[url] = self.__get_size_of_file(url)

    @staticmethod
    def __get_size_of_file(url):
        """Return the size in bytes of the document at *url*."""
        with urllib.request.urlopen(url) as f:
            contents = f.read()
        return len(contents)

    @staticmethod
    def get_timespan(seconds):
        """Format a duration given in whole *seconds* as ``H:MM:SS``."""
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        return "%d:%02d:%02d" % (h, m, s)
def setup_logging():
    """Attach a stream handler with a thread-aware format to the root logger
    and set it to INFO level."""
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s - [%(thread)d] %(name)s - %(message)s')
    )
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(handler)
def main():
    """Configure logging, then run the download comparison, logging any
    failure with its traceback."""
    setup_logging()
    log = logging.getLogger()
    try:
        EntryPoint().main()
    except Exception as e:
        # Top-level boundary: record the full traceback instead of crashing.
        log.exception(e)


if __name__ == '__main__':
    main()
| python/threadpool_example.py | 3,503 | with self.__total_size.get_lock(): self.__total_size.value += self.__get_size_of_file(url) | 93 | en | 0.441894 |
from typing import List, Tuple, Union
import numpy as np
import torch
import pytorch_lightning as pl
def calc_area(bbox: np.ndarray):
    """Area of an axis-aligned box given as (x_min, y_min, x_max, y_max)."""
    x_min, y_min, x_max, y_max = bbox[0], bbox[1], bbox[2], bbox[3]
    return (x_max - x_min) * (y_max - y_min)
def calc_bbox_overlap_union_iou(pred: np.ndarray or None, teacher: np.ndarray) -> Tuple[float, float, float]:
    """
    :param pred: ndarray (4, )
    :param teacher: ndarray (4, )
    :return: overlap, union, iou
    """
    teacher_area = (teacher[2] - teacher[0]) * (teacher[3] - teacher[1])
    if pred is None:
        # Nothing predicted: no overlap, union is just the target box.
        return 0.0, teacher_area, 0.0

    pred_area = (pred[2] - pred[0]) * (pred[3] - pred[1])

    # Intersection rectangle, clamped to zero when the boxes do not meet.
    inter_w = max(min(pred[2], teacher[2]) - max(pred[0], teacher[0]), 0)
    inter_h = max(min(pred[3], teacher[3]) - max(pred[1], teacher[1]), 0)

    overlap = inter_w * inter_h
    union = teacher_area + pred_area - overlap
    return overlap, union, overlap / union
class DetectionIoU(pl.metrics.Metric):
    """Per-image IoU for object detection, averaged over images.

    For each image, the total area and total overlap are accumulated per
    class over all ground-truth boxes and all predictions of that class,
    yielding one IoU per class per image; ``compute`` averages those IoUs
    over the images in which each class appeared.
    """

    def __init__(self, n_classes: int, by_classes: bool = False):
        # compute_on_step=False: the metric is only meaningful once aggregated.
        super().__init__(compute_on_step=False)
        self._n_classes = n_classes
        # When True, compute() returns a per-class tensor instead of the mean.
        self._by_classes = by_classes
        # Number of images in which each class appeared; summed across processes.
        self.add_state("image_count_by_classes", default=torch.tensor([0. for _ in range(n_classes)]),
                       dist_reduce_fx="sum")
        # Running sum of per-image IoU values for each class.
        self.add_state("total_iou_by_classes", default=torch.tensor([0. for _ in range(n_classes)]),
                       dist_reduce_fx="sum")

    def update(self, preds: List[np.ndarray], targets: Union[np.ndarray, torch.Tensor]) -> None:
        """
        :param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
        :param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
        :return:
        """
        targets = targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets
        # Group predictions by class up front; scanning every prediction for
        # every ground-truth box would be slow.
        preds_by_class = []
        for pred_bboxes in preds:
            pred_by_class = [[] for _ in range(self._n_classes)]
            for pred_bbox in pred_bboxes:
                pred_by_class[int(pred_bbox[4])].append(pred_bbox)
            preds_by_class.append(pred_by_class)

        for i in range(targets.shape[0]):  # Explore every batch.
            bbox_annotations = targets[i, :, :]
            # Exclude invalid label annotation.
            bbox_annotations = bbox_annotations[bbox_annotations[:, 4] >= 0]

            pred_by_class = preds_by_class[i]

            """
            1画像でラベルごとに計算.
            ラベルごとの面積合計/overlapを計算
            1画像ごとにIoU算出、最終的に画像平均を算出
            """
            # (Translation of the note above: compute per label within one
            # image — sum areas and overlaps per label, derive one IoU per
            # image, finally average over images.)
            total_area_by_classes = [0 for _ in range(self._n_classes)]
            total_overlap_by_classes = [0 for _ in range(self._n_classes)]
            is_label_appeared = [False for _ in range(self._n_classes)]
            for bbox_annotation in bbox_annotations:
                label = int(bbox_annotation[4])
                total_area_by_classes[label] += calc_area(bbox_annotation)
                pred_bboxes = pred_by_class[label]
                if pred_bboxes is None or len(pred_bboxes) == 0:
                    continue
                # Calculate area and overlap by class.
                for pred_bbox in pred_bboxes:
                    overlap, _, _ = calc_bbox_overlap_union_iou(pred_bbox, bbox_annotation)
                    total_overlap_by_classes[label] += overlap
                    # Prediction areas are only added once per class per image.
                    if is_label_appeared[label]:
                        continue
                    total_area_by_classes[label] += calc_area(pred_bbox)
                is_label_appeared[label] = True
            for label in range(self._n_classes):
                # Not exist label in this data.
                if total_area_by_classes[label] <= 0:
                    continue
                # IoU = overlap / (area - overlap), since 'area' already
                # counts the intersection twice.
                self.total_iou_by_classes[label] += total_overlap_by_classes[label] / (
                        total_area_by_classes[label] - total_overlap_by_classes[label])
                self.image_count_by_classes[label] += 1

    def compute(self):
        """Return mean IoU over classes (or per-class IoUs when by_classes=True)."""
        # epsilon guards classes that never appeared in any image.
        epsilon = 1e-8
        iou_by_classes = self.total_iou_by_classes / (self.image_count_by_classes + epsilon)
        if self._by_classes:
            return iou_by_classes
        return torch.mean(iou_by_classes)
class RecallPrecision(pl.metrics.Metric):
    """Accumulates per-class TP/FP/FN counts (IoU >= 0.5 matching) and
    derives recall, precision and F-score in ``compute``."""

    def __init__(self, n_classes: int, by_classes: bool = False):
        super().__init__(compute_on_step=False)
        self._n_classes = n_classes
        # When True, compute() returns per-class tensors instead of means.
        self._by_classes = by_classes
        # Per-class running counts, summed across processes.
        self.add_state("tp_by_classes", default=torch.tensor([0 for _ in range(n_classes)]), dist_reduce_fx="sum")
        self.add_state("fp_by_classes", default=torch.tensor([0 for _ in range(n_classes)]), dist_reduce_fx="sum")
        self.add_state("fn_by_classes", default=torch.tensor([0 for _ in range(n_classes)]), dist_reduce_fx="sum")

    def update(self, preds: List[np.ndarray], targets: Union[np.ndarray, torch.Tensor]) -> None:
        """
        :param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
        :param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
        :return:
        """
        targets = targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets
        # Group predictions by class up front; scanning every prediction for
        # every annotation would be slow.
        preds_by_class = []
        for pred_bboxes in preds:
            pred_by_class = [[] for _ in range(self._n_classes)]
            for pred_bbox in pred_bboxes:
                pred_by_class[int(pred_bbox[4])].append(pred_bbox)
            preds_by_class.append(pred_by_class)

        for i in range(targets.shape[0]):
            bbox_annotations = targets[i, :, :]
            # Exclude invalid label annotation.
            bbox_annotations = bbox_annotations[bbox_annotations[:, 4] >= 0]

            pred_by_class = preds_by_class[i]

            # Predictions per class that matched some annotation; the
            # remainder are counted as false positives below.
            applied_bbox_count_by_classes = [0 for _ in range(self._n_classes)]
            for bbox_annotation in bbox_annotations:
                label = int(bbox_annotation[4])
                pred_bboxes = pred_by_class[label]
                if pred_bboxes is None or len(pred_bboxes) == 0:
                    # No prediction of this class at all: missed annotation.
                    self.fn_by_classes[label] += 1
                    continue
                # Explore max iou of bbox_annotation
                is_matched = False
                for pred_bbox in pred_bboxes:
                    overlap, union, iou = calc_bbox_overlap_union_iou(pred_bbox, bbox_annotation)
                    if iou >= 0.5:
                        applied_bbox_count_by_classes[label] += 1
                        self.tp_by_classes[label] += 1
                        is_matched = True
                        break
                if not is_matched:
                    self.fn_by_classes[label] += 1
            for label in range(self._n_classes):
                # Unmatched predictions of this image are false positives.
                self.fp_by_classes[label] += len(pred_by_class[label]) - applied_bbox_count_by_classes[label]

    def compute(self):
        """Return (recall, precision, f_score), per class or averaged."""
        # epsilon avoids division by zero for classes with no samples.
        epsilon = 1e-8
        recall = self.tp_by_classes / (self.tp_by_classes + self.fn_by_classes + epsilon)
        precision = self.tp_by_classes / (self.tp_by_classes + self.fp_by_classes + epsilon)
        f_score = 2. * recall * precision / (recall + precision + epsilon)
        if self._by_classes:
            return recall, precision, f_score
        return torch.mean(recall), torch.mean(precision), torch.mean(f_score)
class MeanAveragePrecision(pl.metrics.Metric):
    """Mean Average Precision (mAP) for object detection at IoU >= 0.5."""

    def __init__(self, n_classes: int, by_classes=False):
        super().__init__(compute_on_step=False)
        self._n_classes = n_classes
        # TODO want to implement using add_state
        # (plain Python lists are not synchronized across processes).
        self.fp_list_by_classes = [[] for _ in range(n_classes)]
        self.tp_list_by_classes = [[] for _ in range(n_classes)]
        self.score_list_by_classes = [[] for _ in range(n_classes)]
        self.num_annotations_by_classes = [0 for _ in range(n_classes)]
        # self.add_state("fp_list_by_classes", default=[[] for _ in range(n_classes)], dist_reduce_fx="cat")
        # self.add_state("tp_list_by_classes", default=[[] for _ in range(n_classes)], dist_reduce_fx="cat")
        # self.add_state("score_list_by_classes", default=[[] for _ in range(n_classes)], dist_reduce_fx="cat")
        # self.add_state("num_annotations_by_classes", default=[0 for _ in range(n_classes)], dist_reduce_fx="cat")
        # When True, compute() returns per-class APs instead of their mean.
        self._by_classes = by_classes

    def update(self, preds: List[np.ndarray], targets: Union[np.ndarray, torch.Tensor]) -> None:
        """
        :param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
        :param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
        :return:
        """
        targets = targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets
        for i in range(len(preds)):
            pred_bboxes, target_bboxes = preds[i], targets[i]
            # exclude invalid annotations.
            target_bboxes = target_bboxes[target_bboxes[:, 4] >= 0]
            self._update_num_annotations(target_bboxes)
            self._update_tp_fp_score(pred_bboxes, target_bboxes)

    def compute(self):
        """Return per-class APs (by_classes=True) or their mean (mAP)."""
        ap_by_classes = [0 for _ in range(self._n_classes)]
        for label in range(self._n_classes):
            num_annotations = self.num_annotations_by_classes[label]
            tp_list, fp_list = np.array(self.tp_list_by_classes[label]), np.array(self.fp_list_by_classes[label])
            scores = np.array(self.score_list_by_classes[label])
            indices = np.argsort(-scores)
            # sort by score (descending confidence)
            tp_list, fp_list = tp_list[indices], fp_list[indices]
            # cumulative sum
            tp_list, fp_list = np.cumsum(tp_list), np.cumsum(fp_list)

            if num_annotations == 0:
                # Class absent from the ground truth: AP defined as 0 here.
                ap_by_classes[label] = 0
                continue
            recall_curve = tp_list / num_annotations
            # eps guards against division by zero before any detection.
            precision_curve = tp_list / np.maximum(tp_list + fp_list, np.finfo(np.float64).eps)
            ap_by_classes[label] = self._compute_average_precision(recall_curve, precision_curve)
        return ap_by_classes if self._by_classes else sum(ap_by_classes) / len(ap_by_classes)

    def _update_tp_fp_score(self, pred_bboxes: np.ndarray, target_bboxes: np.ndarray):
        """Record one TP-or-FP flag plus the confidence score per prediction.

        :param pred_bboxes: (N, 6(xmin, ymin, xmax, ymax, class, score))
        :param target_bboxes: (N, 5(xmin, ymin, xmax, ymax, class))
        """
        # Each ground-truth box may be matched by at most one prediction.
        detected_indices = []
        for i in range(pred_bboxes.shape[0]):
            pred_label, pred_score = int(pred_bboxes[i][4]), pred_bboxes[i][5]
            matched = False
            # Only consider same-class, not-yet-matched ground-truth boxes.
            for j in filter(lambda k: int(target_bboxes[k][4]) == pred_label and k not in detected_indices,
                            range(target_bboxes.shape[0])):
                overlap, union, iou = calc_bbox_overlap_union_iou(pred_bboxes[i], target_bboxes[j])
                if iou >= 0.5:
                    detected_indices.append(j)
                    self.fp_list_by_classes[pred_label].append(0)
                    self.tp_list_by_classes[pred_label].append(1)
                    matched = True
                    break
            if not matched:
                self.fp_list_by_classes[pred_label].append(1)
                self.tp_list_by_classes[pred_label].append(0)
            self.score_list_by_classes[pred_label].append(pred_score)

    def _update_num_annotations(self, target_bboxes: np.ndarray):
        """Add this image's per-class ground-truth box counts to the totals.

        :param target_bboxes: (N, 5(xmin, ymin, xmax, ymax, class))
        """
        counts = list(map(lambda i: np.count_nonzero(target_bboxes[:, 4] == i), range(self._n_classes)))
        self.num_annotations_by_classes = list(
            map(lambda i: counts[i] + self.num_annotations_by_classes[i], range(self._n_classes)))

    def _compute_average_precision(self, recall_curve: np.ndarray, precision_curve: np.ndarray):
        """Area under the precision-recall curve (all-points interpolation)."""
        # Reference by https://github.com/toandaominh1997/EfficientDet.Pytorch/blob/master/eval.py
        assert recall_curve.ndim == 1 and precision_curve.ndim == 1

        # correct AP calculation
        # first append sentinel values at the end
        mean_recall = np.concatenate(([0.], recall_curve, [1.]))
        mean_precision = np.concatenate(([0.], precision_curve, [0.]))

        # compute the precision envelope
        for i in range(mean_precision.size - 1, 0, -1):
            mean_precision[i - 1] = np.maximum(mean_precision[i - 1], mean_precision[i])

        # to calculate area under PR curve, look for points
        # where X axis (recall) changes value
        i = np.where(mean_recall[1:] != mean_recall[:-1])[0]

        # and sum (\Delta recall) * prec
        ap = np.sum((mean_recall[i + 1] - mean_recall[i]) * mean_precision[i + 1])
        return ap

    def reset(self):
        """Clear all accumulated lists and counts for a fresh evaluation."""
        self.fp_list_by_classes = [[] for _ in range(self._n_classes)]
        self.tp_list_by_classes = [[] for _ in range(self._n_classes)]
        self.score_list_by_classes = [[] for _ in range(self._n_classes)]
        self.num_annotations_by_classes = [0 for _ in range(self._n_classes)]
| deepext_with_lightning/metrics/object_detection.py | 13,400 | :param target_bboxes: (N, 5(xmin, ymin, xmax, ymax, class))
:param pred_bboxes: (N, 6(xmin, ymin, xmax, ymax, class, score))
:param target_bboxes: (N, 5(xmin, ymin, xmax, ymax, class))
:param pred: ndarray (4, )
:param teacher: ndarray (4, )
:return: overlap, union, iou
:param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
:param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
:return:
:param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
:param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
:return:
:param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
:param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
:return:
全探索だと遅いのでクラスごとにまとめておく Explore every batch. Exclude invalid label annotation. Calculate area and overlap by class. Not exist label in this data. 全探索だと遅いのでクラスごとにまとめておく Exclude invalid label annotation. Explore max iou of bbox_annotation TODO want to implement using add_state self.add_state("fp_list_by_classes", default=[[] for _ in range(n_classes)], dist_reduce_fx="cat") self.add_state("tp_list_by_classes", default=[[] for _ in range(n_classes)], dist_reduce_fx="cat") self.add_state("score_list_by_classes", default=[[] for _ in range(n_classes)], dist_reduce_fx="cat") self.add_state("num_annotations_by_classes", default=[0 for _ in range(n_classes)], dist_reduce_fx="cat") exclude invalid annotations. sort by score cumulative sum Reference by https://github.com/toandaominh1997/EfficientDet.Pytorch/blob/master/eval.py correct AP calculation first append sentinel values at the end compute the precision envelope to calculate area under PR curve, look for points where X axis (recall) changes value and sum (\Delta recall) * prec | 1,919 | en | 0.498736 |
# coding: utf-8
import math
import random
import time
import asyncclick as click
@click.group()
def cli():
    """This script showcases different terminal UI helpers in Click."""
    # Group entry point; the subcommands below attach via @cli.command().
    pass
@cli.command()
def colordemo():
    """Demonstrates ANSI color support."""
    # Show each sample color once as foreground and once as background.
    for name in ("red", "green", "blue"):
        fg_text = "I am colored {}".format(name)
        bg_text = "I am background colored {}".format(name)
        click.echo(click.style(fg_text, fg=name))
        click.echo(click.style(bg_text, bg=name))
@cli.command()
def pager():
    """Demonstrates using the pager."""
    # Build 200 numbered lines and hand them to the system pager at once.
    lines = [
        "{}. Hello World!".format(click.style(str(n), fg="green"))
        for n in range(200)
    ]
    click.echo_via_pager("\n".join(lines))
@cli.command()
@click.option(
    "--count",
    default=8000,
    type=click.IntRange(1, 100000),
    help="The number of items to process.",
)
def progress(count):
    """Demonstrates the progress bar.

    Runs several progress bars over ``count`` fake items: a plain bar,
    a bar over a generator of unknown length, two custom-styled bars,
    and finally a non-linear bar driven by explicit ``update()`` calls.
    """
    items = range(count)

    def process_slowly(item):
        # Simulate a small, variable amount of work per item.
        time.sleep(0.002 * random.random())

    def filter_items(items):
        # Randomly drop ~30% of the items. Renamed from `filter` so the
        # builtin of that name is no longer shadowed.
        for item in items:
            if random.random() > 0.3:
                yield item

    with click.progressbar(
        items, label="Processing accounts", fill_char=click.style("#", fg="green")
    ) as bar:
        for item in bar:
            process_slowly(item)

    def show_item(item):
        # Text shown next to the bar; None means "no current item".
        if item is not None:
            return "Item #{}".format(item)

    with click.progressbar(
        filter_items(items),
        label="Committing transaction",
        fill_char=click.style("#", fg="yellow"),
        item_show_func=show_item,
    ) as bar:
        for item in bar:
            process_slowly(item)

    with click.progressbar(
        length=count,
        label="Counting",
        bar_template="%(label)s %(bar)s | %(info)s",
        fill_char=click.style(u"█", fg="cyan"),
        empty_char=" ",
    ) as bar:
        for item in bar:
            process_slowly(item)

    with click.progressbar(
        length=count,
        width=0,
        show_percent=False,
        show_eta=False,
        fill_char=click.style("#", fg="magenta"),
    ) as bar:
        for item in bar:
            process_slowly(item)

    # 'Non-linear progress bar': steps grow exponentially, so the bar
    # visibly decelerates even though each update is one step.
    steps = [math.exp(x * 1.0 / 20) - 1 for x in range(20)]
    count = int(sum(steps))
    with click.progressbar(
        length=count,
        show_percent=False,
        label="Slowing progress bar",
        fill_char=click.style(u"█", fg="green"),
    ) as bar:
        for item in steps:
            time.sleep(item)
            bar.update(item)
@cli.command()
@click.argument("url")
def open(url):
    """Opens a file or URL in the default application."""
    # NOTE: intentionally shadows the builtin `open`; click derives the
    # CLI command name ("open") from the function name.
    click.launch(url)
@cli.command()
@click.argument("url")
def locate(url):
    """Opens the file manager with the given file pre-selected."""
    # locate=True asks click.launch to open the containing folder and
    # highlight the file rather than launching the file itself.
    click.launch(url, locate=True)
@cli.command()
def edit():
    """Opens an editor with some text in it."""
    marker = "# Everything below is ignored\n"
    text = click.edit("\n\n{}".format(marker))
    # Editor closed without saving.
    if text is None:
        click.echo("You did not enter anything!")
        return
    # Keep only what the user typed above the marker line.
    body = text.split(marker, 1)[0].rstrip("\n")
    if body:
        click.echo("Message:\n{}".format(body))
    else:
        click.echo("Empty message!")
@cli.command()
def clear():
    """Clears the entire screen."""
    # Delegates to click's cross-platform screen clear.
    click.clear()
@cli.command()
def pause():
    """Waits for the user to press a button."""
    # Delegates to click.pause(), which blocks until a keypress.
    click.pause()
@cli.command()
def menu():
    """Shows a simple menu."""
    # Tiny state machine: `state` names the menu currently displayed.
    state = "main"
    while True:
        if state == "quit":
            return
        if state == "main":
            click.echo("Main menu:")
            click.echo(" d: debug menu")
            click.echo(" q: quit")
            pressed = click.getchar()
            if pressed == "d":
                state = "debug"
            elif pressed == "q":
                state = "quit"
            else:
                click.echo("Invalid input")
        elif state == "debug":
            click.echo("Debug menu")
            click.echo(" b: back")
            pressed = click.getchar()
            if pressed == "b":
                state = "main"
            else:
                click.echo("Invalid input")
| examples/termui/termui.py | 4,227 | Clears the entire screen.
This script showcases different terminal UI helpers in Click.
Demonstrates ANSI color support.
Opens an editor with some text in it.
Opens a file or URL in the default application.
Shows a simple menu.
Opens a file or URL in the default application.
Demonstrates using the pager.
Waits for the user to press a button.
Demonstrates the progress bar.
coding: utf-8 'Non-linear progress bar' | 416 | en | 0.742285 |
########################## FWMAV Simulation #########################
# Version 0.3
# Fan Fei Feb 2019
# Direct motor driven flapping wing MAV simulation
#######################################################################
import gym
import flappy
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common.vec_env import SubprocVecEnv
from stable_baselines.common import set_global_seeds
from flappy.envs.fwmav.controllers.arc_xy_arc_z import ARCController
from flappy.envs.fwmav.controllers.pid_controller import PIDController
import time
import argparse
import importlib
import numpy as np
def make_env(env_id, rank, seed=0, random_init=True, randomize_sim=True, phantom_sensor=False):
    """Return a zero-argument factory that builds one configured env.

    Intended for vectorized-env constructors (DummyVecEnv/SubprocVecEnv).
    Only the rank-0 worker gets visualization and printing enabled, and
    each worker is seeded with ``seed + rank`` so instances differ.
    """
    def _init():
        instance = gym.make(env_id)
        instance.config(random_init, randomize_sim, phantom_sensor)
        # Visual/console output only from the first worker to avoid
        # duplicated windows and log spam.
        if rank == 0:
            instance.enable_visualization()
            instance.enable_print()
        instance.seed(seed + rank)
        return instance

    return _init
class LazyModel:
    """Adapter exposing a classic controller (PID/ARC) through the
    ``model.predict(obs)`` interface of a stable-baselines model, so the
    main loop can treat both kinds of controllers uniformly.
    """

    def __init__(self, env, model_type):
        # Bounds are used to de-normalize observations and normalize actions.
        self.action_lb = env.action_lb
        self.action_ub = env.action_ub
        self.observation_bound = env.observation_bound
        if model_type == 'PID':
            self.policy = PIDController(env.sim.dt_c)
        elif model_type == 'ARC':
            self.policy = ARCController(env.sim.dt_c)
        else:
            # Was `raise Exception('Error')`: uninformative and overly
            # broad. ValueError subclasses Exception, so any caller
            # catching Exception still works.
            raise ValueError("unsupported model_type: {}".format(model_type))

    def predict(self, obs):
        """Return ``(action, state)`` like stable-baselines' predict().

        :param obs: batch of one normalized observation; the controller
            operates on the de-normalized observation.
        """
        action = self.policy.get_action(obs[0] * self.observation_bound)
        # Scale action from [action_lb, action_ub] to [-1, 1]
        # since baseline does not support asymmetric action space.
        normalized_action = (action - self.action_lb) / (self.action_ub - self.action_lb) * 2 - 1
        return np.array([normalized_action]), None
def main(args):
    """Run the hover simulation with a learned or classic controller.

    Builds a single visualized env, resolves the controller named by
    ``args.model_type`` (either 'PID'/'ARC' or a stable-baselines class
    such as 'PPO2'), then steps the simulation forever.
    """
    env_id = 'fwmav_hover-v0'
    env = DummyVecEnv([make_env(env_id, 0, random_init = args.rand_init, randomize_sim = args.rand_dynamics, phantom_sensor = args.phantom_sensor)])

    if args.model_type != 'PID' and args.model_type != 'ARC':
        # Resolve the algorithm class (e.g. PPO2) from stable_baselines by name.
        try:
            model_cls = getattr(
                importlib.import_module('stable_baselines'), args.model_type)
        except AttributeError:
            print(args.model_type, "Error: wrong model type")
            return
        try:
            model = model_cls.load(args.model_path)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate.
            print(args.model_path, "Error: wrong model path")
            # BUG FIX: previously execution fell through with `model`
            # unbound, crashing with NameError at model.predict below.
            return
    else:
        model = LazyModel(env.envs[0], args.model_type)

    obs = env.reset()

    while True:
        # When the GUI pauses the sim, block on its condition variable.
        if env.envs[0].is_sim_on == False:
            env.envs[0].gui.cv.wait()
        elif env.envs[0].is_sim_on:
            action, _ = model.predict(obs)
            obs, rewards, done, info = env.step(action)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # 'PID', 'ARC', or a stable-baselines class name (e.g. 'PPO2').
    parser.add_argument('--model_type', required=True)
    # Path to a saved stable-baselines model; unused for PID/ARC.
    parser.add_argument('--model_path')
    parser.add_argument(
        '--policy_type', const='MlpPolicy', default='MlpPolicy', nargs='?')
    # Randomization / sensor-model toggles, all off by default.
    parser.add_argument('--rand_init', action='store_true', default=False)
    parser.add_argument('--rand_dynamics', action='store_true', default=False)
    parser.add_argument('--phantom_sensor', action='store_true', default=False)
    args = parser.parse_args()
    main(args)
import os
import sys
__all__ = [
'lexsort','sort', 'argsort','argmin', 'argmax', 'searchsorted']
from pnumpy._pnumpy import getitem, lexsort32, lexsort64
import numpy as np
from numpy import asarray, array, asanyarray
from numpy import concatenate
#array_function_dispatch = functools.partial(
# overrides.array_function_dispatch, module='numpy')
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def _wrapfunc(obj, method, *args, **kwds):
bound = getattr(obj, method, None)
if bound is None:
return _wrapit(obj, method, *args, **kwds)
try:
return bound(*args, **kwds)
except TypeError:
# A TypeError occurs if the object does have such a method in its
# class, but its signature is not identical to that of NumPy's. This
# situation has occurred in the case of a downstream library like
# 'pandas'.
#
# Call _wrapit from within the except clause to ensure a potential
# exception has a traceback chain.
return _wrapit(obj, method, *args, **kwds)
def sort(a, axis=-1, kind=None, order=None):
    """
    Return a sorted copy of an array.

    Parameters
    ----------
    a : array_like
        Array to be sorted.
    axis : int or None, optional
        Axis along which to sort. If None, the array is flattened before
        sorting. The default is -1, which sorts along the last axis.
    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
        Sorting algorithm. The default is 'quicksort'. 'stable' and
        'mergesort' map to timsort/radix sort depending on dtype; see
        ``numpy.sort`` for the full trade-offs (speed, worst case,
        work space, stability).
    order : str or list of str, optional
        When `a` is an array with fields defined, this argument specifies
        which fields to compare first, second, etc. Unspecified fields
        are still used, in dtype order, to break ties.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a` (flattened when
        ``axis is None``). The input is never modified.

    Threading
    ---------
    Up to 8 threads

    See Also
    --------
    ndarray.sort : Method to sort an array in-place.
    argsort : Indirect sort.
    lexsort : Indirect stable sort on multiple keys.
    searchsorted : Find elements in a sorted array.

    Examples
    --------
    >>> a = np.array([[1,4],[3,1]])
    >>> np.sort(a)                # sort along the last axis
    array([[1, 4],
           [1, 3]])
    >>> np.sort(a, axis=None)     # sort the flattened array
    array([1, 1, 3, 4])
    >>> np.sort(a, axis=0)        # sort along the first axis
    array([[1, 1],
           [3, 4]])
    """
    if axis is None:
        # flatten returns (1, N) for np.matrix, so always use the last axis
        a = asanyarray(a).flatten()
        axis = -1
        # BUG FIX: the previous code recursively called sort(a, kind=kind)
        # here and then returned the *unsorted* flattened array `a` —
        # the sorted copy made by the recursive call was discarded.
        # Falling through to the in-place sort of the fresh flattened
        # copy below is both correct and non-recursive.
    else:
        # Copy so the caller's array is left untouched.
        a = asanyarray(a).copy(order="K")
    a.sort(axis=axis, kind=kind, order=order)
    return a
def lexsort(*args, **kwargs):
    """
    Perform an indirect stable sort using a sequence of keys.

    Given k sorting keys (interpretable as spreadsheet columns), return
    an array of integer indices describing the sort order. The last key
    is the primary sort key, the second-to-last the secondary key, and
    so on — exactly like ``np.lexsort``. A 2D array's rows are treated
    as the keys, sorting by the last row first.

    Parameters
    ----------
    keys : (k, N) array or tuple containing k (N,)-shaped sequences
        The `k` different "columns" to be sorted. The last column (or
        row if `keys` is a 2D array) is the primary sort key.
    axis : int, optional
        Axis to be indirectly sorted. By default, sort over the last axis.

    Returns
    -------
    indices : (N,) ndarray of ints
        Array of indices that sort the keys along the specified axis.

    Threading
    ---------
    Up to 8 threads

    See Also
    --------
    argsort : Indirect sort.
    ndarray.sort : In-place sort.
    sort : Return a sorted copy of an array.

    Examples
    --------
    >>> a = [1,5,1,4,3,4,4]  # First column
    >>> b = [9,4,0,4,0,2,1]  # Second column
    >>> np.lexsort((b,a))    # Sort by a, then by b
    array([2, 0, 4, 6, 5, 3, 1])
    """
    try:
        result = lexsort32(*args, **kwargs)
    except Exception:
        # Any problem with the accelerated path (unsupported dtype,
        # missing extension, ...) transparently falls back to numpy.
        result = np.lexsort(*args, **kwargs)
    return result
def argsort(a, axis=-1, kind=None, order=None):
    """
    Returns the indices that would sort an array.

    Performs an indirect sort along `axis` with the algorithm named by
    `kind`, returning indices shaped like `a` such that
    ``np.take_along_axis(a, index_array, axis=axis)`` yields `a` in
    sorted order (for 1-D input, simply ``a[index_array]``).

    Parameters
    ----------
    a : array_like
        Array to sort.
    axis : int or None, optional
        Axis along which to sort. The default is -1 (the last axis).
        If None, the flattened array is used.
    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
        Sorting algorithm; see `sort` for the trade-offs. The default
        is 'quicksort'.
    order : str or list of str, optional
        For structured arrays, which field(s) to compare first, second,
        etc. Unspecified fields still break ties in dtype order.

    Returns
    -------
    index_array : ndarray, int
        Array of indices that sort `a` along the specified `axis`.

    See Also
    --------
    sort : Describes sorting algorithms used.
    lexsort : Indirect stable sort with multiple keys.
    ndarray.sort : Inplace sort.
    argpartition : Indirect partial sort.
    take_along_axis : Apply ``index_array`` from argsort
                      to an array as if by calling sort.

    Examples
    --------
    >>> np.argsort(np.array([3, 1, 2]))
    array([1, 2, 0])
    >>> np.argsort(np.array([[0, 3], [2, 2]]), axis=0)
    array([[0, 1],
           [1, 0]])
    >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
    >>> np.argsort(x, order=('x','y'))
    array([1, 0])
    """
    # Delegate to the object's own argsort when possible (see _wrapfunc).
    return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
def _argmax_dispatcher(a, axis=None, out=None):
    # Dispatcher for numpy's __array_function__ overrides machinery:
    # returns the arguments that may carry an array-like override.
    return (a, out)
def argmax(a, axis=None, out=None):
    """
    Returns the indices of the maximum values along an axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        By default, the index is into the flattened array, otherwise
        along the specified axis.
    out : array, optional
        If provided, the result will be inserted into this array. It
        should be of the appropriate shape and dtype.

    Returns
    -------
    index_array : ndarray of ints
        Array of indices into the array. It has the same shape as
        `a.shape` with the dimension along `axis` removed. For multiple
        occurrences of the maximum, the first occurrence is returned.

    See Also
    --------
    ndarray.argmax, argmin
    amax : The maximum value along a given axis.
    unravel_index : Convert a flat index into an index tuple.
    take_along_axis : Apply ``np.expand_dims(index_array, axis)``
                      from argmax to an array as if by calling max.

    Examples
    --------
    >>> a = np.arange(6).reshape(2,3) + 10
    >>> np.argmax(a)
    5
    >>> np.argmax(a, axis=0)
    array([1, 1, 1])
    >>> np.argmax(a, axis=1)
    array([2, 2])
    >>> b = np.arange(6)
    >>> b[1] = 5
    >>> np.argmax(b)  # Only the first occurrence is returned.
    1
    """
    # Delegate to the object's own argmax when possible (see _wrapfunc).
    return _wrapfunc(a, 'argmax', axis=axis, out=out)
def _argmin_dispatcher(a, axis=None, out=None):
    # Dispatcher for numpy's __array_function__ overrides machinery:
    # returns the arguments that may carry an array-like override.
    return (a, out)
def argmin(a, axis=None, out=None):
    """
    Returns the indices of the minimum values along an axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        By default, the index is into the flattened array, otherwise
        along the specified axis.
    out : array, optional
        If provided, the result will be inserted into this array. It
        should be of the appropriate shape and dtype.

    Returns
    -------
    index_array : ndarray of ints
        Array of indices into the array. It has the same shape as
        `a.shape` with the dimension along `axis` removed. For multiple
        occurrences of the minimum, the first occurrence is returned.

    See Also
    --------
    ndarray.argmin, argmax
    amin : The minimum value along a given axis.
    unravel_index : Convert a flat index into an index tuple.
    take_along_axis : Apply ``np.expand_dims(index_array, axis)``
                      from argmin to an array as if by calling min.

    Examples
    --------
    >>> a = np.arange(6).reshape(2,3) + 10
    >>> np.argmin(a)
    0
    >>> np.argmin(a, axis=0)
    array([0, 0, 0])
    >>> np.argmin(a, axis=1)
    array([0, 0])
    >>> b = np.arange(6) + 10
    >>> b[4] = 10
    >>> np.argmin(b)  # Only the first occurrence is returned.
    0
    """
    # Delegate to the object's own argmin when possible (see _wrapfunc).
    return _wrapfunc(a, 'argmin', axis=axis, out=out)
def _searchsorted_dispatcher(a, v, side=None, sorter=None):
    # Dispatcher for numpy's __array_function__ overrides machinery:
    # returns the arguments that may carry an array-like override.
    return (a, v, sorter)
def searchsorted(a, v, side='left', sorter=None):
    """
    Find indices where elements should be inserted to maintain order.

    Finds indices into the sorted array `a` such that inserting the
    corresponding elements of `v` just before those indices keeps `a`
    sorted. With ``side='left'`` the returned index ``i`` satisfies
    ``a[i-1] < v <= a[i]``; with ``side='right'``,
    ``a[i-1] <= v < a[i]``.

    Parameters
    ----------
    a : 1-D array_like
        Input array. If `sorter` is None, then it must be sorted in
        ascending order, otherwise `sorter` must be an array of indices
        that sort it.
    v : array_like
        Values to insert into `a`.
    side : {'left', 'right'}, optional
        If 'left', the index of the first suitable location found is
        given. If 'right', return the last such index. If there is no
        suitable index, return either 0 or N (the length of `a`).
    sorter : 1-D array_like, optional
        Optional array of integer indices that sort array a into
        ascending order. They are typically the result of argsort.

    Returns
    -------
    indices : array of ints
        Array of insertion points with the same shape as `v`.

    See Also
    --------
    sort : Return a sorted copy of an array.
    histogram : Produce histogram from 1-D data.

    Notes
    -----
    Binary search is used to find the required insertion points. This is
    the same algorithm as the builtin ``bisect.bisect_left``
    (``side='left'``) and ``bisect.bisect_right`` (``side='right'``),
    vectorized in the `v` argument.

    Examples
    --------
    >>> np.searchsorted([1,2,3,4,5], 3)
    2
    >>> np.searchsorted([1,2,3,4,5], 3, side='right')
    3
    >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
    array([0, 5, 1, 2])
    """
    # Delegate to the object's own searchsorted when possible.
    return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
| src/pnumpy/sort.py | 21,082 | Returns the indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmax to an array as if by calling max.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
Indexes of the maximal elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
>>> ind
(1, 2)
>>> a[ind]
15
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmax(x, axis=-1)
>>> # Same as np.max(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[4],
[3]])
>>> # Same as np.max(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([4, 3])
Returns the indices of the minimum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmin to an array as if by calling min.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
array([0, 0, 0])
>>> np.argmin(a, axis=1)
array([0, 0])
Indices of the minimum elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
>>> ind
(0, 0)
>>> a[ind]
10
>>> b = np.arange(6) + 10
>>> b[4] = 10
>>> b
array([10, 11, 12, 13, 10, 15])
>>> np.argmin(b) # Only the first occurrence is returned.
0
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmin(x, axis=-1)
>>> # Same as np.min(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[2],
[0]])
>>> # Same as np.min(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([2, 0])
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
take_along_axis : Apply ``index_array`` from argsort
to an array as if by calling sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
>>> ind
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
array([[0, 2],
[2, 3]])
>>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
>>> ind
array([[0, 1],
[0, 1]])
>>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
array([[0, 3],
[2, 2]])
Indices of the sorted elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
>>> ind
(array([0, 1, 1, 0]), array([0, 0, 1, 1]))
>>> x[ind] # same as np.sort(x, axis=None)
array([0, 2, 2, 3])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
Perform an indirect stable sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
for the keys argument, its rows are interpreted as the sorting keys and
sorting is according to the last row, second last row etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
Threading
---------
Up to 8 threads
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> ind
array([2, 0, 4, 6, 5, 3, 1])
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Assuming that `a` is sorted:
====== ============================
`side` returned index `i` satisfies
====== ============================
left ``a[i-1] < v <= a[i]``
right ``a[i-1] <= v < a[i]``
====== ============================
Parameters
----------
a : 1-D array_like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
.. versionadded:: 1.7.0
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
This function uses the same algorithm as the builtin python `bisect.bisect_left`
(``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,
which is also vectorized in the `v` argument.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort or radix sort under the covers and, in general,
the actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
Threading
---------
Up to 8 threads
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
partition : Partial sort.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The four algorithms implemented in NumPy have the following
properties:
=========== ======= ============= ============ ========
kind speed worst case work space stable
=========== ======= ============= ============ ========
'quicksort' 1 O(n^2) 0 no
'heapsort' 3 O(n*log(n)) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'timsort' 2 O(n*log(n)) ~n/2 yes
=========== ======= ============= ============ ========
.. note:: The datatype determines which of 'mergesort' or 'timsort'
is actually used, even if 'mergesort' is specified. User selection
at a finer scale is not currently available.
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
.. versionadded:: 1.12.0
quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
When sorting does not make enough progress it switches to
`heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
This implementation makes quicksort O(n*log(n)) in the worst case.
'stable' automatically chooses the best stable sorting algorithm
for the data type being sorted.
It, along with 'mergesort' is currently mapped to
`timsort <https://en.wikipedia.org/wiki/Timsort>`_
or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
depending on the data type.
API forward compatibility currently limits the
ability to select the implementation and it is hardwired for the different
data types.
.. versionadded:: 1.17.0
Timsort is added for better performance on already or nearly
sorted data. On random data timsort is almost identical to
mergesort. It is now used for stable sort while quicksort is still the
default sort if none is chosen. For timsort details, refer to
`CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
O(n) sort instead of O(n log n).
.. versionchanged:: 1.18.0
NaT now sorts to the end of arrays for consistency with NaN.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') functions that are now methods A TypeError occurs if the object does have such a method in its class, but its signature is not identical to that of NumPy's. This situation has occurred in the case of a downstream library like 'pandas'. Call _wrapit from within the except clause to ensure a potential exception has a traceback chain. flatten returns (1, N) for np.matrix, so always use the last axis attempt a parallel sort normal numpy code | 17,146 | en | 0.684731 |
#!/usr/bin/env python
"""html2text: Turn HTML into equivalent Markdown-structured text."""
__version__ = "3.200.3"
__author__ = "Aaron Swartz (me@aaronsw.com)"
__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes", "Kevin Jay North"]
# TODO:
# Support decoded entities with unifiable.
try:
True
except NameError:
setattr(__builtins__, 'True', 1)
setattr(__builtins__, 'False', 0)
def has_key(x, y):
    """Portable membership test: use dict.has_key on Python 2, `in` otherwise."""
    if hasattr(x, 'has_key'):
        return x.has_key(y)
    return y in x
try:
import htmlentitydefs
import urlparse
import HTMLParser
except ImportError: #Python3
import html.entities as htmlentitydefs
import urllib.parse as urlparse
import html.parser as HTMLParser
try: #Python3
import urllib.request as urllib
except:
import urllib
import optparse, re, sys, codecs, types
try: from textwrap import wrap
except: pass
# Use Unicode characters instead of their ascii psuedo-replacements
UNICODE_SNOB = 0
# Escape all special characters. Output is less readable, but avoids corner case formatting issues.
ESCAPE_SNOB = 0
# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = 0
# Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.)
BODY_WIDTH = 78
# Don't show internal links (href="#local-anchor") -- corresponding link targets
# won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = True
# Use inline, rather than reference, formatting for images and links
INLINE_LINKS = True
# Number of pixels Google indents nested lists
GOOGLE_LIST_INDENT = 36
IGNORE_ANCHORS = False
IGNORE_IMAGES = False
IGNORE_EMPHASIS = False
### Entity Nonsense ###
def name2cp(k):
    """Map an HTML entity name to its Unicode code point."""
    if k == 'apos':
        # 'apos' is missing from the pre-HTML5 entity tables.
        return ord("'")
    if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3
        return htmlentitydefs.name2codepoint[k]
    # Ancient-Python fallback: decode the raw entity definition string.
    entity = htmlentitydefs.entitydefs[k]
    if entity.startswith("&#") and entity.endswith(";"):
        return int(entity[2:-1]) # not in latin-1
    return ord(codecs.latin_1_decode(entity)[0])
unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
'copy':'(C)', 'mdash':'--', 'nbsp':' ', 'rarr':'->', 'larr':'<-', 'middot':'*',
'ndash':'-', 'oelig':'oe', 'aelig':'ae',
'agrave':'a', 'aacute':'a', 'acirc':'a', 'atilde':'a', 'auml':'a', 'aring':'a',
'egrave':'e', 'eacute':'e', 'ecirc':'e', 'euml':'e',
'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u',
'lrm':'', 'rlm':''}
unifiable_n = {}
for k in unifiable.keys():
unifiable_n[name2cp(k)] = unifiable[k]
### End Entity Nonsense ###
def onlywhite(line):
    """Return a true value if the line consists only of whitespace.

    A line containing any non-whitespace character yields False; an
    all-whitespace line yields the line itself (truthy when non-empty);
    an empty line yields '' (falsy).
    """
    for c in line:
        if c != ' ' and c != '\t':
            # Non-whitespace character found. (The original compared
            # against ' ' twice — the second literal was meant to be a
            # tab — and returned `c == ' '`, which is always False here.)
            return False
    return line
def hn(tag):
    """Return the heading level (1-9) for 'h1'..'h9' tags.

    Returns 0 for a two-char h-tag with a non-numeric suffix (e.g. 'hr'),
    and None for anything else.
    """
    if tag[0] == 'h' and len(tag) == 2:
        try:
            level = int(tag[1])
        except ValueError:
            return 0
        if 1 <= level <= 9:
            return level
def dumb_property_dict(style):
    """Parse a CSS declaration string into a {property: value} dict."""
    properties = {}
    for declaration in style.split(';'):
        if ':' not in declaration:
            continue
        prop, _, value = declaration.partition(':')
        properties[prop.strip()] = value.strip()
    return properties
def dumb_css_parser(data):
    """Parse a stylesheet into {selector: {property: value}} form."""
    # Drop @import statements entirely (we cannot fetch them anyway).
    data += ';'
    start = data.find('@import')
    while start != -1:
        end = data.find(';', start)
        data = data[:start] + data[end + 1:]
        start = data.find('@import')

    # Split into selector/declaration-body pairs. A plain loop plus dict()
    # is used instead of a dict comprehension to support older Pythons.
    pairs = [chunk.split('{') for chunk in data.split('}') if '{' in chunk.strip()]
    try:
        parsed = dict([(sel.strip(), dumb_property_dict(body)) for sel, body in pairs])
    except ValueError:
        parsed = {} # malformed block (e.g. nested '{'); not that important
    return parsed
def element_style(attrs, style_def, parent_style):
    """Return the effective style dict for an element.

    attrs        -- the element's attribute dict.
    style_def    -- parsed stylesheet, {selector: {prop: value}}.
    parent_style -- the enclosing element's effective style.

    Inherited properties are overridden first by class rules, then by the
    inline style attribute.
    """
    style = parent_style.copy()
    if 'class' in attrs:
        for css_class in attrs['class'].split():
            # A class with no stylesheet rule contributes nothing instead
            # of raising KeyError (documents often reference undefined
            # classes).
            css_style = style_def.get('.' + css_class, {})
            style.update(css_style)
    if 'style' in attrs:
        immediate_style = dumb_property_dict(attrs['style'])
        style.update(immediate_style)
    return style
def google_list_style(style):
    """Classify a Google Docs list as 'ul' (unordered) or 'ol' (ordered)."""
    unordered_types = ('disc', 'circle', 'square', 'none')
    if style.get('list-style-type') in unordered_types:
        return 'ul'
    return 'ol'
def google_has_height(style):
    """Return True when the element's style explicitly defines 'height'."""
    return 'height' in style
def google_text_emphasis(style):
    """Collect the element's emphasis modifiers in a fixed order."""
    emphasis = []
    for prop in ('text-decoration', 'font-style', 'font-weight'):
        if prop in style:
            emphasis.append(style[prop])
    return emphasis
def google_fixed_width_font(style):
    """Return True when the element's css declares a fixed-width font."""
    return style.get('font-family', '') in ('Courier New', 'Consolas')
def list_numbering_start(attrs):
    """Extract the zero-based starting number from a list element.

    Returns int(start) - 1 for a numeric 'start' attribute, 0 when the
    attribute is absent or (new) malformed — previously a non-numeric
    start value raised ValueError.
    """
    if 'start' in attrs:
        try:
            return int(attrs['start']) - 1
        except ValueError:
            return 0
    return 0
class HTML2Text(HTMLParser.HTMLParser):
def __init__(self, out=None, baseurl=''):
HTMLParser.HTMLParser.__init__(self)
# Config options
self.unicode_snob = UNICODE_SNOB
self.escape_snob = ESCAPE_SNOB
self.links_each_paragraph = LINKS_EACH_PARAGRAPH
self.body_width = BODY_WIDTH
self.skip_internal_links = SKIP_INTERNAL_LINKS
self.inline_links = INLINE_LINKS
self.google_list_indent = GOOGLE_LIST_INDENT
self.ignore_links = IGNORE_ANCHORS
self.ignore_images = IGNORE_IMAGES
self.ignore_emphasis = IGNORE_EMPHASIS
self.google_doc = False
self.ul_item_mark = '*'
self.emphasis_mark = '_'
self.strong_mark = '**'
if out is None:
self.out = self.outtextf
else:
self.out = out
self.outtextlist = [] # empty list to store output characters before they are "joined"
try:
self.outtext = unicode()
except NameError: # Python3
self.outtext = str()
self.quiet = 0
self.p_p = 0 # number of newline character to print before next output
self.outcount = 0
self.start = 1
self.space = 0
self.a = []
self.astack = []
self.maybe_automatic_link = None
self.absolute_url_matcher = re.compile(r'^[a-zA-Z+]+://')
self.acount = 0
self.list = []
self.blockquote = 0
self.pre = 0
self.startpre = 0
self.code = False
self.br_toggle = ''
self.lastWasNL = 0
self.lastWasList = False
self.style = 0
self.style_def = {}
self.tag_stack = []
self.emphasis = 0
self.drop_white_space = 0
self.inheader = False
self.abbr_title = None # current abbreviation definition
self.abbr_data = None # last inner HTML (for abbr being defined)
self.abbr_list = {} # stack of abbreviations to write later
self.baseurl = baseurl
try: del unifiable_n[name2cp('nbsp')]
except KeyError: pass
unifiable['nbsp'] = ' _place_holder;'
def feed(self, data):
data = data.replace("</' + 'script>", "</ignore>")
HTMLParser.HTMLParser.feed(self, data)
def handle(self, data):
self.feed(data)
self.feed("")
return self.optwrap(self.close())
def outtextf(self, s):
self.outtextlist.append(s)
if s: self.lastWasNL = s[-1] == '\n'
def close(self):
HTMLParser.HTMLParser.close(self)
self.pbr()
self.o('', 0, 'end')
self.outtext = self.outtext.join(self.outtextlist)
if self.unicode_snob:
nbsp = unichr(name2cp('nbsp'))
else:
nbsp = u' '
self.outtext = self.outtext.replace(u' _place_holder;', nbsp)
return self.outtext
def handle_charref(self, c):
self.o(self.charref(c), 1)
def handle_entityref(self, c):
self.o(self.entityref(c), 1)
def handle_starttag(self, tag, attrs):
self.handle_tag(tag, attrs, 1)
def handle_endtag(self, tag):
self.handle_tag(tag, None, 0)
def previousIndex(self, attrs):
""" returns the index of certain set of attributes (of a link) in the
self.a list
If the set of attributes is not found, returns None
"""
if not has_key(attrs, 'href'): return None
i = -1
for a in self.a:
i += 1
match = 0
if has_key(a, 'href') and a['href'] == attrs['href']:
if has_key(a, 'title') or has_key(attrs, 'title'):
if (has_key(a, 'title') and has_key(attrs, 'title') and
a['title'] == attrs['title']):
match = True
else:
match = True
if match: return i
def drop_last(self, nLetters):
if not self.quiet:
self.outtext = self.outtext[:-nLetters]
def handle_emphasis(self, start, tag_style, parent_style):
"""handles various text emphases"""
tag_emphasis = google_text_emphasis(tag_style)
parent_emphasis = google_text_emphasis(parent_style)
# handle Google's text emphasis
strikethrough = 'line-through' in tag_emphasis and self.hide_strikethrough
bold = 'bold' in tag_emphasis and not 'bold' in parent_emphasis
italic = 'italic' in tag_emphasis and not 'italic' in parent_emphasis
fixed = google_fixed_width_font(tag_style) and not \
google_fixed_width_font(parent_style) and not self.pre
if start:
# crossed-out text must be handled before other attributes
# in order not to output qualifiers unnecessarily
if bold or italic or fixed:
self.emphasis += 1
if strikethrough:
self.quiet += 1
if italic:
self.o(self.emphasis_mark)
self.drop_white_space += 1
if bold:
self.o(self.strong_mark)
self.drop_white_space += 1
if fixed:
self.o('`')
self.drop_white_space += 1
self.code = True
else:
if bold or italic or fixed:
# there must not be whitespace before closing emphasis mark
self.emphasis -= 1
self.space = 0
self.outtext = self.outtext.rstrip()
if fixed:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o('`')
self.code = False
if bold:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(2)
self.drop_white_space -= 1
else:
self.o(self.strong_mark)
if italic:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o(self.emphasis_mark)
# space is only allowed after *all* emphasis marks
if (bold or italic) and not self.emphasis:
self.o(" ")
if strikethrough:
self.quiet -= 1
def handle_tag(self, tag, attrs, start):
#attrs = fixattrs(attrs)
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
if self.google_doc:
# the attrs parameter is empty for a closing tag. in addition, we
# need the attributes of the parent nodes in order to get a
# complete style description for the current element. we assume
# that google docs export well formed html.
parent_style = {}
if start:
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
tag_style = element_style(attrs, self.style_def, parent_style)
self.tag_stack.append((tag, attrs, tag_style))
else:
dummy, attrs, tag_style = self.tag_stack.pop()
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
if hn(tag):
self.p()
if start:
self.inheader = True
self.o(hn(tag)*"#" + ' ')
else:
self.inheader = False
return # prevent redundant emphasis marks on headers
if tag in ['p', 'div']:
if self.google_doc:
if start and google_has_height(tag_style):
self.p()
else:
self.soft_br()
else:
self.p()
if tag == "br" and start: self.o(" \n")
if tag == "hr" and start:
self.p()
self.o("* * *")
self.p()
if tag in ["head", "style", 'script']:
if start: self.quiet += 1
else: self.quiet -= 1
if tag == "style":
if start: self.style += 1
else: self.style -= 1
if tag in ["body"]:
self.quiet = 0 # sites like 9rules.com never close <head>
if tag == "blockquote":
if start:
self.p(); self.o('> ', 0, 1); self.start = 1
self.blockquote += 1
else:
self.blockquote -= 1
self.p()
if tag in ['em', 'i', 'u'] and not self.ignore_emphasis: self.o(self.emphasis_mark)
if tag in ['strong', 'b'] and not self.ignore_emphasis: self.o(self.strong_mark)
if tag in ['del', 'strike', 's']:
if start:
self.o("<"+tag+">")
else:
self.o("</"+tag+">")
if self.google_doc:
if not self.inheader:
# handle some font attributes, but leave headers clean
self.handle_emphasis(start, tag_style, parent_style)
if tag in ["code", "tt"] and not self.pre: self.o('`') #TODO: `` `this` ``
if tag == "abbr":
if start:
self.abbr_title = None
self.abbr_data = ''
if has_key(attrs, 'title'):
self.abbr_title = attrs['title']
else:
if self.abbr_title != None:
self.abbr_list[self.abbr_data] = self.abbr_title
self.abbr_title = None
self.abbr_data = ''
if tag == "a" and not self.ignore_links:
if start:
if has_key(attrs, 'href') and not (self.skip_internal_links and attrs['href'].startswith('#')):
self.astack.append(attrs)
self.maybe_automatic_link = attrs['href']
else:
self.astack.append(None)
else:
if self.astack:
a = self.astack.pop()
if self.maybe_automatic_link:
self.maybe_automatic_link = None
elif a:
if self.inline_links:
self.o("](" + escape_md(a['href']) + ")")
else:
i = self.previousIndex(a)
if i is not None:
a = self.a[i]
else:
self.acount += 1
a['count'] = self.acount
a['outcount'] = self.outcount
self.a.append(a)
self.o("][" + str(a['count']) + "]")
if tag == "img" and start and not self.ignore_images:
if has_key(attrs, 'src'):
attrs['href'] = attrs['src']
alt = attrs.get('alt', '')
self.o("![" + escape_md(alt) + "]")
if self.inline_links:
self.o("(" + escape_md(attrs['href']) + ")")
else:
i = self.previousIndex(attrs)
if i is not None:
attrs = self.a[i]
else:
self.acount += 1
attrs['count'] = self.acount
attrs['outcount'] = self.outcount
self.a.append(attrs)
self.o("[" + str(attrs['count']) + "]")
if tag == 'dl' and start: self.p()
if tag == 'dt' and not start: self.pbr()
if tag == 'dd' and start: self.o(' ')
if tag == 'dd' and not start: self.pbr()
if tag in ["ol", "ul"]:
# Google Docs create sub lists as top level lists
if (not self.list) and (not self.lastWasList):
self.p()
if start:
if self.google_doc:
list_style = google_list_style(tag_style)
else:
list_style = tag
numbering_start = list_numbering_start(attrs)
self.list.append({'name':list_style, 'num':numbering_start})
else:
if self.list: self.list.pop()
self.lastWasList = True
else:
self.lastWasList = False
if tag == 'li':
self.pbr()
if start:
if self.list: li = self.list[-1]
else: li = {'name':'ul', 'num':0}
if self.google_doc:
nest_count = self.google_nest_count(tag_style)
else:
nest_count = len(self.list)
self.o(" " * nest_count) #TODO: line up <ol><li>s > 9 correctly.
if li['name'] == "ul": self.o(self.ul_item_mark + " ")
elif li['name'] == "ol":
li['num'] += 1
self.o(str(li['num'])+". ")
self.start = 1
if tag in ["table", "tr"] and start: self.p()
if tag == 'td': self.pbr()
if tag == "pre":
if start:
self.startpre = 1
self.pre = 1
else:
self.pre = 0
self.p()
def pbr(self):
if self.p_p == 0:
self.p_p = 1
def p(self):
self.p_p = 2
def soft_br(self):
self.pbr()
self.br_toggle = ' '
def o(self, data, puredata=0, force=0):
if self.abbr_data is not None:
self.abbr_data += data
if not self.quiet:
if self.google_doc:
# prevent white space immediately after 'begin emphasis' marks ('**' and '_')
lstripped_data = data.lstrip()
if self.drop_white_space and not (self.pre or self.code):
data = lstripped_data
if lstripped_data != '':
self.drop_white_space = 0
if puredata and not self.pre:
data = re.sub('\s+', ' ', data)
if data and data[0] == ' ':
self.space = 1
data = data[1:]
if not data and not force: return
if self.startpre:
#self.out(" :") #TODO: not output when already one there
if not data.startswith("\n"): # <pre>stuff...
data = "\n" + data
bq = (">" * self.blockquote)
if not (force and data and data[0] == ">") and self.blockquote: bq += " "
if self.pre:
if not self.list:
bq += " "
#else: list content is already partially indented
for i in xrange(len(self.list)):
bq += " "
data = data.replace("\n", "\n"+bq)
if self.startpre:
self.startpre = 0
if self.list:
data = data.lstrip("\n") # use existing initial indentation
if self.start:
self.space = 0
self.p_p = 0
self.start = 0
if force == 'end':
# It's the end.
self.p_p = 0
self.out("\n")
self.space = 0
if self.p_p:
self.out((self.br_toggle+'\n'+bq)*self.p_p)
self.space = 0
self.br_toggle = ''
if self.space:
if not self.lastWasNL: self.out(' ')
self.space = 0
if self.a and ((self.p_p == 2 and self.links_each_paragraph) or force == "end"):
if force == "end": self.out("\n")
newa = []
for link in self.a:
if self.outcount > link['outcount']:
self.out(" ["+ str(link['count']) +"]: " + urlparse.urljoin(self.baseurl, link['href']))
if has_key(link, 'title'): self.out(" ("+link['title']+")")
self.out("\n")
else:
newa.append(link)
if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.
self.a = newa
if self.abbr_list and force == "end":
for abbr, definition in self.abbr_list.items():
self.out(" *[" + abbr + "]: " + definition + "\n")
self.p_p = 0
self.out(data)
self.outcount += 1
def handle_data(self, data):
if r'\/script>' in data: self.quiet -= 1
if self.style:
self.style_def.update(dumb_css_parser(data))
if not self.maybe_automatic_link is None:
href = self.maybe_automatic_link
if href == data and self.absolute_url_matcher.match(href):
self.o("<" + data + ">")
return
else:
self.o("[")
self.maybe_automatic_link = None
if not self.code and not self.pre:
data = escape_md_section(data, snob=self.escape_snob)
self.o(data, 1)
def unknown_decl(self, data): pass
def charref(self, name):
if name[0] in ['x','X']:
c = int(name[1:], 16)
else:
c = int(name)
if not self.unicode_snob and c in unifiable_n.keys():
return unifiable_n[c]
else:
try:
return unichr(c)
except NameError: #Python3
return chr(c)
def entityref(self, c):
if not self.unicode_snob and c in unifiable.keys():
return unifiable[c]
else:
try: name2cp(c)
except KeyError: return "&" + c + ';'
else:
try:
return unichr(name2cp(c))
except NameError: #Python3
return chr(name2cp(c))
def replaceEntities(self, s):
s = s.group(1)
if s[0] == "#":
return self.charref(s[1:])
else: return self.entityref(s)
r_unescape = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape(self, s):
return self.r_unescape.sub(self.replaceEntities, s)
def google_nest_count(self, style):
"""calculate the nesting count of google doc lists"""
nest_count = 0
if 'margin-left' in style:
nest_count = int(style['margin-left'][:-2]) / self.google_list_indent
return nest_count
def optwrap(self, text):
    """Wrap all paragraphs in the provided text.

    Returns *text* unchanged when body_width is 0 (wrapping disabled).
    Paragraphs detected by skipwrap() (code blocks, list items, ...) are
    left unwrapped; `wrap` is presumably textwrap.wrap -- imported earlier
    in this module, outside this view (TODO confirm).
    """
    if not self.body_width:
        return text
    assert wrap, "Requires Python 2.3."
    result = ''
    newlines = 0
    for para in text.split("\n"):
        if len(para) > 0:
            if not skipwrap(para):
                result += "\n".join(wrap(para, self.body_width))
                if para.endswith(' '):
                    # Trailing space: markdown hard line break, keep it.
                    result += " \n"
                    newlines = 1
                else:
                    result += "\n\n"
                    newlines = 2
            else:
                # Unwrappable paragraph: pass through (unless pure whitespace).
                if not onlywhite(para):
                    result += para + "\n"
                    newlines = 1
        else:
            # Blank line: collapse runs of blanks to at most two newlines.
            if newlines < 2:
                result += "\n"
                newlines += 1
    return result
ordered_list_matcher = re.compile(r'\d+\.\s')
unordered_list_matcher = re.compile(r'[-\*\+]\s')
md_chars_matcher = re.compile(r"([\\\[\]\(\)])")
md_chars_matcher_all = re.compile(r"([`\*_{}\[\]\(\)#!])")
md_dot_matcher = re.compile(r"""
^ # start of line
(\s*\d+) # optional whitespace and a number
(\.) # dot
(?=\s) # lookahead assert whitespace
""", re.MULTILINE | re.VERBOSE)
md_plus_matcher = re.compile(r"""
^
(\s*)
(\+)
(?=\s)
""", flags=re.MULTILINE | re.VERBOSE)
md_dash_matcher = re.compile(r"""
^
(\s*)
(-)
(?=\s|\-) # followed by whitespace (bullet list, or spaced out hr)
# or another dash (header or hr)
""", flags=re.MULTILINE | re.VERBOSE)
slash_chars = r'\`*_{}[]()#+-.!'
md_backslash_matcher = re.compile(r'''
(\\) # match one slash
(?=[%s]) # followed by a char that requires escaping
''' % re.escape(slash_chars),
flags=re.VERBOSE)
def skipwrap(para):
    """Return True when paragraph *para* must be excluded from line wrapping.

    Code blocks, list items and bullet-like lines keep their layout;
    everything else (including emdash lines) may be wrapped.
    """
    # If the text begins with four spaces or one tab, it's a code block;
    # don't wrap. (para[0:1] instead of para[0] so an empty string cannot
    # raise IndexError.)
    if para[0:4] == '    ' or para[0:1] == '\t':
        return True
    # If the text begins with only two "--", possibly preceded by whitespace,
    # that's an emdash; so wrap.
    stripped = para.lstrip()
    if stripped[0:2] == "--" and len(stripped) > 2 and stripped[2] != "-":
        return False
    # I'm not sure what this is for; I thought it was to detect lists, but
    # there's a <br>-inside-<span> case in one of the tests that also
    # depends upon it.
    if stripped[0:1] == '-' or stripped[0:1] == '*':
        return True
    # If the text begins with a single -, *, or +, followed by a space, or an
    # integer, followed by a ., followed by a space (in either case optionally
    # preceded by whitespace), it's a list; don't wrap.
    if ordered_list_matcher.match(stripped) or unordered_list_matcher.match(stripped):
        return True
    return False
def wrapwrite(text):
    """Write *text* to stdout encoded as UTF-8 bytes.

    Uses the binary buffer on Python 3; falls back to the plain stream
    on Python 2, where stdout has no .buffer attribute.
    """
    encoded = text.encode('utf-8')
    try:
        sys.stdout.buffer.write(encoded)    # Python 3
    except AttributeError:
        sys.stdout.write(encoded)           # Python 2
def html2text(html, baseurl=''):
    """Convenience wrapper: convert an HTML string to Markdown text.

    *baseurl* is used to resolve relative links in the document.
    """
    h = HTML2Text(baseurl=baseurl)
    return h.handle(html)
def unescape(s, unicode_snob=False):
    """Module-level helper: replace HTML entities in *s* with characters.

    With unicode_snob=False, known entities become ASCII pseudo-replacements.
    """
    h = HTML2Text()
    h.unicode_snob = unicode_snob
    return h.unescape(s)
def escape_md(text):
    """Escapes markdown-sensitive characters within other markdown constructs."""
    # Narrow character set (backslash, brackets, parens) -- see
    # escape_md_section for the document-wide variant.
    return md_chars_matcher.sub(r"\\\1", text)
def escape_md_section(text, snob=False):
    """Escape markdown-sensitive characters across whole document sections.

    The backslash pass must run first so later substitutions do not
    double-escape; with snob=True every special character is escaped.
    """
    text = md_backslash_matcher.sub(r"\\\1", text)
    if snob:
        text = md_chars_matcher_all.sub(r"\\\1", text)
    # Leading-dot, leading-plus and leading-dash lines would otherwise
    # render as lists/rules; escape the trigger character in each.
    for line_matcher in (md_dot_matcher, md_plus_matcher, md_dash_matcher):
        text = line_matcher.sub(r"\1\\\2", text)
    return text
def main():
    """CLI entry point: read HTML from a file, URL or stdin; print Markdown."""
    baseurl = ''
    p = optparse.OptionParser('%prog [(filename|url) [encoding]]',
                              version='%prog ' + __version__)
    p.add_option("--ignore-emphasis", dest="ignore_emphasis", action="store_true",
                 default=IGNORE_EMPHASIS, help="don't include any formatting for emphasis")
    p.add_option("--ignore-links", dest="ignore_links", action="store_true",
                 default=IGNORE_ANCHORS, help="don't include any formatting for links")
    p.add_option("--ignore-images", dest="ignore_images", action="store_true",
                 default=IGNORE_IMAGES, help="don't include any formatting for images")
    p.add_option("-g", "--google-doc", action="store_true", dest="google_doc",
                 default=False, help="convert an html-exported Google Document")
    p.add_option("-d", "--dash-unordered-list", action="store_true", dest="ul_style_dash",
                 default=False, help="use a dash rather than a star for unordered list items")
    p.add_option("-e", "--asterisk-emphasis", action="store_true", dest="em_style_asterisk",
                 default=False, help="use an asterisk rather than an underscore for emphasized text")
    p.add_option("-b", "--body-width", dest="body_width", action="store", type="int",
                 default=BODY_WIDTH, help="number of characters per output line, 0 for no wrap")
    p.add_option("-i", "--google-list-indent", dest="list_indent", action="store", type="int",
                 default=GOOGLE_LIST_INDENT, help="number of pixels Google indents nested lists")
    p.add_option("-s", "--hide-strikethrough", action="store_true", dest="hide_strikethrough",
                 default=False, help="hide strike-through text. only relevant when -g is specified as well")
    p.add_option("--escape-all", action="store_true", dest="escape_snob",
                 default=False, help="Escape all special characters. Output is less readable, but avoids corner case formatting issues.")
    (options, args) = p.parse_args()
    # process input
    encoding = "utf-8"
    # NOTE(review): `encoding` is initialized to "utf-8" above, so the
    # `if encoding is None` auto-detection branches below look unreachable
    # from the command line -- confirm before relying on chardet/feedparser.
    if len(args) > 0:
        file_ = args[0]
        if len(args) == 2:
            encoding = args[1]
        if len(args) > 2:
            p.error('Too many arguments')
        if file_.startswith('http://') or file_.startswith('https://'):
            # Remote document: remember the URL so relative links resolve.
            baseurl = file_
            j = urllib.urlopen(baseurl)
            data = j.read()
            if encoding is None:
                try:
                    from feedparser import _getCharacterEncoding as enc
                except ImportError:
                    enc = lambda x, y: ('utf-8', 1)
                encoding = enc(j.headers, data)[0]
                if encoding == 'us-ascii':
                    encoding = 'utf-8'
        else:
            data = open(file_, 'rb').read()
            if encoding is None:
                try:
                    from chardet import detect
                except ImportError:
                    detect = lambda x: {'encoding': 'utf-8'}
                encoding = detect(data)['encoding']
    else:
        data = sys.stdin.read()
    data = data.decode(encoding)
    h = HTML2Text(baseurl=baseurl)
    # handle options
    if options.ul_style_dash: h.ul_item_mark = '-'
    if options.em_style_asterisk:
        h.emphasis_mark = '*'
        h.strong_mark = '__'
    h.body_width = options.body_width
    h.list_indent = options.list_indent
    h.ignore_emphasis = options.ignore_emphasis
    h.ignore_links = options.ignore_links
    h.ignore_images = options.ignore_images
    h.google_doc = options.google_doc
    h.hide_strikethrough = options.hide_strikethrough
    h.escape_snob = options.escape_snob
    wrapwrite(h.handle(data))
# Allow running this module directly as a command-line tool.
if __name__ == "__main__":
    main()
| dev/html2text.py | 32,113 | returns a hash of css selectors, each of which contains a hash of css attributes
returns a hash of css attributes
returns a hash of the 'final' style attributes of the element
Escapes markdown-sensitive characters within other markdown constructs.
Escapes markdown-sensitive characters across whole document sections.
check if the css of the current element defines a fixed width font
check if the style of the element has the 'height' attribute explicitly defined
finds out whether this is an ordered or unordered list
calculate the nesting count of google doc lists
return a list of all emphasis modifiers of the element
handles various text emphases
extract numbering from list element attributes
Return true if the line does only consist of whitespace characters.
Wrap all paragraphs in the provided text.
returns the index of certain set of attributes (of a link) in the
self.a list
If the set of attributes is not found, returns None
html2text: Turn HTML into equivalent Markdown-structured text.
!/usr/bin/env python TODO: Support decoded entities with unifiable.Python3Python3 Use Unicode characters instead of their ascii psuedo-replacements Escape all special characters. Output is less readable, but avoids corner case formatting issues. Put the links after each paragraph instead of at the end. Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.) Don't show internal links (href="local-anchor") -- corresponding link targets won't be visible in the plain text file anyway. Use inline, rather than reference, formatting for images and links Number of pixels Google indents nested lists Entity Nonsense requires Python 2.3 not in latin-1 End Entity Nonsense remove @import sentences parse the css. reverted from dictionary compehension in order to support older pythons not that important Config options empty list to store output characters before they are "joined" Python3 number of newline character to print before next output current abbreviation definition last inner HTML (for abbr being defined) stack of abbreviations to write later handle Google's text emphasis crossed-out text must be handled before other attributes in order not to output qualifiers unnecessarily there must not be whitespace before closing emphasis mark empty emphasis, drop it empty emphasis, drop it empty emphasis, drop it space is only allowed after *all* emphasis marksattrs = fixattrs(attrs) the attrs parameter is empty for a closing tag. in addition, we need the attributes of the parent nodes in order to get a complete style description for the current element. we assume that google docs export well formed html. prevent redundant emphasis marks on headers sites like 9rules.com never close <head> handle some font attributes, but leave headers cleanTODO: `` `this` `` Google Docs create sub lists as top level listsTODO: line up <ol><li>s > 9 correctly. 
prevent white space immediately after 'begin emphasis' marks ('**' and '_')self.out(" :") TODO: not output when already one there <pre>stuff...else: list content is already partially indented use existing initial indentation It's the end. Don't need an extra line when nothing was done.Python3Python3 If the text begins with four spaces or one tab, it's a code block; don't wrap If the text begins with only two "--", possibly preceded by whitespace, that's an emdash; so wrap. I'm not sure what this is for; I thought it was to detect lists, but there's a <br>-inside-<span> case in one of the tests that also depends upon it. If the text begins with a single -, *, or +, followed by a space, or an integer, followed by a ., followed by a space (in either case optionally preceeded by whitespace), it's a list; don't wrap.Python3 process input handle options | 3,742 | en | 0.789426 |
# Packaging script for pyk3x (setuptools).
import setuptools

# Use the README as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# NOTE(review): no `version=` is given; setuptools will fall back to
# "0.0.0" unless the version comes from elsewhere -- confirm.
setuptools.setup(
    name="pyk3x",
    author="Roming22",
    author_email="roming22@gmail.com",
    description="API to simplify k3d deployments",
    # Fixed typo: "kuberbetes" -> "kubernetes" (search keyword).
    keywords="kubernetes, k3s, k3d, k3x, cluster",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Roming22/pyk3x",
    project_urls={
        "Documentation": "https://github.com/Roming22/pyk3x",
        "Bug Reports": "https://github.com/Roming22/pyk3x/issues",
        "Source Code": "https://github.com/Roming22/pyk3x",
        # 'Funding': '',
        # 'Say Thanks!': '',
    },
    # Sources live under src/ (the "src layout").
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    classifiers=[
        # see https://pypi.org/classifiers/
        "Development Status :: 1 - Planning",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Build Tools",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3 :: Only",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.7",
)
| setup.py | 1,381 | 'Funding': '', 'Say Thanks!': '', see https://pypi.org/classifiers/ | 67 | en | 0.417752 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import os.path
import math
import tensorflow as tf
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
LOGDIR = "/tmp/cnn_backbone_angles/"  # TensorBoard summaries and checkpoints

# Parameters
batch_size = 5             # proteins per training batch
training_epochs = 10       # may be incremented at runtime by the training loop
display_step = 1           # evaluate/log every N epochs
internal_channels_1 = 100
internal_channels_2 = 100
internal_channels_3 = 100
internal_channels_4 = 50
window_size = 11           # 1-D convolution kernel width (residues)
beta = 0.001               # L2 regularization weight
values_to_predict = 2      # (phi, psi) per residue
num_splits = 10            # K in K-fold cross-validation
alpha = 0.2                # scale applied before tanh in hidden conv layers
dropout_keep_rate = 0.5
learning_rate = 1E-3

# Dropout keep probabilities; default 1.0 means no dropout at evaluation time.
keep_prob = tf.placeholder_with_default(1.0, shape=(), name="keep_prob")
keep_prob_input = tf.placeholder_with_default(1.0, shape=(), name="keep_prob_input")
def fc_layer(input, size_in, size_out, name="fc"):
    """Output projection layer mapping size_in channels to size_out values.

    Despite the "fc" name this is a 1-D convolution with kernel width
    `window_size` (see conv1d call below), not a fully-connected layer.
    Returns (activations, weight variable) so the caller can regularize W.
    """
    with tf.name_scope(name):
        w = tf.Variable(tf.truncated_normal([window_size, size_in, size_out], stddev=0.1), name="W")
        b = tf.Variable(tf.constant(0.1, shape=[size_out]), name="B")
        act = conv1d(input, w) + b   # linear output: no nonlinearity here
        tf.summary.histogram("weights", w)
        tf.summary.histogram("biases", b)
        tf.summary.histogram("activations", act)
        return act, w
def convnn(x, channels_num, layers_num, window_size = 11):
    """Build the 1-D CNN: input conv + `layers_num` hidden convs + output conv.

    Returns (output tensor, L2 regularization term). Hidden layers use
    tanh(alpha * conv + b) with dropout; the input layer uses ReLU with
    its own (lighter) dropout placeholder.
    """
    W_arr = []
    layers = []
    # First convolutional layer
    input_dimensions = x.get_shape().as_list()[1:]
    filter_shape = [window_size, input_dimensions[-1], channels_num]
    W_input = weight_variable(filter_shape)
    W_arr.append(W_input)
    b_input = bias_variable([input_dimensions[0], channels_num])
    input_layer = tf.nn.relu(conv1d(x, W_input) + b_input)
    dropout_input = tf.nn.dropout(input_layer, keep_prob_input)
    layers.append(dropout_input)
    # Hidden layers
    filter_shape = [window_size, channels_num, channels_num]
    W_hidden = tf.constant([], dtype=tf.float32)
    for i in range(layers_num):
        with tf.name_scope("conv"):
            W_hidden = weight_variable(filter_shape)
            W_arr.append(W_hidden)
            b_hidden = bias_variable([input_dimensions[0], channels_num])
            conv_layer = tf.nn.tanh(alpha*conv1d(layers[i], W_hidden) + b_hidden)
            tf.summary.histogram("weights", W_hidden)
            tf.summary.histogram("biases", b_hidden)
            tf.summary.histogram("activations", conv_layer)
        with tf.name_scope("dropout"):
            dropout = tf.nn.dropout(conv_layer, keep_prob)
            layers.append(dropout)
    # Output convolutional layer
    layer_out, W_out = fc_layer(layers[-1], channels_num, values_to_predict)
    W_arr.append(W_out)
    # layer_out = tf.atan2(tf.sin(layer_out), tf.cos(layer_out))
    # Loss function with L2 Regularization with beta=0.001
    # NOTE(review): after the loop W_hidden is only the LAST hidden layer's
    # weights, so "l2_loss(W_hidden) * layers_num" approximates the sum over
    # all hidden layers rather than computing it (W_arr holds them all).
    regularizers = tf.nn.l2_loss(W_input) + tf.nn.l2_loss(W_hidden) * layers_num + tf.nn.l2_loss(W_out)
    # regularizers = tf.constant(0, dtype=tf.float32)
    # for W in W_arr:
    #     regularizers += tf.nn.l2_loss(W)
    return layer_out, regularizers
def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    # Small truncated-normal init breaks symmetry between channels.
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name="W")
def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    # Slightly positive init (0.1) to keep ReLU units active early on.
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name="B")
def conv1d(x, W):
    """conv1d returns a 1d convolution layer."""
    # Stride 1 with SAME padding: output keeps the input sequence length.
    return tf.nn.conv1d(x, W, 1, 'SAME')
def avgpool2d(x, k=2):
    """2-D average pooling with a k x k window and k x k stride."""
    # tf.nn.avg_pool wrapper (an earlier comment said "MaxPool2D", which
    # was wrong -- this is average pooling). Appears unused in this file.
    return tf.nn.avg_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')
def calculate_accuracy(predictions, labels):
    """Score predicted (phi, psi) angles against labels.

    predictions/labels: arrays of shape (proteins, residues, 2), radians.
    Returns (per-protein accuracy array, per-region accuracy dict keyed by
    Ramachandran region 1/2/3 plus "total"). Residues whose label angles
    are both exactly 0 are treated as padding and skipped.
    """
    num_proteins = predictions.shape[0]
    protein_accuracy = np.zeros(num_proteins, dtype=np.float32)
    label_accuracy = {1: {"total": 0, "correct": 0}, 2: {"total": 0, "correct": 0},
                      3: {"total": 0, "correct": 0}}
    for i in range(num_proteins):
        total_predictions = 0
        correct_predictions = 0
        for j in range(predictions.shape[1]):
            phi = math.degrees(labels[i][j][0])
            phi0 = math.degrees(predictions[i][j][0])
            psi = math.degrees(labels[i][j][1])
            psi0 = math.degrees(predictions[i][j][1])
            if (phi != 0) or (psi != 0):
                total_predictions += 1
                expected_state = get_backbone_distribution(labels[i][j])
                predicted_state = get_backbone_distribution(predictions[i][j])
                label_accuracy[predicted_state]["total"] += 1
                if predicted_state == expected_state:
                    label_accuracy[predicted_state]["correct"] += 1
                diff = math.sqrt(math.pow(phi - phi0, 2) + math.pow(psi - psi0, 2))
                # BUG FIX: was "phi0 - phi0" (always zero).
                diff_phi = phi - phi0
                diff_psi = psi - psi0
                # NOTE(review): criteria_1/criteria_2 are alternative
                # acceptance criteria that are computed but never used;
                # only the Euclidean `diff < 60` test counts below.
                criteria_1 = (np.abs(diff_phi) < 60) & (np.abs(diff_psi) < 60)
                criteria_2 = (np.abs(diff_phi+diff_psi) < 60) & (np.abs(diff_psi) < 90) & (np.abs(diff_phi) < 90)
                if diff < 60:
                    correct_predictions += 1
        if total_predictions > 0:
            protein_accuracy[i] = correct_predictions / float(total_predictions)
    accuracy_dist = {}
    total = 0
    correct = 0
    # .items() works on Python 2 and 3; .iteritems() crashes on Python 3.
    for label, val in label_accuracy.items():
        if val["total"] > 0:
            accuracy_dist[label] = val["correct"]/val["total"]
            total += val["total"]
            correct += val["correct"]
    if total > 0:
        accuracy_dist["total"] = correct/total
    return protein_accuracy, accuracy_dist
def get_backbone_distribution(angles):
    """Classify a (phi, psi) pair (radians) into a Ramachandran region.

    Returns 1 for region A, 2 for region P, 3 otherwise.
    """
    phi_deg = math.degrees(angles[0])
    psi_deg = math.degrees(angles[1])
    # A: -160 < phi < 0 and -70 < psi < 60
    if -160 < phi_deg < 0 and -70 < psi_deg < 60:
        return 1
    # P: 0 < phi < 160 and -60 < psi < 95
    if 0 < phi_deg < 160 and -60 < psi_deg < 95:
        return 2
    return 3
def plot_ramachandran(predictions, title):
    """Scatter-plot psi against phi (degrees) and save to ./plots/<title>.png.

    predictions: array of shape (proteins, residues, 2) holding radians.
    """
    # BUG FIX: predictions[:][:][0] is just predictions[0] (chained copies
    # then the first protein), which mixed phi AND psi values of one protein.
    # The intent -- per the axis labels -- is all phi in column 0 and all
    # psi in column 1 across every protein.
    phi_angles = [math.degrees(a) for a in predictions[:, :, 0].flatten()]
    psi_angles = [math.degrees(a) for a in predictions[:, :, 1].flatten()]
    colors = np.random.rand(len(psi_angles))
    fig = plt.figure()
    plt.xlim([-180, 180])
    plt.ylim([-180, 180])
    plt.title(title)
    plt.xlabel('phi')
    plt.ylabel('psi')
    plt.grid()
    plt.scatter(phi_angles, psi_angles, alpha=0.5, c=colors)
    fig.savefig("./plots/" + title + ".png", bbox_inches='tight')
    # plt.show()
    plt.close()
def plot_loss(loss_arr):
    """Plot the collected per-batch loss values in a new figure.

    NOTE(review): relies on the module-level global `plot_legend`, which is
    only created inside the training loop below -- calling this function
    before training starts would raise NameError; verify intended usage.
    """
    l = plt.figure()
    plt.plot(loss_arr)
    plt.title('Loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(plot_legend, loc='upper left')
    l.show()
def make_hparam_string(layers_num, channels_num, test_session):
    """Build the run identifier used as the TensorBoard log subdirectory."""
    return "nl_{},nc_{}, session{}".format(layers_num, channels_num, test_session)
def convert_to_degrees(arr):
    """Convert the (phi, psi) pair at arr[0]/arr[1] from radians to degrees.

    Mutates *arr* in place and also returns it, so it can be used with
    np.apply_along_axis.
    """
    arr[0], arr[1] = math.degrees(arr[0]), math.degrees(arr[1])
    return arr
# Load precomputed per-residue features: (proteins, 700 residues, 69 columns).
data = np.load('phipsi_features.npz')['features']
all_data = data.reshape(data.shape[0],700,69)
# all_data = all_data[0:300]
# Input features: three 21-column groups (columns 0..62) concatenated.
all_sets = all_data[:,:,0:21]
all_sets = np.concatenate([all_sets, all_data[:,:,21:42]], axis=-1)
all_sets = np.concatenate([all_sets, all_data[:,:,42:63]], axis=-1)
# all_labels = all_data[:,:,63:67]
# Targets: phi/psi angles (columns 67..68); NaN padding is zeroed, and
# (0, 0) pairs are later treated as padding by calculate_accuracy.
all_angles = all_data[:,:,67:69]
where_are_NaNs = np.isnan(all_angles)
all_angles[where_are_NaNs] = 0.0
k_fold = KFold(n_splits=num_splits)
# Hyper-parameter grid: (hidden layer count, channels per layer).
layers_channels = [(6, 100), (7, 100)]
# Build the convolutional network, then train and cross-validate it for
# every hyper-parameter combination (layers/channels x L2 x early stopping).
for layers_num, channels_num in layers_channels:
    for use_l2 in [False, True]:
        for use_early_stopping in [True, False]:
            crossvalidation_train_accuracy = 0
            crossvalidation_test_accuracy = 0
            crossvalidation_accuracy_distr = {'total': 0, 1: 0, 2: 0, 3: 0}
            crossvalidation_test_mae = 0
            executed_epochs = 0
            train_session = 0
            test_session = 0
            learning_rate_type = 1
            for train_index, test_index in k_fold.split(all_sets):
                train_set, test_set = all_sets[train_index], all_sets[test_index]
                train_labels, test_labels = all_angles[train_index], all_angles[test_index]
                train_size = train_set.shape[0]
                train_y = train_labels
                test_y = test_labels
                test_session += 1
                # Create the model
                x = tf.placeholder(tf.float32, [None, 700, train_set[0].shape[-1]], name="x")
                # Define loss and optimizer
                y_ = tf.placeholder(tf.float32, [None, 700, values_to_predict], name="labels")
                y_nn, regularizers = convnn(x, channels_num, layers_num, window_size)
                prediction = y_nn
                with tf.name_scope("loss"):
                    # Angular loss: squared shortest angular distance via atan2,
                    # so e.g. -179 deg vs +179 deg counts as a 2 deg error.
                    deviations = tf.subtract(prediction, y_)
                    ae = tf.abs(deviations)
                    mae = tf.reduce_mean(ae)
                    atan2 = tf.atan2(tf.sin(deviations), tf.cos(deviations))
                    loss = tf.square(atan2, name="loss")
                    mean_loss = tf.reduce_mean(loss)
                    loss_summary = tf.summary.scalar("loss", mean_loss)
                with tf.name_scope("loss2"):
                    # Alternative plain squared-error loss; built but unused below.
                    phi = prediction[:, :, 0]
                    phi0 = y_[:, :, 0]
                    psi = prediction[:, :, 1]
                    psi0 = y_[:,:, 1]
                    # cos_phi_diff = tf.square(tf.subtract(tf.cos(phi), tf.cos(phi0)))
                    # sin_phi_diff = tf.square(tf.subtract(tf.sin(phi), tf.sin(phi0)))
                    # cos_psi_diff = tf.square(tf.subtract(tf.cos(psi), tf.cos(psi0)))
                    # sin_psi_diff = tf.square(tf.subtract(tf.sin(psi), tf.sin(psi0)))
                    # phi_squared_sum = tf.add(cos_phi_diff, sin_phi_diff)
                    # psi_squared_sum = tf.add(cos_psi_diff, sin_psi_diff)
                    phi_diff = tf.reduce_sum(tf.squared_difference(phi, phi0))/2
                    psi_diff = tf.reduce_sum(tf.squared_difference(psi, psi0))/2
                    loss2 = tf.add(phi_diff, psi_diff)
                with tf.name_scope("mse"):
                    # NOTE(review): `mse` is a full tensor, not a scalar;
                    # tf.summary.scalar on it should fail at graph build
                    # time -- confirm whether this scope ever executed.
                    mse = tf.squared_difference(prediction, y_)
                    mse_summary = tf.summary.scalar("mse", mse)
                with tf.name_scope("l2_loss"):
                    l2_loss = beta * regularizers
                    if (use_l2):
                        loss = loss + l2_loss
                    loss = tf.reduce_mean(loss)
                    l2_summary = tf.summary.scalar("l2_loss", l2_loss)
                with tf.name_scope("train"):
                    # Use Adam optimizer
                    optimization = tf.train.AdamOptimizer(learning_rate).minimize(loss)
                # with tf.name_scope("accuracy"):
                #     correct_prediction = tf.equal(prediction, y)
                #     accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                #     tf.summary.scalar("accuracy", accuracy)
                summ = tf.summary.merge_all()
                print("Window size: " + str(window_size))
                print("Layers: " + str(layers_num))
                print("Channels: " + str(channels_num))
                print("Beta: " + str(beta))
                print("Use L2: " + str(use_l2))
                print("Use Early stopping: " + str(use_early_stopping))
                sess = tf.InteractiveSession()
                init = tf.global_variables_initializer()
                sess.run(init)
                saver = tf.train.Saver()
                min_delta = 0.01
                plot_legend = []
                previous_epoch_min = 100
                min_validation_loss = 100
                for epoch in range(training_epochs):
                    train_session += 1
                    loss_arr = []
                    previous_batch_loss = 0.0
                    patience = 6
                    patience_cnt = 0
                    hparam = make_hparam_string(layers_num, channels_num, train_session)
                    writer = tf.summary.FileWriter(LOGDIR + hparam)
                    writer.add_graph(sess.graph)
                    total_batches = int(train_size/batch_size)
                    # Loop over all batches
                    for i in range(total_batches):
                        start_index = i * batch_size
                        stop_index = (i+1) * batch_size
                        batch_x = train_set[start_index:stop_index]
                        batch_y = train_y[start_index:stop_index]
                        # Run optimization op
                        # backprop and cost op (to get loss value)
                        if i % 5 == 0:
                            # Every 5th batch: log loss, checkpoint, and score.
                            batch_predictions, l_summ, batch_loss = sess.run([prediction, loss_summary, loss], feed_dict={x: batch_x, y_: batch_y, keep_prob: dropout_keep_rate, keep_prob_input: 0.8})
                            writer.add_summary(l_summ, i+1)
                            loss_arr.append(batch_loss)
                            saver.save(sess, os.path.join(LOGDIR, "model.ckpt"), i)
                            # batch_predictions = np.apply_along_axis(convert_to_degrees, 2, batch_predictions)
                            batch_accuracy, batch_distr = calculate_accuracy(batch_predictions, batch_y)
                            # print('step %d, training accuracy %g' % (i, np.average(batch_accuracy)))
                            # early stopping
                            # NOTE(review): this breaks when the batch loss drops
                            # BELOW the previous epoch minimum (an improvement),
                            # which looks inverted for "early stopping" -- confirm.
                            if(use_early_stopping):
                                if (epoch > 2 and i > total_batches / 2 and batch_loss < previous_epoch_min):
                                    previous_epoch_min = min(loss_arr)
                                    print("Early stopping!!")
                                    break
                        # NOTE(review): the training step feeds no dropout
                        # probabilities, so keep_prob defaults to 1.0 (no
                        # dropout during the actual weight update) -- confirm.
                        optimization.run(feed_dict={x: batch_x, y_: batch_y})
                    previous_epoch_min = min(loss_arr)
                    # Display logs per epoch step
                    if epoch % display_step == 0:
                        predictions, train_loss = sess.run([prediction,loss], feed_dict={x: train_set, y_: train_y, keep_prob: dropout_keep_rate, keep_prob_input: 0.8})
                        # predictions = np.apply_along_axis(convert_to_degrees, 2, predictions)
                        # plot_ramachandran(train_y, "Real values_"+str(epoch))
                        # raw_input()
                        train_accuracy, train_acc_distr = calculate_accuracy(predictions, train_y)
                        train_accuracy = np.average(train_accuracy)
                        crossvalidation_train_accuracy += train_accuracy
                        plot_legend.append('train_' + str(epoch))
                        # plot_loss(loss_arr)
                        # print("Training accuracy: ", \
                        #     "{:.6f}".format(train_accuracy))
                        if (epoch > training_epochs / 2):
                            # Second half of training: also evaluate on the held-out fold.
                            valid_predictions, valid_loss, valid_mae = sess.run([prediction, loss, mae], feed_dict={x: test_set, y_: test_y})
                            # valid_predictions = np.apply_along_axis(convert_to_degrees, 2, valid_predictions)
                            valid_accuracy, valid_acc_distr = calculate_accuracy(valid_predictions, test_y)
                            valid_accuracy = np.average(valid_accuracy)
                            if (epoch >= training_epochs - 1):
                                # Last scheduled epoch: keep training while the
                                # validation loss is still improving.
                                if (valid_loss < min_validation_loss):
                                    training_epochs += 1
                                    print("INCREASING EPOCHS")
                                else:
                                    crossvalidation_test_accuracy += valid_accuracy
                                    crossvalidation_test_mae += valid_mae
                                    for label in valid_acc_distr:
                                        crossvalidation_accuracy_distr[label] += valid_acc_distr[label]
                                    print(crossvalidation_accuracy_distr)
                            if (epoch >= training_epochs - 2):
                                min_validation_loss = valid_loss
                            print(valid_acc_distr)
                            print("Validation accuracy: ", \
                                "{:.6f}".format(valid_accuracy))
                    executed_epochs += 1
                # Test trained model
                test_predictions, test_summ, test_mae = sess.run([prediction, loss_summary, mae], feed_dict={x: test_set, y_: test_y})
                writer.add_summary(test_summ, i + 1)
                test_accuracy, test_acc_distr = calculate_accuracy(test_predictions, test_y)
                plot_ramachandran(test_predictions, "Predictions Fold "+str(test_session))
                plot_ramachandran(test_y, "Real values Fold "+str(test_session))
                # plot_legend.append('validation')
                print(test_acc_distr)
                # test_accuracy = np.average(test_accuracy)
                # crossvalidation_test_accuracy += test_accuracy
                # crossvalidation_test_mae += test_mae
                # print("Testing accuracy: ", \
                #     "{:.6f}".format(test_accuracy))
            # Average the per-region accuracy distribution over the K folds.
            for label in crossvalidation_accuracy_distr:
                crossvalidation_accuracy_distr[label] /= num_splits
            print(crossvalidation_accuracy_distr)
            # print("Final Testing DISTR: ", \
            #     "{:.6f}".format(crossvalidation_test_mae / num_splits))
            print("Final Testing MAE: ", \
                "{:.6f}".format(crossvalidation_test_mae / num_splits))
            # print("Final Training accuracy: ", \
            #     "{:.6f}".format(crossvalidation_train_accuracy / (num_splits*training_epochs)))
            print("Final Test accuracy: ", \
                "{:.6f}".format(crossvalidation_test_accuracy / num_splits))
print('Run `tensorboard --logdir=%s` to see the results.' % LOGDIR)
# valid_predictions = sess.run(tf.argmax(prediction, 2), feed_dict={x: valid_x, y_: valid_y})
# valid_labels = np.argmax(valid_y, 2)
# valid_accuracy = calculate_accuracy(valid_predictions, valid_labels)
# print("Validation accuracy: ", \
# "{:.6f}".format(valid_accuracy)) | cnn_phi_psi.py | 19,024 | bias_variable generates a bias variable of a given shape.
conv1d returns a 1d convolution layer.
Convert all phi and psi angles to degrees
weight_variable generates a weight variable of a given shape.
Parameters First convolutional layer Hidden layers Output convolutional layer layer_out = tf.atan2(tf.sin(layer_out), tf.cos(layer_out)) Loss function with L2 Regularization with beta=0.001 regularizers = tf.constant(0, dtype=tf.float32) for W in W_arr: regularizers += tf.nn.l2_loss(W) MaxPool2D wrapper correct_predictions += 1 print("REAL PHI->>>>>"+str(labels[i][j][0])) print("PREDICTED PHI->>>>>" + str(predictions[i][j][0])) print("CORRECT->>>>>"+str(correct_predictions)) print("TOTAL->>>>>" + str(total_predictions)) A: -160 < phi <0 and -70 < psi < 60 P: 0 < phi < 160 and -60 < psi < 95 plt.show() fig.savefig("./plots/" + title + ".png", bbox_inches='tight') all_data = all_data[0:300] all_labels = all_data[:,:,63:67] Build the convolutional network Create the model Define loss and optimizer print(tf.shape(prediction)) print(tf.shape(y_)) cos_phi_diff = tf.square(tf.subtract(tf.cos(phi), tf.cos(phi0))) sin_phi_diff = tf.square(tf.subtract(tf.sin(phi), tf.sin(phi0))) cos_psi_diff = tf.square(tf.subtract(tf.cos(psi), tf.cos(psi0))) sin_psi_diff = tf.square(tf.subtract(tf.sin(psi), tf.sin(psi0))) phi_squared_sum = tf.add(cos_phi_diff, sin_phi_diff) psi_squared_sum = tf.add(cos_psi_diff, sin_psi_diff) Use Adam optimizer with tf.name_scope("accuracy"): correct_prediction = tf.equal(prediction, y) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) tf.summary.scalar("accuracy", accuracy) Loop over all batches Run optimization op backprop and cost op (to get loss value) batch_predictions = np.apply_along_axis(convert_to_degrees, 2, batch_predictions) print('step %d, training accuracy %g' % (i, np.average(batch_accuracy))) early stopping Display logs per epoch step predictions = np.apply_along_axis(convert_to_degrees, 2, predictions) plot_ramachandran(train_y, "Real values_"+str(epoch)) raw_input() plot_loss(loss_arr) print("Training accuracy: ", \ "{:.6f}".format(train_accuracy)) valid_predictions = np.apply_along_axis(convert_to_degrees, 2, 
valid_predictions) Test trained model plot_legend.append('validation') test_accuracy = np.average(test_accuracy) crossvalidation_test_accuracy += test_accuracy crossvalidation_test_mae += test_mae print("Testing accuracy: ", \ "{:.6f}".format(test_accuracy)) print("Final Testing DISTR: ", \ "{:.6f}".format(crossvalidation_test_mae / num_splits)) print("Final Training accuracy: ", \ "{:.6f}".format(crossvalidation_train_accuracy / (num_splits*training_epochs))) valid_predictions = sess.run(tf.argmax(prediction, 2), feed_dict={x: valid_x, y_: valid_y}) valid_labels = np.argmax(valid_y, 2) valid_accuracy = calculate_accuracy(valid_predictions, valid_labels) print("Validation accuracy: ", \ "{:.6f}".format(valid_accuracy)) | 2,975 | en | 0.432655 |
"""desafio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
# Route the Django admin site; delegate every other URL to the `vagas`
# application's own URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('vagas.urls')),
]
| desafio/urls.py | 796 | desafio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) | 623 | en | 0.621021 |
import os
import random
import threading
from time import sleep
from unittest import TestCase
import asn1tools
import wx
import asn1editor
from asn1editor.wxPython.ViewSelect import ViewType
from tests import testHelper
def actions(main_window: asn1editor.wxPython.MainWindow):
    """Monkey-test driver: inject random keystrokes into the main window.

    Runs on a worker thread; afterwards tries to save the edited data
    (constraint violations from random input are expected and ignored),
    then closes the window and stops the wx main loop.
    """
    def get_children(window: wx.Window):
        # Recursively collect every descendant widget of *window*.
        # NOTE(review): defined but never called in this function -- possibly
        # leftover from an earlier variant that targeted child widgets.
        my_children = window.GetChildren()
        if my_children is not None:
            their_children = []
            for my_child in my_children:
                their_children += get_children(my_child)
            return list(my_children) + their_children
        else:
            return []
    sleep(1)  # give the UI time to appear before injecting events
    # Navigation/edit keys plus the digit characters '1'..'8'.
    key_codes = [wx.WXK_TAB, wx.WXK_DOWN, wx.WXK_UP, wx.WXK_LEFT, wx.WXK_RIGHT, wx.WXK_SPACE] + [c for c in range(ord('1'), ord('9'))]
    ui_sim = wx.UIActionSimulator()
    for _ in range(1000):
        main_window.SetFocus()
        key_code = random.choice(key_codes)
        ui_sim.KeyDown(key_code)
        ui_sim.KeyUp(key_code)
    try:
        main_window.save_data_to_file('test.json')
    except asn1tools.ConstraintsError:
        # Random input may legitimately violate ASN.1 constraints.
        pass
    main_window.Close(True)
    wx.GetApp().ExitMainLoop()
class MonkeyTest(TestCase):
    """Fuzz the asn1editor wxPython UI with random keystrokes."""
    @staticmethod
    def test_monkey():
        # Skip on CI: the test needs a real display/window system.
        if os.getenv('TRAVIS') is not None or os.getenv('GITHUB_ACTIONS') is not None:
            return
        # noinspection PyUnusedLocal
        app = testHelper.get_wx_app()
        main_window = asn1editor.wxPython.MainWindow()
        main_window.select_view(ViewType.GROUPS)
        test_types = [('example/example.asn', 'EXAMPLE.Sequence')]
        for spec, type_ in test_types:
            main_window.load_spec(spec, type_)
            # Drive the UI from a worker thread while the main loop runs here.
            action_thread = threading.Thread(target=actions, args=[main_window])
            action_thread.start()
            main_window.Show()
            app.MainLoop()
            action_thread.join(timeout=0.0)
| tests/test_MonkeyTest.py | 1,875 | noinspection PyUnusedLocal | 26 | en | 0.123598 |
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"


class SDL2Conan(ConanFile):
    """Conan recipe building SDL2 from source with CMake.

    Exposes the library's platform-specific backends (audio systems, X11
    extensions, video drivers, DirectX, Vulkan, ...) as Conan options and
    maps them onto the corresponding CMake cache variables.
    """
    # TODO: When porting to CCI rename this package to SDL (without 2)
    name = "sdl2"
    description = "Access to audio, keyboard, mouse, joystick, and graphics hardware via OpenGL, Direct3D and Vulkan"
    topics = ("sdl2", "audio", "keyboard", "graphics", "opengl")
    url = "https://github.com/bincrafters/conan-sdl2"
    homepage = "https://www.libsdl.org"
    license = "Zlib"
    exports_sources = ["CMakeLists.txt", "patches/*"]
    generators = ["cmake", "pkg_config"]
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "directx": [True, False],
        "alsa": [True, False],
        "jack": [True, False],
        "pulse": [True, False],
        "sndio": [True, False],
        "nas": [True, False],
        "esd": [True, False],
        "arts": [True, False],
        "x11": [True, False],
        "xcursor": [True, False],
        "xinerama": [True, False],
        "xinput": [True, False],
        "xrandr": [True, False],
        "xscrnsaver": [True, False],
        "xshape": [True, False],
        "xvm": [True, False],
        "wayland": [True, False],
        "directfb": [True, False],
        "iconv": [True, False],
        "video_rpi": [True, False],
        "sdl2main": [True, False],
        "opengl": [True, False],
        "opengles": [True, False],
        "vulkan": [True, False],
        "libunwind": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "directx": True,
        "alsa": True,
        "jack": True,
        "pulse": True,
        "sndio": False,
        "nas": True,
        "esd": False,
        "arts": False,
        "x11": True,
        "xcursor": True,
        "xinerama": True,
        "xinput": True,
        "xrandr": True,
        "xscrnsaver": True,
        "xshape": True,
        "xvm": True,
        "wayland": False,
        "directfb": False,
        "iconv": True,
        "video_rpi": False,
        "sdl2main": True,
        "opengl": True,
        "opengles": True,
        "vulkan": True,
        "libunwind": True,
    }

    _source_subfolder = "source_subfolder"
    _build_subfolder = "build_subfolder"
    _cmake = None  # cached CMake helper, created lazily in _configure_cmake()

    def config_options(self):
        """Remove options that do not apply to the target OS."""
        if self.settings.os == "Windows":
            del self.options.fPIC
        if self.settings.os != "Linux":
            # Linux-only audio/video backend options.
            del self.options.alsa
            del self.options.jack
            del self.options.pulse
            del self.options.sndio
            del self.options.nas
            del self.options.esd
            del self.options.arts
            del self.options.x11
            del self.options.xcursor
            del self.options.xinerama
            del self.options.xinput
            del self.options.xrandr
            del self.options.xscrnsaver
            del self.options.xshape
            del self.options.xvm
            del self.options.wayland
            del self.options.directfb
            del self.options.video_rpi
            del self.options.libunwind
        if self.settings.os != "Windows":
            del self.options.directx

    def configure(self):
        """Finalise settings once options are known; reject bad combinations."""
        if self.options.shared:
            del self.options.fPIC
        # Plain C library: the C++ standard/runtime settings are irrelevant.
        del self.settings.compiler.libcxx
        del self.settings.compiler.cppstd
        if self.settings.os == "Macos" and not self.options.iconv:
            raise ConanInvalidConfiguration("On macOS iconv can't be disabled")

    def requirements(self):
        """Declare dependencies according to the enabled backends."""
        if self.options.iconv:
            self.requires("libiconv/1.16")
        if self.settings.os == "Linux":
            self.requires("xorg/system")
            if self.options.alsa:
                self.requires("libalsa/1.2.4")
            if self.options.pulse:
                self.requires("pulseaudio/13.0")
        if self.options.opengl:
            self.requires("opengl/system")
        if self.options.get_safe("libunwind", False):
            self.requires("libunwind/1.5.0")

    def package_id(self):
        # sdl2main only selects whether consumers link the SDL2main helper
        # library; it does not change the produced binaries' package ID.
        del self.info.options.sdl2main

    def build_requirements(self):
        """pkg-config is needed on Linux to locate the system backends."""
        if self.settings.os == "Linux":
            self.build_requires("pkgconf/1.7.3")

    def system_requirements(self):
        """Install backend -dev packages via apt/yum when building on Linux."""
        if self.settings.os == "Linux" and tools.os_info.is_linux:
            if tools.os_info.with_apt or tools.os_info.with_yum:
                installer = tools.SystemPackageTool()
                packages = []
                packages_apt = []
                packages_yum = []
                packages_apt.append("libgbm-dev")
                packages_yum.append("mesa-libgbm-devel")
                if self.options.jack:
                    packages_apt.append("libjack-dev")
                    packages_yum.append("jack-audio-connection-kit-devel")
                if self.options.sndio:
                    packages_apt.append("libsndio-dev")
                if self.options.nas:
                    packages_apt.append("libaudio-dev")
                    packages_yum.append("nas-devel")
                if self.options.esd:
                    packages_apt.append("libesd0-dev")
                    packages_yum.append("esound-devel")
                if self.options.arts:
                    packages_apt.append("artsc0-dev")
                if self.options.wayland:
                    packages_apt.extend(["libwayland-dev",
                                         "wayland-protocols"])
                    packages_yum.extend(["wayland-devel",
                                         "wayland-protocols-devel"])
                if self.options.directfb:
                    packages_apt.append("libdirectfb-dev")
                if tools.os_info.with_apt:
                    packages = packages_apt
                elif tools.os_info.with_yum:
                    packages = packages_yum
                for package in packages:
                    installer.install(package)

    def source(self):
        """Fetch and unpack the SDL2 source tarball."""
        tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)

    def build(self):
        """Apply patches, tweak the upstream CMakeLists, then build."""
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            tools.patch(**patch)
        if tools.Version(self.version) >= "2.0.14":
            # Disable detection of the libc builtin iconv by commenting out
            # the upstream check (the libiconv dependency is used instead).
            tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
                                  'check_library_exists(c iconv_open "" HAVE_BUILTIN_ICONV)',
                                  '# check_library_exists(c iconv_open "" HAVE_BUILTIN_ICONV)')
        self._build_cmake()

    def _check_pkg_config(self, option, package_name):
        """Fail early if *option* is enabled but pkg-config can't find it."""
        if option:
            pkg_config = tools.PkgConfig(package_name)
            if not pkg_config.provides:
                raise ConanInvalidConfiguration("package %s is not available" % package_name)

    def _check_dependencies(self):
        """Verify that all enabled system backends are actually installed."""
        if self.settings.os == "Linux":
            self._check_pkg_config(self.options.jack, "jack")
            self._check_pkg_config(self.options.esd, "esound")
            self._check_pkg_config(self.options.wayland, "wayland-client")
            self._check_pkg_config(self.options.wayland, "wayland-protocols")
            self._check_pkg_config(self.options.directfb, "directfb")

    def _configure_cmake(self):
        """Create (once) and return the CMake helper with all definitions set."""
        if not self._cmake:
            self._check_dependencies()
            self._cmake = CMake(self)
            # FIXME: self.install_folder not defined? Neccessary?
            self._cmake.definitions["CONAN_INSTALL_FOLDER"] = self.install_folder
            if self.settings.os != "Windows":
                if not self.options.shared:
                    self._cmake.definitions["SDL_STATIC_PIC"] = self.options.fPIC
            if self.settings.compiler == "Visual Studio" and not self.options.shared:
                self._cmake.definitions["HAVE_LIBC"] = True
            self._cmake.definitions["SDL_SHARED"] = self.options.shared
            self._cmake.definitions["SDL_STATIC"] = not self.options.shared
            self._cmake.definitions["VIDEO_OPENGL"] = self.options.opengl
            self._cmake.definitions["VIDEO_OPENGLES"] = self.options.opengles
            self._cmake.definitions["VIDEO_VULKAN"] = self.options.vulkan
            if self.settings.os == "Linux":
                # See https://github.com/bincrafters/community/issues/696
                self._cmake.definitions["SDL_VIDEO_DRIVER_X11_SUPPORTS_GENERIC_EVENTS"] = 1
                self._cmake.definitions["ALSA"] = self.options.alsa
                if self.options.alsa:
                    self._cmake.definitions["HAVE_ASOUNDLIB_H"] = True
                    self._cmake.definitions["HAVE_LIBASOUND"] = True
                self._cmake.definitions["JACK"] = self.options.jack
                self._cmake.definitions["PULSEAUDIO"] = self.options.pulse
                self._cmake.definitions["SNDIO"] = self.options.sndio
                self._cmake.definitions["NAS"] = self.options.nas
                self._cmake.definitions["VIDEO_X11"] = self.options.x11
                if self.options.x11:
                    self._cmake.definitions["HAVE_XEXT_H"] = True
                self._cmake.definitions["VIDEO_X11_XCURSOR"] = self.options.xcursor
                if self.options.xcursor:
                    self._cmake.definitions["HAVE_XCURSOR_H"] = True
                self._cmake.definitions["VIDEO_X11_XINERAMA"] = self.options.xinerama
                if self.options.xinerama:
                    self._cmake.definitions["HAVE_XINERAMA_H"] = True
                self._cmake.definitions["VIDEO_X11_XINPUT"] = self.options.xinput
                if self.options.xinput:
                    self._cmake.definitions["HAVE_XINPUT_H"] = True
                self._cmake.definitions["VIDEO_X11_XRANDR"] = self.options.xrandr
                if self.options.xrandr:
                    self._cmake.definitions["HAVE_XRANDR_H"] = True
                self._cmake.definitions["VIDEO_X11_XSCRNSAVER"] = self.options.xscrnsaver
                if self.options.xscrnsaver:
                    self._cmake.definitions["HAVE_XSS_H"] = True
                self._cmake.definitions["VIDEO_X11_XSHAPE"] = self.options.xshape
                if self.options.xshape:
                    self._cmake.definitions["HAVE_XSHAPE_H"] = True
                self._cmake.definitions["VIDEO_X11_XVM"] = self.options.xvm
                if self.options.xvm:
                    self._cmake.definitions["HAVE_XF86VM_H"] = True
                self._cmake.definitions["VIDEO_WAYLAND"] = self.options.wayland
                self._cmake.definitions["VIDEO_DIRECTFB"] = self.options.directfb
                self._cmake.definitions["VIDEO_RPI"] = self.options.video_rpi
            elif self.settings.os == "Windows":
                self._cmake.definitions["DIRECTX"] = self.options.directx
            self._cmake.definitions["HAVE_LIBUNWIND_H"] = self.options.get_safe("libunwind")
            self._cmake.configure(build_dir=self._build_subfolder)
        return self._cmake

    def _build_cmake(self):
        """Configure and run the CMake build."""
        if self.options.get_safe("pulse"):
            # NOTE(review): renames the generated libpulse.pc so the build
            # finds libpulse-simple; libpulse.pc itself is gone afterwards —
            # confirm this is intended (a copy would keep both).
            tools.rename("libpulse.pc", "libpulse-simple.pc")
        lib_paths = [lib for dep in self.deps_cpp_info.deps for lib in self.deps_cpp_info[dep].lib_paths]
        with tools.environment_append({"LIBRARY_PATH": os.pathsep.join(lib_paths)}):
            cmake = self._configure_cmake()
            cmake.build()

    def package(self):
        """Install the build output and strip files Conan should not ship."""
        self.copy(pattern="COPYING.txt", dst="licenses", src=self._source_subfolder)
        cmake = self._configure_cmake()
        cmake.install()
        tools.remove_files_by_mask(os.path.join(self.package_folder, "bin"), "sdl2-config")
        tools.rmdir(os.path.join(self.package_folder, "cmake"))
        tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
        tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
        tools.rmdir(os.path.join(self.package_folder, "libdata"))
        tools.rmdir(os.path.join(self.package_folder, "share"))

    def _add_libraries_from_pc(self, library, static=None):
        """Append *library*'s pkg-config link flags to the libsdl2 component."""
        if static is None:
            static = not self.options.shared
        pkg_config = tools.PkgConfig(library, static=static)
        libs = [lib[2:] for lib in pkg_config.libs_only_l]  # cut -l prefix
        lib_paths = [lib[2:] for lib in pkg_config.libs_only_L]  # cut -L prefix
        self.cpp_info.components["libsdl2"].system_libs.extend(libs)
        self.cpp_info.components["libsdl2"].libdirs.extend(lib_paths)
        self.cpp_info.components["libsdl2"].sharedlinkflags.extend(pkg_config.libs_only_other)
        self.cpp_info.components["libsdl2"].exelinkflags.extend(pkg_config.libs_only_other)

    def package_info(self):
        """Describe the packaged libraries and their platform link deps."""
        self.cpp_info.names["cmake_find_package"] = "SDL2"
        self.cpp_info.names["cmake_find_package_multi"] = "SDL2"
        postfix = "d" if self.settings.build_type == "Debug" else ""
        # SDL2
        sdl2_cmake_target = "SDL2" if self.options.shared else "SDL2-static"
        self.cpp_info.components["libsdl2"].names["cmake_find_package"] = sdl2_cmake_target
        self.cpp_info.components["libsdl2"].names["cmake_find_package_multi"] = sdl2_cmake_target
        self.cpp_info.components["libsdl2"].includedirs.append(os.path.join("include", "SDL2"))
        self.cpp_info.components["libsdl2"].libs = ["SDL2" + postfix]
        if self.options.iconv:
            self.cpp_info.components["libsdl2"].requires.append("libiconv::libiconv")
        if self.settings.os == "Linux":
            self.cpp_info.components["libsdl2"].system_libs = ["dl", "rt", "pthread"]
            self.cpp_info.components["libsdl2"].requires.append("xorg::xorg")
            if self.options.alsa:
                self.cpp_info.components["libsdl2"].requires.append("libalsa::libalsa")
            if self.options.pulse:
                self.cpp_info.components["libsdl2"].requires.append("pulseaudio::pulseaudio")
            if self.options.opengl:
                self.cpp_info.components["libsdl2"].requires.append("opengl::opengl")
            if self.options.jack:
                self._add_libraries_from_pc("jack")
            if self.options.sndio:
                self._add_libraries_from_pc("sndio")
            if self.options.nas:
                self.cpp_info.components["libsdl2"].system_libs.append("audio")
            if self.options.esd:
                self._add_libraries_from_pc("esound")
            if self.options.directfb:
                self._add_libraries_from_pc("directfb")
            if self.options.video_rpi:
                # Raspberry Pi VideoCore libraries live outside the sysroot.
                self.cpp_info.components["libsdl2"].system_libs.append("bcm_host")
                self.cpp_info.components["libsdl2"].includedirs.extend([
                    "/opt/vc/include",
                    "/opt/vc/include/interface/vcos/pthreads",
                    "/opt/vc/include/interface/vmcs_host/linux"
                ])
                self.cpp_info.components["libsdl2"].libdirs.append("/opt/vc/lib")
                self.cpp_info.components["libsdl2"].sharedlinkflags.append("-Wl,-rpath,/opt/vc/lib")
                self.cpp_info.components["libsdl2"].exelinkflags.append("-Wl,-rpath,/opt/vc/lib")
        elif self.settings.os == "Macos":
            self.cpp_info.components["libsdl2"].frameworks = ["Cocoa", "Carbon", "IOKit", "CoreVideo", "CoreAudio", "AudioToolbox", "ForceFeedback"]
            if tools.Version(self.version) >= "2.0.14":
                self.cpp_info.components["libsdl2"].frameworks.append("Metal")
        elif self.settings.os == "Windows":
            self.cpp_info.components["libsdl2"].system_libs = ["user32", "gdi32", "winmm", "imm32", "ole32", "oleaut32", "version", "uuid", "advapi32", "setupapi", "shell32"]
            if self.settings.compiler == "gcc":
                self.cpp_info.components["libsdl2"].system_libs.append("mingw32")
        if self.options.get_safe("libunwind"):
            self.cpp_info.components["libsdl2"].requires.append("libunwind::libunwind")
        # SDL2main
        if self.options.sdl2main:
            self.cpp_info.components["sdl2main"].names["cmake_find_package"] = "SDL2main"
            self.cpp_info.components["sdl2main"].names["cmake_find_package_multi"] = "SDL2main"
            self.cpp_info.components["sdl2main"].libs = ["SDL2main" + postfix]
            self.cpp_info.components["sdl2main"].requires = ["libsdl2"]
| recipes/sdl2/all/conanfile.py | 16,556 | TODO: When porting to CCI rename this package to SDL (without 2) FIXME: self.install_folder not defined? Neccessary? See https://github.com/bincrafters/community/issues/696 cut -l prefix cut -L prefix SDL2 SDL2main | 214 | en | 0.741069 |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db.sqlalchemy import db
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.core.settings.models.base import JSONSettingsBase, PrincipalSettingsBase
from indico.util.decorators import strict_classproperty
from indico.util.string import return_ascii
class CoreSettingsMixin(object):
    """Shared table arguments for the core settings models."""

    @strict_classproperty
    @staticmethod
    def __auto_table_args():
        # Composite index for fast (module, name) lookups; all settings
        # tables live in the 'indico' schema.
        return (db.Index(None, 'module', 'name'),
                {'schema': 'indico'})
class Setting(JSONSettingsBase, CoreSettingsMixin, db.Model):
    """A global (module, name) -> JSON value setting."""

    @strict_classproperty
    @staticmethod
    def __auto_table_args():
        # Each (module, name) pair may appear only once.
        return db.UniqueConstraint('module', 'name'),

    @declared_attr
    def __table_args__(cls):
        # Merge the name-mangled __auto_table_args from all base classes.
        return auto_table_args(cls)

    @return_ascii
    def __repr__(self):
        return '<Setting({}, {}, {!r})>'.format(self.module, self.name, self.value)
class SettingPrincipal(PrincipalSettingsBase, CoreSettingsMixin, db.Model):
    """An ACL entry associating a principal with a (module, name) setting."""

    # Name of the backref created on principal objects.
    principal_backref_name = 'in_settings_acls'

    @declared_attr
    def __table_args__(cls):
        # Merge the name-mangled __auto_table_args from all base classes.
        return auto_table_args(cls)

    @return_ascii
    def __repr__(self):
        return '<SettingPrincipal({}, {}, {!r})>'.format(self.module, self.name, self.principal)
| indico/core/settings/models/settings.py | 1,551 | This file is part of Indico. Copyright (C) 2002 - 2021 CERN Indico is free software; you can redistribute it and/or modify it under the terms of the MIT License; see the LICENSE file for more details. | 200 | en | 0.789267 |
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
# Load the training set and reshape to sequence-first layout
# (seq_len=10, batch, feature=1) as expected by nn.LSTM(batch_first=False).
train_data = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\train_data_10num.npy")
train_aim = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\train_label_10num.npy")
train_data = train_data.reshape(train_data.shape[0], 10, 1)
train_data = train_data.swapaxes(0, 1)
train_data = torch.from_numpy(train_data).type(torch.FloatTensor)
train_aim = torch.from_numpy(train_aim).type(torch.FloatTensor)
# Same preprocessing for the held-out test set.
test_data = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\test_data_10num.npy")
test_aim = np.load("E:\\quant_research\\train the rank of ten points\\RNN_point\\data\\test_label_10num.npy")
test_data = test_data.reshape(test_data.shape[0], 10, 1)
test_data = test_data.swapaxes(0, 1)
test_data = torch.from_numpy(test_data).type(torch.FloatTensor)
test_aim = torch.from_numpy(test_aim).type(torch.FloatTensor)
class Encoder(nn.Module):
    """Sequence encoder: a single-layer, optionally bidirectional LSTM."""

    def __init__(self, input_size, hidden_size, batch_size, bidirectional=True):
        super(Encoder, self).__init__()
        self.hidden_size = hidden_size
        self.input_size = input_size
        self.batch_size = batch_size
        self.bidirectional = bidirectional
        self.lstm = nn.LSTM(input_size, hidden_size, batch_first=False,
                            bidirectional=bidirectional)

    def forward(self, inputs, hidden):
        """Run the LSTM over *inputs* of shape (seq_len, batch, input_size).

        Returns the per-step outputs and the final (h, c) state.
        """
        encoded, state = self.lstm(inputs, hidden)
        return encoded, state

    def init_hidden(self):
        """Zero (h0, c0), each shaped (num_directions, batch, hidden_size)."""
        directions = 2 if self.bidirectional else 1
        shape = (directions, self.batch_size, self.hidden_size)
        return torch.zeros(shape), torch.zeros(shape)
class AttentionDecoder(nn.Module):
    """Single-step attention decoder producing a softmax distribution over
    the vocab_size (=10) positions, pointer-network style."""

    def __init__(self, hidden_size, output_size, batch_size, vocab_size, seq_len):
        super(AttentionDecoder, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.batch_size = batch_size
        self.seq_len = seq_len
        self.vocab_size = vocab_size
        # Scores one (decoder state, encoder output, previous output) triple.
        self.attn = nn.Linear(hidden_size + output_size + vocab_size, 1)
        self.lstm = nn.LSTM(hidden_size + vocab_size, output_size)
        self.final = nn.Linear(output_size, vocab_size)

    def init_hidden(self):
        """Zero (h0, c0) for the decoder LSTM: (1, batch, output_size)."""
        return (torch.zeros(1, self.batch_size, self.output_size),
                torch.zeros(1, self.batch_size, self.output_size))

    def forward(self, decoder_hidden, encoder_outputs, input):
        # NOTE(review): `seq` and `i` below are dead assignments, and the
        # `input` parameter (which shadows the builtin) is never used here.
        seq = 0
        weights = []
        i = 0
        output = torch.zeros(self.batch_size, self.vocab_size)
        # One attention score per encoder time step.
        for i in range(len(encoder_outputs)):
            weights.append(self.attn(torch.cat((decoder_hidden[0][:].squeeze(0), encoder_outputs[i], output), dim=1)))
        normalized_weight = F.softmax(torch.cat(weights, 1), 1)
        normalized_weights = normalized_weight
        # Context vector: attention-weighted sum of encoder outputs.
        attn_applied = torch.bmm(normalized_weight.unsqueeze(1),
                                 encoder_outputs.transpose(0, 1))
        input_lstm = torch.cat((attn_applied.transpose(0, 1)[0], output),
                               dim=1)  # if we are using embedding, use embedding of input here instead
        output_, hidden = self.lstm(input_lstm.unsqueeze(0), decoder_hidden)
        output = self.final(output_[0])  # output is (vocab_size, output_size)
        #output = self.final2(output)
        # hidden0 = hidden[0].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)
        # hidden1 = hidden[1].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)
        # decoder_hidden = (hidden0, hidden1)
        # decoder_hiddens = decoder_hidden
        out = F.softmax(output, 1)
        return out
# Model hyper-parameters.
seq_len = 10
input_size = 1
hidden_size = 2
batch_size = train_data.shape[1]
bidirectional = True
output_size = hidden_size * (1 + bidirectional)  # doubled for bidirectional
vocal_size = 10  # NOTE(review): sic — called "vocab_size" elsewhere

# Constant tensor (10, 10000, 10) passed to the decoder (which ignores it).
# NOTE(review): the name shadows the builtin `input`.
input = []
for i in range(10):
    m = np.ones((10000, 10)) * i
    input.append(m)
input = np.array(input)
input = torch.from_numpy(input).type(torch.FloatTensor)
class pointer_atten(nn.Module):
    """Encoder + attention decoder pipeline.

    NOTE(review): relies on module-level globals (input_size, hidden_size,
    batch_size, bidirectional, output_size, vocal_size, input), so it only
    works with the batch size fixed at construction of those globals.
    """

    def __init__(self):
        super(pointer_atten, self).__init__()
        self.layer1 = Encoder(input_size=input_size,
                              hidden_size=hidden_size,
                              batch_size=batch_size,
                              bidirectional=True)
        self.layer2 = AttentionDecoder(
            hidden_size=hidden_size * (1 + bidirectional),
            output_size=output_size,
            batch_size=batch_size,
            vocab_size=vocal_size,
            seq_len=1
        )

    def forward(self, x):
        output, hidden = self.layer1.forward(x, self.layer1.init_hidden())
        # Fold the bidirectional (2, batch, hidden) state into
        # (1, batch, 2*hidden) to seed the unidirectional decoder.
        hidden0 = hidden[0].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)
        hidden1 = hidden[1].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1)
        decoder_hidden = (hidden0, hidden1)
        encoder_outputs = output
        # The global `input` tensor is passed but unused by the decoder.
        last_output = self.layer2.forward(decoder_hidden, output, input)
        return last_output
# Build the model, loss and optimiser.
Net = pointer_atten()
learning_rate = 0.05
Loss = nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adam(Net.parameters(), lr=learning_rate)
###########################################
# train
###########################################
loss_list = []
True_list = []
num_epochs = 10000
epoch = 10000
batch = train_aim.detach().numpy().size
# Resume training from a previously saved checkpoint.
Net.load_state_dict(torch.load('E:\\quant_research\\train the rank of ten points\\RNN_point\\net_10num\\net720.pkl'))
for epoch in range(1000):
    train_data = Variable(train_data, requires_grad=True)
    train_aim = Variable(train_aim, requires_grad=True)
    # Forward pass
    outputs = Net(train_data)
    loss = Loss(outputs, train_aim)
    # NOTE(review): appends the loss *tensor* (retaining its autograd
    # graph), not a float; loss.item() would be cheaper.
    loss_list.append(loss)
    # Backward and optimize
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if (epoch) % 10 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'
              .format(epoch + 1, num_epochs, loss.item()))
        # Count predictions within 0.1 of the target: differences <= -0.1
        # are first mapped to 10 so only |diff| < 0.1 survives the second
        # threshold below.
        is_not = outputs.detach().numpy() - train_aim.detach().numpy()
        is_not = np.where(is_not < -0.1, 10, is_not)
        is_not = np.where(is_not < 0.1, 1, 0)
        T_pre = np.nansum(is_not)
        True_rate = T_pre / batch
        True_list.append(True_rate)
        print('accuracy of prediction in training data:', True_rate)
    if epoch % 10 == 0:
        torch.save(Net.state_dict(), 'E:\\quant_research\\train the rank of ten points\\\RNN_point\\net_10num\\net{}.pkl'.format(epoch))
# Persist and reload the training curves.
loss_array = np.array(loss_list)
true_array = np.array(True_list)
np.save('E:\\quant_research\\train the rank of ten points\\\RNN_point\\loss', loss_array)
np.save('E:\\quant_research\\train the rank of ten points\\\RNN_point\\true', true_array)
loss_array = np.load('E:\\quant_research\\train the rank of ten points\\\RNN_point\\loss.npy', allow_pickle=True)
true_array = np.load('E:\\quant_research\\train the rank of ten points\\\RNN_point\\true.npy')
# Final evaluation: argmax accuracy on train and test sets.
# NOTE(review): the print labels appear swapped — the first block evaluates
# *training* data but prints "testing data", and vice versa.
outputs = Net(train_data)
loss = Loss(outputs, train_aim)
label = np.argmax(outputs.detach().numpy(), axis=1)
label_aim = np.argmax(train_aim.detach().numpy(), axis=1)
True_rate = np.sum(label == label_aim) / 10000
print('loss in testing data:%.5f,accuracy of prediction in testing data:%.5f' % (loss, True_rate))
outputs = Net(test_data)
loss = Loss(outputs, test_aim)
label = np.argmax(outputs.detach().numpy(), axis=1)
label_aim = np.argmax(test_aim.detach().numpy(), axis=1)
True_rate = np.sum(label == label_aim) / 10000
print('loss in training data:%.5f,accuracy of prediction in training data:%.5f' % (loss, True_rate))
| pointer_network.py | 7,979 | (num_layers * num_directions, batch, hidden_size) if we are using embedding, use embedding of input here insteadoutput 为(vocab_size, output_size)output = self.final2(output) hidden0 = hidden[0].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1) hidden1 = hidden[1].transpose(0, 1).reshape(batch_size, 1, -1).transpose(0, 1) decoder_hidden = (hidden0, hidden1) decoder_hiddens = decoder_hidden train Forward pass Backward and optimize | 441 | en | 0.49538 |
from projections import *
from urllib2 import urlopen
from httplib import HTTPConnection
from threading import Thread
from kivy.logger import Logger
from kivy.loader import Loader
from os.path import join, dirname
import time, os
import hashlib
GMLNS = "http://www.opengis.net/gml"
try:
from pyproj import Proj
from lxml.etree import ElementTree as ET
except:
# try:
from xml.etree import ElementTree as ET
# except:
# pass
class WFSOverlayServer(object):
    """Loads WFS feature data (GML) from a server for display as a map overlay.

    NOTE(review): Python 2 code (``except Exception,e`` syntax, ``print``
    statements, ``urllib2``); it will not run unmodified on Python 3.
    """
    cache = {}  # class-level cache: md5(url) -> parsed geometries
    available_maptype = dict(roadmap = 'Roadmap') # default
    type = "wfs"  # TODO: replace handling in mapviewer with action handlers in the overlay class

    def __init__(self, progress_callback=None):
        # progress_callback is called with bytes loaded so far, -1 when done.
        self.progress_callback = progress_callback

    def setProgressCallback(self, progress_callback):
        """Replace the progress callback used by load()."""
        self.progress_callback = progress_callback

    def load(self, url):
        """Download *url* in 4 KiB chunks, reporting progress; return the data."""
        # read from internet
        blocksize = 4096
        self.progress_callback(0)
        fd = urlopen(url)
        idata = fd.read(blocksize)
        loaded = blocksize
        while True:
            bdata = fd.read(blocksize)
            if not bdata: break
            loaded += blocksize
            if self.progress_callback:
                self.progress_callback(loaded)
            idata += bdata
        fd.close()
        self.progress_callback(-1)  # -1 signals completion
        return idata

    def findGeometry(self, elem):
        """Depth-first search for the first gml:Point or gml:LinearRing
        under *elem*; returns None when none is found."""
        geoms = elem.find("{%s}Point" % GMLNS)
        if geoms is not None:
            return geoms
        geoms = elem.find("{%s}LinearRing" % GMLNS)
        if geoms is not None:
            return geoms
        for c in elem.getchildren():
            geom = self.findGeometry(c)
            if geom is not None:
                return geom

    def findGeometries(self, members):
        """Collect the first geometry of each feature member."""
        geoms = []
        for m in members:
            geom = self.findGeometry(m)
            if geom is not None:
                geoms.append(geom)
        return geoms

    def get(self, parent, width, height):
        """Fetch the features for *parent*'s current viewport; cached by URL."""
        self.bl = parent.bottom_left
        self.tr = parent.top_right
        self.zoom = parent.zoom
        url = self.geturl(self.bl[0], self.bl[1], self.tr[0], self.tr[1])
        if not url:
            return None
        key = hashlib.md5(url).hexdigest()
        if key in self.cache:
            return self.cache[key]
        try:
            xml = self.load('http://' + self.provider_host + url)
            tree = ET.fromstring(xml)
            members = tree.findall("{%s}featureMember" % GMLNS)
            self.geometries = self.findGeometries(members)
            self.cache[key] = self.geometries
            return self.geometries
        except Exception,e:
            Logger.error('OverlayServer could not find (or read) WFS from %s [%s]' % (url, e))
            # NOTE(review): dead assignment — `image` is never used.
            image = None

    def getInfoText(self, member):
        """Format a feature member's fields as "tag: value" lines."""
        fields = member.getchildren()[0].getchildren()
        info = ""
        for field in fields:
            if field.text is not None and field.text.strip() != "":
                info += "%s: %s\n" % (field.tag[field.tag.index("}")+1:], field.text)
        return info

    def getInfo(self, lat, lon, epsilon):
        """Query a small bbox around (lat, lon); return the first feature's
        info text, or None on failure."""
        try:
            url = self.geturl(lat-epsilon, lon-epsilon, lat+epsilon, lon+epsilon)
        except:
            return None
        try:
            xml = self.load('http://' + self.provider_host + url)
            tree = ET.fromstring(xml)
            member = tree.find("{%s}featureMember" % GMLNS)
            if member is not None:
                infotext = self.getInfoText(member)
                return infotext
        except Exception,e:
            Logger.error('OverlayServer could not find (or read) WFS from %s [%s]' % (url, e))
        return None

    def xy_to_co(self, lat, lon):
        """Project (lat, lon) into this server's coordinate system."""
        if self.customBounds:
            x, y = latlon_to_custom(lat, lon, self.bounds)
        elif self.isPLatLon:  # patch for android - does not require pyproj library
            x, y = lon, lat
        elif self.isPGoogle:  # patch for android - does not require pyproj library
            x, y = latlon_to_google (lat, lon)
        else:
            x, y = transform(pLatlon, self.projection, lon, lat)
        return x,y

    def co_to_ll(self,x,y):
        """Inverse of xy_to_co(): projected coordinates back to (lat, lon)."""
        if self.customBounds:
            l, m = custom_to_latlon(x, y, self.bounds)
        elif self.isPLatLon:  # patch for android - does not require pyproj library
            l, m = y, x
        elif self.isPGoogle:  # patch for android - does not require pyproj library
            l, m = google_to_latlon (y, x)
        else:
            l, m = transform(self.projection, pLatlon, y, x)
        return l, m

    def geturl(self, lat1, lon1, lat2, lon2):
        """Build the GetFeature URL for the given bbox; None if projection fails."""
        try:
            x1, y1 = self.xy_to_co(lat1, lon1)
            x2, y2 = self.xy_to_co(lat2, lon2)
            return self.url + "&bbox=%f,%f,%f,%f" % (x1, y1, x2, y2)
        except RuntimeError, e:
            return None

    def parseFeature(self, feature, data):
        """Record a FeatureType's name and its DefaultSRS list into *data*."""
        try:
            name = feature.find("Name").text
            title = feature.find("Title").text
        except:
            name = None
            title = None
        srss = feature.findall("DefaultSRS")
        if name:# and srss:
            data[name] = map(lambda x:x.text, srss)
            if self.debug:
                print "Provider %s provides feature %s in projections %s" % (self.provider_host, name, data[name])

    def initFromGetCapabilities(self, host, baseurl, feature = None, index = 0, srs = None):
        """Configure feature name, URL and projection from GetCapabilities."""
        self.debug = (feature == None) and (index == 0)
        # GetCapabilities (Features + SRS)
        capabilities = urlopen(host + baseurl + "?SERVICE=WFS&Request=GetCapabilities").read().strip()
        try:
            tree = ET.fromstring(capabilities)
            if self.debug:
                ET.dump(tree)
            features = tree.findall("FeatureType") #TODO: proper parsing of cascading layers and their SRS
            data = {}
            for f in features:
                self.parseFeature(f, data)
            # Choose Feature and SRS by (alphabetical) index
            if feature is None:
                feature = sorted(data.keys())[index]
            if srs is None:
                srs = sorted(data[feature])[0]
        except:
            pass
        print "Displaying from %s/%s: feature %s in SRS %s." % (host, baseurl, feature, srs)
        # generate tile URL and init projection by EPSG code
        self.feature = feature
        self.url = baseurl + "?typeName=namespace:%s&SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&maxFeatures=50" % (feature)
        self.isPGoogle = False
        self.isPLatLon = False
        if srs=="EPSG:4326":
            self.isPLatLon = True
        elif srs=="EPSG:900913" or srs == "EPSG:3857":
            self.isPGoogle = True
            try:
                self.projection = pGoogle
            except:
                pass
        else:
            try:
                self.projection = Proj(init=srs)
            except:
                pass
'''
Created on 2012/09/03
@author: amake
'''
from __future__ import print_function
import os
import sys
import urllib
import codecs
from datetime import datetime
from xml.etree import ElementTree
import putio
# File (beside this script) listing titles already fetched.
CACHE_FILE = "cache.txt"
# File (beside this script) listing feed URLs, one per line.
FEEDS_FILE = "feeds.txt"
DEBUG = True
PUTIOAPI = None


# Stupid CloudFlare decided to block "non-standard" browsers.
# Spoofing the user-agent gets around it.
class CustomURLopener(urllib.FancyURLopener):
    # Fix: the original wrote this as two adjacent string *statements*, so
    # the second literal was a discarded expression and the User-Agent was
    # truncated after "10_7_5) ". Parenthesised implicit concatenation
    # keeps the whole string in `version`.
    version = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) '
               'AppleWebKit/536.26.17 (KHTML like Gecko) Version/6.0.2 Safari/536.26.17')


urllib._urlopener = CustomURLopener()
def log(message):
    """Print *message* (UTF-8 encoded) when the module DEBUG flag is set."""
    if not DEBUG:
        return
    print(message.encode('utf-8'))
class feedputter():
    '''
    Grab torrent files from an RSS feed.
    '''

    def __init__(self, feed):
        '''
        Constructor

        :param feed: URL of the RSS feed to poll
        '''
        self.feed = feed
        self.cache = []
        if os.path.isfile(CACHE_FILE):
            # Titles handled on previous runs; used to skip duplicates.
            self.cache = [line.strip() for line in codecs.open(
                CACHE_FILE, 'r', 'utf-8').readlines()]

    def __get_items(self):
        '''Download the feed and return its <item> elements.'''
        log("Fetching feed from: %s" % self.feed)
        data = urllib.urlopen(self.feed).read()
        tree = ElementTree.fromstring(data)
        return tree.findall(".//item")

    def save_torrent(self, link, target, title):
        '''Save *link* as "<title>.torrent" inside *target*; True on success.'''
        torrent = urllib.urlopen(link)
        if (torrent.getcode() != 200):
            # Fix: getcode() returns an int — the original concatenated it
            # to a str ("Error " + torrent.getcode()), raising TypeError on
            # the very path meant to report the error.
            log("Error %s" % torrent.getcode())
            return False
        with open(os.path.join(target, title + ".torrent"), "w") as out:
            out.write(torrent.read())
        return True

    def putio(self, link, target, title):
        '''Hand *link* to put.io for remote download; True on success.'''
        api = putio.get_api(target_folder=target)
        try:
            api.add(link, putio.CALLBACK_URL + '?amk_type=tv')
        except Exception as e:
            print(e)
            print('Skipping.')
            return False
        return True

    def get_to(self, target, method):
        '''
        Fetch linked torrents and save to the specified output folder.

        :param target: destination folder (or put.io target folder)
        :param method: handler called as method(link, target, title) -> bool
        '''
        for item in self.__get_items():
            title = item.find('title').text.strip()
            link = item.find('link').text
            log("Found " + title)
            if title in self.cache:
                log("Already gotten. Skipping.")
                continue
            log("Getting ... ")
            if not method(link, target, title):
                continue
            # Record success so this item is skipped on the next run.
            with codecs.open(CACHE_FILE, "a", "utf-8") as tmp:
                tmp.write(title + "\n")
            log("Done")
def usage():
    """Print a one-line usage summary for this script."""
    script_name = os.path.basename(__file__)
    print('Usage: {0} TARGET_DIR'.format(script_name))
def main():
    """Entry point: validate the target directory argument, then push every
    feed's new items to put.io, logging start/finish timestamps."""
    if len(sys.argv) < 2:
        usage()
        sys.exit(1)
    if not os.path.isdir(sys.argv[1]):
        print('Directory not found or not a directory:', sys.argv[1])
        print()
        usage()
        sys.exit(1)
    # Work relative to the script's own directory, where the cache and
    # feeds files live.
    os.chdir(os.path.dirname(__file__))
    feeds = [line.strip() for line in open(FEEDS_FILE).readlines()]
    log(datetime.now().isoformat(" ") +
        " Starting feedputter with {0} feeds".format(len(feeds)))
    for feed in feeds:
        getter = feedputter(feed)
        getter.get_to(sys.argv[1], getter.putio)
    log(datetime.now().isoformat(" ") + " Finished feedputter")


if __name__ == "__main__":
    main()
| feedputter.py | 3,311 | Grab torrent files from an RSS feed.
Constructor
Fetch linked torrents and save to the specified output folder.
Created on 2012/09/03
@author: amake
Stupid CloudFlare decided to block "non-standard" browsers. Spoofing the user-agent gets around it. | 251 | en | 0.804123 |
# -*- coding: utf-8 -*-
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from .models import OembedVideoPlugin, OembedRichPlugin
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
class CMSOembedVideoPlugin(CMSPluginBase):
    """django CMS plugin rendering an oEmbed video from a pasted URL."""
    name = _('Video (embedded)')
    model = OembedVideoPlugin
    render_template = 'djangocms_oembed/plugins/video.html'
    admin_preview = False
    text_enabled = True  # usable inside text plugins
    fieldsets = (
        (None, {'fields': ('oembed_url', ('width', 'height',), 'autoplay', 'loop', 'show_related',)}),
        ('advanced', {'fields': ('type', 'provider', 'html', 'data'), 'classes': ['collapse']}),
    )
    # Filled from the oEmbed response; shown read-only in the admin.
    readonly_fields = ('type', 'provider', 'html', 'data',)

    def icon_src(self, instance):
        """Icon representing this plugin inside text-plugin content."""
        return settings.STATIC_URL + u"cms/images/plugins/snippet.png"


plugin_pool.register_plugin(CMSOembedVideoPlugin)
class CMSOembedRichPlugin(CMSPluginBase):
    """django CMS plugin rendering arbitrary oEmbed "rich" content."""
    name = _('Rich Content (embedded)')
    model = OembedRichPlugin
    render_template = 'djangocms_oembed/plugins/rich.html'
    admin_preview = False
    text_enabled = True  # usable inside text plugins
    fieldsets = (
        (None, {'fields': ('oembed_url',)}),
        ('advanced', {'fields': ('type', 'provider', 'html', 'data'), 'classes': ['collapse']}),
    )
    # Filled from the oEmbed response; shown read-only in the admin.
    readonly_fields = ('type', 'provider', 'html', 'data',)

    def icon_src(self, instance):
        """Icon representing this plugin inside text-plugin content."""
        return settings.STATIC_URL + u"cms/images/plugins/snippet.png"


plugin_pool.register_plugin(CMSOembedRichPlugin)
| djangocms_oembed/cms_plugins.py | 1,514 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
from typing import List
import json
import hashlib
from time import time
from base64 import b64decode, b64encode
import ecdsa
from config import ECDSA_CURVE
from .constants import BLOCK_COUNT_FREEZE_WALLET_LOTTERY_AFTER_WIN, DEVELOPER_KEY
from .transaction import Transaction
from .exceptions import (
ValidationError,
NonLotteryMemberError,
WalletLotteryFreezeError,
GenesisIsNotValidError,
NonSequentialBlockIndexError,
NonMatchingHashError
)
class Block:
    """A block in the chain: an ordered batch of transactions signed by its forger."""

    def __init__(
        self,
        index,
        previous_hash,
        timestamp=None,
        forger=None,
        transactions: List[Transaction] = None,
        signature=None,
        **kwargs,
    ):
        """
        Create block
        :param index: the block index at the chain (0 for the genesis block and so on)
        :param previous_hash: hash of previous block
        :param timestamp: block creation time (defaults to now)
        :param forger: public_address of forger wallet
        :param transactions: list of transactions
        :param signature: signature of the block hash by the forger
        """
        if timestamp is None:
            timestamp = time()
        if transactions is None:
            transactions = []
        self.index = index
        self.previous_hash = previous_hash
        self.timestamp = timestamp
        self.forger = forger
        self.transactions = transactions
        self.signature = signature

    @property
    def forger_public_key(self) -> ecdsa.VerifyingKey:
        """Verifying key reconstructed from the forger's hex-encoded public address."""
        forger_public_key_string = bytes.fromhex(self.forger)
        return ecdsa.VerifyingKey.from_string(forger_public_key_string, curve=ECDSA_CURVE)

    def _raw_data(self):
        """Canonical dict of the signed fields (transactions sorted by nonce)."""
        return {
            "index": self.index,
            "timestamp": self.timestamp,
            "transactions": sorted([
                transaction.to_dict() for transaction in self.transactions
            ], key=lambda t: t["nonce"]),
            "previous_hash": self.previous_hash,
            "forger": self.forger,
        }

    def hash(self):
        """
        Calculate the block hash (block number, previous hash, transactions)
        :return: String hash of block data (hex)
        """
        block_dict = self._raw_data()
        # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes
        block_string = json.dumps(block_dict, sort_keys=True).encode()
        return hashlib.sha256(block_string).hexdigest()

    def to_dict(self):
        """Serializable representation including hash and base64 signature."""
        return {
            **self._raw_data(),
            "hash": self.hash(),
            "signature": b64encode(self.signature).decode(),
        }

    def add_transaction(self, transaction: Transaction):
        """
        Add transaction to block
        :param transaction: Transaction object (see transaction.py)
        :raise ValidationError: if transaction isn't valid.
        :return: None
        """
        self.transactions.append(transaction)

    def is_signature_verified(self) -> bool:
        """
        Check if block signature is valid
        :return: bool
        """
        # BUG FIX: an unsigned block (signature is None) previously made
        # ``verify`` raise instead of simply reporting an invalid signature.
        if self.signature is None:
            return False
        try:
            return self.forger_public_key.verify(self.signature, self.hash().encode())
        except ecdsa.BadSignatureError:
            return False

    def create_signature(self, forger_private_address: str):
        """
        Create block signature for this block
        :param forger_private_address: hex string of the forger's private key
        :return: None
        """
        forger_private_key_string = bytes.fromhex(forger_private_address)
        forger_private_key = ecdsa.SigningKey.from_string(forger_private_key_string, curve=ECDSA_CURVE)
        # The private key must correspond to the declared forger address.
        if forger_private_key.get_verifying_key() != self.forger_public_key:
            raise ValueError("The forger is not the one signing")
        self.signature = self.sign(forger_private_key)

    def sign(self, forger_private_key: ecdsa.SigningKey):
        """Sign this block's hash with the given private key and return the signature."""
        return forger_private_key.sign(self.hash().encode())

    def validate(self, blockchain_state, is_test_net=False):
        """
        Validate block
        1. check block index (is the next block in the blockchain state)
        2. check previous hash (is the hash of the previous block)
        3. check forger wallet (is lottery member?)
        4. check block signature
        5. validate transactions
        :param is_test_net: if True ignore InsufficientBalanceError and NonLotteryMemberError
        :param blockchain_state: Blockchain state object
        :raises ValidationError
        :return: None
        """
        if self.index == 0 and blockchain_state.length == 0:
            # Genesis block: must be forged and signed by the developer key.
            genesis_is_valid = self.forger == DEVELOPER_KEY and self.is_signature_verified()
            if not genesis_is_valid:
                raise GenesisIsNotValidError()
            return
            # TODO: check in production if hash if equal to hard coded hash
        if self.index != blockchain_state.length:
            raise NonSequentialBlockIndexError(
                f"block index not sequential index: {self.index} chain: {blockchain_state.length}"
            )
        if self.previous_hash != blockchain_state.last_block_hash:
            raise NonMatchingHashError("previous hash not match previous block hash")
        forger_wallet = blockchain_state.wallets.get(self.forger, None)
        # NOTE(review): 100 is presumably the minimum stake to join the lottery —
        # confirm and consider lifting into a named constant.
        if forger_wallet is None or forger_wallet.balance < 100:
            if not is_test_net:
                raise NonLotteryMemberError()
        if not self.is_signature_verified():
            raise ValidationError("invalid signature")
        for transaction in self.transactions:
            transaction.validate(
                blockchain_state=blockchain_state, is_test_net=is_test_net
            )  # raises ValidationError
        # TODO: Add timestamp validation

    @classmethod
    def from_dict(
        cls,
        index: int,
        previous_hash,
        forger,
        transactions: list,
        signature: str,
        **kwargs,
    ):
        """Rebuild a Block from its serialized dict form.

        ``transactions`` is a list of transaction dicts; ``signature`` is
        base64-encoded. Extra keys (e.g. the redundant "hash" produced by
        ``to_dict``) are swallowed by ``**kwargs``.
        """
        transactions = list(map(lambda t: Transaction.from_dict(**t), transactions))
        signature = b64decode(signature.encode())
        return cls(
            index=index,
            previous_hash=previous_hash,
            forger=forger,
            transactions=transactions,
            signature=signature,
            **kwargs,
        )

    def __getitem__(self, item):
        # Allow dict-style access (block["index"]) as an alias for attributes.
        return getattr(self, item)
| src/blockchain/block.py | 6,390 | Create block
:param index: the block index at the chain (0 for the genesis block and so on)
:param previous_hash: hash of previous block
:param timestamp: block creation time
:param forger: public_address of forger wallet
:param transactions: list of transactions
:param signature: signature of the block hash by the forger
Add transaction to block
:param transaction: Transaction object (see transaction.py)
:raise Validation error if transaction isn't valid.
:return: None
Create block signature for this block
:param forger_private_address: base64(wallet private address)
:return: None
Calculate the block hash (block number, previous hash, transactions)
:return: String hash of block data (hex)
Check if block signature is valid
:return: bool
Validate block
1. check block index (is the next block in the blockchain state)
2. check previous hash (is the hash of the previous block)
3. check forger wallet (is lottery member?)
4. check block signature
5. validate transactions
:param is_test_net: if True ignore InsufficientBalanceError and NonLotteryMemberError
:param blockchain_state: Blockchain state object
:raises ValidationError
:return: None
We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes TODO: check in production if hash if equal to hard coded hash raises ValidationError TODO: Add timestamp validation | 1,355 | en | 0.669497 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: re-declare the ``Remedial.focusRoom`` ForeignKey."""
    dependencies = [
        ('focus', '0006_auto_20160209_1200'),
    ]
    operations = [
        migrations.AlterField(
            model_name='remedial',
            name='focusRoom',
            # NOTE: bytes help_text (b'...') is what the Python-2-era autogenerator
            # emitted; migrations are historical records, so it is kept as-is.
            field=models.ForeignKey(help_text=b'The focusroom that this remedial is assigned to', to='focus.FocusRoom'),
        ),
    ]
| focus/migrations/0007_auto_20160209_1201.py | 474 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models
class HrJobTask(models.Model):
    """A task that can be attached to a job position (model ``hr.job.task``)."""
    _name = 'hr.job.task'  # technical model name in the OpenERP/Odoo registry
    name = fields.Char(string='Description')  # human-readable task description
    job_id = fields.Many2one(comodel_name='hr.job', string='Job')  # job this task belongs to
    categ_id = fields.Many2one(comodel_name='hr.task.categ', string='Category')  # task category
| models/hr_job_task.py | 1,264 | -*- coding: utf-8 -*- OpenERP, Open Source Management Solution Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. | 783 | en | 0.889938 |
import numpy as np
from sklearn import preprocessing
class DataTransformation:
    """Abstract base class for reversible transformations of (X, Y) data.

    Subclasses implement the forward transforms applied before modelling and
    the inverse transforms used to map predictions back to the original space.
    """

    def __init__(self):
        pass

    def transform_X(self, X):
        """Map raw inputs ``X`` into the transformed space."""
        raise NotImplementedError()

    def transform_Y(self, Y):
        """Map raw targets ``Y`` into the transformed space."""
        raise NotImplementedError()

    def untransform_X(self, X):
        """Map transformed inputs ``X`` back to their original values."""
        raise NotImplementedError()

    def untransform_Y(self, Y):
        """Map transformed targets ``Y`` back to their original values."""
        raise NotImplementedError()

    def untransform_Y_var(self, Yvar):
        """Map a predictive variance from the transformed to the original Y space."""
        raise NotImplementedError()

    def untransform_NLPD(self, NLPD):
        """Map a negative log predictive density back to the original Y space."""
        raise NotImplementedError()
class IdentityTransformation:
    """No-op transformation: every value passes through unchanged."""

    def __init__(self):
        pass

    def transform_X(self, X):
        """Return ``X`` unchanged."""
        return X

    def transform_Y(self, Y):
        """Return ``Y`` unchanged."""
        return Y

    def untransform_X(self, X):
        """Return ``X`` unchanged."""
        return X

    def untransform_Y(self, Y):
        """Return ``Y`` unchanged."""
        return Y

    def untransform_Y_var(self, Yvar):
        """Return ``Yvar`` unchanged."""
        return Yvar

    def untransform_NLPD(self, NLPD):
        """Return ``NLPD`` unchanged."""
        return NLPD

    @staticmethod
    def get_transformation(Y, X):
        """Factory matching the signature of the other transformations."""
        return IdentityTransformation()
class MeanTransformation(DataTransformation):
    """
    Only transforms Y as follows:
    transformed Y = untransformed Y - mean(Y)

    X is passed through unchanged.
    """
    # BUG FIX: the class previously inherited from ``(object, DataTransformation)``,
    # which raises ``TypeError: Cannot create a consistent method resolution order``
    # under Python 3 (``object`` must come after its subclasses in the bases).

    def __init__(self, mean):
        """
        :param mean: per-output mean of Y, subtracted by ``transform_Y``
        """
        super(MeanTransformation, self).__init__()
        self.mean = mean

    def transform_X(self, X):
        return X

    def transform_Y(self, Y):
        return Y - self.mean

    def untransform_X(self, X):
        return X

    def untransform_Y(self, Y):
        return Y + self.mean

    def untransform_Y_var(self, Yvar):
        # A constant shift leaves the variance unchanged.
        return Yvar

    def untransform_NLPD(self, NLPD):
        # A constant shift has unit Jacobian, so the NLPD needs no correction.
        return NLPD

    @staticmethod
    def get_transformation(Y, X):
        return MeanTransformation(Y.mean(axis=0))
class MeanStdYTransformation(DataTransformation):
    """
    Transforms only Y in a way that the transformed Y has mean = 0 and std = 1

    X is passed through unchanged.
    """
    # BUG FIX: previously inherited from ``(object, DataTransformation)`` — an
    # inconsistent-MRO TypeError under Python 3; ``DataTransformation`` alone
    # is sufficient and keeps ``isinstance`` checks working.

    def __init__(self, scalar):
        """
        :param scalar: fitted ``sklearn.preprocessing.StandardScaler`` for Y
        """
        super(MeanStdYTransformation, self).__init__()
        self.scalar = scalar

    def transform_X(self, X):
        return X

    def transform_Y(self, Y):
        return self.scalar.transform(Y)

    def untransform_X(self, X):
        return X

    def untransform_Y(self, Y):
        return self.scalar.inverse_transform(Y)

    def untransform_Y_var(self, Yvar):
        return Yvar

    def untransform_NLPD(self, NLPD):
        # Jacobian correction for the per-output scaling: summed term for the
        # joint NLPD in the first column, per-output terms after it.
        # NOTE(review): ``StandardScaler.std_`` was renamed ``scale_`` in
        # scikit-learn 0.17 — confirm against the pinned sklearn version.
        return NLPD + np.hstack((np.array([np.log(self.scalar.std_).sum()]), np.log(self.scalar.std_)))

    @staticmethod
    def get_transformation(Y, X):
        return MeanStdYTransformation(preprocessing.StandardScaler().fit(Y))
class MinTransformation(DataTransformation):
    """
    Transforms only Y.
    transformed Y = (Y - min(Y)) / (max(Y) - min(Y)) - offset

    With the default offset of 0.5 the transformed Y lies in [-0.5, 0.5].
    """
    # BUG FIX: previously inherited from ``(object, DataTransformation)`` — an
    # inconsistent-MRO TypeError under Python 3.

    def __init__(self, min, max, offset):
        """
        :param min: minimum of Y (shadows the builtin; name kept for callers)
        :param max: maximum of Y
        :param offset: constant subtracted after scaling Y to [0, 1]
        """
        super(MinTransformation, self).__init__()
        self.min = min
        self.max = max
        self.offset = offset

    def transform_X(self, X):
        return X

    def transform_Y(self, Y):
        # ``astype('float')`` guards against integer division on int arrays.
        return (Y - self.min).astype('float') / (self.max - self.min) - self.offset

    def untransform_X(self, X):
        return X

    def untransform_Y(self, Y):
        return (Y + self.offset) * (self.max - self.min) + self.min

    def untransform_Y_var(self, Yvar):
        # Variance scales with the square of the linear factor.
        return Yvar * (self.max - self.min) ** 2

    def untransform_NLPD(self, NLPD):
        # log-Jacobian correction for the linear rescaling.
        return NLPD + np.log(self.max - self.min)

    @staticmethod
    def get_transformation(Y, X):
        return MinTransformation(Y.min(), Y.max(), 0.5)
| GP/data_transformation.py | 4,326 | A generic class for the transformation of data
Identity transformation. No transformation will be applied to data.
Transforms only Y in a way that the transformed Y has mean = 0 and std =1
Only transforms Y as follows:
transformed Y = untransformed Y - mean(Y)
Transforms only Y.
transformed Y = (Y - min(Y)) / (max(Y) - min(Y)) - 0.5
transforms X
:param
X: Input X
:return
transformed X
transforms Y
:param
Y: Input Y
:return
transformed Y
 Untransforms NLPD to the original Y space
 :param
 NLPD: transformed NLPD
:return
untransformed NLPD
Untransforms X to its original values
:param
X: transformed X
:return
untransformed X
Untransforms Y
:param
Y: transformed Y
:return
 untransformed Y
# encoding: utf-8
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import config
from base_model import resnet50
from seg_opr.seg_oprs import ConvBnRelu
class CPNet(nn.Module):
    """Context Prior network (CPNet) for semantic segmentation.

    Dilated ResNet-50 backbone (output stride 8) feeding an ObjectContext
    module, a main head and an auxiliary head. In training mode ``forward``
    returns the combined loss; in inference mode it returns log-softmax scores
    upsampled to input resolution.
    """
    def __init__(self, out_planes, criterion, pretrained_model=None,
                 norm_layer=nn.BatchNorm2d):
        """
        :param out_planes: number of output classes
        :param criterion: per-pixel segmentation loss used by both heads
        :param pretrained_model: optional pretrained weights for the backbone
        :param norm_layer: normalisation layer class (BatchNorm2d by default)
        """
        super(CPNet, self).__init__()
        self.backbone = resnet50(pretrained_model, norm_layer=norm_layer,
                                 bn_eps=config.bn_eps,
                                 bn_momentum=config.bn_momentum,
                                 deep_stem=True, stem_width=64)
        # Replace stride-2 convs in the last two stages with dilated convs so
        # the backbone keeps an output stride of 8 instead of 32.
        self.backbone.layer3.apply(partial(self._nostride_dilate, dilate=2))
        self.backbone.layer4.apply(partial(self._nostride_dilate, dilate=4))
        # Layers listed here get the non-backbone learning rate in training.
        self.business_layer = []
        self.context = ObjectContext(2048, 512, norm_layer)
        # Main head consumes backbone features concatenated with both context maps.
        self.head_layer = nn.Sequential(
            ConvBnRelu(2048 + 1024, 512, 3, 1, 1,
                       has_bn=True,
                       has_relu=True, has_bias=False, norm_layer=norm_layer),
            nn.Dropout2d(0.1, inplace=False),
            nn.Conv2d(512, out_planes, kernel_size=1)
        )
        # Auxiliary head on the penultimate stage (1024 channels).
        self.aux_layer = nn.Sequential(
            ConvBnRelu(1024, 512, 3, 1, 1,
                       has_bn=True,
                       has_relu=True, has_bias=False, norm_layer=norm_layer),
            nn.Dropout2d(0.1, inplace=False),
            nn.Conv2d(512, out_planes, kernel_size=1)
        )
        self.business_layer.append(self.context)
        self.business_layer.append(self.head_layer)
        self.business_layer.append(self.aux_layer)

        self.criterion = criterion
        # BCE supervises the predicted intra-class affinity map.
        self.bce_criterion = nn.BCELoss(reduction='mean')

    def forward(self, data, label=None, aux_label=None):
        """Return combined loss when ``label`` is given, else log-softmax scores.

        :param data: input image batch (N, C, H, W)
        :param label: ground-truth class map for the main/aux heads
        :param aux_label: ground-truth intra-class affinity map for the BCE term
        """
        blocks = self.backbone(data)
        fm, intra_sim_map = self.context(blocks[-1])
        fm = self.head_layer(fm)
        # 8x upsampling back to input resolution (backbone output stride is 8).
        fm = F.interpolate(fm, scale_factor=8, mode='bilinear',
                           align_corners=True)
        softmax_fm = F.log_softmax(fm, dim=1)
        aux_fm = self.aux_layer(blocks[-2])
        aux_fm = F.interpolate(aux_fm, scale_factor=8, mode='bilinear',
                               align_corners=True)
        if label is not None:
            main_loss = self.criterion(fm, label)
            aux_loss = self.criterion(aux_fm, label)
            intra_sim_loss = self.bce_criterion(intra_sim_map, aux_label)
            # 0.4 is the conventional auxiliary-loss weight for PSP-style nets.
            loss = main_loss + 0.4 * aux_loss + intra_sim_loss
            return loss

        return softmax_fm

    # @staticmethod
    def _nostride_dilate(self, m, dilate):
        """Turn a stride-2 conv into stride 1 and dilate 3x3 kernels by ``dilate``."""
        if isinstance(m, nn.Conv2d):
            if m.stride == (2, 2):
                m.stride = (1, 1)
                if m.kernel_size == (3, 3):
                    # Downsampling conv: use half the dilation to keep the
                    # receptive field consistent with the following convs.
                    m.dilation = (dilate // 2, dilate // 2)
                    m.padding = (dilate // 2, dilate // 2)
            else:
                if m.kernel_size == (3, 3):
                    m.dilation = (dilate, dilate)
                    m.padding = (dilate, dilate)
class ObjectContext(nn.Module):
    """Context Prior module: aggregates intra-class and inter-class context.

    Predicts a per-pixel affinity map over all spatial positions, splits it
    into intra-class (affinity >= threshold after sigmoid) and inter-class
    (its complement) parts, and averages pixel features under each to form
    two context maps that are concatenated onto the input features.
    """
    def __init__(self, in_channels, inner_channel, norm_layer=nn.BatchNorm2d):
        """
        :param in_channels: channels of the incoming backbone feature map
        :param inner_channel: reduced channel width used inside the module
        :param norm_layer: normalisation layer class
        """
        super(ObjectContext, self).__init__()
        self.in_channels = in_channels
        self.inner_channel = inner_channel
        self.reduce_conv = ConvBnRelu(self.in_channels, self.inner_channel,
                                      1, 1, 0,
                                      has_bn=True, has_relu=True,
                                      has_bias=False, norm_layer=norm_layer)
        # Predicts one affinity value per spatial position pair.
        # NOTE(review): 3600 = 60*60 assumes a fixed 60x60 feature grid
        # (e.g. 480x480 input at stride 8) — confirm against the crop size.
        self.intra_similarity_branch = nn.Sequential(
            ConvBnRelu(self.inner_channel, self.inner_channel, 1, 1, 0,
                       has_bn=True, has_relu=True,
                       has_bias=False, norm_layer=norm_layer),
            ConvBnRelu(self.inner_channel, 3600, 1, 1, 0,
                       has_bn=True, has_relu=False,
                       has_bias=False, norm_layer=norm_layer),
        )
        self.intra_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)
        self.inter_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)

    def forward(self, x):
        """Return (features ++ intra-context ++ inter-context, intra affinity map)."""
        b, h, w = x.size(0), x.size(2), x.size(3)
        value = self.reduce_conv(x)
        # Affinity between every pair of positions, squashed to (0, 1).
        intra_similarity_map = self.intra_similarity_branch(value)
        intra_similarity_map = intra_similarity_map.view(b, h * w, -1)
        intra_similarity_map = intra_similarity_map.permute(0, 2, 1)
        intra_similarity_map = torch.sigmoid(intra_similarity_map)
        # Inter-class affinity is the complement of intra-class affinity.
        inter_similarity_map = 1 - intra_similarity_map

        value = value.view(b, self.inner_channel, -1)
        value = value.permute(0, 2, 1)

        # Average features over positions judged intra-class (affinity >= 0.5);
        # the masked_fill_ guards against division by zero for empty masks.
        intra_context = torch.bmm(intra_similarity_map, value)
        intra_mask = torch.ge(intra_similarity_map, 0.5).float()
        intra_mask_count = intra_mask.sum(dim=-1, keepdim=True)
        intra_mask_count = intra_mask_count.masked_fill_(intra_mask_count.eq(0),
                                                         1)
        intra_context = intra_context.div(intra_mask_count)
        intra_context = intra_context.permute(0, 2, 1).contiguous()
        intra_context = intra_context.view(b, self.inner_channel, *x.size()[2:])
        intra_context = self.intra_post_conv(intra_context)

        # Same aggregation over the complementary (inter-class) affinities.
        inter_context = torch.bmm(inter_similarity_map, value)
        inter_mask = torch.ge(inter_similarity_map, 0.5).float()
        inter_mask_count = inter_mask.sum(dim=-1, keepdim=True)
        inter_mask_count = inter_mask_count.masked_fill_(inter_mask_count.eq(0),
                                                         1)
        inter_context = inter_context.div(inter_mask_count)
        inter_context = inter_context.permute(0, 2, 1).contiguous()
        inter_context = inter_context.view(b, self.inner_channel, *x.size()[2:])
        inter_context = self.inter_post_conv(inter_context)

        output = torch.cat([x, intra_context, inter_context], dim=1)
        return output, intra_similarity_map
if __name__ == "__main__":
    # Smoke test: instantiate the model defined in this file and print it.
    # BUG FIX: previously instantiated ``PSPNet``, which is not defined here
    # (NameError); the class defined in this module is ``CPNet``.
    model = CPNet(150, None)
    print(model)
| model/cpn/ade.cpn.R50_v1c.v7/network.py | 6,566 | encoding: utf-8 @staticmethod | 29 | en | 0.495495 |
#!/usr/bin/env python3
# Copyright 2019 The University of Manchester UK
# Copyright 2019 RO-Crate contributors <https://github.com/ResearchObject/ro-crate/graphs/contributors>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script retrieves the schema.org properties to generate
the corresponding simplified @context for RO-Crate
adding our additional properties.
Run as:
./schema-context.py 0.3-DRAFT > ../docs/0.3-DRAFT/context.jsonld
"""
import sys
import json
import requests
from collections import OrderedDict
import urllib.request
# Our own version of the RO-Crate specification being generated.
ROCRATE_VERSION="1.1-DRAFT"
# Update version from http://schema.org/docs/releases.html
# NOTE: Breaks due to https://github.com/schemaorg/schemaorg/issues/2805
SCHEMA_VERSION="10.0"
# Bioschemas profile/namespace URIs; update from https://bioschemas.org/profiles/Workflow/
BIOSCHEMA_WORKFLOW_PROFILE = "https://bioschemas.org/profiles/ComputationalWorkflow/0.5-DRAFT-2020_07_21"
BIOSCHEMA_WORKFLOW_NS = "https://bioschemas.org/ComputationalWorkflow"
BIOSCHEMA_FORMAL_PARAMETER_NS = "https://bioschemas.org/FormalParameter"
BIOSCHEMA_FORMAL_PARAMETER_PROFILE = "https://bioschemas.org/profiles/FormalParameter/0.1-DRAFT-2020_07_21"
def main():
    """Build the RO-Crate JSON-LD context document and dump it to stdout.

    Downloads the schema.org context for SCHEMA_VERSION, copies every plain
    term (skipping prefixed/keyword/bibo-style entries), then overlays the
    RO-Crate additions from ADDITIONAL. Version/tag come from argv or default
    to ROCRATE_VERSION.
    """
    #url="http://schema.org/version/%s/schemaorgcontext.jsonld" % SCHEMA_VERSION
    # Workaround for https://github.com/schemaorg/schemaorg/issues/2805
    url = "https://raw.githubusercontent.com/schemaorg/schemaorg/V%s-release/data/releases/%s/schemaorgcontext.jsonld" % (SCHEMA_VERSION, SCHEMA_VERSION)
    with urllib.request.urlopen(url) as f:
        schema = json.load(f)

    if len(sys.argv) > 2:
        version = sys.argv[1]
        tag = sys.argv[2]
    elif len(sys.argv) > 1:
        tag = version = sys.argv[1]
    else:
        tag = version = ROCRATE_VERSION

    schemakeys = list(schema["@context"].keys())
    schemakeys.sort()  # they are usually sorted anyway

    j = OrderedDict()
    j["@id"] = "https://w3id.org/ro/crate/%s/context" % version
    # BUG FIX: this line previously ended with a stray trailing comma, which
    # made "name" a one-element tuple and serialised it as a JSON array.
    j["name"] = "RO-Crate JSON-LD Context"
    j["version"] = tag
    j["url"] = {"@id": "https://w3id.org/ro/crate/%s" % version}
    j["schemaVersion"] = {"@id": "http://schema.org/version/%s/" % SCHEMA_VERSION}
    j["isBasedOn"] = [
        {"@id": "http://schema.org/version/%s/" % SCHEMA_VERSION},
        {"@id": "https://pcdm.org/2016/04/18/models"},
        {"@id": BIOSCHEMA_WORKFLOW_PROFILE },
        {"@id": BIOSCHEMA_FORMAL_PARAMETER_PROFILE }
    ]
    j["license"] = {"@id": "https://creativecommons.org/publicdomain/zero/1.0/"}

    context = OrderedDict()
    j["@context"] = context
    for k in schemakeys:
        if ":" in k:  # URL like https://www.w3.org/wiki/WebSchemas/SchemaDotOrgSources#TP
            continue
        if "@" in k:  # @vocab?
            continue
        definition = schema["@context"][k]
        if "@id" not in definition or isinstance(definition, str):
            continue  # bibo etc.
        context[k] = schema["@context"][k]["@id"].replace("schema:", "http://schema.org/")
    # RO-Crate additions override/extend the schema.org terms.
    context.update(ADDITIONAL)

    # indent=5 matches the hand-formatted context already published
    json.dump(j, sys.stdout, ensure_ascii=False, indent=5)
    print()  ## newline
# Ordered so we keep a somewhat ordered presentation in the JSON.
# Terms and prefixes added to the plain schema.org context by RO-Crate.
ADDITIONAL = OrderedDict([
    # This list should correspond to listing in
    # https://researchobject.github.io/ro-crate/0.3-DRAFT/#additional-metadata-standards
    ("File", "http://schema.org/MediaObject"),
    ("path", "http://schema.org/contentUrl"),
    ("Journal", "http://schema.org/Periodical"),
    ("cite-as", "https://www.w3.org/ns/iana/link-relations/relation#cite-as"),
    ("hasFile", "http://pcdm.org/models#hasFile"),
    ("hasMember", "http://pcdm.org/models#hasMember"),
    ("RepositoryCollection", "http://pcdm.org/models#Collection"),
    ("RepositoryObject", "http://pcdm.org/models#object"),
    # Temporary namespace for properties/types
    # proposed https://bioschemas.org/profiles/Workflow/ draft 0.5
    # Remove if/when added to schema.org release!
    ## BEGIN
    ("ComputationalWorkflow", BIOSCHEMA_WORKFLOW_NS),
    ("input", BIOSCHEMA_WORKFLOW_NS + "#input"),
    ("output", BIOSCHEMA_WORKFLOW_NS + "#output"),
    ("FormalParameter", BIOSCHEMA_FORMAL_PARAMETER_NS),
    # https://github.com/schemaorg/schemaorg/issues/383#issuecomment-651040576
    ("funding", "http://schema.org/funding"),
    ## END
    ("wasDerivedFrom", "http://www.w3.org/ns/prov#wasDerivedFrom"),
    ("importedFrom", "http://purl.org/pav/importedFrom"),
    ("importedOn", "http://purl.org/pav/importedOn"),
    ("importedBy", "http://purl.org/pav/importedBy"),
    ("retrievedFrom", "http://purl.org/pav/retrievedFrom"),
    ("retrievedOn", "http://purl.org/pav/retrievedOn"),
    ("retrievedBy", "http://purl.org/pav/retrievedBy"),
    ("conformsTo", "http://purl.org/dc/terms/conformsTo"),
    ("@label", "http://www.w3.org/2000/01/rdf-schema#label"),
    # Namespace prefixes usable in RO-Crate metadata documents.
    ("pcdm", "http://pcdm.org/models#"),
    ("bibo", "http://purl.org/ontology/bibo/"),
    ("cc", "http://creativecommons.org/ns#"),
    ("dct", "http://purl.org/dc/terms/"),
    ("foaf", "http://xmlns.com/foaf/0.1/"),
    ("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#"),
    ("rdfa", "http://www.w3.org/ns/rdfa#"),
    ("rdfs", "http://www.w3.org/2000/01/rdf-schema#"),
    ("schema", "http://schema.org/"),
    ("frapo", "http://purl.org/cerif/frapo/"),
    ("rel", "https://www.w3.org/ns/iana/link-relations/relation#"),
    ("pav", "http://purl.org/pav/"),
    ("prov", "http://www.w3.org/ns/prov#"),
    ("wfdesc", "http://purl.org/ro/wfdesc#"),
    ("wfprov", "http://purl.org/ro/wfprov#"),
    ("roterms", "http://purl.org/ro/roterms#"),
    ("wf4ever", "http://purl.org/ro/wf4ever#"),
    # Disabled, see https://github.com/ResearchObject/ro-crate/pull/73
    # ("@base", None)
])
if __name__=="__main__":
    # Minimal CLI: -v/--version prints versions, -h/--help prints usage,
    # anything else generates the context document on stdout.
    args = sys.argv
    if "-v" in args or "--version" in args:
        print("schema-context.py %s" % ROCRATE_VERSION)
        print("schema.org %s" % SCHEMA_VERSION)
        sys.exit(0)
    if "-h" in args or "--help" in args:
        print("schema-context.py [VERSION] [TAG]")
        print("")
        print("Generates context.jsonld from schema.org and additional terms")
        print(" VERSION is RO-Crate Specification version (default: %s)" % ROCRATE_VERSION)
        print(" TAG is RO-Crate Semantic Versioning tag (default same as VERSION)")
        sys.exit(0)
    main()
| scripts/schema-context.py | 7,130 | This script retrieves the schema.org properties to generate
the corresponding simplified @context for RO-Crate
adding our additional properties.
Run as:
./schema-context.py 0.3-DRAFT > ../docs/0.3-DRAFT/context.jsonld
!/usr/bin/env python3 Copyright 2019 The University of Manchester UK Copyright 2019 RO-Crate contributors <https://github.com/ResearchObject/ro-crate/graphs/contributors> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Our own version Update version from http://schema.org/docs/releases.html NOTE: Breaks due to https://github.com/schemaorg/schemaorg/issues/2805 Update from https://bioschemas.org/profiles/Workflow/url="http://schema.org/version/%s/schemaorgcontext.jsonld" % SCHEMA_VERSION Workaround for https://github.com/schemaorg/schemaorg/issues/2805 they are usually sorted anyway URL like https://www.w3.org/wiki/WebSchemas/SchemaDotOrgSourcesTP @vocab? bibo etc. indent4 to match existing! newline Ordered so we keep a somewhat ordered presentation in the JSON This list should correspond to listing in https://researchobject.github.io/ro-crate/0.3-DRAFT/additional-metadata-standards Temporary namespace for properties/types proposed https://bioschemas.org/profiles/Workflow/ draft 0.5 Remove if/when added to schema.org release! BEGIN https://github.com/schemaorg/schemaorg/issues/383issuecomment-651040576 END Disabled, see https://github.com/ResearchObject/ro-crate/pull/73 ("@base", None) | 1,929 | en | 0.747897 |
import struct
import socket
import ipaddress
from .utils import calculate_checksum
IPV4_HEAD_FMT="!BBHHHBBHII" #H is unsigned short (2 bytes) ! is for network (big-endian)
class IPV4Datagram:
    """
    This class contains 20 bytes IPV4 Datagram
    https://en.wikipedia.org/wiki/IPv4
     |0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|
     ---------------------------------------------------------------------------------------
     |version|  IHL  |      DSCP   | ECN |                 Total Length                    |
     ---------------------------------------------------------------------------------------
     |          identification                 |   flags  |        Fragemnt Offset         |
     ---------------------------------------------------------------------------------------
     |       TTL        |    Protocol         |             Header Checksum                |
     ---------------------------------------------------------------------------------------
     |                             Source Ip Address                                       |
     ---------------------------------------------------------------------------------------
     |                             Destination Ip Address                                  |
     ---------------------------------------------------------------------------------------
    """
    def __init__(self, source_ip="1.1.1.1", destination_ip="1.1.1.1", version=4, ihl=5, tos=0,
                 identification=54321, fragment_offset=0,
                 ttl=253, protocol=socket.IPPROTO_UDP, data='', checksum=0):
        """
        :param source_ip: dotted-quad source address (stored as an int)
        :param destination_ip: dotted-quad destination address (stored as an int)
        :param ihl: header length in 32-bit words (5 = no options)
        :param checksum: initial header checksum; recomputed by ``pack``
        """
        self.version = version
        self.ihl = ihl
        self.version_ihl = (self.version << 4) + self.ihl
        self.tos = tos
        self.identification = identification
        self.fragment_offset = fragment_offset
        self.ttl = ttl
        self.protocol = protocol
        self.checksum = checksum
        self.source_ip = int(ipaddress.IPv4Address(source_ip))  # convert into integer for struct.pack
        self.destination_ip = int(ipaddress.IPv4Address(destination_ip))
        self.data = data
        self.length = 4 * self.ihl + len(self.data)  # header bytes + payload bytes

    def __repr__(self):
        # BUG FIX: previously referenced ``self.type``/``self.code`` (fields of
        # an ICMP datagram, not this class) and mislabelled the class name.
        return 'IPV4Datagram({},{},({},{}))'.format(
            self.source_ip, self.destination_ip, self.checksum, self.data)

    def pack(self):
        """Serialise the header, computing the checksum over a first pass."""
        ipv4_header = struct.pack(IPV4_HEAD_FMT, self.version_ihl, self.tos, self.length, self.identification,
                                  self.fragment_offset, self.ttl, self.protocol, self.checksum, self.source_ip, self.destination_ip)
        self.checksum = calculate_checksum(ipv4_header)
        # Re-pack with the real checksum in place.
        ipv4_header = struct.pack(IPV4_HEAD_FMT, self.version_ihl, self.tos, self.length, self.identification,
                                  self.fragment_offset, self.ttl, self.protocol, self.checksum, self.source_ip, self.destination_ip)
        return ipv4_header

    def unpack(self, buffer):
        """Populate the fields from a packed header; payload goes to ``data``."""
        ipv4_header_size = struct.calcsize(IPV4_HEAD_FMT)
        ipv4_header_packed = buffer[:ipv4_header_size]
        ipv4_header_unpacked = struct.unpack(IPV4_HEAD_FMT, ipv4_header_packed)
        self.version_ihl = ipv4_header_unpacked[0]
        self.ihl = self.version_ihl & 0xf
        self.version = self.version_ihl >> 4
        self.tos = ipv4_header_unpacked[1]
        self.length = ipv4_header_unpacked[2]
        self.identification = ipv4_header_unpacked[3]
        self.fragment_offset = ipv4_header_unpacked[4]
        self.ttl = ipv4_header_unpacked[5]
        self.protocol = ipv4_header_unpacked[6]
        self.checksum = ipv4_header_unpacked[7]
        # NOTE(review): unpack stores dotted-quad *strings* while __init__
        # stores ints, so an unpacked datagram cannot be re-packed without
        # converting back — confirm intended usage before changing either side.
        self.source_ip = str(ipaddress.IPv4Address(ipv4_header_unpacked[8]))
        self.destination_ip = str(ipaddress.IPv4Address(ipv4_header_unpacked[9]))
        self.data = buffer[ipv4_header_size:]
        #print ("source ip == " + str( ipaddress.IPv4Address(self.source_ip)))
        #print ("destination ip == " + str( ipaddress.IPv4Address(self.destination_ip)))
        #print ("checksum = "+ str(self.checksum))
        #print ("ttl == " + str(self.ttl))
| Raw_Socket_Protos/rawIPV4.py | 4,030 | This class contains 20 bytes IPV4 Datagram
https://en.wikipedia.org/wiki/IPv4
|0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|
---------------------------------------------------------------------------------------
|version| IHL | DSCP | ECN | Total Length |
---------------------------------------------------------------------------------------
| identification | flags | Fragemnt Offset |
---------------------------------------------------------------------------------------
| TTL | Protocol | Header Checksum |
---------------------------------------------------------------------------------------
| Source Ip Address |
---------------------------------------------------------------------------------------
| Destination Ip Address |
---------------------------------------------------------------------------------------
H is unsigned short (2 bytes) ! is for network (big-endian) convert into integerprint ("source ip == " + str( ipaddress.IPv4Address(self.source_ip)))print ("destination ip == " + str( ipaddress.IPv4Address(self.destination_ip)))print ("checksum = "+ str(self.checksum))print ("ttl == " + str(self.ttl)) | 1,440 | en | 0.314451 |
# -*- coding:utf-8 -*-
"""
Copyright (c) 2013-2016 SYPH, All Rights Reserved.
-----------------------------------------------------------
Author: S.JunPeng
Date: 2016/12/22
Change Activity:
"""
import logging
import json
from vendor.utils.encrypt import Cryption
from apps.common.models import ClientOverview
from apps.remote.models import FeatureFieldRel
from apps.etl.context import ApplyContext
from vendor.errors.api_errors import *
logger = logging.getLogger('apps.featureapi')
class Judger(object):
"""
1.authentication (_check_identity)
2.data decryption (_decrypt)
3.check availability of arguments (_args_useful_check)
4.throw the Exceptions
5.finally check all works
"""
    def __init__(self, client_code, data):
        """
        :param client_code: identifier used to look up the client's credentials
        :param data: encrypted request payload, decoded later by ``_decrypt``
        """
        self.client_code = client_code
        # Client credentials; populated by ``_check_identity``.
        self.client_id = ''
        self.client_secret = ''
        self.des_key = ''
        self.origin_data = data
        self.cryption = Cryption()
        # Request contents; populated by ``_decrypt``.
        self.apply_id = ''
        self.target_features = []
        self.arguments = {}
        # One result entry per requested feature; filled elsewhere.
        self.ret_msg = []
def _check_sum(self):
if self.client_id and self.client_secret and self.des_key and self.target_features and self.arguments \
and (len(self.target_features) == len(self.ret_msg)):
return True
else:
return False
def _check_identity(self):
client_package = ClientOverview.objects.filter(client_code=self.client_code)
if not client_package:
logger.error('Response from the function of `judge._check_identity`, error_msg=%s, rel_err_msg=%s'
% (UserIdentityError.message, 'No data in ClientOverview'), exc_info=True)
raise UserIdentityError # E02
client_package = client_package[0]
self.client_id = client_package.client_id
self.client_secret = client_package.client_secret
self.des_key = client_package.des_key
def encrypt(self, data):
json_data = json.dumps(data)
des_data = Cryption.aes_base64_encrypt(json_data, self.des_key)
return des_data
def _decrypt(self):
try:
json_data = Cryption.aes_base64_decrypt(self.origin_data, self.des_key)
message = json.loads(json_data)
except Exception as e:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (EncryptError.message, e.message), exc_info=True)
raise EncryptError # E03
self.apply_id = message.get('apply_id', None)
if not self.apply_id:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (GetApplyIdError.message, "Missing apply_id in the post_data"), exc_info=True)
raise GetApplyIdError # E04
self.target_features = message.get('res_keys', None)
if not self.target_features:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (GetResKeysError.message, "Missing res_keys in the post_data"), exc_info=True)
raise GetResKeysError # E05
apply_base = ApplyContext(self.apply_id)
self.arguments = apply_base.load()
if not self.arguments:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (GetArgumentsError.message, "Missing arguments in the post_data"), exc_info=True)
raise GetArgumentsError # E06
def _args_useful_check(self):
"""
need sql which mapping the target features and arguments
:return:
"""
arg_msg_list = FeatureFieldRel.objects.filter(
feature_name__in=self.target_features,
is_delete=False,
)
for arg_msg in arg_msg_list:
if arg_msg.raw_field_name in self.arguments.keys():
if self.ret_msg and (arg_msg.feature_name == (self.ret_msg[-1])['target_field_name']):
sub_msg = self.ret_msg[-1]
if arg_msg.feature_name == sub_msg['target_field_name']:
sub_msg['arguments'].update({
arg_msg.raw_field_name: self.arguments[arg_msg.raw_field_name],
})
self.ret_msg[-1] = sub_msg
else:
temp_msg = {
'data_identity': arg_msg.data_identity,
'target_field_name': arg_msg.feature_name,
'arguments': {
arg_msg.raw_field_name: self.arguments[arg_msg.raw_field_name],
}
}
self.ret_msg.append(temp_msg)
else:
logger.error('Response from the function of `judge._args_useful_check`, error_msg=%s, rel_err_msg=%s'
% (ArgumentsAvailableError.message, "Arguments are not enough to get all res_keys"),
exc_info=True)
raise ArgumentsAvailableError # E07
def work_stream(self):
self._check_identity()
self._decrypt()
self._args_useful_check()
return self._check_sum()
| procuratorate/dataocean_judger.py | 5,375 | 1.authentication (_check_identity)
2.data decryption (_decrypt)
3.check availability of arguments (_args_useful_check)
4.throw the Exceptions
5.finally check all works
need sql which mapping the target features and arguments
:return:
Copyright (c) 2013-2016 SYPH, All Rights Reserved.
-----------------------------------------------------------
Author: S.JunPeng
Date: 2016/12/22
Change Activity:
-*- coding:utf-8 -*- E02 E03 E04 E05 E06 E07 | 444 | en | 0.47357 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
import os
import numpy as np
import glob
from psrsigsim.signal.fb_signal import FilterBankSignal
from psrsigsim.pulsar.pulsar import Pulsar
from psrsigsim.pulsar.portraits import DataPortrait
from psrsigsim.pulsar.profiles import DataProfile
from psrsigsim.ism.ism import ISM
from psrsigsim.telescope.telescope import Telescope
from psrsigsim.telescope.receiver import Receiver
from psrsigsim.telescope.backend import Backend
from psrsigsim.io.psrfits import PSRFITS
from psrsigsim.utils.utils import make_quant
from psrsigsim.io.txtfile import TxtFile
from psrsigsim.simulate.simulate import Simulation
@pytest.fixture
def j1713_profile():
"""
Numpy array of J1713+0747 profile.
"""
path = 'psrsigsim/data/J1713+0747_profile.npy'
return np.load(path)
@pytest.fixture
def PSRfits():
"""
Fixture psrfits class
"""
fitspath = "data/test.fits"
tempfits = "data/B1855+09.L-wide.PUPPI.11y.x.sum.sm"
return PSRFITS(path=fitspath, template=tempfits, fits_mode='copy')
@pytest.fixture
def param_dict():
"""
Fixture parameter dictionary.
"""
pdict = {'fcent' : 430,
'bandwidth' : 100,
'sample_rate' : 1.5625,
'dtype' : np.float32,
'Npols' : 1,
'Nchan' : 64,
'sublen' : 2.0,
'fold' : True,
'period' : 1.0,
'Smean' : 1.0,
'profiles' : [0.5, 0.5, 1.0], # Gaussian
'tobs' : 4.0,
'name' : 'J0000+0000',
'dm' : 10.0,
'tau_d' : 50e-9,
'tau_d_ref_f' : 1500.0,
'aperture' : 100.0,
'area' : 5500.0,
'Tsys' : 35.0,
'tscope_name' : "TestScope",
'system_name' : "TestSys",
'rcvr_fcent' : 430,
'rcvr_bw' : 100,
'rcvr_name' : "TestRCVR",
'backend_samprate' : 1.5625,
'backend_name' : "TestBack",
'tempfile' : None,
'parfile' : None,
}
return pdict
@pytest.fixture
def simulation():
"""
Fixture Simulation class. Cannot be the only simulation tested.
"""
sim = Simulation(fcent = 430,
bandwidth = 100,
sample_rate = 1.0*2048*10**-6,
dtype = np.float32,
Npols = 1,
Nchan = 64,
sublen = 2.0,
fold = True,
period = 1.0,
Smean = 1.0,
profiles = None,
tobs = 4.0,
name = 'J0000+0000',
dm = 10.0,
tau_d = 50e-9,
tau_d_ref_f = 1500.0,
aperture = 100.0,
area = 5500.0,
Tsys = 35.0,
tscope_name = "TestScope",
system_name = "TestSys",
rcvr_fcent = 430,
rcvr_bw = 100,
rcvr_name ="TestRCVR",
backend_samprate = 1.5625,
backend_name = "TestBack",
tempfile = "data/B1855+09.L-wide.PUPPI.11y.x.sum.sm",
parfile = None,
psrdict = None)
return sim
def test_initsim(param_dict):
"""
Test initializing the simulation from dictionary, parfile
"""
sim = Simulation(psrdict = param_dict)
with pytest.raises(NotImplementedError):
sim2 = Simulation(parfile = "testpar.par")
def test_initsig(simulation):
"""
Test init_signal function.
"""
# Test from input params
simulation.init_signal()
# Test from template file
simulation.init_signal(from_template = True)
def test_initprof(simulation, j1713_profile):
"""
Test init_profile function.
"""
# Test no input
simulation.init_profile()
# Test function input
with pytest.raises(NotImplementedError):
def gprof(x, p0):
return p0[0]* np.exp(-0.5*((x-p0[1])/(p0[2]))**2)
simulation._profiles = gprof
simulation.init_profile()
# Test Gaussian as input
simulation._profiles = [0.5, 0.5, 1.0]
simulation.init_profile()
# Test data array as input
simulation._profiles = j1713_profile
simulation.init_profile()
# Test array that's not long enough
with pytest.raises(RuntimeError):
simulation._profiles = [0.5, 0.5]
simulation.init_profile()
# Test profile class as input
pr = DataProfile(j1713_profile,phases=None)
print(type(pr), pr)
simulation._profiles = pr
simulation.init_profile()
def test_initpsr(simulation):
"""
Test init_pulsar function.
"""
simulation.init_pulsar()
def test_initism(simulation):
"""
Test init_ism function.
"""
simulation.init_ism()
def test_inittscope(simulation):
"""
Test init_telescope function.
"""
# Test init GBT
simulation._tscope_name = "GBT"
simulation.init_telescope()
# Test init Arecibo
simulation._tscope_name = "Arecibo"
simulation.init_telescope()
# Test input telescope
simulation._tscope_name = "TestScope"
simulation.init_telescope()
# Test list of systems for telescope
simulation._system_name = ["Sys1", "Sys2"]
simulation._rcvr_fcent = [430, 800]
simulation._rcvr_bw = [100, 200]
simulation._rcvr_name = ["R1", "R2"]
simulation._backend_samprate = [1.5625, 12.5]
simulation._backend_name = ["B1", "B2"]
simulation.init_telescope()
# And the catch with multiple systems
with pytest.raises(RuntimeError):
simulation._backend_name = ["B1", "B2", "B3"]
simulation.init_telescope()
def test_simulate(simulation):
"""
Test simulate function.
"""
simulation.simulate()
@pytest.mark.filterwarnings('ignore::fitsio.FITSRuntimeWarning')
def test_savesim(simulation, PSRfits):
"""
Test save simulation function.
"""
simulation._Nchan = 1
simulation._tobs = 2.0
#S = PSRfits.make_signal_from_psrfits()
#simulation._tobs = PSRfits.tsubint.value*PSRfits.nsubint
simulation.simulate(from_template = True)
# Try pdv format
simulation.save_simulation(out_format = "pdv")
# Try psrfits format
simulation.save_simulation(out_format = "psrfits", phaseconnect = False)
os.remove("sim_fits.fits")
# Try psrfits format with phaseconnect = True
#parfile = "data/test_parfile.par"
#simulation._parfile = parfile
#simulation.save_simulation(out_format = "psrfits", phaseconnect = True)
#os.remove("sim_fits.fits")
dfs = glob.glob("simfits*")
for df in dfs:
os.remove(df)
# Try psrfits with runtime error
# Try wrong output file type
with pytest.raises(RuntimeError):
simulation.save_simulation(out_format = "wrong_fmt")
simulation._tempfile = None
simulation.save_simulation(out_format = "psrfits")
| tests/test_simulate.py | 6,983 | Fixture psrfits class
Numpy array of J1713+0747 profile.
Fixture parameter dictionary.
Fixture Simulation class. Cannot be the only simulation tested.
Test init_ism function.
Test init_profile function.
Test init_pulsar function.
Test init_signal function.
Test initializing the simulation from dictionary, parfile
Test init_telescope function.
Test save simulation function.
Test simulate function.
!/usr/bin/env python3 -*- coding: utf-8 -*- Gaussian Test from input params Test from template file Test no input Test function input Test Gaussian as input Test data array as input Test array that's not long enough Test profile class as input Test init GBT Test init Arecibo Test input telescope Test list of systems for telescope And the catch with multiple systemsS = PSRfits.make_signal_from_psrfits()simulation._tobs = PSRfits.tsubint.value*PSRfits.nsubint Try pdv format Try psrfits format Try psrfits format with phaseconnect = Trueparfile = "data/test_parfile.par"simulation._parfile = parfilesimulation.save_simulation(out_format = "psrfits", phaseconnect = True)os.remove("sim_fits.fits") Try psrfits with runtime error Try wrong output file type | 1,157 | en | 0.542264 |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 10 22:59:51 2019
@author: Sravan
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 14 22:36:21 2019
@author: Sravan
"""
import csv
import numpy as np
from scipy.spatial.distance import pdist, squareform, euclidean, cdist
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import scipy.integrate as integrate
import matplotlib.animation as animation
"""
Variables: Wind speed, Air traffic (# of drones), Obstacles (Trees, Buildings)
Fixed: Distance, Air Resistance, Gravity, Battery level
Rules: Drone Speed (Air traffic, Wind speed, Battery level), Collisions (Drone position)
Study: Time, Speed
Movement: v_air = sqrt(mg/(nAρ)), ρ = 1.22 kg m^-3, A = 1 m^2
          ½cρAv² = mg·tanθ, c = drag coefficient
          P = ½ρnAv_air(v_air² – v²sin²θ)
Collisions: Drone - Increase/Decrease Speed, 2) Change path- increasing elevation
https://www.research-drone.com/en/extreme_climb_rate.html
https://en.wikipedia.org/wiki/Amazon_Prime_Air
https://homepages.abdn.ac.uk/nph120/meteo/DroneFlight.pdf
"""
class ParticleBox:
"""Orbits class
init_state is an [N x 6] array, where N is the number of particles:
[[xi1, yi1, zi1, xf1, yf1, zf1, vx1, vy1, vz1, t1],
[xi2, yi2, zi2, xf2, yf2, zf2, vx2, vy2, vz2, t2],
... ]
bounds is the size of the box: [xmin, xmax, ymin, ymax, zmin, zmax]
"""
def __init__(self,
drones = 1,
wind = [0, 0, 0],
obstacles = 0,
bounds = [-32000, 32000, -32000, 32000, 0, 150],
size = 1.5,
max_height = 122,
max_speed = 22.34,
acc = 7,
M = 25.0,
G = 9.81):
self.drones = drones
self.wind = wind
self.size = size
self.G = G
self.max_height = max_height
self.max_speed = max_speed
self.acc_vert = acc
self.acc_vert_eff = acc + G
self.acc_hor = acc
self.obstacles = 0
self.obstacles_size = 40
self.time_elapsed = 0
self.bounds = bounds
np.random.seed(0)
init_state = np.random.random((drones, 10))
init_state[:, :2] -= 0.5
init_state[:, :2] *= bounds[1]*2
init_state[:, 2:] = 0.0
for i in range(len(init_state)):
vecs = [64000.0, 64000.0]
while vecs[0] > bounds[1] or vecs[0] < bounds[0] or vecs[1] > bounds[3] or vecs[1] < bounds[2]:
vecs = np.random.standard_normal(2)
mags = np.linalg.norm(vecs)
vecs /= mags
vecs *= 16000
vecs += init_state[i, :2]
init_state[i, 3:5] =vecs
if obstacles > 0:
np.random.seed(1)
obs_state = np.random.random((obstacles, 3))
obs_state[:, :3] -= 0.5
obs_state[:, :2] *= bounds[1]*2
obs_state[:, 2] *= bounds[5]*2
self.init_state = np.asarray(init_state, dtype=float)
#self.obs_state = np.asarray(obs_state, dtype=float)
self.M = M * np.ones(self.init_state.shape[0])
self.state = self.init_state.copy()
#update velocity
self.state[:, 6] = self.wind[0]
self.state[:, 7] = self.wind[1]
self.state[:, 8] = self.wind[2]
def step(self, dt):
"""step once by dt seconds"""
self.time_elapsed += dt
# find distance to goal
D = cdist(self.state[:, :3], self.state[:, 3:6], 'euclidean')
ind, din = np.where(D > 122)
uniqua = (ind == din)
ind = ind[uniqua]
# update velocities of individual drones
for i in zip(ind):
#velocity vector
v = self.state[i, 8]
v_avg = v
a_ver = self.acc_vert
a_ver_eff = self.acc_vert_eff
height = self.max_height - self.state[i, 2]
print(height)
if height > 0:
n = 1
if v > 0:
n = v / abs(v)
stop = n * v**2/(2 * a_ver)
t_end = abs(v / a_ver)
b1 = (v**2 + t_end**2)**(0.5)
b2 = ((v + n * a_ver * dt)**2 + (t_end + dt)**2)**(0.5)
s1 = ((a_ver * dt)**2 + dt**2)**(0.5)
s2 = dt * 2
P = (b2 - b1) + s1 + s2
t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
h = 2 * t / (b2 - b1)
area = n * (t + (b2 - b1) * h)
if (t_end <= dt and stop > (height - area)):
v_avg = 0
self.state[i, 8] = 0
self.state[i, 2] = self.max_height
elif (stop > (height - area)):
t_max = 0
if stop < height:
a = 2 * (a_ver)**2
b = 4 * (a_ver) * v
c = v**2 - 2 * a_ver * height
t_max = (-b + (b**2 - 4 * a * c)**(0.5)) / (2 * a)
v_max = v + a_ver * (t_max / dt)
v_end = 2 * v_max - v - a_ver * dt
v_avg = ((v_max + v) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
self.state[i, 8] = v_end
else:
v_avg = v + a_ver * dt / 2
self.state[i, 8] += a_ver * dt
elif height < 0:
n = v / abs(v)
stop = n * v**2/(2 * a_ver_eff)
t_end = abs(v / a_ver_eff)
b1 = (v**2 + t_end**2)**(0.5)
b2 = ((v + n * a_ver_eff * dt)**2 + (t_end + dt)**2)**(0.5)
s1 = ((a_ver_eff * dt)**2 + dt**2)**(0.5)
s2 = dt * 2
P = (b2 - b1) + s1 + s2
t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
h = 2 * t / (b2 - b1)
area = n * (t + (b2 - b1) * h)
if (t_end <= dt and abs(stop) <= abs(height)):
v_avg = (v / 2) * (t_end / dt)
self.state[i, 8] = v + a_ver_eff * t_end
elif (stop < (height - area)):
v_max = (height * (2 * a_ver_eff))**(0.5)
t_max = (v_max - v)/a_ver_eff
v_end = 2 * v_max - v - a_ver_eff * dt
v_avg = ((v_max + v) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
self.state[i, 8] = v_end
else:
v_avg = v - a_ver_eff * dt / 2
self.state[i, 8] = v - a_ver_eff * dt
else:
self.state[i, 8] += 0 * dt
self.state[i, 2] += v_avg * dt
# unit vector
r = self.state[i, 3:5] - self.state[i, :2]
m = np.linalg.norm(r)
u = r / m
#accelearting horizontal
a_hor = self.acc_hor
v_hor = self.state[i, 6:8]
h = np.linalg.norm(v_hor)
stop = h**2/(2 * a_hor)
t_end = h / a_hor
b1 = (h**2 + t_end**2)**(0.5)
b2 = ((h + a_hor * dt)**2 + (t_end + dt)**2)**(0.5)
s1 = ((a_hor * dt)**2 + dt**2)**(0.5)
s2 = dt*2
P = (b2 - b1) + s1 + s2
t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
s = 2 * t / (b2 - b1)
area = (t + (b2 - b1) * s)
if (t_end <= dt and stop < area):
v_hor = (h / 2) * (t_end / dt)
self.state[i, 6:8] = (h - (a_hor * t_end)) * u
elif (stop > (m - area)):
v_max = (m * (2 * a_hor))**(0.5)
t_max = (v_max - h)/a_hor
v_end = 2 * v_max - h - a_hor * dt
v_hor = ((v_max + h) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
self.state[i, 6:8] = v_end * u
else:
v_hor = h + a_hor * dt / 2
self.state[i, 6:8] = (h + a_hor * dt) * u
self.state[i, :2] += (v_hor * dt) * u
#find drones hovering
done, fund = np.where(D <= 122)
uniquo = (done == fund)
done = done[uniquo]
for d in zip(done):
print("here")
#velocity vector
v = self.state[i, 8]
v_avg = v
a_ver_eff = self.acc_vert_eff
#accelerating negative z
n = -1
if v < 0:
n = v / abs(v)
stop = n * v**2/(2 * a_ver_eff)
t_end = abs(v / a_ver_eff)
b1 = (v**2 + t_end**2)**(0.5)
b2 = ((v + n * a_ver_eff * dt)**2 + (t_end + dt)**2)**(0.5)
s1 = ((a_ver_eff * dt)**2 + dt**2)**(0.5)
s2 = dt * 2
P = (b2 - b1) + s1 + s2
t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
h = 2 * t / (b2 - b1)
area = n * (t + (b2 - b1) * h)
if (t_end <= dt and stop > area):
v_avg = (v / 2) * (t_end / dt)
self.state[i, 8] = v + a_ver_eff * t_end
self.state[i, 9] = self.time_elapsed
elif (stop < (-self.state[i, 2] - area)):
v_max = ((-self.state[i, 2]) * (2 * a_ver_eff))**(0.5)
t_max = (v_max - v)/a_ver_eff
v_end = 2 * v_max - v - a_ver_eff * dt
v_avg = ((v_max + v) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
self.state[i, 8] = v_end
else:
v_avg = v - a_ver_eff * dt / 2
self.state[i, 8] = v - a_ver_eff * dt
self.state[i, 2] += v_avg * dt
E = squareform(pdist(self.state[:, :3], 'euclidean'))
ind1, ind2 = np.where(E < (2 * self.size))
unique = (ind1 < ind2)
ind1 = ind1[unique]
ind2 = ind2[unique]
for i1, i2 in zip(ind1, ind2):
if (self.state[i1, 2] > self.state[i2, 2]):
self.state[i1, 8] += (self.acc_vert) * dt
self.state[i2, 8] -= (self.acc_vert_eff) * dt
else:
self.state[i1, 8] -= (self.acc_vert) * dt
self.state[i2, 8] += (self.acc_vert_eff) * dt
if self.obstacles > 0:
DO = np.vstack([self.state[:, :3].copy(), self.obs_state.copy()])
F = squareform(pdist(DO, 'euclidean'))
d_rone, obs = np.where(F < (2 * self.obstacles_size))
unique = (d_rone < obs and obs >= self.drones)
d_rone = d_rone[unique]
obs = obs[unique]
for d, o in zip(d_rone, obs):
if (self.obs_state[o-self.drones, 2] < 110 and self.state[d, 2] < self.obs_state[o-self.drones, 2]):
self.state[d, 8] += self.acc_vert * dt
else:
r = self.state[d, 3:5] - self.state[d, :2]
ro = self.obs_state[o-self.drones, :2] - self.state[d, :2]
r_rel = np.cross(r, ro)
if (r_rel[2] > 0):
self.state[d, 6] += self.acc_hor * dt
self.state[d, 7] += self.acc_hor * dt
else:
self.state[d, 6] -= self.acc_hor * dt
self.state[d, 7] -= self.acc_hor * dt
#restrict velocity
np.clip(self.state[:, 6], -self.max_speed + self.wind[0], self.max_speed + self.wind[0])
np.clip(self.state[:, 7], -self.max_speed + self.wind[1], self.max_speed + self.wind[1])
#------------------------------------------------------------
# set up initial state
box = ParticleBox()
dt = 1. # 1 fps
#ani = animation.FuncAnimation(fig, animate, frames=600, interval=10, init_func=init)
for i in range(10):
box.step(dt)
#final = np.hstack([box.init_state[:, :3], box.state[:, 3:]])
#with open('people.csv', 'w') as writeFile:
# writer = csv.writer(writeFile)
# writer.writerows(final) #2d list
"""with open('initial.csv', 'w') as writeInit:
writer = csv.writer(writeInit)
writer.writerows(box.init_state)
writeInit.close()
"""
with open('final_2.csv', 'w') as writeFin:
writer = csv.writer(writeFin)
writer.writerows(box.init_state)
writer.writerows(box.state)
writeFin.close()
print(box.state) | drone_2.py | 12,781 | Orbits class
init_state is an [N x 6] array, where N is the number of particles:
[[xi1, yi1, zi1, xf1, yf1, zf1, vx1, vy1, vz1, t1],
[xi2, yi2, zi2, xf2, yf2, zf2, vx2, vy2, vz2, t2],
... ]
bounds is the size of the box: [xmin, xmax, ymin, ymax, zmin, zmax]
step once by dt seconds
Created on Sun Mar 10 22:59:51 2019
@author: Sravan
-*- coding: utf-8 -*- -*- coding: utf-8 -*-self.obs_state = np.asarray(obs_state, dtype=float)update velocity find distance to goal update velocities of individual dronesvelocity vector unit vectoraccelearting horizontalfind drones hoveringvelocity vectoraccelerating negative zrestrict velocity------------------------------------------------------------ set up initial state 1 fpsani = animation.FuncAnimation(fig, animate, frames=600, interval=10, init_func=init)final = np.hstack([box.init_state[:, :3], box.state[:, 3:]])with open('people.csv', 'w') as writeFile: writer = csv.writer(writeFile) writer.writerows(final) 2d list | 1,001 | en | 0.560786 |
__author__ = 'hofmann'
__version__ = '0.0.2.1'
import os
from scripts.MetaDataTable.metadatatable import MetadataTable
from scripts.NcbiTaxonomy.ncbitaxonomy import NcbiTaxonomy
from scripts.Validator.validator import Validator
class TaxonomicProfile(Validator):
"""
Constructing taxonomic profiles from files with relative abundances.
"""
_taxonomic_profile_version = "0.9.1"
def __init__(self, taxonomy, logfile=None, verbose=True, debug=False):
"""
@param taxonomy: taxonomy handler
@type taxonomy: NcbiTaxonomy
@param logfile: file handler or file path to a log file
@type logfile: file | FileIO | StringIO | str
@param verbose: Not verbose means that only warnings and errors will be past to stream
@type verbose: bool
@param debug: Display debug messages
@type debug: bool
"""
super(TaxonomicProfile, self).__init__(label="TaxonomicProfile", logfile=logfile, verbose=verbose, debug=debug)
self._ranks = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species', 'strain']
assert isinstance(taxonomy, NcbiTaxonomy)
self._taxonomy = taxonomy
self._filename_taxonomic_profile = "taxonomic_profile_{sample_index}.txt"
def write_taxonomic_profile_from_abundance_files(
self, metadata_table, list_of_file_paths, directory_output, sample_id=""):
"""
Write a taxonomic profile file for each relative abundance file
@param metadata_table: Contains metadata of all communities
@type metadata_table: MetadataTable
@param list_of_file_paths: List of abundance file paths
@type list_of_file_paths: list[str | unicode]
@param directory_output: Profiles are written in this directory
@type directory_output: str | unicode
@param sample_id: Identifier of a sample
@type sample_id: str | unicode
"""
metadata_table_tmp = MetadataTable(logfile=self._logfile, verbose=self._verbose)
for index_abundance, file_path in enumerate(list_of_file_paths):
community_abundance = metadata_table_tmp.parse_file(file_path, column_names=False)
file_path_output = os.path.join(directory_output, self._filename_taxonomic_profile.format(
sample_index=index_abundance))
with open(file_path_output, 'w') as stream_output:
self.write_taxonomic_profile(
community_abundance,
stream_output,
metadata_table,
sample_id)
def write_taxonomic_profile(self, community_abundance, stream_output, metadata_table, sample_id=""):
"""
Stream a taxonomic profile by list of relative abundances
@param community_abundance: list of relative abundances
@type community_abundance: generator[ dict[int|long|str|unicode, str|unicode] ]
@param stream_output: Output of taxonomic profile
@type stream_output: file | FileIO | StringIO
@param metadata_table: Contains metadata of all communities
@type metadata_table: MetadataTable
@param sample_id: Identifier of a sample
@type sample_id: str | unicode
"""
assert isinstance(metadata_table, MetadataTable)
genome_abundance = {}
total_abundance = 0.0
# for community in community_abundance:
# all_communities += community
for genome_id, abundance in community_abundance:
if genome_id in genome_abundance:
raise IOError("genome id '{}' is not unique!".format(genome_id))
genome_abundance[genome_id] = float(abundance) # *float(total_length)
total_abundance += genome_abundance[genome_id]
for key, value in genome_abundance.items():
genome_abundance[key] = value / total_abundance
self._stream_taxonomic_profile(stream_output, genome_abundance, metadata_table, sample_id)
def _stream_taxonomic_profile(self, stream_output, genome_id_to_percent, metadata_table, sample_id=""):
"""
Stream a taxonomic profile by list of percentages by genome id
@param stream_output: Output of taxonomic profile
@type stream_output: file | FileIO | StringIO
@param genome_id_to_percent: Percentage for each genome id
@type genome_id_to_percent: dict[str|unicode, float]
@param metadata_table: Contains metadata of all communities
@type metadata_table: MetadataTable
@param sample_id: Identifier of a sample
@type sample_id: str | unicode
"""
strain_id_to_genome_id = {}
genome_id_to_strain_id = {}
genome_id_to_taxid = metadata_table.get_map(key_column_name="genome_ID", value_column_name="NCBI_ID")
genome_id_to_otu = metadata_table.get_map(key_column_name="genome_ID", value_column_name="OTU")
column_genome_id = metadata_table.get_column("genome_ID")
if not metadata_table.has_column("strain_id"):
column_strain_id = metadata_table.get_empty_column()
else:
column_strain_id = metadata_table.get_column("strain_id")
genome_id_to_strain_id = metadata_table.get_map(key_column_name="genome_ID", value_column_name="strain_id")
genome_id_to_lineage = self._get_genome_id_to_lineage(
genome_id_to_percent.keys(), genome_id_to_taxid, strain_id_to_genome_id, genome_id_to_strain_id)
percent_by_rank_by_taxid = self._get_percent_by_rank_by_taxid(genome_id_to_lineage, genome_id_to_percent)
# add strain_id to metadata
#for row_index, genome_id in enumerate(column_genome_id):
# column_strain_id[row_index] = genome_id_to_strain_id[genome_id]
#assert len(column_strain_id) == len(set(column_strain_id))
#metadata_table.insert_column(column_strain_id, "strain_id")
# stream taxonomic profile
self._stream_tp_header(stream_output, sample_id)
self._stream_tp_rows(stream_output, percent_by_rank_by_taxid, strain_id_to_genome_id, genome_id_to_otu)
def _get_genome_id_to_lineage(
self, list_of_genome_id, genome_id_to_taxid, strain_id_to_genome_id, genome_id_to_strain_id):
"""
Returnes the lineage for each genome id, assigning new strain id if not available
@param list_of_genome_id: List of identifier of genomes
@type list_of_genome_id: list[str|unicode]
@param genome_id_to_taxid: Assigned taxid for each genome id
@type genome_id_to_taxid: dict[str|unicode, str|unicode]
@param strain_id_to_genome_id: Mapping from strain id to genome id
@type strain_id_to_genome_id: dict[str|unicode, str|unicode]
@param genome_id_to_strain_id: Mapping from genome id to strain id
@type genome_id_to_strain_id: dict[str|unicode, str|unicode]
@return: lineage for each genome id using genome id as key
@rtype: dict[str|unicode, list[None|str|unicode]]
"""
strains_by_taxid = {}
genome_id_to_lineage = {}
for genome_id in list_of_genome_id:
tax_id = genome_id_to_taxid[genome_id]
if tax_id == "":
raise KeyError("genome_ID '{}' has no taxid!".format(genome_id))
tax_id = self._taxonomy.get_updated_taxid(tax_id)
genome_id_to_lineage[genome_id] = self._taxonomy.get_lineage_of_legal_ranks(
tax_id, ranks=self._ranks, default_value=None)
if genome_id_to_lineage[genome_id][-1] is not None:
continue
if tax_id not in strains_by_taxid:
strains_by_taxid[tax_id] = 0
strains_by_taxid[tax_id] += 1
if genome_id in genome_id_to_strain_id and genome_id_to_strain_id[genome_id]:
strain_id = genome_id_to_strain_id[genome_id]
else:
strain_id = "{}.{}".format(tax_id, strains_by_taxid[tax_id])
# make sure assigned strain ids are unique, in case of previous assigned ids
while strain_id in genome_id_to_strain_id.values():
strains_by_taxid[tax_id] += 1
strain_id = "{}.{}".format(tax_id, strains_by_taxid[tax_id])
genome_id_to_strain_id[genome_id] = strain_id
genome_id_to_lineage[genome_id][-1] = strain_id
strain_id_to_genome_id[strain_id] = genome_id
return genome_id_to_lineage
def _get_percent_by_rank_by_taxid(self, genome_id_to_lineage, genome_id_to_percent):
"""
Return the percentage for each taxid of a list of default ranks
@param genome_id_to_lineage: Mapping from genome id to a lineage (list)
@type genome_id_to_lineage: dict[str|unicode, list[None|str|unicode]]
@param genome_id_to_percent: Mapping from genome id to percentage
@type genome_id_to_percent: dict[str|unicode, float]
@return: Percentage for each taxid of a list of default ranks as dictionary of dictionaries
@rtype: dict[str|unicode, dict[str|unicode, float]]
"""
percent_by_rank_by_taxid = {}
for rank in self._ranks:
percent_by_rank_by_taxid[rank] = dict()
for rank_index, rank in enumerate(self._ranks):
# rank = ranks[rank_index]
for genome_id in genome_id_to_lineage:
tax_id = genome_id_to_lineage[genome_id][rank_index]
if tax_id is None:
continue
percent = genome_id_to_percent[genome_id]
if tax_id not in percent_by_rank_by_taxid[rank]:
percent_by_rank_by_taxid[rank][tax_id] = 0
percent_by_rank_by_taxid[rank][tax_id] += percent
return percent_by_rank_by_taxid
def _stream_tp_rows(self, stream_output, percent_by_rank_by_taxid, strain_id_to_genome_id, genome_id_to_otu):
"""
Stream the rows of the taxonomic profile.
@param stream_output: Output of taxonomic profile
@type stream_output: file | FileIO | StringIO
@param percent_by_rank_by_taxid: Percentage for each taxid of a list of default ranks as dictionary of dictionaries
@type percent_by_rank_by_taxid: dict[str|unicode, dict[str|unicode, float]]
@param strain_id_to_genome_id: Map from strain id to a genome identifier
@type strain_id_to_genome_id: dict[str|unicode, str|unicode]
@param genome_id_to_otu: Map from genome id to an otu identifier
@type genome_id_to_otu: dict[str|unicode, str|unicode]
"""
row_format = "{taxid}\t{rank}\t{taxpath}\t{taxpath_sn}\t{abp:.4f}\t{gid}\t{otu}\n"
for rank_index, rank in enumerate(self._ranks):
for tax_id in percent_by_rank_by_taxid[rank]:
if tax_id == '':
self._logger.warning("Missing rank %s for a genome" % rank)
continue
if '.' in tax_id:
genome_id = strain_id_to_genome_id[tax_id]
otu = genome_id_to_otu[genome_id]
lineage = self._taxonomy.get_lineage_of_legal_ranks(tax_id.split('.')[0], ranks=self._ranks, default_value="")
lineage[-1] = tax_id
else:
genome_id = ""
otu = ""
lineage = self._taxonomy.get_lineage_of_legal_ranks(tax_id, ranks=self._ranks, default_value="")
lineage = lineage[:rank_index+1]
lineage_sn = [self._taxonomy.get_scientific_name(tid) if tid != "" and '.' not in tid else "" for tid in lineage]
if '.' in tax_id:
lineage_sn[-1] = self._taxonomy.get_scientific_name(tax_id.split('.')[0]) + " strain" # ""
if percent_by_rank_by_taxid[rank][tax_id] != 0:
stream_output.write(row_format.format(
taxid=tax_id,
rank=rank,
taxpath="|".join(lineage),
taxpath_sn="|".join(lineage_sn),
abp=percent_by_rank_by_taxid[rank][tax_id]*100,
gid=genome_id,
otu=otu
))
def _stream_tp_header(self, output_stream, identifier):
"""
Stream the header of the taxonomic profile.
@param output_stream: Output of taxonomic profile
@type output_stream: file | FileIO | StringIO
@param identifier: Identifier of a sample
@type identifier: str | unicode
"""
output_stream.write("@SampleID:{}\n".format(identifier))
output_stream.write("@Version:{}\n".format(self._taxonomic_profile_version))
output_stream.write("@Ranks:{ranks}\n\n".format(ranks="|".join(self._ranks)))
output_stream.write("@@TAXID\tRANK\tTAXPATH\tTAXPATHSN\tPERCENTAGE\t_CAMI_genomeID\t_CAMI_OTU\n")
| scripts/ComunityDesign/taxonomicprofile.py | 13,031 | Constructing taxonomic profiles from files with relative abundances.
@param taxonomy: taxonomy handler
@type taxonomy: NcbiTaxonomy
@param logfile: file handler or file path to a log file
@type logfile: file | FileIO | StringIO | str
@param verbose: Not verbose means that only warnings and errors will be past to stream
@type verbose: bool
@param debug: Display debug messages
@type debug: bool
Returnes the lineage for each genome id, assigning new strain id if not available
@param list_of_genome_id: List of identifier of genomes
@type list_of_genome_id: list[str|unicode]
@param genome_id_to_taxid: Assigned taxid for each genome id
@type genome_id_to_taxid: dict[str|unicode, str|unicode]
@param strain_id_to_genome_id: Mapping from strain id to genome id
@type strain_id_to_genome_id: dict[str|unicode, str|unicode]
@param genome_id_to_strain_id: Mapping from genome id to strain id
@type genome_id_to_strain_id: dict[str|unicode, str|unicode]
@return: lineage for each genome id using genome id as key
@rtype: dict[str|unicode, list[None|str|unicode]]
Return the percentage for each taxid of a list of default ranks
@param genome_id_to_lineage: Mapping from genome id to a lineage (list)
@type genome_id_to_lineage: dict[str|unicode, list[None|str|unicode]]
@param genome_id_to_percent: Mapping from genome id to percentage
@type genome_id_to_percent: dict[str|unicode, float]
@return: Percentage for each taxid of a list of default ranks as dictionary of dictionaries
@rtype: dict[str|unicode, dict[str|unicode, float]]
Stream a taxonomic profile by list of percentages by genome id
@param stream_output: Output of taxonomic profile
@type stream_output: file | FileIO | StringIO
@param genome_id_to_percent: Percentage for each genome id
@type genome_id_to_percent: dict[str|unicode, float]
@param metadata_table: Contains metadata of all communities
@type metadata_table: MetadataTable
@param sample_id: Identifier of a sample
@type sample_id: str | unicode
Stream the header of the taxonomic profile.
@param output_stream: Output of taxonomic profile
@type output_stream: file | FileIO | StringIO
@param identifier: Identifier of a sample
@type identifier: str | unicode
Stream the rows of the taxonomic profile.
@param stream_output: Output of taxonomic profile
@type stream_output: file | FileIO | StringIO
@param percent_by_rank_by_taxid: Percentage for each taxid of a list of default ranks as dictionary of dictionaries
@type percent_by_rank_by_taxid: dict[str|unicode, dict[str|unicode, float]]
@param strain_id_to_genome_id: Map from strain id to a genome identifier
@type strain_id_to_genome_id: dict[str|unicode, str|unicode]
@param genome_id_to_otu: Map from genome id to an otu identifier
@type genome_id_to_otu: dict[str|unicode, str|unicode]
Stream a taxonomic profile by list of relative abundances
@param community_abundance: list of relative abundances
@type community_abundance: generator[ dict[int|long|str|unicode, str|unicode] ]
@param stream_output: Output of taxonomic profile
@type stream_output: file | FileIO | StringIO
@param metadata_table: Contains metadata of all communities
@type metadata_table: MetadataTable
@param sample_id: Identifier of a sample
@type sample_id: str | unicode
Write a taxonomic profile file for each relative abundance file
@param metadata_table: Contains metadata of all communities
@type metadata_table: MetadataTable
@param list_of_file_paths: List of abundance file paths
@type list_of_file_paths: list[str | unicode]
@param directory_output: Profiles are written in this directory
@type directory_output: str | unicode
@param sample_id: Identifier of a sample
@type sample_id: str | unicode
for community in community_abundance: all_communities += community *float(total_length) add strain_id to metadatafor row_index, genome_id in enumerate(column_genome_id): column_strain_id[row_index] = genome_id_to_strain_id[genome_id]assert len(column_strain_id) == len(set(column_strain_id))metadata_table.insert_column(column_strain_id, "strain_id") stream taxonomic profile make sure assigned strain ids are unique, in case of previous assigned ids rank = ranks[rank_index] "" | 4,154 | en | 0.44997 |
"""
Support for interface with a Bose Soundtouch.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.soundtouch/
"""
import logging
import re
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK,
SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP,
SUPPORT_VOLUME_SET, SUPPORT_TURN_ON, SUPPORT_PLAY, MediaPlayerDevice,
PLATFORM_SCHEMA)
from homeassistant.const import (CONF_HOST, CONF_NAME, STATE_OFF, CONF_PORT,
STATE_PAUSED, STATE_PLAYING,
STATE_UNAVAILABLE)
REQUIREMENTS = ['libsoundtouch==0.7.2']

_LOGGER = logging.getLogger(__name__)

# Services are registered on the media_player domain.
DOMAIN = 'media_player'

# Custom multi-room (zone) services exposed by this platform.
SERVICE_PLAY_EVERYWHERE = 'soundtouch_play_everywhere'
SERVICE_CREATE_ZONE = 'soundtouch_create_zone'
SERVICE_ADD_ZONE_SLAVE = 'soundtouch_add_zone_slave'
SERVICE_REMOVE_ZONE_SLAVE = 'soundtouch_remove_zone_slave'

# Map libsoundtouch play states onto Home Assistant media-player states.
MAP_STATUS = {
    "PLAY_STATE": STATE_PLAYING,
    "BUFFERING_STATE": STATE_PLAYING,
    "PAUSE_STATE": STATE_PAUSED,
    "STOP_STATE": STATE_OFF
}

# Key under hass.data where all known SoundTouchDevice instances are kept.
DATA_SOUNDTOUCH = "soundtouch"

# Service-call schemas: every call names a master entity; all but
# "play everywhere" additionally name the slave entities to act on.
SOUNDTOUCH_PLAY_EVERYWHERE = vol.Schema({
    vol.Required('master'): cv.entity_id
})
SOUNDTOUCH_CREATE_ZONE_SCHEMA = vol.Schema({
    vol.Required('master'): cv.entity_id,
    vol.Required('slaves'): cv.entity_ids
})
SOUNDTOUCH_ADD_ZONE_SCHEMA = vol.Schema({
    vol.Required('master'): cv.entity_id,
    vol.Required('slaves'): cv.entity_ids
})
SOUNDTOUCH_REMOVE_ZONE_SCHEMA = vol.Schema({
    vol.Required('master'): cv.entity_id,
    vol.Required('slaves'): cv.entity_ids
})

DEFAULT_NAME = 'Bose Soundtouch'
DEFAULT_PORT = 8090  # default port used to reach the device

# Feature bitmask advertised to Home Assistant.
SUPPORT_SOUNDTOUCH = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | \
                     SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | \
                     SUPPORT_NEXT_TRACK | SUPPORT_TURN_OFF | \
                     SUPPORT_VOLUME_SET | SUPPORT_TURN_ON | SUPPORT_PLAY

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Bose Soundtouch platform.

    Devices arrive either via discovery or via manual configuration;
    both paths append the device to ``hass.data[DATA_SOUNDTOUCH]`` and
    register the same zone-management services.
    """
    if DATA_SOUNDTOUCH not in hass.data:
        hass.data[DATA_SOUNDTOUCH] = []
    if discovery_info:
        host = discovery_info['host']
        port = int(discovery_info['port'])
        # Skip the discovered device if it already exists from config.
        if host in [device.config['host'] for device in
                    hass.data[DATA_SOUNDTOUCH]]:
            return
        remote_config = {
            'id': 'ha.component.soundtouch',
            'host': host,
            'port': port
        }
        soundtouch_device = SoundTouchDevice(None, remote_config)
        hass.data[DATA_SOUNDTOUCH].append(soundtouch_device)
        add_devices([soundtouch_device])
    else:
        name = config.get(CONF_NAME)
        remote_config = {
            'id': 'ha.component.soundtouch',
            'port': config.get(CONF_PORT),
            'host': config.get(CONF_HOST)
        }
        soundtouch_device = SoundTouchDevice(name, remote_config)
        hass.data[DATA_SOUNDTOUCH].append(soundtouch_device)
        add_devices([soundtouch_device])

    def service_handle(service):
        """Handle the applying of a service."""
        master_device_id = service.data.get('master')
        slaves_ids = service.data.get('slaves')
        slaves = []
        if slaves_ids:
            slaves = [device for device in hass.data[DATA_SOUNDTOUCH] if
                      device.entity_id in slaves_ids]
        # Generator expression: stop at the first matching device instead
        # of materializing the whole list just to take its first element.
        master = next((device for device in hass.data[DATA_SOUNDTOUCH] if
                       device.entity_id == master_device_id), None)
        if master is None:
            _LOGGER.warning("Unable to find master with entity_id: %s",
                            master_device_id)
            return
        if service.service == SERVICE_PLAY_EVERYWHERE:
            # Every known device except the master becomes a slave.
            slaves = [d for d in hass.data[DATA_SOUNDTOUCH] if
                      d.entity_id != master_device_id]
            master.create_zone(slaves)
        elif service.service == SERVICE_CREATE_ZONE:
            master.create_zone(slaves)
        elif service.service == SERVICE_REMOVE_ZONE_SLAVE:
            master.remove_zone_slave(slaves)
        elif service.service == SERVICE_ADD_ZONE_SLAVE:
            master.add_zone_slave(slaves)

    hass.services.register(DOMAIN, SERVICE_PLAY_EVERYWHERE,
                           service_handle,
                           schema=SOUNDTOUCH_PLAY_EVERYWHERE)
    hass.services.register(DOMAIN, SERVICE_CREATE_ZONE,
                           service_handle,
                           schema=SOUNDTOUCH_CREATE_ZONE_SCHEMA)
    hass.services.register(DOMAIN, SERVICE_REMOVE_ZONE_SLAVE,
                           service_handle,
                           schema=SOUNDTOUCH_REMOVE_ZONE_SCHEMA)
    hass.services.register(DOMAIN, SERVICE_ADD_ZONE_SLAVE,
                           service_handle,
                           schema=SOUNDTOUCH_ADD_ZONE_SCHEMA)
class SoundTouchDevice(MediaPlayerDevice):
    """Representation of a SoundTouch Bose device."""

    def __init__(self, name, config):
        """Create Soundtouch Entity.

        If *name* is None, the name reported by the device is used.
        """
        from libsoundtouch import soundtouch_device
        self._device = soundtouch_device(config['host'], config['port'])
        if name is None:
            self._name = self._device.config.name
        else:
            self._name = name
        self._status = self._device.status()
        self._volume = self._device.volume()
        self._config = config

    @property
    def config(self):
        """Return specific soundtouch configuration."""
        return self._config

    @property
    def device(self):
        """Return Soundtouch device."""
        return self._device

    def update(self):
        """Retrieve the latest data."""
        self._status = self._device.status()
        self._volume = self._device.volume()

    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        return self._volume.actual / 100

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        if self._status.source == 'STANDBY':
            return STATE_OFF
        return MAP_STATUS.get(self._status.play_status, STATE_UNAVAILABLE)

    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return self._volume.muted

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_SOUNDTOUCH

    def turn_off(self):
        """Turn off media player."""
        self._device.power_off()
        self._status = self._device.status()

    def turn_on(self):
        """Turn on media player."""
        self._device.power_on()
        self._status = self._device.status()

    def volume_up(self):
        """Volume up the media player."""
        self._device.volume_up()
        self._volume = self._device.volume()

    def volume_down(self):
        """Volume down media player."""
        self._device.volume_down()
        self._volume = self._device.volume()

    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        self._device.set_volume(int(volume * 100))
        self._volume = self._device.volume()

    def mute_volume(self, mute):
        """Send mute command.

        NOTE(review): libsoundtouch only exposes a mute toggle, so the
        requested ``mute`` state is ignored here — confirm this matches
        the desired behavior.
        """
        self._device.mute()
        self._volume = self._device.volume()

    def media_play_pause(self):
        """Simulate play pause media player."""
        self._device.play_pause()
        self._status = self._device.status()

    def media_play(self):
        """Send play command."""
        self._device.play()
        self._status = self._device.status()

    def media_pause(self):
        """Send media pause command to media player."""
        self._device.pause()
        self._status = self._device.status()

    def media_next_track(self):
        """Send next track command."""
        self._device.next_track()
        self._status = self._device.status()

    def media_previous_track(self):
        """Send the previous track command."""
        self._device.previous_track()
        self._status = self._device.status()

    @property
    def media_image_url(self):
        """Image url of current playing media."""
        return self._status.image

    @property
    def media_title(self):
        """Title of current playing media."""
        if self._status.station_name is not None:
            return self._status.station_name
        elif self._status.artist is not None:
            return self._status.artist + " - " + self._status.track
        return None

    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        return self._status.duration

    @property
    def media_artist(self):
        """Artist of current playing media."""
        return self._status.artist

    @property
    def media_track(self):
        """Track of current playing media."""
        return self._status.track

    @property
    def media_album_name(self):
        """Album name of current playing media."""
        return self._status.album

    def play_media(self, media_type, media_id, **kwargs):
        """Play a piece of media.

        A media_id that looks like an HTTP(S) URL is streamed directly;
        anything else is treated as a stored preset id.
        """
        _LOGGER.debug("Starting media with media_id: %s", media_id)
        # Accept https as well as http; previously https URLs fell
        # through to the preset lookup and failed with a warning.
        if re.match(r'https?://', str(media_id)):
            # URL
            _LOGGER.debug("Playing URL %s", str(media_id))
            self._device.play_url(str(media_id))
        else:
            # Preset
            presets = self._device.presets()
            preset = next((preset for preset in presets if
                           preset.preset_id == str(media_id)), None)
            if preset is not None:
                _LOGGER.debug("Playing preset: %s", preset.name)
                self._device.select_preset(preset)
            else:
                _LOGGER.warning("Unable to find preset with id %s", media_id)

    def create_zone(self, slaves):
        """
        Create a zone (multi-room) and play on selected devices.

        :param slaves: slaves on which to play
        """
        if not slaves:
            _LOGGER.warning("Unable to create zone without slaves")
        else:
            _LOGGER.info("Creating zone with master %s",
                         self.device.config.name)
            self.device.create_zone([slave.device for slave in slaves])

    def remove_zone_slave(self, slaves):
        """
        Remove slave(s) from an existing zone (multi-room).

        Zone must already exist and slaves array can not be empty.
        Note: If removing last slave, the zone will be deleted and you'll have
        to create a new one. You will not be able to add a new slave anymore

        :param slaves: slaves to remove from the zone
        """
        if not slaves:
            _LOGGER.warning("Unable to find slaves to remove")
        else:
            _LOGGER.info("Removing slaves from zone with master %s",
                         self.device.config.name)
            self.device.remove_zone_slave([slave.device for slave in slaves])

    def add_zone_slave(self, slaves):
        """
        Add slave(s) to an existing zone (multi-room).

        Zone must already exist and slaves array can not be empty.

        :param slaves: slaves to add
        """
        if not slaves:
            _LOGGER.warning("Unable to find slaves to add")
        else:
            _LOGGER.info("Adding slaves to zone with master %s",
                         self.device.config.name)
            self.device.add_zone_slave([slave.device for slave in slaves])
| homeassistant/components/media_player/soundtouch.py | 12,058 | Representation of a SoundTouch Bose device.
Create Soundtouch Entity.
        Add slave(s) to an existing zone (multi-room).
Zone must already exist and slaves array can not be empty.
:param slaves:slaves to add
Return specific soundtouch configuration.
Create a zone (multi-room) and play on selected devices.
:param slaves: slaves on which to play
Return Soundtouch device.
Boolean if volume is currently muted.
Album name of current playing media.
Artist of current playing media.
Duration of current playing media in seconds.
Image url of current playing media.
Send next track command.
Send media pause command to media player.
Send play command.
Simulate play pause media player.
Send the previous track command.
Title of current playing media.
Artist of current playing media.
Send mute command.
Return the name of the device.
Play a piece of media.
        Remove slave(s) from an existing zone (multi-room).
Zone must already exist and slaves array can not be empty.
Note: If removing last slave, the zone will be deleted and you'll have
to create a new one. You will not be able to add a new slave anymore
:param slaves: slaves to remove from the zone
Handle the applying of a service.
Set volume level, range 0..1.
Set up the Bose Soundtouch platform.
Return the state of the device.
Flag media player features that are supported.
Turn off media player.
Turn on media player.
Retrieve the latest data.
Volume down media player.
Volume level of the media player (0..1).
Volume up the media player.
Support for interface with a Bose Soundtouch.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.soundtouch/
if device already exists by config URL Preset | 1,731 | en | 0.891168 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
# The MIT License
# Copyright (c) 2017 - 2021 Tammo Ippen, tammo.ippen@posteo.de
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from plotille import Canvas
# The underlying canvas-implementation can be used on its own.
def main():
    """Draw a small house ("house of Santa Claus") on a canvas and print it."""
    canvas = Canvas(width=40, height=20)
    # Walls: a square with both diagonals drawn through it.
    canvas.rect(0.1, 0.1, 0.6, 0.6)
    segments = [
        (0.1, 0.1, 0.6, 0.6),
        (0.1, 0.6, 0.6, 0.1),
        # Roof: two slanted lines meeting at the apex.
        (0.1, 0.6, 0.35, 0.8),
        (0.35, 0.8, 0.6, 0.6),
    ]
    for x_from, y_from, x_to, y_to in segments:
        canvas.line(x_from, y_from, x_to, y_to)
    canvas.text(0.3, 0.5, 'hi', color='red')
    canvas.point(0.35, 0.35, color='blue')
    canvas.fill_char(0.35, 0.1)
    print(canvas.plot())


if __name__ == '__main__':
    main()
| examples/house_example.py | 1,709 | -*- coding: utf-8 -*- The MIT License Copyright (c) 2017 - 2021 Tammo Ippen, tammo.ippen@posteo.de Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. The underlying canvas-implementation can be used on its own. | 1,180 | en | 0.861182 |
#!/usr/bin/env python
import random
import unittest
from kaldi.base.io import istringstream, ostringstream
from kaldi.cudamatrix import cuda_available, approx_equal_cu_matrix, CuMatrix
from kaldi.matrix import Matrix, Vector
from kaldi.matrix.functions import approx_equal
from kaldi.nnet3 import *
class TestNnetCompute(unittest.TestCase):
    """Tests for nnet3 computation compilation and execution on randomly
    generated networks."""

    def test_nnet_compute(self):
        """Build a random nnet, compile a computation for it, check (and
        randomly optimize) the computation, run it forward, and run the
        backward pass when derivatives are requested."""
        gen_config = NnetGenerationOptions()
        test_collapse_model = random.choice([True, False])
        # Build the network from a randomly generated sequence of configs.
        configs = generate_config_sequence(gen_config)
        nnet = Nnet()
        for j, config in enumerate(configs):
            # print("Input config[{}]:".format(j))
            # print(config)
            istrm = istringstream.from_str(config)
            nnet.read_config(istrm)
        request = ComputationRequest()
        inputs = compute_example_computation_request_simple(nnet, request)
        if test_collapse_model:
            # Switch batchnorm and dropout components into test mode
            # before the model is collapsed below.
            set_batchnorm_test_mode(True, nnet)
            set_dropout_test_mode(True, nnet)
        # Compile a computation for the request on the original model.
        compiler = Compiler(request, nnet)
        opts = CompilerOptions()
        computation = compiler.create_computation(opts)
        nnet_collapsed = Nnet.from_other(nnet)
        if test_collapse_model:
            # Compile a second computation on a collapsed copy of the model;
            # its output is compared against the original further down.
            collapse_config = CollapseModelConfig()
            collapse_model(collapse_config, nnet_collapsed)
            compiler_collapsed = Compiler(request, nnet_collapsed)
            computation_collapsed = compiler_collapsed.create_computation(opts)
            computation_collapsed.compute_cuda_indexes()
        ostrm = ostringstream()
        computation.print_computation(ostrm, nnet)
        # print("Generated computation:")
        # print(ostrm.to_str())
        # Validate the generated computation (including rewrite checks).
        check_config = CheckComputationOptions()
        check_config.check_rewrite = True
        checker = ComputationChecker(check_config, nnet, computation)
        checker.check()
        if random.choice([True, False]):
            # Randomly exercise the optimizer on the computation as well.
            opt_config = NnetOptimizeOptions()
            optimize(opt_config, nnet, max_output_time_in_request(request),
                     computation)
            ostrm = ostringstream()
            computation.print_computation(ostrm, nnet)
            # print("Optimized computation:")
            # print(ostrm.to_str())
        compute_opts = NnetComputeOptions()
        compute_opts.debug = random.choice([True, False])
        computation.compute_cuda_indexes()
        # Forward pass: feed all requested inputs, then run.
        computer = NnetComputer(compute_opts, computation, nnet, nnet)
        for i, ispec in enumerate(request.inputs):
            temp = CuMatrix.from_matrix(inputs[i])
            print("Input sum:", temp.sum())
            computer.accept_input(ispec.name, temp)
        computer.run()
        output = computer.get_output_destructive("output")
        print("Output sum:", output.sum())
        if test_collapse_model:
            # The collapsed model must produce (approximately) the same
            # output as the original on identical inputs.
            computer_collapsed = NnetComputer(compute_opts,
                                              computation_collapsed,
                                              nnet_collapsed, nnet_collapsed)
            for i, ispec in enumerate(request.inputs):
                temp = CuMatrix.from_matrix(inputs[i])
                computer_collapsed.accept_input(ispec.name, temp)
            computer_collapsed.run()
            output_collapsed = computer_collapsed.get_output_destructive("output")
            print("Output sum [collapsed]:", output_collapsed.sum())
            self.assertTrue(approx_equal_cu_matrix(output, output_collapsed),
                            "Regular and collapsed computation outputs differ.")
        # Backward pass: feed a random output derivative and read back the
        # input derivatives for every input that requested them.
        output_deriv = CuMatrix.from_size(output.num_rows(), output.num_cols())
        output_deriv.set_randn()
        if request.outputs[0].has_deriv:
            computer.accept_input("output", output_deriv)
            computer.run()
            for i, ispec in enumerate(request.inputs):
                if ispec.has_deriv:
                    in_deriv = computer.get_output_destructive(ispec.name)
                    print("Input-deriv sum for input {} is:".format(ispec.name),
                          in_deriv.sum())

    def test_nnet_decodable(self):
        """Check that DecodableNnetSimple and DecodableNnetSimpleLooped
        produce approximately equal per-frame output on a random nnet
        (skipped for recurrent and some special component types)."""
        gen_config = NnetGenerationOptions()
        configs = generate_config_sequence(gen_config)
        nnet = Nnet()
        for j, config in enumerate(configs):
            # print("Input config[{}]:".format(j))
            # print(config)
            istrm = istringstream.from_str(config)
            nnet.read_config(istrm)
        num_frames = 5 + random.randint(1, 100)
        input_dim = nnet.input_dim("input")
        output_dim = nnet.output_dim("output")
        # input_dim("ivector") can be negative when no ivector input exists.
        ivector_dim = max(0, nnet.input_dim("ivector"))
        input = Matrix(num_frames, input_dim)
        set_batchnorm_test_mode(True, nnet)
        set_dropout_test_mode(True, nnet)
        input.set_randn_()
        ivector = Vector(ivector_dim)
        ivector.set_randn_()
        # Randomly decide whether to use (positive random) priors.
        priors = Vector(output_dim if random.choice([True, False]) else 0)
        if len(priors) != 0:
            priors.set_randn_()
            priors.apply_exp_()
        output1 = Matrix(num_frames, output_dim)
        output2 = Matrix(num_frames, output_dim)
        opts = NnetSimpleComputationOptions()
        opts.frames_per_chunk = random.randint(5, 25)
        compiler = CachingOptimizingCompiler(nnet)
        # First pass: plain simple decodable.
        decodable = DecodableNnetSimple(opts, nnet, priors, input, compiler,
                                        ivector if ivector_dim else None)
        for t in range(num_frames):
            decodable.get_output_for_frame(t, output1[t])
        # Second pass: looped decodable over the same input.
        opts = NnetSimpleLoopedComputationOptions()
        info = DecodableNnetSimpleLoopedInfo.from_priors(opts, priors, nnet)
        decodable = DecodableNnetSimpleLooped(info, input,
                                              ivector if ivector_dim else None)
        for t in range(num_frames):
            decodable.get_output_for_frame(t, output2[t])
        # Only compare outputs for network types where the two decodables
        # are expected to agree.
        if (not nnet_is_recurrent(nnet)
                and nnet.info().find("statistics-extraction") == -1
                and nnet.info().find("TimeHeightConvolutionComponent") == -1
                and nnet.info().find("RestrictedAttentionComponent") == -1):
            for t in range(num_frames):
                self.assertTrue(approx_equal(output1[t], output2[t]))
if __name__ == '__main__':
    # Run the whole suite twice: with CUDA available, the first pass keeps
    # the GPU disabled ("no") and the second pass enables it ("yes").
    for trial in range(2):
        if cuda_available():
            from kaldi.cudamatrix import CuDevice
            CuDevice.instantiate().set_debug_stride_mode(True)
            gpu_mode = "no" if trial == 0 else "yes"
            CuDevice.instantiate().select_gpu_id(gpu_mode)
        unittest.main(exit=False)
| tests/nnet3/nnet-compute-test.py | 6,662 | !/usr/bin/env python print("Input config[{}]:".format(j)) print(config) print("Generated computation:") print(ostrm.to_str()) print("Optimized computation:") print(ostrm.to_str()) print("Input config[{}]:".format(j)) print(config) | 230 | en | 0.104727 |
# this is here to avoid a circular import
from collections import namedtuple
class Point(namedtuple("Point", ["x", "y", "group", "fid"])):
    """An (x, y) point tagged with a group and a feature id."""

    @property
    def __geo_interface__(self):
        """Return the point as a GeoJSON-style geometry mapping."""
        return dict(type="Point", coordinates=(self.x, self.y))

    def as_feature(self):
        """Wrap the point in a GeoJSON Feature carrying group and fid."""
        return {
            "type": "Feature",
            "properties": {"group": self.group, "fid": self.fid},
            "geometry": self.__geo_interface__,
        }
| dorchester/point.py | 466 | this is here to avoid a circular import | 39 | en | 0.849323 |
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class AddressTokensTransactionUnconfirmedOmnilayertoken(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-restricted attributes on this model.
    allowed_values = {
    }

    # No length/range/regex validations on this model.
    validations = {
    }

    # None here means unknown input keys may be discarded when the
    # configuration requests it (see the kwargs loop in __init__).
    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'name': (str,), # noqa: E501
            'property_id': (str,), # noqa: E501
            'transaction_type': (str,), # noqa: E501
            'created_by_transaction_id': (str,), # noqa: E501
            'amount': (str,), # noqa: E501
        }

    @cached_property
    def discriminator():
        # No discriminator field is defined for this model.
        return None

    # Maps pythonic attribute names to the JSON keys used on the wire.
    attribute_map = {
        'name': 'name', # noqa: E501
        'property_id': 'propertyId', # noqa: E501
        'transaction_type': 'transactionType', # noqa: E501
        'created_by_transaction_id': 'createdByTransactionId', # noqa: E501
        'amount': 'amount', # noqa: E501
    }

    _composed_schemas = {}

    # Internal bookkeeping attributes that must never be treated as model
    # properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, name, property_id, transaction_type, created_by_transaction_id, amount, *args, **kwargs): # noqa: E501
        """AddressTokensTransactionUnconfirmedOmnilayertoken - a model defined in OpenAPI

        Args:
            name (str): Specifies the name of the token.
            property_id (str): Defines the ID of the property for Omni Layer.
            transaction_type (str): Defines the type of the transaction made.
            created_by_transaction_id (str): The transaction ID used to create the token.
            amount (str): Defines the amount of tokens sent with the transaction that is pending confirmation.

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
        """
        # Pop the framework's reserved keyword arguments before the
        # remaining kwargs are treated as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            # Extra positional arguments are never valid for generated models.
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        # Required model properties.
        self.name = name
        self.property_id = property_id
        self.transaction_type = transaction_type
        self.created_by_transaction_id = created_by_transaction_id
        self.amount = amount
        # Any remaining kwargs become attributes, unless the configuration
        # asks for unknown keys to be discarded.
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| cryptoapis/model/address_tokens_transaction_unconfirmed_omnilayertoken.py | 8,057 | NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
AddressTokensTransactionUnconfirmedOmnilayertoken - a model defined in OpenAPI
Args:
name (str): Specifies the name of the token.
property_id (str): Defines the ID of the property for Omni Layer.
transaction_type (str): Defines the type of the transaction made.
created_by_transaction_id (str): The transaction ID used to create the token.
amount (str): Defines the amount of tokens sent with the transaction that is pending confirmation.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
noqa: F401 noqa: F401 noqa: F401 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 discard variable. | 4,490 | en | 0.777639 |
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class TestReport_TeardownSchema:
    """
    A summary of information based on the results of executing a TestScript.
    """

    @staticmethod
    def get_schema(
        max_nesting_depth: Optional[int] = 6,
        nesting_depth: int = 0,
        nesting_list: Optional[List[str]] = None,
        max_recursion_limit: Optional[int] = 2,
        include_extension: Optional[bool] = False,
        extension_fields: Optional[List[str]] = None,
        extension_depth: int = 0,
        max_extension_depth: Optional[int] = 2,
    ) -> Union[StructType, DataType]:
        """
        A summary of information based on the results of executing a TestScript.

        id: unique id for the element within a resource (for internal references). This
            may be any string value that does not contain spaces.

        extension: May be used to represent additional information that is not part of the basic
            definition of the element. In order to make the use of extensions safe and
            manageable, there is a strict set of governance applied to the definition and
            use of extensions. Though any implementer is allowed to define an extension,
            there is a set of requirements that SHALL be met as part of the definition of
            the extension.

        action: The teardown action will only contain an operation.

        :param max_nesting_depth: stop expanding nested schemas at this depth.
        :param nesting_depth: current nesting depth (incremented on each recursion).
        :param nesting_list: schema names already visited on this branch; used with
            max_recursion_limit to terminate recursive type references.
        :param max_recursion_limit: maximum number of times this schema may appear
            on a single branch before it is collapsed to just an "id" column.
        :param include_extension: if False the "extension" column is collapsed to a
            plain string instead of the full extension struct.
        :param extension_fields: extension value[x] fields to include; defaults to
            the standard primitive value fields below.
        :param extension_depth: current extension nesting depth.
        :param max_extension_depth: maximum extension nesting depth.
        """
        # Use None sentinels instead of mutable default arguments: the previous
        # list-literal defaults were shared across every call of this method.
        if nesting_list is None:
            nesting_list = []
        if extension_fields is None:
            extension_fields = [
                "valueBoolean",
                "valueCode",
                "valueDate",
                "valueDateTime",
                "valueDecimal",
                "valueId",
                "valueInteger",
                "valuePositiveInt",
                "valueString",
                "valueTime",
                "valueUnsignedInt",
                "valueUri",
                "valueQuantity",
            ]
        # Imported lazily to avoid circular imports between schema modules.
        from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
        from spark_fhir_schemas.stu3.complex_types.testreport_action2 import (
            TestReport_Action2Schema,
        )

        # Terminate recursion: once this type has been seen max_recursion_limit
        # times on this branch, or the overall nesting depth is reached, return
        # a minimal schema containing only the "id" column.
        if (
            max_recursion_limit
            and nesting_list.count("TestReport_Teardown") >= max_recursion_limit
        ) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
            return StructType([StructField("id", StringType(), True)])
        # add my name to recursion list for later
        my_nesting_list: List[str] = nesting_list + ["TestReport_Teardown"]
        schema = StructType(
            [
                # unique id for the element within a resource (for internal references). This
                # may be any string value that does not contain spaces.
                StructField("id", StringType(), True),
                # May be used to represent additional information that is not part of the basic
                # definition of the element. In order to make the use of extensions safe and
                # manageable, there is a strict set of governance applied to the definition and
                # use of extensions. Though any implementer is allowed to define an extension,
                # there is a set of requirements that SHALL be met as part of the definition of
                # the extension.
                StructField(
                    "extension",
                    ArrayType(
                        ExtensionSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
                # The teardown action will only contain an operation.
                StructField(
                    "action",
                    ArrayType(
                        TestReport_Action2Schema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
            ]
        )
        if not include_extension:
            # Collapse the extension column to a plain string so the column
            # position stays stable while the nested struct is omitted.
            schema.fields = [
                c
                if c.name != "extension"
                else StructField("extension", StringType(), True)
                for c in schema.fields
            ]
        return schema
| spark_fhir_schemas/stu3/complex_types/testreport_teardown.py | 5,245 | A summary of information based on the results of executing a TestScript.
A summary of information based on the results of executing a TestScript.
id: unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
action: The teardown action will only contain an operation.
This file is auto-generated by generate_schema so do not edit manually noinspection PyPep8Naming noinspection PyDefaultArgument add my name to recursion list for later unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces. May be used to represent additional information that is not part of the basic definition of the element. In order to make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. The teardown action will only contain an operation. | 1,534 | en | 0.889939 |
from tracking.harvest import save_dfes_avl
from django.core.management.base import BaseCommand
import logging
LOGGER = logging.getLogger('tracking_points')


class Command(BaseCommand):
    help = "Runs harvest_tracking_email to harvest points"

    def handle(self, *args, **options):
        """Harvest the DFES AVL feed and print a one-line summary."""
        LOGGER.info('Harvesting DFES feed')
        try:
            # save_dfes_avl() is unpacked into the six placeholders below --
            # presumably (harvested, created, updated, ignored, earliest seen,
            # latest seen); confirm against tracking.harvest.save_dfes_avl.
            print("Harvested {} from DFES; created {}, updated {}, ignored {}; Earliest seen {}, Latest seen {}.".format(*save_dfes_avl()))
        except Exception as e:
            # Use exception() so the full traceback is logged, not just str(e).
            LOGGER.exception(e)
| tracking/management/commands/harvest_dfes_feed.py | 641 | LOGGER.info("Updated {} of {} scanned DFES devices".format(updated, num_records)) | 81 | en | 0.544475 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from .amp_type import AMP_TYPE
from colossalai.context import Config
import torch.nn as nn
from torch.optim import Optimizer
from torch.nn.modules.loss import _Loss
from .torch_amp import convert_to_torch_amp
from .apex_amp import convert_to_apex_amp
from .naive_amp import convert_to_naive_amp
def convert_to_amp(model: nn.Module, optimizer: Optimizer, criterion: _Loss, mode: AMP_TYPE, amp_config: Config = None):
    """Wrap the given training components with the requested AMP implementation.

    Args:
        model (:class:`torch.nn.Module`): your model object.
        optimizer (:class:`torch.optim.Optimizer`): your optimizer object.
        criterion (:class:`torch.nn.modules.loss._Loss`): your loss function object.
        mode (:class:`colossalai.amp.AMP_TYPE`): amp mode.
        amp_config (Union[:class:`colossalai.context.Config`, dict]): configuration
            for the chosen amp mode; the accepted keys differ per mode, see the
            apex_amp / naive_amp / torch_amp modules for details.

    Returns:
        A tuple (model, optimizer, criterion).
    """
    assert isinstance(mode, AMP_TYPE), \
        f'expected the argument mode be AMP_TYPE, but got {type(mode)}'

    effective_config = amp_config if amp_config is not None else Config()

    if mode == AMP_TYPE.TORCH:
        # The torch mode additionally wraps the criterion.
        model, optimizer, criterion = convert_to_torch_amp(model, optimizer, criterion, effective_config)
    elif mode == AMP_TYPE.APEX:
        model, optimizer = convert_to_apex_amp(model, optimizer, effective_config)
    elif mode == AMP_TYPE.NAIVE:
        model, optimizer = convert_to_naive_amp(model, optimizer, effective_config)

    return model, optimizer, criterion
| colossalai/amp/__init__.py | 2,198 | A helper function to wrap training components with Torch AMP modules.
Args:
param model (:class:`torch.nn.Module`): your model object.
optimizer (:class:`torch.optim.Optimizer`): your optimizer object.
criterion (:class:`torch.nn.modules.loss._Loss`): your loss function object.
mode (:class:`colossalai.amp.AMP_TYPE`): amp mode.
amp_config (Union[:class:`colossalai.context.Config`, dict]): configuration for different amp modes.
Returns:
A tuple (model, optimizer, criterion).
Note:
``amp_config`` may vary from different mode you choose. You should check the corresponding amp mode
for more details about ``amp_config``.
For ``apex_amp``, please check
`apex_amp config <https://nvidia.github.io/apex/amp.html?highlight=apex%20amp>`_.
For ``naive_amp``, please check
`naive_amp config <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/amp/naive_amp/_fp16_optimizer.py#L42>`_.
For ``torch_amp``, please check
`torch_amp config <https://github.com/pytorch/pytorch/blob/master/torch/cuda/amp/grad_scaler.py#L97>`_.
!/usr/bin/env python -*- encoding: utf-8 -*- | 1,130 | en | 0.47382 |
from collections import defaultdict
import pandas as pd
import pickle
from sqlalchemy import create_engine, inspect, Table, Column
from sqlalchemy.engine.url import make_url
from sys import exit
class DatabaseClient:
    """Resolve a database URL from a dbpass file and run queries against it.

    Input:
        dbpass_path: path to the text file with the list of database urls.
        dbname: database name used to select which url from the list to use.
    """

    def __init__(self, dbpass_path, dbname):
        self.dbpass_path = dbpass_path
        self.dbname = dbname
        self.db_url = self.get_db_url()
        self.engine = create_engine(self.db_url)

    def get_db_url(self):
        """Return the first url in the dbpass file whose database matches dbname.

        Exits the process (with the list of known names) if nothing matches.
        """
        db_names = []
        with open(self.dbpass_path, 'r') as infile:
            # The 'with' block closes the file; no explicit close() needed.
            for raw_url in infile.read().splitlines():
                url_obj = make_url(raw_url)
                if url_obj.database == self.dbname:
                    return raw_url
                db_names.append(url_obj.database)
        exit('database name does not exist in dbpass given:' + ', '.join(db_names))

    def get_df_with_query(self, query):
        """ WARNING :: Will crash if too large. If so, you should just create the df file
            first via create_df_file_with_query(query=).

            load example:
                with open(input, 'rb') as infile:
                    objs = []
                    while True:
                        try:
                            obj = pickle.load(infile)
                        except EOFError:
                            break
                    ...
        """
        return pd.read_sql(query, self.engine)

    def create_df_file_with_query(self, query, output):
        """Dump the query result to `output` as a stream of pickled DataFrames,
        fetching in chunks to avoid loading everything into memory at once.
        """
        chunk_size = 100000
        offset = 0
        # Keep the unformatted template separate: the previous code rebound
        # `query` to the formatted string, so every later format() call was a
        # no-op and OFFSET stayed at 0 forever (infinite loop on large tables).
        base_query = query.replace(';', '') + " LIMIT {chunk_size} OFFSET {offset};"
        with open(output, 'wb') as outfile:
            while True:
                print(offset)
                chunk_query = base_query.format(
                    chunk_size=chunk_size,
                    offset=offset
                )
                df = pd.read_sql(chunk_query, self.engine)
                pickle.dump(df, outfile)
                offset += chunk_size
                # A short chunk means we have read the final page.
                if len(df) < chunk_size:
                    break
| ilxutils/ilxutils/database_client.py | 2,525 | Takes care of the database pass opening to find the url and can query
the respected database.
Input:
dbpass_path path to the text file with the list of database urls
dbname database name so we know which database to query from the list
Dumps in df in chunks to avoid crashes.
WARNING :: Will crash if too large. If so, you should just create the df file
first via create_df_file(query=).
load example:
with open(input, 'rb') as infile:
objs = []
while True:
try:
obj = pickle.load(infile)
except EOFError:
break
... | 639 | en | 0.819679 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Optional per-call response hook: receives the pipeline response, the
# deserialized body, and the response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]

class VpnSitesConfigurationOperations:
    """VpnSitesConfigurationOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def _download_initial(
        self,
        resource_group_name: str,
        virtual_wan_name: str,
        request: "_models.GetVpnSitesConfigurationRequest",
        **kwargs
    ) -> None:
        # Initial (non-polling) POST of the long-running "download" operation;
        # begin_download() below drives the polling on top of this.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._download_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(request, 'GetVpnSitesConfigurationRequest')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 and 202 are the only accepted status codes for this call.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _download_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'}  # type: ignore

    async def begin_download(
        self,
        resource_group_name: str,
        virtual_wan_name: str,
        request: "_models.GetVpnSitesConfigurationRequest",
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Gives the sas-url to download the configurations for vpn-sites in a resource group.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param virtual_wan_name: The name of the VirtualWAN for which configuration of all vpn-sites is
         needed.
        :type virtual_wan_name: str
        :param request: Parameters supplied to download vpn-sites configuration.
        :type request: ~azure.mgmt.network.v2020_06_01.models.GetVpnSitesConfigurationRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when we are not resuming from a
        # saved continuation token.
        if cont_token is None:
            raw_result = await self._download_initial(
                resource_group_name=resource_group_name,
                virtual_wan_name=virtual_wan_name,
                request=request,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
        }

        # The final state of this LRO is retrieved via the Location header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_download.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'}  # type: ignore
| sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/aio/operations/_vpn_sites_configuration_operations.py | 8,230 | VpnSitesConfigurationOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- type: ClsType[None] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: Dict[str, Any] type: ignore type: Union[bool, AsyncPollingMethod] type: ClsType[None] type: Optional[str] type: ignore | 1,224 | en | 0.593846 |
def testaArq(arq):
"""
-> Verifica se existe o arquivo arq
:arq: Nome do arquivo a ser testado.
:return: retorna True se o arquivo for encontrado,
caso contrário False
"""
try:
a = open(arq)
except FileNotFoundError: # O arquivo não foi encontrado
print('Arquivo não encontrado!')
return False
else:
return True
def criaArq(arq=''):
    """Create an empty text file, if it does not already exist.

    :param arq: name of the file.
    :return: None
    """
    try:
        a = open(arq, 'xt')  # 'x' mode raises FileExistsError if arq exists
    except FileExistsError:
        # 'a' is never bound on this path, so the original
        # 'finally: a.close()' raised NameError here. Close only on success.
        print(f'ERRO: o arquivo \"{arq}\" já existe!')
    else:
        print(f'O arquivo \"{arq}\" foi criado com sucesso!')
        a.close()
    return
def leArq(arq=''):
    """Open a text file and print its contents.

    Implements the behavior the original stub documented but never wrote.

    :param arq: name of the file.
    :return: None
    """
    try:
        with open(arq, 'rt') as a:
            print(a.read())
    except FileNotFoundError:
        # Same not-found message used by testaArq, for consistency.
        print('Arquivo não encontrado!')
    return
def editaArq(arq, texto=''):
    """Open a text file and append a new item at the end of the file.

    Implements the behavior the original stub documented but never wrote.
    The file is created if it does not exist ('a' mode).

    :param arq: name of the file.
    :param texto: text to append as a new line (new optional parameter;
        existing callers that pass only ``arq`` keep working).
    :return: None
    """
    with open(arq, 'at') as a:
        a.write(f'{texto}\n')
    return
| bibli/arquivo/__init__.py | 1,093 | -> Cria um arquivo de texto, caso ele não exista.
:param arq: Nome do arquivo.
:return:
-> Abre um arquivo de texto e adiciona novo item no
final do arquivo.
:param arq: Nome do arquivo.
:return:
-> Abre e mostra os itens de um arquivo texto.
:param arq: Nome do arquivo.
:return:
-> Verifica se existe o arquivo arq
:arq: Nome do arquivo a ser testado.
:return: retorna True se o arquivo for encontrado,
caso contrário False
O arquivo não foi encontrado | 457 | pt | 0.983002 |
# -*- coding: utf-8 -*-
__author__ = 'abbot'
from selenium import webdriver
from selenium.webdriver import ActionChains
# NOTE(review): PhantomJS support and the find_element_by_* helpers were
# removed in modern Selenium releases -- this script presumably targets an
# old pinned Selenium version; confirm before reusing. The executable path
# below is hard-coded to one developer's machine.
driver = webdriver.PhantomJS(executable_path='/Users/wangbo/Downloads/phantomjs-2.1.1-macosx/bin/phantomjs')
# 'element' looks like a placeholder XPath -- verify against the page under test.
ac = driver.find_element_by_xpath('element')
# Hover over the element, then hover-and-click it.
ActionChains(driver).move_to_element(ac).perform()
ActionChains(driver).move_to_element(ac).click(ac).perform()
| selenium_test/action.py | 392 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A runnable program to evaluate video embeddings.
Given a model checkpoint, and the location of the shards for a dataset,
computes the performance of the Brave video embeddings. This code
may be used to evaluate both UCF101 and HMDB51, as long as they are both
given in the appropriate input format. The only hyperparameter to this program
is the svm_regularization constant, which can impact the performance of the
linear classification.
"""
import glob
import json
from absl import app
from absl import flags
import chex
import jax
import numpy as np
import tensorflow as tf
from brave.datasets import datasets
from brave.evaluate import evaluate_video_embedding
from brave.models.brave import brave
FLAGS = flags.FLAGS

# Required flags (marked as required in the __main__ block at file bottom).
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint to evaluate.')
flags.DEFINE_integer('batch_size', None, 'The size of the batches to use.')

# Hyperparameters
flags.DEFINE_float('svm_regularization', None, 'Regularization constant.')

# Datasets
flags.DEFINE_string('train_dataset_shards', None,
                    'Glob pattern for train shards.')
flags.DEFINE_string('test_dataset_shards', None,
                    'Glob pattern for test shards.')

# Transformations to apply to video before running network.
flags.DEFINE_integer('num_video_frames', 32, 'Number of frames in eval videos.')
flags.DEFINE_integer('video_step', 2, 'The step to use in the eval videos.')
flags.DEFINE_integer('image_size', 224, 'The size of the video to evaluate.')
def main(_):
  """Evaluate BraVe video embeddings and print the results as JSON."""
  train_shards = glob.glob(FLAGS.train_dataset_shards)
  test_shards = glob.glob(FLAGS.test_dataset_shards)

  eval_config = evaluate_video_embedding.VideoConfig(
      num_frames=FLAGS.num_video_frames,
      image_size=FLAGS.image_size,
      video_step=FLAGS.video_step,
  )

  embed_fn = _video_embedding(FLAGS.checkpoint_path)
  results = evaluate_video_embedding.evaluate_video_embedding(
      train_dataset_shards=train_shards,
      test_dataset_shards=test_shards,
      embedding_fn=embed_fn,
      config=eval_config,
      svm_regularization=FLAGS.svm_regularization,
      batch_size=FLAGS.batch_size)

  # Write the results to stdout in a way that can be used as input to other
  # programs.
  print(json.dumps({
      'top_1_train': results.train.top_one_accuracy,
      'top_5_train': results.train.top_five_accuracy,
      'top_1_test': results.test.top_one_accuracy,
      'top_5_test': results.test.top_five_accuracy,
  }))
def _video_embedding(checkpoint_path: str):
  """Load the narrow-video embedding function from a BraVe checkpoint."""
  ckpt = np.load(checkpoint_path, allow_pickle=True).item()
  params = ckpt['params']
  state = ckpt['state']
  model = brave.get_model(brave.BraveConfig(**ckpt['config']))

  @jax.jit
  def _embed(view: datasets.View) -> chex.Array:
    narrow_fn = model.forward_fns['narrow_video']
    out, _ = narrow_fn(params, state, None, view, False)
    return out

  def _embed_sync(view: datasets.View) -> chex.Array:
    # jax.jit dispatches lazily; device_get blocks until the computation
    # completes and copies the result back to the host.
    return jax.device_get(_embed(view))

  return _embed_sync
if __name__ == '__main__':
  try:
    tf.config.set_visible_devices([], 'GPU')  # Prevent TF from using the GPU.
  except tf.errors.NotFoundError:
    pass  # No GPU device registered with TF; nothing to hide.

  # All of these flags must be supplied on the command line.
  flags.mark_flag_as_required('checkpoint_path')
  flags.mark_flag_as_required('batch_size')
  flags.mark_flag_as_required('train_dataset_shards')
  flags.mark_flag_as_required('test_dataset_shards')
  flags.mark_flag_as_required('svm_regularization')

  app.run(main)
| brave/evaluate_video_embeddings.py | 4,504 | Load the video embedding for the BraVe model to evaluate.
A runnable program to evaluate video embeddings.
Given a model checkpoint, and the location of the shards for a dataset,
computes the performance of the Brave video embeddings. This code
may be used to evaluate both UCF101 and HMDB51, as long as they are both
given in the appropriate input format. The only hyperparameter to this program
is the svm_regularization constant, which can impact the performance of the
linear classification.
Copyright 2021 DeepMind Technologies Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Hyperparameters Datasets Transformations to apply to video before running network. Write the results to stdout in a way that can be used as input to other programs. jax.jit causes the above function to be executed lazily, but we want to force the computation to happen synchronously. Prevent TF from using the GPU. | 1,458 | en | 0.859315 |
# Auto-generated IronPython stub for the .NET enum
# System.Windows.Forms.DataGridViewAutoSizeColumnMode: the method bodies are
# placeholders and the member values are populated at runtime.
class DataGridViewAutoSizeColumnMode(Enum,IComparable,IFormattable,IConvertible):
 """
 Defines values for specifying how the width of a column is adjusted.

 enum DataGridViewAutoSizeColumnMode,values: AllCells (6),AllCellsExceptHeader (4),ColumnHeader (2),DisplayedCells (10),DisplayedCellsExceptHeader (8),Fill (16),None (1),NotSet (0)
 """
 def Instance(self):
  """ This function has been arbitrarily put into the stubs"""
  return DataGridViewAutoSizeColumnMode()
 def __eq__(self,*args):
  """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
  pass
 def __format__(self,*args):
  """ __format__(formattable: IFormattable,format: str) -> str """
  pass
 def __ge__(self,*args):
  pass
 def __gt__(self,*args):
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 def __le__(self,*args):
  pass
 def __lt__(self,*args):
  pass
 def __ne__(self,*args):
  pass
 def __reduce_ex__(self,*args):
  pass
 def __str__(self,*args):
  pass
 # Enum members; set to None in the stub, real values listed in the docstring.
 AllCells=None
 AllCellsExceptHeader=None
 ColumnHeader=None
 DisplayedCells=None
 DisplayedCellsExceptHeader=None
 Fill=None
 None_ =None
 NotSet=None
 value__=None
| release/stubs.min/System/Windows/Forms/__init___parts/DataGridViewAutoSizeColumnMode.py | 1,374 | Defines values for specifying how the width of a column is adjusted.
enum DataGridViewAutoSizeColumnMode,values: AllCells (6),AllCellsExceptHeader (4),ColumnHeader (2),DisplayedCells (10),DisplayedCellsExceptHeader (8),Fill (16),None (1),NotSet (0)
This function has been arbitrarily put into the stubs
x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y
__format__(formattable: IFormattable,format: str) -> str
x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature | 633 | en | 0.418552 |
import contextlib
import os
import sys
import tracemalloc
import unittest
from unittest.mock import patch
from test.support.script_helper import (assert_python_ok, assert_python_failure,
interpreter_requires_environment)
from test import support
try:
import threading
except ImportError:
threading = None
try:
import _testcapi
except ImportError:
_testcapi = None
EMPTY_STRING_SIZE = sys.getsizeof(b'')


def get_frames(nframe, lineno_delta):
    """Return up to *nframe* (filename, lineno) pairs for the caller's stack.

    *lineno_delta* is added to the first (innermost) frame's line number only.
    """
    collected = []
    current = sys._getframe(1)  # start from the caller's frame
    for _ in range(nframe):
        code = current.f_code
        collected.append((code.co_filename, current.f_lineno + lineno_delta))
        lineno_delta = 0  # only the innermost frame gets the delta
        current = current.f_back
        if current is None:
            break
    return tuple(collected)


def allocate_bytes(size):
    """Allocate a bytes object of *size* (as measured by sys.getsizeof) and
    return it with the traceback tracemalloc is expected to report for it."""
    limit = tracemalloc.get_traceback_limit()
    payload_len = size - EMPTY_STRING_SIZE
    expected = get_frames(limit, 1)
    payload = b'x' * payload_len  # must stay on the line right after get_frames()
    return payload, tracemalloc.Traceback(expected)
def create_snapshots():
    """Build two deterministic Snapshot fixtures with a traceback limit of 2.

    Each raw trace is a (domain, size, traceback_frames) tuple, where
    traceback_frames is a tuple of (filename, line_number) pairs — the same
    shape _tracemalloc._get_traces() produces.
    """
    limit = 2

    first = tracemalloc.Snapshot([
        (0, 10, (('a.py', 2), ('b.py', 4))),
        (0, 10, (('a.py', 2), ('b.py', 4))),
        (0, 10, (('a.py', 2), ('b.py', 4))),
        (1, 2, (('a.py', 5), ('b.py', 4))),
        (2, 66, (('b.py', 1),)),
        (3, 7, (('<unknown>', 0),)),
    ], limit)

    second = tracemalloc.Snapshot([
        (0, 10, (('a.py', 2), ('b.py', 4))),
        (0, 10, (('a.py', 2), ('b.py', 4))),
        (0, 10, (('a.py', 2), ('b.py', 4))),
        (2, 2, (('a.py', 5), ('b.py', 4))),
        (2, 5000, (('a.py', 5), ('b.py', 4))),
        (4, 400, (('c.py', 578),)),
    ], limit)

    return (first, second)
def frame(filename, lineno):
return tracemalloc._Frame((filename, lineno))
def traceback(*frames):
return tracemalloc.Traceback(frames)
def traceback_lineno(filename, lineno):
return traceback((filename, lineno))
def traceback_filename(filename):
return traceback_lineno(filename, 0)
class TestTracemallocEnabled(unittest.TestCase):
    """Tests that exercise tracemalloc while tracing is active.

    setUp() starts tracing with a 1-frame traceback limit and tearDown()
    stops it, so every test runs in a fresh tracing session.
    """

    def setUp(self):
        # Refuse to run if tracing was left enabled by the environment:
        # the tests below manage start()/stop() themselves.
        if tracemalloc.is_tracing():
            self.skipTest("tracemalloc must be stopped before the test")
        tracemalloc.start(1)

    def tearDown(self):
        tracemalloc.stop()

    def test_get_tracemalloc_memory(self):
        """get_tracemalloc_memory() must not grow after clear_traces()."""
        # Keep the allocated objects alive so their traces remain stored.
        data = [allocate_bytes(123) for count in range(1000)]
        size = tracemalloc.get_tracemalloc_memory()
        self.assertGreaterEqual(size, 0)

        tracemalloc.clear_traces()
        size2 = tracemalloc.get_tracemalloc_memory()
        self.assertGreaterEqual(size2, 0)
        self.assertLessEqual(size2, size)

    def test_get_object_traceback(self):
        """get_object_traceback() must return the allocation traceback."""
        tracemalloc.clear_traces()
        obj_size = 12345
        obj, obj_traceback = allocate_bytes(obj_size)
        traceback = tracemalloc.get_object_traceback(obj)
        self.assertEqual(traceback, obj_traceback)

    def test_set_traceback_limit(self):
        """start(nframe) must control how many frames a trace stores."""
        obj_size = 10

        tracemalloc.stop()
        # a traceback limit < 1 is rejected
        self.assertRaises(ValueError, tracemalloc.start, -1)

        tracemalloc.stop()
        tracemalloc.start(10)
        obj2, obj2_traceback = allocate_bytes(obj_size)
        traceback = tracemalloc.get_object_traceback(obj2)
        self.assertEqual(len(traceback), 10)
        self.assertEqual(traceback, obj2_traceback)

        tracemalloc.stop()
        tracemalloc.start(1)
        obj, obj_traceback = allocate_bytes(obj_size)
        traceback = tracemalloc.get_object_traceback(obj)
        self.assertEqual(len(traceback), 1)
        self.assertEqual(traceback, obj_traceback)

    def find_trace(self, traces, traceback):
        # Return the raw trace whose frames equal *traceback*; fail the
        # test if no such trace exists.
        for trace in traces:
            if trace[2] == traceback._frames:
                return trace
        self.fail("trace not found")

    def test_get_traces(self):
        """_get_traces() must expose (domain, size, frames) raw tuples."""
        tracemalloc.clear_traces()
        obj_size = 12345
        obj, obj_traceback = allocate_bytes(obj_size)

        traces = tracemalloc._get_traces()
        trace = self.find_trace(traces, obj_traceback)

        self.assertIsInstance(trace, tuple)
        domain, size, traceback = trace
        self.assertEqual(size, obj_size)
        self.assertEqual(traceback, obj_traceback._frames)

        # once tracing is stopped, all traces are discarded
        tracemalloc.stop()
        self.assertEqual(tracemalloc._get_traces(), [])

    def test_get_traces_intern_traceback(self):
        """Identical tracebacks must be interned (shared), not duplicated."""
        # dummy wrappers to get more useful and identical frames in the traceback
        def allocate_bytes2(size):
            return allocate_bytes(size)
        def allocate_bytes3(size):
            return allocate_bytes2(size)
        def allocate_bytes4(size):
            return allocate_bytes3(size)

        # Ensure that two identical tracebacks are not duplicated
        tracemalloc.stop()
        tracemalloc.start(4)
        obj_size = 123
        obj1, obj1_traceback = allocate_bytes4(obj_size)
        obj2, obj2_traceback = allocate_bytes4(obj_size)

        traces = tracemalloc._get_traces()

        trace1 = self.find_trace(traces, obj1_traceback)
        trace2 = self.find_trace(traces, obj2_traceback)
        domain1, size1, traceback1 = trace1
        domain2, size2, traceback2 = trace2
        # identity check: both traces reference the same interned traceback
        self.assertIs(traceback2, traceback1)

    def test_get_traced_memory(self):
        """get_traced_memory() must track current and peak traced sizes."""
        # Python allocates some internals objects, so the test must tolerate
        # a small difference between the expected size and the real usage
        max_error = 2048

        # allocate one object
        obj_size = 1024 * 1024
        tracemalloc.clear_traces()
        obj, obj_traceback = allocate_bytes(obj_size)
        size, peak_size = tracemalloc.get_traced_memory()
        self.assertGreaterEqual(size, obj_size)
        self.assertGreaterEqual(peak_size, size)

        self.assertLessEqual(size - obj_size, max_error)
        self.assertLessEqual(peak_size - size, max_error)

        # destroy the object
        obj = None
        size2, peak_size2 = tracemalloc.get_traced_memory()
        self.assertLess(size2, size)
        self.assertGreaterEqual(size - size2, obj_size - max_error)
        # the peak must survive the deallocation
        self.assertGreaterEqual(peak_size2, peak_size)

        # clear_traces() must reset traced memory counters
        tracemalloc.clear_traces()
        self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))

        # allocate another object
        obj, obj_traceback = allocate_bytes(obj_size)
        size, peak_size = tracemalloc.get_traced_memory()
        self.assertGreaterEqual(size, obj_size)

        # stop() also resets traced memory counters
        tracemalloc.stop()
        self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))

    def test_clear_traces(self):
        """clear_traces() must drop all stored object tracebacks."""
        obj, obj_traceback = allocate_bytes(123)
        traceback = tracemalloc.get_object_traceback(obj)
        self.assertIsNotNone(traceback)

        tracemalloc.clear_traces()
        traceback2 = tracemalloc.get_object_traceback(obj)
        self.assertIsNone(traceback2)

    def test_is_tracing(self):
        """is_tracing() must follow start()/stop()."""
        tracemalloc.stop()
        self.assertFalse(tracemalloc.is_tracing())

        tracemalloc.start()
        self.assertTrue(tracemalloc.is_tracing())

    def test_snapshot(self):
        """A snapshot must survive a dump()/load() round-trip on disk."""
        obj, source = allocate_bytes(123)

        # take a snapshot
        snapshot = tracemalloc.take_snapshot()

        # write on disk
        snapshot.dump(support.TESTFN)
        self.addCleanup(support.unlink, support.TESTFN)

        # load from disk
        snapshot2 = tracemalloc.Snapshot.load(support.TESTFN)
        self.assertEqual(snapshot2.traces, snapshot.traces)

        # tracemalloc must be tracing memory allocations to take a snapshot
        tracemalloc.stop()
        with self.assertRaises(RuntimeError) as cm:
            tracemalloc.take_snapshot()
        self.assertEqual(str(cm.exception),
                         "the tracemalloc module must be tracing memory "
                         "allocations to take a snapshot")

    def test_snapshot_save_attr(self):
        """dump()/load() must preserve attributes added to a snapshot."""
        # take a snapshot with a new attribute
        snapshot = tracemalloc.take_snapshot()
        snapshot.test_attr = "new"
        snapshot.dump(support.TESTFN)
        self.addCleanup(support.unlink, support.TESTFN)

        # load() should recreate the attribute
        snapshot2 = tracemalloc.Snapshot.load(support.TESTFN)
        self.assertEqual(snapshot2.test_attr, "new")

    def fork_child(self):
        # Runs in the forked child; returns the child's exit code:
        # 0 = everything fine, non-zero identifies which check failed.
        if not tracemalloc.is_tracing():
            return 2

        obj_size = 12345
        obj, obj_traceback = allocate_bytes(obj_size)
        traceback = tracemalloc.get_object_traceback(obj)
        if traceback is None:
            return 3

        # everything is fine
        return 0

    @unittest.skipUnless(hasattr(os, 'fork'), 'need os.fork()')
    def test_fork(self):
        """Tracing must keep working in a forked child process."""
        # check that tracemalloc is still working after fork
        pid = os.fork()
        if not pid:
            # child
            exitcode = 1
            try:
                exitcode = self.fork_child()
            finally:
                # never fall through into the parent's test machinery
                os._exit(exitcode)
        else:
            pid2, status = os.waitpid(pid, 0)
            self.assertTrue(os.WIFEXITED(status))
            exitcode = os.WEXITSTATUS(status)
            self.assertEqual(exitcode, 0)
class TestSnapshot(unittest.TestCase):
    """Tests for tracemalloc.Snapshot: filtering, grouping and formatting.

    These tests operate on the hand-crafted snapshots returned by
    create_snapshots(), so they do not require tracing to be enabled.
    """
    maxDiff = 4000

    def test_create_snapshot(self):
        """take_snapshot() must copy the traceback limit and raw traces."""
        raw_traces = [(0, 5, (('a.py', 2),))]

        with contextlib.ExitStack() as stack:
            stack.enter_context(patch.object(tracemalloc, 'is_tracing',
                                             return_value=True))
            stack.enter_context(patch.object(tracemalloc, 'get_traceback_limit',
                                             return_value=5))
            stack.enter_context(patch.object(tracemalloc, '_get_traces',
                                             return_value=raw_traces))

            snapshot = tracemalloc.take_snapshot()
            self.assertEqual(snapshot.traceback_limit, 5)
            self.assertEqual(len(snapshot.traces), 1)
            trace = snapshot.traces[0]
            self.assertEqual(trace.size, 5)
            self.assertEqual(len(trace.traceback), 1)
            self.assertEqual(trace.traceback[0].filename, 'a.py')
            self.assertEqual(trace.traceback[0].lineno, 2)

    def test_filter_traces(self):
        """filter_traces() must apply include/exclude filename filters."""
        snapshot, snapshot2 = create_snapshots()
        filter1 = tracemalloc.Filter(False, "b.py")
        filter2 = tracemalloc.Filter(True, "a.py", 2)
        filter3 = tracemalloc.Filter(True, "a.py", 5)
        original_traces = list(snapshot.traces._traces)

        # exclude b.py
        snapshot3 = snapshot.filter_traces((filter1,))
        self.assertEqual(snapshot3.traces._traces, [
            (0, 10, (('a.py', 2), ('b.py', 4))),
            (0, 10, (('a.py', 2), ('b.py', 4))),
            (0, 10, (('a.py', 2), ('b.py', 4))),
            (1, 2, (('a.py', 5), ('b.py', 4))),
            (3, 7, (('<unknown>', 0),)),
        ])

        # filter_traces() must not touch the original snapshot
        self.assertEqual(snapshot.traces._traces, original_traces)

        # only include two lines of a.py
        snapshot4 = snapshot3.filter_traces((filter2, filter3))
        self.assertEqual(snapshot4.traces._traces, [
            (0, 10, (('a.py', 2), ('b.py', 4))),
            (0, 10, (('a.py', 2), ('b.py', 4))),
            (0, 10, (('a.py', 2), ('b.py', 4))),
            (1, 2, (('a.py', 5), ('b.py', 4))),
        ])

        # No filter: just duplicate the snapshot
        snapshot5 = snapshot.filter_traces(())
        self.assertIsNot(snapshot5, snapshot)
        self.assertIsNot(snapshot5.traces, snapshot.traces)
        self.assertEqual(snapshot5.traces, snapshot.traces)

        # filters must be passed as an iterable, not a single Filter
        self.assertRaises(TypeError, snapshot.filter_traces, filter1)

    def test_filter_traces_domain(self):
        """Filter(..., domain=N) must combine filename and domain matching."""
        snapshot, snapshot2 = create_snapshots()
        filter1 = tracemalloc.Filter(False, "a.py", domain=1)
        filter2 = tracemalloc.Filter(True, "a.py", domain=1)
        original_traces = list(snapshot.traces._traces)

        # exclude a.py of domain 1
        snapshot3 = snapshot.filter_traces((filter1,))
        self.assertEqual(snapshot3.traces._traces, [
            (0, 10, (('a.py', 2), ('b.py', 4))),
            (0, 10, (('a.py', 2), ('b.py', 4))),
            (0, 10, (('a.py', 2), ('b.py', 4))),
            (2, 66, (('b.py', 1),)),
            (3, 7, (('<unknown>', 0),)),
        ])

        # filter_traces() must not touch the original snapshot
        self.assertEqual(snapshot.traces._traces, original_traces)

        # include only a.py traces of domain 1
        # (bug fix: this branch previously re-applied filter1, so the
        # inclusive filter2 was never exercised)
        snapshot4 = snapshot.filter_traces((filter2,))
        self.assertEqual(snapshot4.traces._traces, [
            (1, 2, (('a.py', 5), ('b.py', 4))),
        ])

    def test_filter_traces_domain_filter(self):
        """DomainFilter must match on the domain only, ignoring filenames."""
        snapshot, snapshot2 = create_snapshots()
        filter1 = tracemalloc.DomainFilter(False, domain=3)
        filter2 = tracemalloc.DomainFilter(True, domain=3)

        # exclude domain 3
        snapshot3 = snapshot.filter_traces((filter1,))
        self.assertEqual(snapshot3.traces._traces, [
            (0, 10, (('a.py', 2), ('b.py', 4))),
            (0, 10, (('a.py', 2), ('b.py', 4))),
            (0, 10, (('a.py', 2), ('b.py', 4))),
            (1, 2, (('a.py', 5), ('b.py', 4))),
            (2, 66, (('b.py', 1),)),
        ])

        # include only domain 3
        snapshot3 = snapshot.filter_traces((filter2,))
        self.assertEqual(snapshot3.traces._traces, [
            (3, 7, (('<unknown>', 0),)),
        ])

    def test_snapshot_group_by_line(self):
        """statistics('lineno') must group traces by (filename, lineno)."""
        snapshot, snapshot2 = create_snapshots()
        tb_0 = traceback_lineno('<unknown>', 0)
        tb_a_2 = traceback_lineno('a.py', 2)
        tb_a_5 = traceback_lineno('a.py', 5)
        tb_b_1 = traceback_lineno('b.py', 1)
        tb_c_578 = traceback_lineno('c.py', 578)

        # stats per file and line
        stats1 = snapshot.statistics('lineno')
        self.assertEqual(stats1, [
            tracemalloc.Statistic(tb_b_1, 66, 1),
            tracemalloc.Statistic(tb_a_2, 30, 3),
            tracemalloc.Statistic(tb_0, 7, 1),
            tracemalloc.Statistic(tb_a_5, 2, 1),
        ])

        # stats per file and line (2)
        stats2 = snapshot2.statistics('lineno')
        self.assertEqual(stats2, [
            tracemalloc.Statistic(tb_a_5, 5002, 2),
            tracemalloc.Statistic(tb_c_578, 400, 1),
            tracemalloc.Statistic(tb_a_2, 30, 3),
        ])

        # stats diff per file and line
        statistics = snapshot2.compare_to(snapshot, 'lineno')
        self.assertEqual(statistics, [
            tracemalloc.StatisticDiff(tb_a_5, 5002, 5000, 2, 1),
            tracemalloc.StatisticDiff(tb_c_578, 400, 400, 1, 1),
            tracemalloc.StatisticDiff(tb_b_1, 0, -66, 0, -1),
            tracemalloc.StatisticDiff(tb_0, 0, -7, 0, -1),
            tracemalloc.StatisticDiff(tb_a_2, 30, 0, 3, 0),
        ])

    def test_snapshot_group_by_file(self):
        """statistics('filename') must group traces by filename only."""
        snapshot, snapshot2 = create_snapshots()
        tb_0 = traceback_filename('<unknown>')
        tb_a = traceback_filename('a.py')
        tb_b = traceback_filename('b.py')
        tb_c = traceback_filename('c.py')

        # stats per file
        stats1 = snapshot.statistics('filename')
        self.assertEqual(stats1, [
            tracemalloc.Statistic(tb_b, 66, 1),
            tracemalloc.Statistic(tb_a, 32, 4),
            tracemalloc.Statistic(tb_0, 7, 1),
        ])

        # stats per file (2)
        stats2 = snapshot2.statistics('filename')
        self.assertEqual(stats2, [
            tracemalloc.Statistic(tb_a, 5032, 5),
            tracemalloc.Statistic(tb_c, 400, 1),
        ])

        # stats diff per file
        diff = snapshot2.compare_to(snapshot, 'filename')
        self.assertEqual(diff, [
            tracemalloc.StatisticDiff(tb_a, 5032, 5000, 5, 1),
            tracemalloc.StatisticDiff(tb_c, 400, 400, 1, 1),
            tracemalloc.StatisticDiff(tb_b, 0, -66, 0, -1),
            tracemalloc.StatisticDiff(tb_0, 0, -7, 0, -1),
        ])

    def test_snapshot_group_by_traceback(self):
        """statistics('traceback') must group traces by the full traceback."""
        snapshot, snapshot2 = create_snapshots()

        # stats per traceback
        tb1 = traceback(('a.py', 2), ('b.py', 4))
        tb2 = traceback(('a.py', 5), ('b.py', 4))
        tb3 = traceback(('b.py', 1))
        tb4 = traceback(('<unknown>', 0))
        stats1 = snapshot.statistics('traceback')
        self.assertEqual(stats1, [
            tracemalloc.Statistic(tb3, 66, 1),
            tracemalloc.Statistic(tb1, 30, 3),
            tracemalloc.Statistic(tb4, 7, 1),
            tracemalloc.Statistic(tb2, 2, 1),
        ])

        # stats per traceback (2)
        tb5 = traceback(('c.py', 578))
        stats2 = snapshot2.statistics('traceback')
        self.assertEqual(stats2, [
            tracemalloc.Statistic(tb2, 5002, 2),
            tracemalloc.Statistic(tb5, 400, 1),
            tracemalloc.Statistic(tb1, 30, 3),
        ])

        # stats diff per traceback
        diff = snapshot2.compare_to(snapshot, 'traceback')
        self.assertEqual(diff, [
            tracemalloc.StatisticDiff(tb2, 5002, 5000, 2, 1),
            tracemalloc.StatisticDiff(tb5, 400, 400, 1, 1),
            tracemalloc.StatisticDiff(tb3, 0, -66, 0, -1),
            tracemalloc.StatisticDiff(tb4, 0, -7, 0, -1),
            tracemalloc.StatisticDiff(tb1, 30, 0, 3, 0),
        ])

        # cumulative mode is incompatible with 'traceback' grouping
        self.assertRaises(ValueError,
                          snapshot.statistics, 'traceback', cumulative=True)

    def test_snapshot_group_by_cumulative(self):
        """cumulative=True must charge a trace to every frame of its stack."""
        snapshot, snapshot2 = create_snapshots()
        tb_0 = traceback_filename('<unknown>')
        tb_a = traceback_filename('a.py')
        tb_b = traceback_filename('b.py')
        tb_a_2 = traceback_lineno('a.py', 2)
        tb_a_5 = traceback_lineno('a.py', 5)
        tb_b_1 = traceback_lineno('b.py', 1)
        tb_b_4 = traceback_lineno('b.py', 4)

        # per file
        stats = snapshot.statistics('filename', True)
        self.assertEqual(stats, [
            tracemalloc.Statistic(tb_b, 98, 5),
            tracemalloc.Statistic(tb_a, 32, 4),
            tracemalloc.Statistic(tb_0, 7, 1),
        ])

        # per line
        stats = snapshot.statistics('lineno', True)
        self.assertEqual(stats, [
            tracemalloc.Statistic(tb_b_1, 66, 1),
            tracemalloc.Statistic(tb_b_4, 32, 4),
            tracemalloc.Statistic(tb_a_2, 30, 3),
            tracemalloc.Statistic(tb_0, 7, 1),
            tracemalloc.Statistic(tb_a_5, 2, 1),
        ])

    def test_trace_format(self):
        """str() of traces, tracebacks and frames must be 'file:line'."""
        snapshot, snapshot2 = create_snapshots()
        trace = snapshot.traces[0]
        self.assertEqual(str(trace), 'a.py:2: 10 B')
        traceback = trace.traceback
        self.assertEqual(str(traceback), 'a.py:2')
        frame = traceback[0]
        self.assertEqual(str(frame), 'a.py:2')

    def test_statistic_format(self):
        """str() of a Statistic must show size, count and average."""
        snapshot, snapshot2 = create_snapshots()
        stats = snapshot.statistics('lineno')
        stat = stats[0]
        self.assertEqual(str(stat),
                         'b.py:1: size=66 B, count=1, average=66 B')

    def test_statistic_diff_format(self):
        """str() of a StatisticDiff must include the +/- deltas."""
        snapshot, snapshot2 = create_snapshots()
        stats = snapshot2.compare_to(snapshot, 'lineno')
        stat = stats[0]
        self.assertEqual(str(stat),
                         'a.py:5: size=5002 B (+5000 B), count=2 (+1), average=2501 B')

    def test_slices(self):
        """Trace and frame sequences must support slicing."""
        snapshot, snapshot2 = create_snapshots()
        self.assertEqual(snapshot.traces[:2],
                         (snapshot.traces[0], snapshot.traces[1]))

        traceback = snapshot.traces[0].traceback
        self.assertEqual(traceback[:2],
                         (traceback[0], traceback[1]))

    def test_format_traceback(self):
        """Traceback.format() must honor the limit parameter."""
        snapshot, snapshot2 = create_snapshots()
        def getline(filename, lineno):
            return '  <%s, %s>' % (filename, lineno)
        with unittest.mock.patch('tracemalloc.linecache.getline',
                                 side_effect=getline):
            tb = snapshot.traces[0].traceback
            self.assertEqual(tb.format(),
                             ['  File "a.py", line 2',
                              '    <a.py, 2>',
                              '  File "b.py", line 4',
                              '    <b.py, 4>'])

            self.assertEqual(tb.format(limit=1),
                             ['  File "a.py", line 2',
                              '    <a.py, 2>'])

            # a non-positive limit yields no lines
            self.assertEqual(tb.format(limit=-1),
                             [])
class TestFilters(unittest.TestCase):
    """Tests for tracemalloc.Filter construction and matching rules."""
    maxDiff = 2048

    def test_filter_attributes(self):
        """Filter attributes must reflect constructor arguments."""
        # test default values
        f = tracemalloc.Filter(True, "abc")
        self.assertEqual(f.inclusive, True)
        self.assertEqual(f.filename_pattern, "abc")
        self.assertIsNone(f.lineno)
        self.assertEqual(f.all_frames, False)

        # test custom values
        f = tracemalloc.Filter(False, "test.py", 123, True)
        self.assertEqual(f.inclusive, False)
        self.assertEqual(f.filename_pattern, "test.py")
        self.assertEqual(f.lineno, 123)
        self.assertEqual(f.all_frames, True)

        # parameters passed by keyword
        f = tracemalloc.Filter(inclusive=False, filename_pattern="test.py", lineno=123, all_frames=True)
        self.assertEqual(f.inclusive, False)
        self.assertEqual(f.filename_pattern, "test.py")
        self.assertEqual(f.lineno, 123)
        self.assertEqual(f.all_frames, True)

        # read-only attribute
        self.assertRaises(AttributeError, setattr, f, "filename_pattern", "abc")

    def test_filter_match(self):
        """_match_frame() must honor inclusive/exclusive and lineno."""
        # filter without line number
        f = tracemalloc.Filter(True, "abc")
        self.assertTrue(f._match_frame("abc", 0))
        self.assertTrue(f._match_frame("abc", 5))
        self.assertTrue(f._match_frame("abc", 10))
        self.assertFalse(f._match_frame("12356", 0))
        self.assertFalse(f._match_frame("12356", 5))
        self.assertFalse(f._match_frame("12356", 10))

        # exclusive filter inverts the result
        f = tracemalloc.Filter(False, "abc")
        self.assertFalse(f._match_frame("abc", 0))
        self.assertFalse(f._match_frame("abc", 5))
        self.assertFalse(f._match_frame("abc", 10))
        self.assertTrue(f._match_frame("12356", 0))
        self.assertTrue(f._match_frame("12356", 5))
        self.assertTrue(f._match_frame("12356", 10))

        # filter with line number > 0
        f = tracemalloc.Filter(True, "abc", 5)
        self.assertFalse(f._match_frame("abc", 0))
        self.assertTrue(f._match_frame("abc", 5))
        self.assertFalse(f._match_frame("abc", 10))
        self.assertFalse(f._match_frame("12356", 0))
        self.assertFalse(f._match_frame("12356", 5))
        self.assertFalse(f._match_frame("12356", 10))

        f = tracemalloc.Filter(False, "abc", 5)
        self.assertTrue(f._match_frame("abc", 0))
        self.assertFalse(f._match_frame("abc", 5))
        self.assertTrue(f._match_frame("abc", 10))
        self.assertTrue(f._match_frame("12356", 0))
        self.assertTrue(f._match_frame("12356", 5))
        self.assertTrue(f._match_frame("12356", 10))

        # filter with line number 0
        f = tracemalloc.Filter(True, "abc", 0)
        self.assertTrue(f._match_frame("abc", 0))
        self.assertFalse(f._match_frame("abc", 5))
        self.assertFalse(f._match_frame("abc", 10))
        self.assertFalse(f._match_frame("12356", 0))
        self.assertFalse(f._match_frame("12356", 5))
        self.assertFalse(f._match_frame("12356", 10))

        f = tracemalloc.Filter(False, "abc", 0)
        self.assertFalse(f._match_frame("abc", 0))
        self.assertTrue(f._match_frame("abc", 5))
        self.assertTrue(f._match_frame("abc", 10))
        self.assertTrue(f._match_frame("12356", 0))
        self.assertTrue(f._match_frame("12356", 5))
        self.assertTrue(f._match_frame("12356", 10))

    def test_filter_match_filename(self):
        """Exact filename matching must follow the inclusive flag."""
        def fnmatch(inclusive, filename, pattern):
            f = tracemalloc.Filter(inclusive, pattern)
            return f._match_frame(filename, 0)

        self.assertTrue(fnmatch(True, "abc", "abc"))
        self.assertFalse(fnmatch(True, "12356", "abc"))
        self.assertFalse(fnmatch(True, "<unknown>", "abc"))

        self.assertFalse(fnmatch(False, "abc", "abc"))
        self.assertTrue(fnmatch(False, "12356", "abc"))
        self.assertTrue(fnmatch(False, "<unknown>", "abc"))

    def test_filter_match_filename_joker(self):
        """'*' wildcards, .pyc/.py aliasing and platform path rules."""
        def fnmatch(filename, pattern):
            filter = tracemalloc.Filter(True, pattern)
            return filter._match_frame(filename, 0)

        # empty string
        self.assertFalse(fnmatch('abc', ''))
        self.assertFalse(fnmatch('', 'abc'))
        self.assertTrue(fnmatch('', ''))
        self.assertTrue(fnmatch('', '*'))

        # no *
        self.assertTrue(fnmatch('abc', 'abc'))
        self.assertFalse(fnmatch('abc', 'abcd'))
        self.assertFalse(fnmatch('abc', 'def'))

        # a*
        self.assertTrue(fnmatch('abc', 'a*'))
        self.assertTrue(fnmatch('abc', 'abc*'))
        self.assertFalse(fnmatch('abc', 'b*'))
        self.assertFalse(fnmatch('abc', 'abcd*'))

        # a*b
        self.assertTrue(fnmatch('abc', 'a*c'))
        self.assertTrue(fnmatch('abcdcx', 'a*cx'))
        self.assertFalse(fnmatch('abb', 'a*c'))
        self.assertFalse(fnmatch('abcdce', 'a*cx'))

        # a*b*c
        self.assertTrue(fnmatch('abcde', 'a*c*e'))
        self.assertTrue(fnmatch('abcbdefeg', 'a*bd*eg'))
        self.assertFalse(fnmatch('abcdd', 'a*c*e'))
        self.assertFalse(fnmatch('abcbdefef', 'a*bd*eg'))

        # replace .pyc suffix with .py
        self.assertTrue(fnmatch('a.pyc', 'a.py'))
        self.assertTrue(fnmatch('a.py', 'a.pyc'))

        if os.name == 'nt':
            # case insensitive
            self.assertTrue(fnmatch('aBC', 'ABc'))
            self.assertTrue(fnmatch('aBcDe', 'Ab*dE'))
            self.assertTrue(fnmatch('a.pyc', 'a.PY'))
            self.assertTrue(fnmatch('a.py', 'a.PYC'))
        else:
            # case sensitive
            self.assertFalse(fnmatch('aBC', 'ABc'))
            self.assertFalse(fnmatch('aBcDe', 'Ab*dE'))
            self.assertFalse(fnmatch('a.pyc', 'a.PY'))
            self.assertFalse(fnmatch('a.py', 'a.PYC'))

        if os.name == 'nt':
            # normalize alternate separator "/" to the standard separator "\"
            self.assertTrue(fnmatch(r'a/b', r'a\b'))
            self.assertTrue(fnmatch(r'a\b', r'a/b'))
            self.assertTrue(fnmatch(r'a/b\c', r'a\b/c'))
            self.assertTrue(fnmatch(r'a/b/c', r'a\b\c'))
        else:
            # there is no alternate separator
            self.assertFalse(fnmatch(r'a/b', r'a\b'))
            self.assertFalse(fnmatch(r'a\b', r'a/b'))
            self.assertFalse(fnmatch(r'a/b\c', r'a\b/c'))
            self.assertFalse(fnmatch(r'a/b/c', r'a\b\c'))

        # as of 3.5, .pyo is no longer munged to .py
        self.assertFalse(fnmatch('a.pyo', 'a.py'))

    def test_filter_match_trace(self):
        """_match_traceback() must honor the all_frames flag."""
        t1 = (("a.py", 2), ("b.py", 3))
        t2 = (("b.py", 4), ("b.py", 5))
        t3 = (("c.py", 5), ('<unknown>', 0))
        unknown = (('<unknown>', 0),)

        # inclusive + all_frames: match if any frame matches
        f = tracemalloc.Filter(True, "b.py", all_frames=True)
        self.assertTrue(f._match_traceback(t1))
        self.assertTrue(f._match_traceback(t2))
        self.assertFalse(f._match_traceback(t3))
        self.assertFalse(f._match_traceback(unknown))

        # inclusive + not all_frames: only the most recent frame counts
        f = tracemalloc.Filter(True, "b.py", all_frames=False)
        self.assertFalse(f._match_traceback(t1))
        self.assertTrue(f._match_traceback(t2))
        self.assertFalse(f._match_traceback(t3))
        self.assertFalse(f._match_traceback(unknown))

        f = tracemalloc.Filter(False, "b.py", all_frames=True)
        self.assertFalse(f._match_traceback(t1))
        self.assertFalse(f._match_traceback(t2))
        self.assertTrue(f._match_traceback(t3))
        self.assertTrue(f._match_traceback(unknown))

        f = tracemalloc.Filter(False, "b.py", all_frames=False)
        self.assertTrue(f._match_traceback(t1))
        self.assertFalse(f._match_traceback(t2))
        self.assertTrue(f._match_traceback(t3))
        self.assertTrue(f._match_traceback(unknown))

        # "<unknown>" is matchable like any other filename
        f = tracemalloc.Filter(False, "<unknown>", all_frames=False)
        self.assertTrue(f._match_traceback(t1))
        self.assertTrue(f._match_traceback(t2))
        self.assertTrue(f._match_traceback(t3))
        self.assertFalse(f._match_traceback(unknown))

        f = tracemalloc.Filter(True, "<unknown>", all_frames=True)
        self.assertFalse(f._match_traceback(t1))
        self.assertFalse(f._match_traceback(t2))
        self.assertTrue(f._match_traceback(t3))
        self.assertTrue(f._match_traceback(unknown))

        f = tracemalloc.Filter(False, "<unknown>", all_frames=True)
        self.assertTrue(f._match_traceback(t1))
        self.assertTrue(f._match_traceback(t2))
        self.assertFalse(f._match_traceback(t3))
        self.assertFalse(f._match_traceback(unknown))
class TestCommandLine(unittest.TestCase):
    """Tests for enabling tracemalloc via PYTHONTRACEMALLOC and -X tracemalloc.

    Each test spawns a fresh interpreter through test.support's
    assert_python_ok()/assert_python_failure() helpers.
    """

    def test_env_var_disabled_by_default(self):
        # not tracing by default
        code = 'import tracemalloc; print(tracemalloc.is_tracing())'
        ok, stdout, stderr = assert_python_ok('-c', code)
        stdout = stdout.rstrip()
        self.assertEqual(stdout, b'False')

    @unittest.skipIf(interpreter_requires_environment(),
                     'Cannot run -E tests when PYTHON env vars are required.')
    def test_env_var_ignored_with_E(self):
        """PYTHON* environment variables must be ignored when -E is present."""
        code = 'import tracemalloc; print(tracemalloc.is_tracing())'
        ok, stdout, stderr = assert_python_ok('-E', '-c', code, PYTHONTRACEMALLOC='1')
        stdout = stdout.rstrip()
        self.assertEqual(stdout, b'False')

    def test_env_var_enabled_at_startup(self):
        # tracing at startup
        code = 'import tracemalloc; print(tracemalloc.is_tracing())'
        ok, stdout, stderr = assert_python_ok('-c', code, PYTHONTRACEMALLOC='1')
        stdout = stdout.rstrip()
        self.assertEqual(stdout, b'True')

    def test_env_limit(self):
        # start and set the number of frames
        code = 'import tracemalloc; print(tracemalloc.get_traceback_limit())'
        ok, stdout, stderr = assert_python_ok('-c', code, PYTHONTRACEMALLOC='10')
        stdout = stdout.rstrip()
        self.assertEqual(stdout, b'10')

    def test_env_var_invalid(self):
        """Invalid PYTHONTRACEMALLOC values must abort startup with an error."""
        for nframe in (-1, 0, 2**30):
            with self.subTest(nframe=nframe):
                with support.SuppressCrashReport():
                    ok, stdout, stderr = assert_python_failure(
                        '-c', 'pass',
                        PYTHONTRACEMALLOC=str(nframe))
                    self.assertIn(b'PYTHONTRACEMALLOC: invalid '
                                  b'number of frames',
                                  stderr)

    def test_sys_xoptions(self):
        """-X tracemalloc[=NFRAME] must start tracing with that limit."""
        for xoptions, nframe in (
            ('tracemalloc', 1),
            ('tracemalloc=1', 1),
            ('tracemalloc=15', 15),
        ):
            with self.subTest(xoptions=xoptions, nframe=nframe):
                code = 'import tracemalloc; print(tracemalloc.get_traceback_limit())'
                ok, stdout, stderr = assert_python_ok('-X', xoptions, '-c', code)
                stdout = stdout.rstrip()
                self.assertEqual(stdout, str(nframe).encode('ascii'))

    def test_sys_xoptions_invalid(self):
        """Invalid -X tracemalloc=NFRAME values must abort startup."""
        for nframe in (-1, 0, 2**30):
            with self.subTest(nframe=nframe):
                with support.SuppressCrashReport():
                    args = ('-X', 'tracemalloc=%s' % nframe, '-c', 'pass')
                    ok, stdout, stderr = assert_python_failure(*args)
                    self.assertIn(b'-X tracemalloc=NFRAME: invalid '
                                  b'number of frames',
                                  stderr)

    def test_pymem_alloc0(self):
        # Issue #21639: Check that PyMem_Malloc(0) with tracemalloc enabled
        # does not crash.
        code = 'import _testcapi; _testcapi.test_pymem_alloc0(); 1'
        assert_python_ok('-X', 'tracemalloc', '-c', code)
@unittest.skipIf(_testcapi is None, 'need _testcapi')
class TestCAPI(unittest.TestCase):
    """Tests for the C tracking API (_PyTraceMalloc_Track/Untrack) exposed
    through _testcapi, using a custom domain and a fake pointer."""
    maxDiff = 80 * 20

    def setUp(self):
        # The tests manage tracing themselves.
        if tracemalloc.is_tracing():
            self.skipTest("tracemalloc must be stopped before the test")

        self.domain = 5
        self.size = 123
        self.obj = allocate_bytes(self.size)[0]

        # for the type "object", id(obj) is the address of its memory block.
        # This type is not tracked by the garbage collector
        self.ptr = id(self.obj)

    def tearDown(self):
        tracemalloc.stop()

    def get_traceback(self):
        # Return the Traceback recorded for (domain, ptr), or None.
        frames = _testcapi.tracemalloc_get_traceback(self.domain, self.ptr)
        if frames is not None:
            return tracemalloc.Traceback(frames)
        else:
            return None

    def track(self, release_gil=False, nframe=1):
        # Register a fake allocation and return the frames expected to be
        # recorded for it (delta 2: the C call is two lines below).
        frames = get_frames(nframe, 2)
        _testcapi.tracemalloc_track(self.domain, self.ptr, self.size,
                                    release_gil)
        return frames

    def untrack(self):
        _testcapi.tracemalloc_untrack(self.domain, self.ptr)

    def get_traced_memory(self):
        # Get the traced size in the domain
        snapshot = tracemalloc.take_snapshot()
        domain_filter = tracemalloc.DomainFilter(True, self.domain)
        snapshot = snapshot.filter_traces([domain_filter])
        return sum(trace.size for trace in snapshot.traces)

    def check_track(self, release_gil):
        # Shared body of test_track()/test_track_without_gil().
        nframe = 5
        tracemalloc.start(nframe)

        size = tracemalloc.get_traced_memory()[0]

        frames = self.track(release_gil, nframe)
        self.assertEqual(self.get_traceback(),
                         tracemalloc.Traceback(frames))

        self.assertEqual(self.get_traced_memory(), self.size)

    def test_track(self):
        self.check_track(False)

    def test_track_without_gil(self):
        # check that calling _PyTraceMalloc_Track() without holding the GIL
        # works too
        self.check_track(True)

    def test_track_already_tracked(self):
        """Re-tracking a pointer must replace the stored traceback."""
        nframe = 5
        tracemalloc.start(nframe)

        # track a first time
        self.track()

        # calling _PyTraceMalloc_Track() must remove the old trace and add
        # a new trace with the new traceback
        frames = self.track(nframe=nframe)
        self.assertEqual(self.get_traceback(),
                         tracemalloc.Traceback(frames))

    def test_untrack(self):
        """Untracking must remove the trace; extra calls must be harmless."""
        tracemalloc.start()

        self.track()
        self.assertIsNotNone(self.get_traceback())
        self.assertEqual(self.get_traced_memory(), self.size)

        # untrack must remove the trace
        self.untrack()
        self.assertIsNone(self.get_traceback())
        self.assertEqual(self.get_traced_memory(), 0)

        # calling _PyTraceMalloc_Untrack() multiple times must not crash
        self.untrack()
        self.untrack()

    def test_stop_track(self):
        """Tracking after stop() must raise RuntimeError."""
        tracemalloc.start()
        tracemalloc.stop()

        with self.assertRaises(RuntimeError):
            self.track()
        self.assertIsNone(self.get_traceback())

    def test_stop_untrack(self):
        """Untracking after stop() must raise RuntimeError."""
        tracemalloc.start()
        self.track()

        tracemalloc.stop()
        with self.assertRaises(RuntimeError):
            self.untrack()
def test_main():
    """Entry point: run every test case defined in this module."""
    all_cases = (
        TestTracemallocEnabled,
        TestSnapshot,
        TestFilters,
        TestCommandLine,
        TestCAPI,
    )
    support.run_unittest(*all_cases)
# Allow running the test file directly: python test_tracemalloc.py
if __name__ == "__main__":
    test_main()
| python3/Python-3.6.1/Lib/test/test_tracemalloc.py | 36,190 | PYTHON* environment variables must be ignored when -E is present.
_tracemalloc._get_traces() returns a list of (domain, size, traceback_frames) tuples. traceback_frames is a tuple of (filename, line_number) tuples. dummy wrappers to get more useful and identical frames in the traceback Ensure that two identical tracebacks are not duplicated Python allocates some internals objects, so the test must tolerate a small difference between the expected size and the real usage allocate one object destroy the object clear_traces() must reset traced memory counters allocate another object stop() also resets traced memory counters take a snapshot write on disk load from disk tracemalloc must be tracing memory allocations to take a snapshot take a snapshot with a new attribute load() should recreate the attribute everything is fine check that tracemalloc is still working after fork child exclude b.py filter_traces() must not touch the original snapshot only include two lines of a.py No filter: just duplicate the snapshot exclude a.py of domain 1 include domain 1 exclude domain 2 include domain 2 stats per file and line stats per file and line (2) stats diff per file and line stats per file stats per file (2) stats diff per file stats per file stats per file (2) stats diff per file per file per line test default values test custom values parameters passed by keyword read-only attribute filter without line number filter with line number > 0 filter with line number 0 empty string no * a* a*b a*b*c replace .pyc suffix with .py case insensitive case sensitive normalize alternate separator "/" to the standard separator "\" there is no alternate separator as of 3.5, .pyo is no longer munged to .py not tracing by default tracing at startup start and set the number of frames Issue 21639: Check that PyMem_Malloc(0) with tracemalloc enabled does not crash. for the type "object", id(obj) is the address of its memory block. 
This type is not tracked by the garbage collector Get the traced size in the domain check that calling _PyTraceMalloc_Track() without holding the GIL works too track a first time calling _PyTraceMalloc_Track() must remove the old trace and add a new trace with the new traceback untrack must remove the trace calling _PyTraceMalloc_Untrack() multiple times must not crash | 2,306 | en | 0.81375 |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import numpy as np
import torch
from nnunet.network_architecture.generic_modular_residual_UNet import FabiansUNet, get_default_network_config
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.utilities.nd_softmax import softmax_helper
class nnUNetTrainerV2_ResencUNet(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that trains a residual-encoder U-Net
    (FabiansUNet) instead of the default network.

    Deep supervision on the network's decoder is toggled explicitly:
    enabled for training, disabled for validation/prediction.
    """

    def initialize_network(self):
        """Build the FabiansUNet from the plans of the current stage."""
        # 3D vs 2D configuration; instance norm in both cases.
        if self.threeD:
            cfg = get_default_network_config(3, None, norm_type="in")

        else:
            cfg = get_default_network_config(1, None, norm_type="in")

        stage_plans = self.plans['plans_per_stage'][self.stage]
        conv_kernel_sizes = stage_plans['conv_kernel_sizes']
        blocks_per_stage_encoder = stage_plans['num_blocks_encoder']
        blocks_per_stage_decoder = stage_plans['num_blocks_decoder']
        pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes']
        # NOTE(review): positional args — 2 is presumably the feature-map
        # multiplier per downsampling and 320 the feature-count cap; confirm
        # against FabiansUNet's signature.
        self.network = FabiansUNet(self.num_input_channels, self.base_num_features, blocks_per_stage_encoder, 2,
                                   pool_op_kernel_sizes, conv_kernel_sizes, cfg, self.num_classes,
                                   blocks_per_stage_decoder, True, False, 320, InitWeights_He(1e-2))

        if torch.cuda.is_available():
            self.network.cuda()
        # nonlinearity applied at inference time only (training losses get logits)
        self.network.inference_apply_nonlin = softmax_helper

    def setup_DA_params(self):
        """
        net_num_pool_op_kernel_sizes is different in resunet
        """
        super().setup_DA_params()
        # Scale factors for the deep-supervision targets: full resolution
        # first, then the cumulative downsampling of each pooling stage
        # (the coarsest output is dropped via [:-1]).
        self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
            np.vstack(self.net_num_pool_op_kernel_sizes[1:]), axis=0))[:-1]

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 force_separate_z: bool = None, interpolation_order: int = 3, interpolation_order_z=0,
                 segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True):
        """Run nnUNetTrainer.validate() with deep supervision disabled,
        restoring the previous setting afterwards.

        NOTE(review): force_separate_z, interpolation_order and
        interpolation_order_z are accepted but not forwarded to
        nnUNetTrainer.validate(); presumably segmentation_export_kwargs
        supersedes them — confirm against nnUNetTrainer's signature.
        """
        ds = self.network.decoder.deep_supervision
        self.network.decoder.deep_supervision = False
        ret = nnUNetTrainer.validate(self, do_mirroring=do_mirroring, use_sliding_window=use_sliding_window,
                                     step_size=step_size, save_softmax=save_softmax, use_gaussian=use_gaussian,
                                     overwrite=overwrite, validation_folder_name=validation_folder_name,
                                     debug=debug, all_in_gpu=all_in_gpu,
                                     segmentation_export_kwargs=segmentation_export_kwargs,
                                     run_postprocessing_on_folds=run_postprocessing_on_folds)
        self.network.decoder.deep_supervision = ds
        return ret

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
        """Predict a preprocessed case with deep supervision disabled,
        restoring the previous setting afterwards."""
        ds = self.network.decoder.deep_supervision
        self.network.decoder.deep_supervision = False
        ret = nnUNetTrainer.predict_preprocessed_data_return_seg_and_softmax(self, data, do_mirroring=do_mirroring,
                                                                             mirror_axes=mirror_axes,
                                                                             use_sliding_window=use_sliding_window,
                                                                             step_size=step_size,
                                                                             use_gaussian=use_gaussian,
                                                                             pad_border_mode=pad_border_mode,
                                                                             pad_kwargs=pad_kwargs,
                                                                             all_in_gpu=all_in_gpu,
                                                                             verbose=verbose,
                                                                             mixed_precision=mixed_precision)
        self.network.decoder.deep_supervision = ds
        return ret

    def run_training(self):
        """Train with deep supervision enabled, restoring the previous
        setting afterwards."""
        self.maybe_update_lr(self.epoch)  # if we dont overwrite epoch then self.epoch+1 is used which is not what we
        # want at the start of the training
        ds = self.network.decoder.deep_supervision
        self.network.decoder.deep_supervision = True
        ret = nnUNetTrainer.run_training(self)
        self.network.decoder.deep_supervision = ds
        return ret
# Aliases: all four names bind the very same class object (no subclassing).
# Presumably they exist so the same trainer can be scheduled under several
# distinct names (e.g. repeated identical runs) — TODO confirm.
nnUNetTrainerV2_ResencUNet_copy1 = nnUNetTrainerV2_ResencUNet
nnUNetTrainerV2_ResencUNet_copy2 = nnUNetTrainerV2_ResencUNet
nnUNetTrainerV2_ResencUNet_copy3 = nnUNetTrainerV2_ResencUNet
nnUNetTrainerV2_ResencUNet_copy4 = nnUNetTrainerV2_ResencUNet
| nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ResencUNet.py | 6,332 | net_num_pool_op_kernel_sizes is different in resunet
Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. if we dont overwrite epoch then self.epoch+1 is used which is not what we want at the start of the training | 824 | en | 0.865014 |
# Add comments to explain what the output from this program will be and how you know.
def math1():
    """Add 50 and 5; always returns 55."""
    first, second = 50, 5
    return first + second
def math2():
    """Subtract 5 from 50; always returns 45."""
    first, second = 50, 5
    return first - second
def math3():
    """Multiply 50 by 5; always returns 250."""
    first, second = 50, 5
    return first * second
# Only math2() is called, so the program prints a single value.
output_num = math2()
print(output_num)  # prints 45: math2 returns 50 - 5
'''
Add prediction(s) here:
# I think it will work because i am smart. I predict be 45
'''
| return_practice.py | 402 | Add comments to explain what the output from this program will be and how you know. | 83 | en | 0.951642 |
#!/usr/bin/python
#
# A library for finding the optimal dirichlet prior from counts
# By: Max Sklar
# @maxsklar
# https://github.com/maxsklar
# Copyright 2013 Max Sklar
import math
import logging
import random
import scipy.special as mathExtra
import scipy
import numpy as np
def digamma(x): return mathExtra.psi(x)
def trigamma(x): return mathExtra.polygamma(1, x)
# The "sufficient statistic" for a group of multinomials:
# the elementwise average of their log probabilities.
def getSufficientStatistic(multinomials):
  """Return, per component k, the mean of log(m[k]) over all multinomials m."""
  count = len(multinomials)
  dims = len(multinomials[0])
  return [sum(math.log(m[k]) for m in multinomials) / count
          for k in range(dims)]
# Log probability of the data under a given dirichlet
# (equal to the true data log-probability up to a linear transform).
def logProbForMultinomials(alphas, ss, delta):
  """L2-regularized dirichlet objective for alpha vector `alphas` given sufficient statistic `ss`."""
  alpha_arr = np.asarray(alphas)
  value = mathExtra.gammaln(alpha_arr.sum())
  value -= mathExtra.gammaln(alpha_arr).sum()
  value += (alpha_arr * np.asarray(ss)).sum()
  value -= delta * (alpha_arr ** 2).sum()
  return value
# Derivative of the regularized objective with respect to each alpha.
def getGradientForMultinomials(alphas, ss, delta):
  """Gradient vector: psi(sum) + ss[k] - psi(alpha[k]) - 2*delta*alpha[k]."""
  shared = digamma(sum(alphas))  # common term from the gammaln(sum) part
  return [shared + ss[k] - digamma(alphas[k]) - 2 * delta * alphas[k]
          for k in range(len(alphas))]
# The Hessian decomposes into a constant-value matrix plus a diagonal one;
# these two helpers return each part.
def priorHessianConst(alphas, ss, delta):
  """Constant component shared by every Hessian entry."""
  return 2 * delta - trigamma(sum(alphas))
def priorHessianDiag(alphas, ss): return [trigamma(a) for a in alphas]
# Newton step exploiting the constant-plus-diagonal Hessian structure; see
# http://research.microsoft.com/en-us/um/people/minka/papers/dirichlet/minka-dirichlet.pdf (eq 18)
def getPredictedStep(hConst, hDiag, gradient):
  """Return the per-component step predicted by the structured Hessian."""
  weighted = sum(g / h for g, h in zip(gradient, hDiag))
  inv_total = sum(1.0 / h for h in hDiag)
  b = weighted / ((1.0 / hConst) + inv_total)
  return [(b - g) / h for g, h in zip(gradient, hDiag)]
# Alternative step: diagonal hessian applied to the log-alpha parameterization.
def getPredictedStepAlt(hConst, hDiag, gradient, alphas):
  """Return the predicted multiplicative step computed in log-alpha space."""
  denoms = [g - a * h for g, a, h in zip(gradient, alphas, hDiag)]
  Z = hConst * sum(a / d for a, d in zip(alphas, denoms))
  S = sum(1.0 / d / (1 + Z) for d in denoms)
  return [g / d * (1 - hConst * a * S)
          for g, d, a in zip(gradient, denoms, alphas)]
# The sufficient statistic and regularization are passed straight through.
def getTotalLoss(trialPriors, ss, delta):
  """Loss to minimize: the negated regularized log-probability."""
  return -logProbForMultinomials(trialPriors, ss, delta)
def predictStepUsingHessian(gradient, priors, ss, delta):
  """Second-order step in alpha space from the structured Hessian."""
  return getPredictedStep(priorHessianConst(priors, ss, delta),
                          priorHessianDiag(priors, ss),
                          gradient)
def predictStepLogSpace(gradient, priors, ss, delta):
  """Second-order step computed in log-alpha space."""
  return getPredictedStepAlt(priorHessianConst(priors, ss, delta),
                             priorHessianDiag(priors, ss),
                             gradient, priors)
# Returns whether it's a good step, and the loss
def testTrialPriors(trialPriors, ss, delta):
for alpha in trialPriors:
if alpha <= 0:
return float("inf")
return getTotalLoss(trialPriors, ss, delta)
def sqVectorSize(v):
  """Squared Euclidean norm of v."""
  return sum(component ** 2 for component in v)
def findDirichletPriors(ss, initAlphas, max_iter=1000, delta=1e-2):
  """Fit a Dirichlet prior by minimizing the regularized negative log-likelihood.

  ss: sufficient statistic (per-component mean log-probability, see
  getSufficientStatistic). initAlphas: starting alpha vector. max_iter caps
  the outer iterations; delta is the L2 regularization strength.
  Returns the fitted alpha list.
  """
  priors = initAlphas
  # Let the learning begin!!
  #Only step in a positive direction, get the current best loss.
  currentLoss = getTotalLoss(priors, ss, delta)
  gradientToleranceSq = 2 ** -20
  learnRateTolerance = 2 ** -10
  count = 0
  while(count < max_iter):
    count += 1
    #Get the data for taking steps
    gradient = getGradientForMultinomials(priors, ss, delta)
    gradientSize = sqVectorSize(gradient)
    #print(count, "Loss: ", currentLoss, ", Priors: ", priors, ", Gradient Size: ", gradientSize, gradient)
    if (gradientSize < gradientToleranceSq):
      #print("Converged with small gradient")
      return priors
    trialStep = predictStepUsingHessian(gradient, priors, ss, delta)
    #First, try the second order method
    trialPriors = [0]*len(priors)
    for i in range(0, len(priors)): trialPriors[i] = priors[i] + trialStep[i]
    loss = testTrialPriors(trialPriors, ss, delta)
    if loss < currentLoss:
      currentLoss = loss
      priors = trialPriors
      continue
    # Second, try the log-space step.
    trialStep = predictStepLogSpace(gradient, priors, ss, delta)
    trialPriors = [0]*len(priors)
    for i in range(0, len(priors)): trialPriors[i] = priors[i] * math.exp(trialStep[i])
    loss = testTrialPriors(trialPriors, ss, delta)
    # NOTE(review): the log-space trial loss computed just above is immediately
    # overwritten below and its step is never accepted — looks like dead code
    # or a missing acceptance check; confirm intent before changing.
    #Step in the direction of the gradient until there is a loss improvement
    loss = 10000000
    learnRate = 1.0
    while loss > currentLoss:
      learnRate *= 0.9
      trialPriors = [0]*len(priors)
      for i in range(0, len(priors)): trialPriors[i] = priors[i] + gradient[i]*learnRate
      loss = testTrialPriors(trialPriors, ss, delta)
      if (learnRate < learnRateTolerance):
        #print("Converged with small learn rate")
        return priors
    currentLoss = loss
    priors = trialPriors
  #print("Reached max iterations")
  return priors
def findDirichletPriorsFromMultinomials(multinomials, initAlphas):
  """Convenience wrapper: build the sufficient statistic, then optimize the prior."""
  return findDirichletPriors(getSufficientStatistic(multinomials), initAlphas)
| xview/models/dirichletEstimation.py | 5,701 | !/usr/bin/python A library for finding the optimal dirichlet prior from counts By: Max Sklar @maxsklar https://github.com/maxsklar Copyright 2013 Max Sklar Find the "sufficient statistic" for a group of multinomials. Essential, it's the average of the log probabilities Find the log probability of the data for a given dirichlet This is equal to the log probabiliy of the data.. up to a linear transformGives the derivative with respect to the log of prior. This will be used to adjust the loss - DELTA * sum(alphas)The hessian is actually the sum of two matrices: a diagonal matrix and a constant-value matrix.We'll write two functions to get both Compute the next value to try here http://research.microsoft.com/en-us/um/people/minka/papers/dirichlet/minka-dirichlet.pdf (eq 18) Uses the diagonal hessian on the log-alpha valuesThe priors and data are global, so we don't need to pass them in Returns whether it's a good step, and the loss Let the learning begin!!Only step in a positive direction, get the current best loss.Get the data for taking stepsprint(count, "Loss: ", currentLoss, ", Priors: ", priors, ", Gradient Size: ", gradientSize, gradient)print("Converged with small gradient")First, try the second order methodStep in the direction of the gradient until there is a loss improvementprint("Converged with small learn rate")print("Reached max iterations") | 1,374 | en | 0.812546 |
#!/usr/bin/env python3
import unittest
import networkit as nk
class TestReachability(unittest.TestCase):
    """Checks ReachableNodes against a plain BFS ground truth."""

    def testReachableNodes(self):
        for directed in (False, True):
            for exact in (False, True):
                graph = nk.generators.ErdosRenyiGenerator(100, 0.01, directed).generate()
                result = nk.reachability.ReachableNodes(graph, exact).run()
                for source in graph.iterNodes():
                    visited = []
                    nk.traversal.Traversal.BFSfrom(graph, source, lambda node, _: visited.append(node))
                    if exact:
                        self.assertEqual(result.numberOfReachableNodes(source), len(visited))
                    else:
                        # Approximate mode only guarantees lower/upper bounds.
                        self.assertLessEqual(result.numberOfReachableNodesLB(source), len(visited))
                        self.assertGreaterEqual(result.numberOfReachableNodesUB(source), len(visited))
# Run the suite directly when this module is executed as a script.
if __name__ == "__main__":
	unittest.main()
| networkit/test/test_reachability.py | 740 | !/usr/bin/env python3 | 21 | fr | 0.448822 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from senlin.common import exception as exc
from senlin.profiles.os.nova import server
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
# Shared fixture: a fully-populated os.nova.server profile spec used by every
# scenario class in this module.
spec = {
    'type': 'os.nova.server',
    'version': '1.0',
    'properties': {
        'context': {},
        'auto_disk_config': True,
        'availability_zone': 'FAKE_AZ',
        'block_device_mapping': [{
            'device_name': 'FAKE_NAME',
            'volume_size': 1000,
        }],
        'flavor': 'FLAV',
        'image': 'FAKE_IMAGE',
        'key_name': 'FAKE_KEYNAME',
        "metadata": {"meta var": "meta val"},
        'name': 'FAKE_SERVER_NAME',
        'networks': [{
            'floating_ip': 'FAKE_FLOATING_IP',
            'floating_network': 'FAKE_FLOATING_NET',
            'security_groups': ['FAKE_SECURITY_GROUP'],
            'port': 'FAKE_PORT',
            'fixed_ip': 'FAKE_IP',
            'network': 'FAKE_NET',
        }],
        'scheduler_hints': {
            'same_host': 'HOST_ID',
        },
    }
}
class TestAvailabilityZoneValidation(base.SenlinTestCase):
    """Scenario tests for ServerProfile._validate_az.

    Each scenario provides the mocked driver behavior (validate_result),
    the operation context (reason: None for plain validation, or 'create'),
    and the expected outcome (result, or exception plus message).
    Scenarios appear to be expanded by a scenario-loading runner
    (e.g. testscenarios) — confirm against base.SenlinTestCase.
    """
    scenarios = [
        ('validate:success', dict(
            reason=None,
            success=True,
            validate_result=[['FAKE_AZ']],
            result='FAKE_AZ',
            exception=None,
            message='')),
        ('validate:driver_failure', dict(
            reason=None,
            success=False,
            validate_result=exc.InternalError(message='BANG.'),
            result='FAKE_AZ',
            exception=exc.InternalError,
            message='BANG.')),
        ('validate:not_found', dict(
            reason=None,
            success=False,
            validate_result=[[]],
            result='FAKE_AZ',
            exception=exc.InvalidSpec,
            message=("The specified availability_zone 'FAKE_AZ' could "
                     "not be found"))),
        ('create:success', dict(
            reason='create',
            success=True,
            validate_result=[['FAKE_AZ']],
            result='FAKE_AZ',
            exception=None,
            message='')),
        ('create:driver_failure', dict(
            reason='create',
            success=False,
            validate_result=exc.InternalError(message='BANG'),
            result='FAKE_AZ',
            exception=exc.EResourceCreation,
            message='Failed in creating server: BANG.')),
        ('create:not_found', dict(
            reason='create',
            success=False,
            validate_result=[[]],
            result='FAKE_AZ',
            exception=exc.EResourceCreation,
            message=("Failed in creating server: The specified "
                     "availability_zone 'FAKE_AZ' could not be found.")))
    ]
    def setUp(self):
        super(TestAvailabilityZoneValidation, self).setUp()
        self.cc = mock.Mock()
        prof = server.ServerProfile('t', spec)
        prof._computeclient = self.cc
        self.profile = prof
    def test_validation(self):
        # Drive the mocked compute client with this scenario's canned result.
        self.cc.validate_azs.side_effect = self.validate_result
        node = mock.Mock(id='NODE_ID')
        if self.success:
            res = self.profile._validate_az(node, 'FAKE_AZ', self.reason)
            self.assertEqual(self.result, res)
        else:
            ex = self.assertRaises(self.exception,
                                   self.profile._validate_az,
                                   node, 'FAKE_AZ', self.reason)
            self.assertEqual(self.message, six.text_type(ex))
        self.cc.validate_azs.assert_called_once_with(['FAKE_AZ'])
class TestFlavorValidation(base.SenlinTestCase):
    """Scenario tests for ServerProfile._validate_flavor.

    validate_result is what the mocked flavor_find returns (or raises);
    reason selects the context (None / 'create' / 'update'); the rest is
    the expected outcome.
    """
    scenarios = [
        ('validate:success', dict(
            reason=None,
            success=True,
            validate_result=[mock.Mock(id='FID', is_disabled=False)],
            result='FID',
            exception=None,
            message='')),
        ('validate:driver_failure', dict(
            reason=None,
            success=False,
            validate_result=exc.InternalError(message='BANG.'),
            result='FID',
            exception=exc.InternalError,
            message='BANG.')),
        ('validate:not_found', dict(
            reason=None,
            success=False,
            validate_result=exc.InternalError(code=404, message='BANG.'),
            result='FID',
            exception=exc.InvalidSpec,
            message="The specified flavor 'FLAVOR' could not be found.")),
        ('validate:disabled', dict(
            reason=None,
            success=False,
            validate_result=[mock.Mock(id='FID', is_disabled=True)],
            result='FID',
            exception=exc.InvalidSpec,
            message="The specified flavor 'FLAVOR' is disabled")),
        ('create:success', dict(
            reason='create',
            success=True,
            validate_result=[mock.Mock(id='FID', is_disabled=False)],
            result='FID',
            exception=None,
            message='')),
        ('create:driver_failure', dict(
            reason='create',
            success=False,
            validate_result=exc.InternalError(message='BANG'),
            result='FID',
            exception=exc.EResourceCreation,
            message='Failed in creating server: BANG.')),
        ('create:not_found', dict(
            reason='create',
            success=False,
            validate_result=exc.InternalError(code=404, message='BANG'),
            result='FID',
            exception=exc.EResourceCreation,
            message="Failed in creating server: BANG.")),
        ('create:disabled', dict(
            reason='create',
            success=False,
            validate_result=[mock.Mock(id='FID', is_disabled=True)],
            result='FID',
            exception=exc.EResourceCreation,
            message=("Failed in creating server: The specified flavor "
                     "'FLAVOR' is disabled."))),
        ('update:success', dict(
            reason='update',
            success=True,
            validate_result=[mock.Mock(id='FID', is_disabled=False)],
            result='FID',
            exception=None,
            message='')),
        ('update:driver_failure', dict(
            reason='update',
            success=False,
            validate_result=exc.InternalError(message='BANG'),
            result='FID',
            exception=exc.EResourceUpdate,
            message="Failed in updating server 'NOVA_ID': BANG.")),
        ('update:not_found', dict(
            reason='update',
            success=False,
            validate_result=exc.InternalError(code=404, message='BANG'),
            result='FID',
            exception=exc.EResourceUpdate,
            message="Failed in updating server 'NOVA_ID': BANG.")),
        ('update:disabled', dict(
            reason='update',
            success=False,
            validate_result=[mock.Mock(id='FID', is_disabled=True)],
            result='FID',
            exception=exc.EResourceUpdate,
            message=("Failed in updating server 'NOVA_ID': The specified "
                     "flavor 'FLAVOR' is disabled.")))
    ]
    def setUp(self):
        super(TestFlavorValidation, self).setUp()
        self.cc = mock.Mock()
        self.profile = server.ServerProfile('t', spec)
        self.profile._computeclient = self.cc
    def test_validation(self):
        # The mocked driver call yields this scenario's canned result.
        self.cc.flavor_find.side_effect = self.validate_result
        node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
        flavor = 'FLAVOR'
        if self.success:
            res = self.profile._validate_flavor(node, flavor, self.reason)
            self.assertIsNotNone(res)
            self.assertEqual(self.result, res.id)
        else:
            ex = self.assertRaises(self.exception,
                                   self.profile._validate_flavor,
                                   node, flavor, self.reason)
            self.assertEqual(self.message, six.text_type(ex))
        self.cc.flavor_find.assert_called_once_with(flavor, False)
class TestImageValidation(base.SenlinTestCase):
    """Scenario tests for ServerProfile._validate_image.

    validate_result is what the mocked glance image_find returns (or
    raises); reason selects the context (None / 'create' / 'update').
    """
    scenarios = [
        ('validate:success', dict(
            reason=None,
            success=True,
            validate_result=[mock.Mock(id='IMAGE_ID')],
            result='IMAGE_ID',
            exception=None,
            message='')),
        ('validate:driver_failure', dict(
            reason=None,
            success=False,
            validate_result=exc.InternalError(message='BANG.'),
            result='FID',
            exception=exc.InternalError,
            message='BANG.')),
        ('validate:not_found', dict(
            reason=None,
            success=False,
            validate_result=exc.InternalError(code=404, message='BANG.'),
            result='FID',
            exception=exc.InvalidSpec,
            message="The specified image 'IMAGE' could not be found.")),
        ('create:success', dict(
            reason='create',
            success=True,
            validate_result=[mock.Mock(id='IMAGE_ID')],
            result='IMAGE_ID',
            exception=None,
            message='')),
        ('create:driver_failure', dict(
            reason='create',
            success=False,
            validate_result=exc.InternalError(message='BANG'),
            result='FID',
            exception=exc.EResourceCreation,
            message='Failed in creating server: BANG.')),
        ('create:not_found', dict(
            reason='create',
            success=False,
            validate_result=exc.InternalError(code=404, message='BANG'),
            result='FID',
            exception=exc.EResourceCreation,
            message="Failed in creating server: BANG.")),
        ('update:success', dict(
            reason='update',
            success=True,
            validate_result=[mock.Mock(id='IMAGE_ID')],
            result='IMAGE_ID',
            exception=None,
            message='')),
        ('update:driver_failure', dict(
            reason='update',
            success=False,
            validate_result=exc.InternalError(message='BANG'),
            result='FID',
            exception=exc.EResourceUpdate,
            message="Failed in updating server 'NOVA_ID': BANG.")),
        ('update:not_found', dict(
            reason='update',
            success=False,
            validate_result=exc.InternalError(code=404, message='BANG'),
            result='FID',
            exception=exc.EResourceUpdate,
            message="Failed in updating server 'NOVA_ID': BANG.")),
    ]
    def setUp(self):
        super(TestImageValidation, self).setUp()
        self.cc = mock.Mock()
        self.gc = mock.Mock()
        self.profile = server.ServerProfile('t', spec)
        self.profile._computeclient = self.cc
        self.profile._glanceclient = self.gc
    def test_validation(self):
        # Image lookup goes through the mocked glance client.
        self.gc.image_find.side_effect = self.validate_result
        node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
        image = 'IMAGE'
        if self.success:
            res = self.profile._validate_image(node, image, self.reason)
            self.assertIsNotNone(res)
            self.assertEqual(self.result, res.id)
        else:
            ex = self.assertRaises(self.exception,
                                   self.profile._validate_image,
                                   node, image, self.reason)
            self.assertEqual(self.message, six.text_type(ex))
        self.gc.image_find.assert_called_once_with(image, False)
class TestVolumeValidation(base.SenlinTestCase):
    """Scenario tests for ServerProfile._validate_volume.

    validate_result is what the mocked cinder volume_get returns (or
    raises); only volumes in 'available' status are acceptable.
    """
    scenarios = [
        ('validate:success', dict(
            reason=None,
            success=True,
            validate_result=[mock.Mock(id='VOLUME_ID', status='available')],
            result='VOLUME_ID',
            exception=None,
            message='')),
        ('validate:failure', dict(
            reason=None,
            success=False,
            validate_result=[mock.Mock(id='VOLUME_ID', status='in-use')],
            result='VOLUME_ID',
            exception=exc.InvalidSpec,
            message="The volume VOLUME should be in 'available' "
                    "status but is in 'in-use' status.")),
        ('validate:driver_failure', dict(
            reason=None,
            success=False,
            validate_result=exc.InternalError(message='BANG.'),
            result='FID',
            exception=exc.InternalError,
            message='BANG.')),
        ('validate:not_found', dict(
            reason=None,
            success=False,
            validate_result=exc.InternalError(code=404, message='BANG.'),
            result='FID',
            exception=exc.InvalidSpec,
            message="The specified volume 'VOLUME' could not be found.")),
        ('create:success', dict(
            reason='create',
            success=True,
            validate_result=[mock.Mock(id='VOLUME_ID', status='available')],
            result='VOLUME_ID',
            exception=None,
            message='')),
        ('create:driver_failure', dict(
            reason='create',
            success=False,
            validate_result=exc.InternalError(message='BANG'),
            result='FID',
            exception=exc.EResourceCreation,
            message='Failed in creating server: BANG.')),
        ('create:not_found', dict(
            reason='create',
            success=False,
            validate_result=exc.InternalError(code=404, message='BANG'),
            result='FID',
            exception=exc.EResourceCreation,
            message="Failed in creating server: BANG.")),
    ]
    def setUp(self):
        super(TestVolumeValidation, self).setUp()
        # A minimal profile spec using block_device_mapping_v2, since the
        # volume validator is only exercised on that path.
        bdm_v2 = [
            {
                'volume_size': 1,
                'uuid': '6ce0be68',
                'source_type': 'volume',
                'destination_type': 'volume',
                'boot_index': 0,
            },
        ]
        volume_spec = {
            'type': 'os.nova.server',
            'version': '1.0',
            'properties': {
                'flavor': 'FLAV',
                'name': 'FAKE_SERVER_NAME',
                'security_groups': ['HIGH_SECURITY_GROUP'],
                'block_device_mapping_v2': bdm_v2,
            }
        }
        self.vc = mock.Mock()
        self.profile = server.ServerProfile('t', volume_spec)
        self.profile._block_storageclient = self.vc
    def test_validation(self):
        # Volume lookup goes through the mocked block-storage client.
        self.vc.volume_get.side_effect = self.validate_result
        node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
        volume = 'VOLUME'
        if self.success:
            res = self.profile._validate_volume(node, volume, self.reason)
            self.assertIsNotNone(res)
            self.assertEqual(self.result, res.id)
        else:
            ex = self.assertRaises(self.exception,
                                   self.profile._validate_volume,
                                   node, volume, self.reason)
            self.assertEqual(self.message, six.text_type(ex))
        self.vc.volume_get.assert_called_once_with(volume)
class TestKeypairValidation(base.SenlinTestCase):
    """Scenario tests for ServerProfile._validate_keypair.

    validate_result is what the mocked keypair_find returns (or raises);
    reason selects the context (None / 'create' / 'update').
    """
    scenarios = [
        ('validate:success', dict(
            reason=None,
            success=True,
            validate_result=[mock.Mock(id='KEY_ID')],
            result='KEY_ID',
            exception=None,
            message='')),
        ('validate:driver_failure', dict(
            reason=None,
            success=False,
            validate_result=exc.InternalError(message='BANG.'),
            result='FID',
            exception=exc.InternalError,
            message='BANG.')),
        ('validate:not_found', dict(
            reason=None,
            success=False,
            validate_result=exc.InternalError(code=404, message='BANG.'),
            result='FID',
            exception=exc.InvalidSpec,
            message="The specified key_name 'KEY' could not be found.")),
        ('create:success', dict(
            reason='create',
            success=True,
            validate_result=[mock.Mock(id='IMAGE_ID')],
            result='IMAGE_ID',
            exception=None,
            message='')),
        ('create:driver_failure', dict(
            reason='create',
            success=False,
            validate_result=exc.InternalError(message='BANG'),
            result='FID',
            exception=exc.EResourceCreation,
            message='Failed in creating server: BANG.')),
        ('create:not_found', dict(
            reason='create',
            success=False,
            validate_result=exc.InternalError(code=404, message='BANG'),
            result='FID',
            exception=exc.EResourceCreation,
            message="Failed in creating server: BANG.")),
        ('update:success', dict(
            reason='update',
            success=True,
            validate_result=[mock.Mock(id='KEY_ID')],
            result='KEY_ID',
            exception=None,
            message='')),
        ('update:driver_failure', dict(
            reason='update',
            success=False,
            validate_result=exc.InternalError(message='BANG'),
            result='FID',
            exception=exc.EResourceUpdate,
            message="Failed in updating server 'NOVA_ID': BANG.")),
        ('update:not_found', dict(
            reason='update',
            success=False,
            validate_result=exc.InternalError(code=404, message='BANG'),
            result='FID',
            exception=exc.EResourceUpdate,
            message="Failed in updating server 'NOVA_ID': BANG.")),
    ]
    def setUp(self):
        super(TestKeypairValidation, self).setUp()
        self.cc = mock.Mock()
        self.profile = server.ServerProfile('t', spec)
        self.profile._computeclient = self.cc
    def test_validation(self):
        # Keypair lookup goes through the mocked compute client.
        self.cc.keypair_find.side_effect = self.validate_result
        node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
        key = 'KEY'
        if self.success:
            res = self.profile._validate_keypair(node, key, self.reason)
            self.assertIsNotNone(res)
            self.assertEqual(self.result, res.id)
        else:
            ex = self.assertRaises(self.exception,
                                   self.profile._validate_keypair,
                                   node, key, self.reason)
            self.assertEqual(self.message, six.text_type(ex))
        self.cc.keypair_find.assert_called_once_with(key, False)
class TestNetworkValidation(base.SenlinTestCase):
    """Scenario tests for ServerProfile._validate_network.

    Scenario names encode the inputs: net/port/fixed_ip/sgroups/
    floating_net/floating_ip each marked y(es)/n(o)/f(ail)/active.
    The *_result lists feed the mocked neutron client lookups in order;
    result is the expected translated network dict.
    """
    scenarios = [
        ('validate:net-n:port-n:fixed_ip-n:sgroups-n', dict(
            reason=None,
            success=True,
            inputs={'port': 'PORT'},
            net_result=[],
            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
            sg_result=[],
            floating_result=[],
            result={'port': 'PORT_ID'},
            exception=None,
            message='')),
        ('validate:net-y:port-n:fixed_ip-n:sgroups-y', dict(
            reason=None,
            success=True,
            inputs={'network': 'NET', 'security_groups': ['default']},
            net_result=[mock.Mock(id='NET_ID')],
            port_result=[],
            sg_result=[mock.Mock(id='SG_ID')],
            floating_result=[],
            result={'network': 'NET_ID', 'security_groups': ['SG_ID']},
            exception=None,
            message='')),
        ('validate:net-y:port-n:fixed_ip-n:sgroups-n:floating_net-y', dict(
            reason=None,
            success=True,
            inputs={'network': 'NET', 'floating_network': 'NET'},
            net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
            port_result=[],
            sg_result=[],
            floating_result=[],
            result={'network': 'NET_ID', 'floating_network': 'NET_ID'},
            exception=None,
            message='')),
        ('validate:net-y:port-n:fixed_ip-n:floating_net-y:floating_ip-y', dict(
            reason=None,
            success=True,
            inputs={'network': 'NET', 'floating_network': 'NET',
                    'floating_ip': 'FLOATINGIP'},
            net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
            port_result=[],
            sg_result=[],
            floating_result=[mock.Mock(id='FLOATINGIP_ID', status='INACTIVE')],
            result={'network': 'NET_ID', 'floating_network': 'NET_ID',
                    'floating_ip_id': 'FLOATINGIP_ID',
                    'floating_ip': 'FLOATINGIP'},
            exception=None,
            message='')),
        ('validate:net-y:port-n:fixed_ip-y:sgroups-n', dict(
            reason=None,
            success=True,
            inputs={'network': 'NET', 'fixed_ip': 'FIXED_IP'},
            net_result=[mock.Mock(id='NET_ID')],
            port_result=[],
            sg_result=[],
            floating_result=[],
            result={'network': 'NET_ID', 'fixed_ip': 'FIXED_IP'},
            exception=None,
            message='')),
        ('validate:net-f:port-y:fixed_ip-n:sgroups-n', dict(
            reason=None,
            success=False,
            inputs={'network': 'NET', 'port': 'PORT'},
            net_result=[exc.InternalError(message='NET Failure')],
            port_result=[],
            sg_result=[],
            floating_result=[],
            result={},
            exception=exc.InvalidSpec,
            message='NET Failure')),
        ('validate:net-n:port-f:fixed_ip-n', dict(
            reason=None,
            success=False,
            inputs={'port': 'PORT'},
            net_result=[],
            port_result=[exc.InternalError(message='PORT Failure')],
            sg_result=[],
            floating_result=[],
            result={},
            exception=exc.InvalidSpec,
            message='PORT Failure')),
        ('validate:net-n:port-active:fixed_ip-n', dict(
            reason=None,
            success=False,
            inputs={'port': 'PORT'},
            net_result=[],
            port_result=[mock.Mock(id='PORT_ID', status='ACTIVE')],
            sg_result=[],
            floating_result=[],
            result={},
            exception=exc.InvalidSpec,
            message='The status of the port PORT must be DOWN')),
        ('validate:net-n:port-y:fixed_ip-n:floating_net-n:floating_ip-y', dict(
            reason=None,
            success=False,
            inputs={'port': 'PORT', 'floating_ip': 'FLOATINGIP'},
            net_result=[],
            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
            sg_result=[],
            floating_result=[mock.Mock(id='FLOATINGIP_ID', status='INACTIVE')],
            result={},
            exception=exc.InvalidSpec,
            message='Must specify a network to create floating IP')),
        ('validate:net-n:port-y:fixed_ip-n:floating_ip-active', dict(
            reason=None,
            success=False,
            inputs={'port': 'PORT', 'floating_network': 'NET',
                    'floating_ip': 'FLOATINGIP'},
            net_result=[mock.Mock(id='NET_ID')],
            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
            sg_result=[],
            floating_result=[mock.Mock(id='FLOATINGIP_ID', status='ACTIVE')],
            result={},
            exception=exc.InvalidSpec,
            message='the floating IP FLOATINGIP has been used.')),
        ('validate:net-n:port-n:fixed_ip-n', dict(
            reason=None,
            success=False,
            inputs={'fixed_ip': 'FIXED_IP'},
            net_result=[],
            port_result=[],
            sg_result=[],
            floating_result=[],
            result={},
            exception=exc.InvalidSpec,
            message="One of 'port' and 'network' must be provided")),
        ('validate:net-n:port-y:fixed_ip-y', dict(
            reason=None,
            success=False,
            inputs={'port': 'PORT', 'fixed_ip': 'FIXED_IP'},
            net_result=[],
            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
            sg_result=[],
            floating_result=[],
            result={},
            exception=exc.InvalidSpec,
            message=("The 'port' property and the 'fixed_ip' property cannot "
                     "be specified at the same time"))),
        ('create:net-y:port-y:fixed_ip-n', dict(
            reason='create',
            success=True,
            inputs={'network': 'NET', 'port': 'PORT'},
            net_result=[mock.Mock(id='NET_ID')],
            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
            sg_result=[],
            floating_result=[],
            result={'network': 'NET_ID', 'port': 'PORT_ID'},
            exception=None,
            message='')),
        ('create:net-y:port-n:fixed_ip-y', dict(
            reason='create',
            success=True,
            inputs={'network': 'NET', 'fixed_ip': 'FIXED_IP'},
            net_result=[mock.Mock(id='NET_ID')],
            port_result=[],
            sg_result=[],
            floating_result=[],
            result={'network': 'NET_ID', 'fixed_ip': 'FIXED_IP'},
            exception=None,
            message='')),
        ('create:net-y:port-n:fixed_ip-n:sgroups-y', dict(
            reason='create',
            success=True,
            inputs={'network': 'NET', 'security_groups': ['default']},
            net_result=[mock.Mock(id='NET_ID')],
            port_result=[],
            sg_result=[mock.Mock(id='SG_ID')],
            floating_result=[],
            result={'network': 'NET_ID', 'security_groups': ['SG_ID']},
            exception=None,
            message='')),
        ('create:net-y:port-n:fixed_ip-n:sgroups-n:floating_net-y', dict(
            reason=None,
            success=True,
            inputs={'network': 'NET', 'floating_network': 'NET'},
            net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
            port_result=[],
            sg_result=[],
            floating_result=[],
            result={'network': 'NET_ID', 'floating_network': 'NET_ID'},
            exception=None,
            message='')),
        ('create:net-f:port-y:fixed_ip-n', dict(
            reason='create',
            success=False,
            inputs={'network': 'NET', 'port': 'PORT'},
            net_result=[exc.InternalError(message='NET Failure')],
            port_result=[],
            sg_result=[],
            floating_result=[],
            result={},
            exception=exc.EResourceCreation,
            message='Failed in creating server: NET Failure.')),
        ('create:net-n:port-f:fixed_ip-n', dict(
            reason='create',
            success=False,
            inputs={'port': 'PORT'},
            net_result=[],
            port_result=[exc.InternalError(message='PORT Failure')],
            sg_result=[],
            floating_result=[],
            result={},
            exception=exc.EResourceCreation,
            message='Failed in creating server: PORT Failure.')),
        ('create:net-n:port-active:fixed_ip-n', dict(
            reason='create',
            success=False,
            inputs={'port': 'PORT'},
            net_result=[],
            port_result=[mock.Mock(id='PORT_ID', status='ACTIVE')],
            sg_result=[],
            floating_result=[],
            result={},
            exception=exc.EResourceCreation,
            message=('Failed in creating server: The status of the port PORT '
                     'must be DOWN.'))),
        ('create:net-n:port-n:fixed_ip-n', dict(
            reason='create',
            success=False,
            inputs={'fixed_ip': 'FIXED_IP'},
            net_result=[],
            port_result=[],
            sg_result=[],
            floating_result=[],
            result={},
            exception=exc.EResourceCreation,
            message=("Failed in creating server: One of 'port' "
                     "and 'network' must be provided."))),
        ('create:net-n:port-y:fixed_ip-y', dict(
            reason='create',
            success=False,
            inputs={'port': 'PORT', 'fixed_ip': 'FIXED_IP'},
            net_result=[],
            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
            sg_result=[],
            floating_result=[],
            result={},
            exception=exc.EResourceCreation,
            message=("Failed in creating server: The 'port' property and the "
                     "'fixed_ip' property cannot be specified at the same "
                     "time."))),
        ('update:net-y:port-y:fixed_ip-n', dict(
            reason='update',
            success=True,
            inputs={'network': 'NET', 'port': 'PORT'},
            net_result=[mock.Mock(id='NET_ID')],
            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
            sg_result=[],
            floating_result=[],
            result={'network': 'NET_ID', 'port': 'PORT_ID'},
            exception=None,
            message='')),
        ('update:net-y:port-n:fixed_ip-y', dict(
            reason='update',
            success=True,
            inputs={'network': 'NET', 'fixed_ip': 'FIXED_IP'},
            net_result=[mock.Mock(id='NET_ID')],
            port_result=[],
            sg_result=[],
            floating_result=[],
            result={'network': 'NET_ID',
                    'fixed_ip': 'FIXED_IP'},
            exception=None,
            message='')),
        ('update:net-y:port-n:fixed_ip-n:sgroups-y', dict(
            reason='create',
            success=True,
            inputs={'network': 'NET', 'security_groups': ['default']},
            net_result=[mock.Mock(id='NET_ID')],
            port_result=[],
            sg_result=[mock.Mock(id='SG_ID')],
            floating_result=[],
            result={'network': 'NET_ID', 'security_groups': ['SG_ID']},
            exception=None,
            message='')),
        ('update:net-y:port-n:fixed_ip-n:sgroups-n:floating_net-y', dict(
            reason=None,
            success=True,
            inputs={'network': 'NET', 'floating_network': 'NET'},
            net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
            port_result=[],
            sg_result=[],
            floating_result=[],
            result={'network': 'NET_ID', 'floating_network': 'NET_ID'},
            exception=None,
            message='')),
        ('update:net-f:port-y:fixed_ip-n', dict(
            reason='update',
            success=False,
            inputs={'network': 'NET', 'port': 'PORT'},
            net_result=[exc.InternalError(message='NET Failure')],
            port_result=[],
            sg_result=[],
            floating_result=[],
            result={},
            exception=exc.EResourceUpdate,
            message="Failed in updating server 'NOVA_ID': NET Failure.")),
        ('update:net-n:port-f:fixed_ip-n', dict(
            reason='update',
            success=False,
            inputs={'port': 'PORT'},
            net_result=[],
            port_result=[exc.InternalError(message='PORT Failure')],
            sg_result=[],
            floating_result=[],
            result={},
            exception=exc.EResourceUpdate,
            message="Failed in updating server 'NOVA_ID': PORT Failure.")),
        ('update:net-n:port-active:fixed_ip-n', dict(
            reason='update',
            success=False,
            inputs={'port': 'PORT'},
            net_result=[],
            port_result=[mock.Mock(id='PORT_ID', status='ACTIVE')],
            sg_result=[],
            floating_result=[],
            result={},
            exception=exc.EResourceUpdate,
            message=("Failed in updating server 'NOVA_ID': The status of the "
                     "port PORT must be DOWN."))),
        ('update:net-n:port-n:fixed_ip-n', dict(
            reason='update',
            success=False,
            inputs={'fixed_ip': 'FIXED_IP'},
            net_result=[],
            port_result=[],
            sg_result=[],
            floating_result=[],
            result={},
            exception=exc.EResourceUpdate,
            message=("Failed in updating server 'NOVA_ID': One of 'port' "
                     "and 'network' must be provided."))),
        ('update:net-n:port-y:fixed_ip-y', dict(
            reason='update',
            success=False,
            inputs={'port': 'PORT', 'fixed_ip': 'FIXED_IP'},
            net_result=[],
            port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
            sg_result=[],
            floating_result=[],
            result={},
            exception=exc.EResourceUpdate,
            message=("Failed in updating server 'NOVA_ID': The 'port' "
                     "property and the 'fixed_ip' property cannot be "
                     "specified at the same time."))),
    ]
    def setUp(self):
        super(TestNetworkValidation, self).setUp()
        self.nc = mock.Mock()
        self.profile = server.ServerProfile('t', spec)
        self.profile._networkclient = self.nc
    def test_validation(self):
        # Each neutron lookup consumes its scenario-provided result in order.
        self.nc.network_get.side_effect = self.net_result
        self.nc.port_find.side_effect = self.port_result
        self.nc.security_group_find.side_effect = self.sg_result
        self.nc.floatingip_find.side_effect = self.floating_result
        obj = mock.Mock(physical_id='NOVA_ID')
        if self.success:
            res = self.profile._validate_network(obj, self.inputs, self.reason)
            self.assertEqual(self.result, res)
        else:
            ex = self.assertRaises(self.exception,
                                   self.profile._validate_network,
                                   obj, self.inputs, self.reason)
            self.assertEqual(self.message, six.text_type(ex))
        if self.net_result:
            # network_get may run twice (network + floating_network),
            # hence assert_called_with rather than assert_called_once_with.
            self.nc.network_get.assert_called_with('NET')
        if self.port_result:
            self.nc.port_find.assert_called_once_with('PORT')
        if self.sg_result:
            self.nc.security_group_find.assert_called_once_with('default')
        if self.floating_result:
            self.nc.floatingip_find.assert_called_once_with('FLOATINGIP')
class TestNovaServerValidate(base.SenlinTestCase):

    def setUp(self):
        super(TestNovaServerValidate, self).setUp()
        self.context = utils.dummy_context()

    def test_do_validate_all_passed(self):
        """do_validate() must fan out to every per-property validator."""
        profile = server.ServerProfile('t', spec)
        # Stub each individual validator; this test only checks orchestration.
        checks = {name: self.patchobject(profile, '_validate_' + name)
                  for name in ('az', 'flavor', 'image', 'keypair', 'network')}
        obj = mock.Mock()

        self.assertTrue(profile.do_validate(obj))

        props = spec['properties']
        checks['az'].assert_called_once_with(obj, props['availability_zone'])
        checks['flavor'].assert_called_once_with(obj, props['flavor'])
        checks['image'].assert_called_once_with(obj, props['image'])
        checks['keypair'].assert_called_once_with(obj, props['key_name'])
        checks['network'].assert_called_once_with(obj, props['networks'][0])
| senlin/tests/unit/profiles/test_nova_server_validate.py | 35,642 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 525 | en | 0.872906 |
from setuptools import setup, find_packages

setup(
    name='simplefb',
    version='0.2.0a1',
    description='A simple facebook graph api and auth Mixins',
    url='https://github.com/fm100/simplefb',
    author='Freddie Park',
    author_email='sorelove@gmail.com',
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        # '0.2.0a1' is a PEP 440 alpha pre-release, so advertise Alpha
        # status instead of the contradictory 'Production/Stable'.
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='facebook graph api auth',
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
)
| setup.py | 1,206 | See https://pypi.python.org/pypi?%3Aaction=list_classifiers How mature is this project? Common values are 3 - Alpha 4 - Beta 5 - Production/Stable Indicate who your project is intended for Pick your license as you wish (should match "license" above) Specify the Python versions you support here. In particular, ensure that you indicate whether you support Python 2, Python 3 or both. | 389 | en | 0.794938 |
#
# Copyright (c) 2019-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Multi-threaded sample to run a RMNet & SSDMobilenet v2 that will
# detect only person, bike and vehicle (change the output parsing
# for more classes)
#
# Example usage:
# RMNet: python3.6 multi_inputs.py -n "RMNet" -l "data" -o "detection_out"
# -d 1024 -i 127.0.0.1 -p 9001 -c 1
# -f /var/repos/github/sample-videos/person-bicycle-car-detection.mp4
# SSDMobileNet: python3.6 multi_inputs.py -n "SSDMobileNet" -l "image_tensor"
# -o "DetectionOutput" -d 300 -i 127.0.0.1 -p 9001 -c 1
# -f /var/repos/github/sample-videos/person-bicycle-car-detection.mp4
from __future__ import print_function
from argparse import ArgumentParser, SUPPRESS
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from time import time, sleep
import sys
import os
import cv2
import grpc
import threading
import logging as log
from tensorflow import make_tensor_proto, make_ndarray
# global data (shared between threads & main)
# Class id -> label for the person/vehicle/bike models; index 4 is a
# catch-all for any label the model reports outside the supported range.
CLASSES = ["None", "Pedestrian", "Vehicle", "Bike", "Other"]
# BGR color used to draw the box and label text for each class above.
COLORS = [(255, 255, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255), (128, 128, 128)]
# Valid values for thread_function()'s src_type argument.
SRC_TYPE = ["Camera", "Video"]
exit_ok = False  # manage thread loop
CAM_WIDTH = 640  # camera width
CAM_HEIGHT = 480  # camera height
CAM_FPS = 30  # camera speed
CONFIDENCE_THRESHOLD = 0.75  # detection confidence
#####################################################################################
def build_argparser():
    """Construct the command-line parser for this sample.

    Help is registered manually (add_help=False) so that -h lands in the
    same 'Options' group as every other flag.
    """
    parser = ArgumentParser(add_help=False)
    opts = parser.add_argument_group('Options')
    opts.add_argument('-h', '--help', action='help', default=SUPPRESS,
                      help='Show this help message and exit.')
    opts.add_argument('-n', '--network_name', type=str, required=True,
                      help='Network name')
    opts.add_argument('-l', '--input_layer', type=str, required=True,
                      help='Input layer name')
    opts.add_argument('-o', '--output_layer', type=str, required=True,
                      help='Output layer name')
    opts.add_argument('-d', '--frame_size', type=int, required=True,
                      help='Input frame width and height that matches used model')
    opts.add_argument('-c', '--num_cameras', type=int, required=False, default=1,
                      help='Number of cameras to be used')
    opts.add_argument('-f', '--file', type=str, required=False,
                      help='Path to the video file')
    opts.add_argument('-i', '--ip', required=True,
                      help='ip address of the ovms')
    opts.add_argument('-p', '--port', required=True,
                      help='port of the ovms')
    return parser
# Decoding idea based on the link below. Not very accurate. So pls implement yours
# https://github.com/opencv/open_model_zoo/blob/master/intel_models/\
# person-vehicle-bike-detection-crossroad-0078/\
# description/person-vehicle-bike-detection-crossroad-0078.md
def parse_output(thr_id, res, frame):
    """Draw detection boxes from an SSD-style prediction blob onto *frame*.

    Each batch entry in ``res`` is expected to hold rows shaped
    [image_id, label, confidence, x_min, y_min, x_max, y_max], with box
    coordinates normalized to [0, 1] (TODO confirm against the model in
    use).  ``thr_id`` is accepted for interface compatibility but unused.
    Returns the (in-place) annotated frame.
    """
    # Frame dimensions are loop-invariant; look them up once instead of per
    # detection as before.
    height, width = frame.shape[:2]
    for data in res:
        for l_pred in data[0]:
            img_id = l_pred[0]
            label = l_pred[1]
            conf = l_pred[2]
            x_min = l_pred[3]
            y_min = l_pred[4]
            x_max = l_pred[5]
            y_max = l_pred[6]
            # preventing any wrong array indexing (for RMNet)
            if label > 4:
                # Unsupported class label detected. Change to `other`.
                label = 4
            # img_id == -1 marks an empty/padding row.
            # Do you want confidence level to be passed from command line?
            if img_id != -1 and conf >= CONFIDENCE_THRESHOLD:
                # draw the bounding boxes on the frame
                cv2.rectangle(frame, (int(width * x_min), int(height * y_min)),
                              (int(width * x_max), int(height * y_max)),
                              COLORS[int(label)], 2)
                cv2.putText(frame, str(CLASSES[int(label)]),
                            (int(width * x_min) - 10, int(height * y_min) - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[int(label)], 2)
    return frame
# This is common for both the camera & video files
def thread_function(thr_id, network_name, input_layer, output_layer, input_dimension,
                    ip, port, disp_buf, src_type, src_name):
    """Worker loop for one input source (UVC camera or video file).

    Reads frames, sends each to OpenVINO Model Server over gRPC, and stores
    the annotated result in disp_buf[thr_id] for the main thread to display.
    Runs until the global exit_ok flag is set or a gRPC call fails.

    thr_id          -- worker index; doubles as the UVC device index for
                       camera sources and as the key into disp_buf
    network_name    -- model name as registered in OVMS
    input_layer     -- name of the model's input tensor
    output_layer    -- name of the model's output tensor
    input_dimension -- square side length the model expects
    ip, port        -- OVMS endpoint
    disp_buf        -- dict shared with main(); frames to display are put here
    src_type        -- "Camera" or "Video" (see SRC_TYPE)
    src_name        -- video file path (ignored for cameras)
    """
    if src_type == "Camera":
        # UVC camera init - camera threads always come first and we use it
        # to generate the camera indexes
        cam = cv2.VideoCapture(thr_id)
        if not (cam.isOpened()):
            log.error("Failed to open the UVC camera {}".format(thr_id))
            return
        cam.set(cv2.CAP_PROP_FRAME_WIDTH, CAM_WIDTH)
        cam.set(cv2.CAP_PROP_FRAME_HEIGHT, CAM_HEIGHT)
        # not all UVC cameras honor below request
        cam.set(cv2.CAP_PROP_FPS, CAM_FPS)
        # If your camera sends other than MJPEG, change below
        cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
    elif src_type == "Video":
        # Assumption: src_name will be valid
        cam = cv2.VideoCapture(src_name)

    # inference stats, reset every second when they are logged
    fps = 0  # camera fps
    inf_fps = 0  # inference fps
    dropped_fps = 0  # dropped frame fps
    cam_start_time = time()

    # ovms connection
    channel = grpc.insecure_channel("{}:{}".format(ip, port))
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    # Note: Pls maintain the same name while launching ovms docker container
    request.model_spec.name = network_name

    global exit_ok
    while exit_ok == False:
        ret, frame = cam.read()
        if src_type == "Video":
            # restart the video file when it reaches the end
            if not ret:
                cam.set(cv2.CAP_PROP_POS_FRAMES, 0)
                continue
            # normalize the video frame dimension to that of the camera
            else:
                # to maintain the frame inferencing parity with the cameras, lets sleep
                # here to maintain cam_fps speed
                sleep((1000 / CAM_FPS) / 1000)
                # enable below line to keep video file & camera output window dimensions the same
                # frame = cv2.resize(frame, (CAM_WIDTH, CAM_HEIGHT))

        fps = fps + 1
        # Log and reset the per-second counters once a second has elapsed.
        if (time() - cam_start_time) * 1000 >= 1000:
            log.warning('{}{} fps: {}, Inf fps: {}, dropped fps: {}'
                        .format(src_type, thr_id, fps, inf_fps, dropped_fps))
            fps = 0
            inf_fps = 0
            dropped_fps = 0
            cam_start_time = time()

        # resize the frame to what network input layer expects it to be
        image = cv2.resize(frame, (input_dimension, input_dimension))
        # HWC -> NCHW, batch size 1.
        image = image.transpose(2, 0, 1).reshape(1, 3, input_dimension, input_dimension)
        image = image.astype('float32')

        inf_time = time()
        # send the input as protobuf
        request.inputs[input_layer].CopyFrom(
            make_tensor_proto(image, shape=None))

        try:
            # 10 second deadline; any gRPC failure ends this worker thread.
            result = stub.Predict(request, 10.0)
        except Exception as e:
            log.error('Caught exception {}'.format(e))
            cam.release()
            return

        # NOTE(review): 'duration' is measured but never reported anywhere —
        # presumably left over for debugging; confirm before removing.
        duration = time() - inf_time

        # decode the received output as protobuf
        res = make_ndarray(result.outputs[output_layer])

        if not res.any():
            log.error('Thr{}: Predictions came back with wrong output layer name'.format(thr_id))
            dropped_fps = dropped_fps + 1
            # Still publish the raw frame so the display keeps updating.
            disp_buf[thr_id] = frame
        else:
            log.debug('Predictions came back fine')
            inf_fps = inf_fps + 1
            disp_buf[thr_id] = parse_output(thr_id, res, frame)
    # while exit_ok == False

    cam.release()
    log.warning('Exiting thread {}'.format(thr_id))
#####################################################################################
def main():
    """Parse arguments, spawn one worker thread per source, display results.

    Frames annotated by the workers arrive via disp_buf and are shown from
    this (main) thread, because cv2.imshow() does not work from threads.
    Pressing 'q' in any window stops all workers and exits.
    """
    # Fixed: the format string used "$(levelname)s", which the logging module
    # never substitutes; the correct placeholder syntax is "%(levelname)s".
    log.basicConfig(format="[%(levelname)s ] %(message)s", level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()

    num_cam = args.num_cameras if (args.num_cameras) else 0
    vid_src = args.file
    network_name = args.network_name
    input_layer = args.input_layer
    output_layer = args.output_layer
    input_dimension = args.frame_size
    ip_addr = args.ip
    port_no = args.port

    if not args.file and not args.num_cameras:
        log.error('Please supply either the camera or the video file. Try -f for options')
        return
    if not ip_addr or not port_no:
        log.error('Please supply valid IP and/or port number of OVMS server')
        return

    video_files = []
    if vid_src:
        if os.path.isdir(vid_src):
            for root, _dirs, files in os.walk(vid_src):
                for fname in files:
                    # only mp4 files supported as of now
                    if '.mp4' in fname:
                        # Fixed: plain 'root + fname' concatenation dropped the
                        # path separator; os.path.join handles it correctly.
                        video_files.append(os.path.join(root, fname))
        elif os.path.isfile(vid_src):
            if '.mp4' in vid_src:
                video_files.append(vid_src)

    # thread management
    num_srcs = num_cam + len(video_files)
    thr = [None] * num_srcs
    # display buffers shared between worker threads and this thread
    disp_buf = {}

    # Known issue: Depending on the USB enumeration, camera nodes need not be
    # in sequence. Pls pass the device node info through a file or command line
    # if it happens in your system
    for i in range(num_cam):
        disp_buf[i] = None
        thr[i] = threading.Thread(target=thread_function,
                                  args=(i, network_name, input_layer, output_layer,
                                        input_dimension, ip_addr, port_no, disp_buf,
                                        SRC_TYPE[0], None))
        thr[i].start()
    for i in range(num_cam, num_srcs):
        disp_buf[i] = None
        thr[i] = threading.Thread(target=thread_function,
                                  args=(i, network_name, input_layer, output_layer,
                                        input_dimension, ip_addr, port_no, disp_buf,
                                        SRC_TYPE[1], video_files[i - num_cam]))
        thr[i].start()

    # For whatever reasons, cv2.imshow() doesnt work from threads. Hence we shove
    # the infered data to the main thread to display.
    global exit_ok
    while exit_ok == False:
        for i in range(num_srcs):
            if disp_buf[i] is not None:
                cv2.imshow('Predictions {}'.format(i), disp_buf[i])
                disp_buf[i] = None
            # exit the program if 'q' is pressed on any window
            if cv2.waitKey(1) == ord('q'):
                exit_ok = True
                break

    # Fixed: wait for ALL worker threads — the original joined only the camera
    # threads and never joined the video-file workers.
    for worker in thr:
        worker.join()

    # close all open windows
    cv2.destroyAllWindows()
    log.warning('Good Bye!')
# Script entry point.  main() returns None on success, so 'main() or 0'
# yields exit status 0; any truthy return value from main() would be
# passed straight through to sys.exit().
if __name__ == '__main__':
    sys.exit(main() or 0)
| example_client/multi_inputs.py | 10,602 | Copyright (c) 2019-2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Multi-threaded sample to run a RMNet & SSDMobilenet v2 that will detect only person, bike and vehicle (change the output parsing for more classes) Example usage: RMNet: python3.6 multi_inputs.py -n "RMNet" -l "data" -o "detection_out" -d 1024 -i 127.0.0.1 -p 9001 -c 1 -f /var/repos/github/sample-videos/person-bicycle-car-detection.mp4 SSDMobileNet: python3.6 multi_inputs.py -n "SSDMobileNet" -l "image_tensor" -o "DetectionOutput" -d 300 -i 127.0.0.1 -p 9001 -c 1 -f /var/repos/github/sample-videos/person-bicycle-car-detection.mp4 global data (shared between threads & main) manage thread loop camera width camera height camera speed detection confidence Decoding idea based on the link below. Not very accurate. So pls implement yours https://github.com/opencv/open_model_zoo/blob/master/intel_models/\ person-vehicle-bike-detection-crossroad-0078/\ description/person-vehicle-bike-detection-crossroad-0078.md tuple actual predictions preventing any wrong array indexing (for RMNet) Unsupported class label detected. Change to `other`. Do you want confidence level to be passed from command line? 
draw the bounding boxes on the frame This is common for both the camera & video files UVC camera init - camera threads always come first and we use it to generate the camera indexes not all UVC cameras honor below request If your camera sends other than MJPEG, change below Assumption: src_name will be valid inference stats camera fps inference fps dropped frame fps ovms connection Note: Pls maintain the same name while launching ovms docker container restart the video file when it reaches the end normalize the video frame dimension to that of the camera to maintain the frame inferencing parity with the cameras, lets sleep here to maintain cam_fps speed enable below line to keep video file & camera output window dimensions the same frame = cv2.resize(frame, (CAM_WIDTH, CAM_HEIGHT)) resize the frame to what network input layer expects it to be send the input as protobuf decode the received output as protobuf while exit_ok == False only mp4 files supported as of now thread management display buffers shared between camera threads Known issue: Depending on the USB enumeration, camera nodes need not be in sequence. Pls pass the device node info through a file or command line if it happens in your system For whatever reasons, cv2.imshow() doesnt work from threads. Hence we shove the infered data to the main thread to display. exit the program if 'q' is pressed on any window wait for all the threads to join close all open windows | 3,112 | en | 0.799207 |
import asyncio
import logging
import types
import typing
import enum
from dataclasses import dataclass
from ..types import ASGIApp, Message
from ..exceptions import LifespanUnsupported, LifespanFailure, UnexpectedMessage
class LifespanCycleState(enum.Enum):
    """Phases of an ASGI ``lifespan`` connection.

    CONNECTING
        Initial state; the application is about to be called with a
        connection scope of type ``lifespan``.
    STARTUP
        The ``lifespan.startup`` event has been queued for the application
        to receive.
    SHUTDOWN
        The ``lifespan.shutdown`` event has been queued for the application
        to receive.
    FAILED
        A lifespan failure was detected; the connection closes with an
        error.
    UNSUPPORTED
        The application sent a message before receiving the startup event.
        With the lifespan setting "on" this closes the connection with an
        error; otherwise lifespan support is assumed to be absent.
    """

    CONNECTING = enum.auto()
    STARTUP = enum.auto()
    SHUTDOWN = enum.auto()
    FAILED = enum.auto()
    UNSUPPORTED = enum.auto()
@dataclass
class LifespanCycle:
    """
    Manages the application cycle for an ASGI `lifespan` connection.

    * **app** - An asynchronous callable that conforms to version 3.0 of the ASGI
    specification. This will usually be an ASGI framework application instance.

    * **lifespan** - A string to configure lifespan support. Choices are `auto`, `on`,
    and `off`. Default is `auto`.

    * **state** - An enumerated `LifespanCycleState` type that indicates the state of
    the ASGI connection.

    * **exception** - An exception raised while handling the ASGI event.

    * **app_queue** - An asyncio queue (FIFO) containing messages to be received by the
    application.

    * **startup_event** - An asyncio event object used to control the application
    startup flow.

    * **shutdown_event** - An asyncio event object used to control the application
    shutdown flow.

    * **exception** - An exception raised while handling the ASGI event. This may or
    may not be raised depending on the state.
    """

    app: ASGIApp
    lifespan: str
    state: LifespanCycleState = LifespanCycleState.CONNECTING
    exception: typing.Optional[BaseException] = None

    def __post_init__(self) -> None:
        """Set up the logger, event loop, message queue and flow-control events."""
        self.logger = logging.getLogger("mangum.lifespan")
        self.loop = asyncio.get_event_loop()
        # FIFO of lifespan events the application consumes via receive().
        self.app_queue: asyncio.Queue = asyncio.Queue()
        self.startup_event: asyncio.Event = asyncio.Event()
        self.shutdown_event: asyncio.Event = asyncio.Event()

    def __enter__(self) -> None:
        """
        Runs the event loop for application startup.
        """
        # Schedule the application's lifespan coroutine first, then block
        # until it acknowledges startup (run() sets both events in its
        # `finally`, so this cannot deadlock even on app failure).
        self.loop.create_task(self.run())
        self.loop.run_until_complete(self.startup())

    def __exit__(
        self,
        exc_type: typing.Optional[typing.Type[BaseException]],
        exc_value: typing.Optional[BaseException],
        traceback: typing.Optional[types.TracebackType],
    ) -> None:
        """
        Runs the event loop for application shutdown.
        """
        self.loop.run_until_complete(self.shutdown())

    async def run(self) -> None:
        """
        Calls the application with the `lifespan` connection scope.
        """
        try:
            await self.app({"type": "lifespan"}, self.receive, self.send)
        except LifespanUnsupported:
            # Benign: the app simply does not implement the protocol.
            self.logger.info("ASGI 'lifespan' protocol appears unsupported.")
        except (LifespanFailure, UnexpectedMessage) as exc:
            # Stored so startup()/shutdown() can re-raise it in the caller's
            # context when the state is FAILED.
            self.exception = exc
        except BaseException as exc:
            self.logger.error("Exception in 'lifespan' protocol.", exc_info=exc)
        finally:
            # Always release both waiters so __enter__/__exit__ never hang.
            self.startup_event.set()
            self.shutdown_event.set()

    async def receive(self) -> Message:
        """
        Awaited by the application to receive ASGI `lifespan` events.
        """
        if self.state is LifespanCycleState.CONNECTING:

            # Connection established. The next event returned by the queue will be
            # `lifespan.startup` to inform the application that the connection is
            # ready to receive lfiespan messages.
            self.state = LifespanCycleState.STARTUP

        elif self.state is LifespanCycleState.STARTUP:

            # Connection shutting down. The next event returned by the queue will be
            # `lifespan.shutdown` to inform the application that the connection is now
            # closing so that it may perform cleanup.
            self.state = LifespanCycleState.SHUTDOWN

        return await self.app_queue.get()

    async def send(self, message: Message) -> None:
        """
        Awaited by the application to send ASGI `lifespan` events.
        """
        message_type = message["type"]
        self.logger.info(
            "%s: '%s' event received from application.", self.state, message_type
        )

        if self.state is LifespanCycleState.CONNECTING:
            if self.lifespan == "on":
                raise LifespanFailure(
                    "Lifespan connection failed during startup and lifespan is 'on'."
                )

            # If a message is sent before the startup event is received by the
            # application, then assume that lifespan is unsupported.
            self.state = LifespanCycleState.UNSUPPORTED
            raise LifespanUnsupported("Lifespan protocol appears unsupported.")

        if message_type not in (
            "lifespan.startup.complete",
            "lifespan.shutdown.complete",
            "lifespan.startup.failed",
            "lifespan.shutdown.failed",
        ):
            self.state = LifespanCycleState.FAILED
            raise UnexpectedMessage(f"Unexpected '{message_type}' event received.")

        if self.state is LifespanCycleState.STARTUP:
            if message_type == "lifespan.startup.complete":
                self.startup_event.set()
            elif message_type == "lifespan.startup.failed":
                self.state = LifespanCycleState.FAILED
                self.startup_event.set()
                # NOTE: rebinding 'message' shadows the event-dict parameter;
                # only the failure text is needed from here on.
                message = message.get("message", "")
                raise LifespanFailure(f"Lifespan startup failure. {message}")

        elif self.state is LifespanCycleState.SHUTDOWN:
            if message_type == "lifespan.shutdown.complete":
                self.shutdown_event.set()
            elif message_type == "lifespan.shutdown.failed":
                self.state = LifespanCycleState.FAILED
                self.shutdown_event.set()
                # Same parameter shadowing as in the startup branch above.
                message = message.get("message", "")
                raise LifespanFailure(f"Lifespan shutdown failure. {message}")

    async def startup(self) -> None:
        """
        Pushes the `lifespan` startup event to application queue and handles errors.
        """
        self.logger.info("Waiting for application startup.")
        await self.app_queue.put({"type": "lifespan.startup"})
        await self.startup_event.wait()
        if self.state is LifespanCycleState.FAILED:
            raise LifespanFailure(self.exception)

        if not self.exception:
            self.logger.info("Application startup complete.")
        else:
            self.logger.info("Application startup failed.")

    async def shutdown(self) -> None:
        """
        Pushes the `lifespan` shutdown event to application queue and handles errors.
        """
        self.logger.info("Waiting for application shutdown.")
        await self.app_queue.put({"type": "lifespan.shutdown"})
        await self.shutdown_event.wait()
        if self.state is LifespanCycleState.FAILED:
            raise LifespanFailure(self.exception)
| mangum/protocols/lifespan.py | 7,731 | Manages the application cycle for an ASGI `lifespan` connection.
* **app** - An asynchronous callable that conforms to version 3.0 of the ASGI
specification. This will usually be an ASGI framework application instance.
* **lifespan** - A string to configure lifespan support. Choices are `auto`, `on`,
and `off`. Default is `auto`.
* **state** - An enumerated `LifespanCycleState` type that indicates the state of
the ASGI connection.
* **exception** - An exception raised while handling the ASGI event.
* **app_queue** - An asyncio queue (FIFO) containing messages to be received by the
application.
* **startup_event** - An asyncio event object used to control the application
startup flow.
* **shutdown_event** - An asyncio event object used to control the application
shutdown flow.
* **exception** - An exception raised while handling the ASGI event. This may or
may not be raised depending on the state.
The state of the ASGI `lifespan` connection.
* **CONNECTING** - Initial state. The ASGI application instance will be run with
the connection scope containing the `lifespan` type.
* **STARTUP** - The lifespan startup event has been pushed to the queue to be
received by the application.
* **SHUTDOWN** - The lifespan shutdown event has been pushed to the queue to be
received by the application.
* **FAILED** - A lifespan failure has been detected, and the connection will be
closed with an error.
* **UNSUPPORTED** - An application attempted to send a message before receiving
the lifepan startup event. If the lifespan argument is "on", then the connection
will be closed with an error.
Runs the event loop for application startup.
Runs the event loop for application shutdown.
Connection established. The next event returned by the queue will be `lifespan.startup` to inform the application that the connection is ready to receive lfiespan messages. Connection shutting down. The next event returned by the queue will be `lifespan.shutdown` to inform the application that the connection is now closing so that it may perform cleanup. If a message is sent before the startup event is received by the application, then assume that lifespan is unsupported. | 2,169 | en | 0.896097 |
import json
import socket
def is_jsonable(obj):
    """Return True if *obj* can be serialized to JSON, False otherwise.

    ValueError covers circular references; OverflowError covers values the
    encoder cannot represent.
    """
    try:
        json.dumps(obj)
    except (TypeError, OverflowError, ValueError):
        return False
    return True
def sanitize_meta(meta):
    """Strip non-JSON-serializable values from *meta* in place.

    Any removed keys are reported back to the caller through a synthetic
    '__errors' entry.  Returns the same dict for convenience.
    """
    bad_keys = [key for key, value in meta.items() if not is_jsonable(value)]
    for key in bad_keys:
        del meta[key]
    if bad_keys:
        meta['__errors'] = 'These keys have been sanitized: ' + ', '.join(
            bad_keys)
    return meta
def get_ip():
    """Best-effort lookup of this host's outbound IPv4 address.

    Connecting a UDP socket never sends a packet; it only asks the kernel
    which local address would be used to reach the target.  Falls back to
    the loopback address when no route is available.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # doesn't even have to be reachable
        probe.connect(('10.255.255.255', 1))
        return probe.getsockname()[0]
    except Exception:
        return '127.0.0.1'
    finally:
        probe.close()
| logdna/utils.py | 840 | doesn't even have to be reachable | 33 | en | 0.99759 |
# Copyright 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova_powervm.virt.powervm.volume import fileio
class GPFSVolumeAdapter(fileio.FileIOVolumeAdapter):
    """Connects GPFS Cinder Volumes to PowerVM VMs."""

    def _get_path(self):
        # The Cinder connection info carries the backing file's location
        # under data/device_path.
        data = self.connection_info.get("data")
        return data['device_path']
| nova_powervm/virt/powervm/volume/gpfs.py | 878 | Connects GPFS Cinder Volumes to PowerVM VMs.
Copyright 2017 IBM Corp. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 642 | en | 0.865283 |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from jupyter_core.paths import jupyter_data_dir
import subprocess
import os
import errno
import stat
# Traitlets configuration object injected into scope by Jupyter at startup.
c = get_config()  # noqa: F821

# Listen on all interfaces inside the container; exposure is controlled by
# the container's port mapping.  Never auto-open a browser server-side.
c.NotebookApp.ip = "0.0.0.0"
c.NotebookApp.port = 8888
c.NotebookApp.open_browser = False

# Allow the notebook UI to be embedded in iframes served from
# colinjbrown.com (any port) and same-origin pages.  The same CSP header is
# applied in all three places it can take effect: spawned single-user
# servers, the standalone notebook server, and JupyterHub itself.
c.Spawner.args = ['--NotebookApp.tornado_settings={"headers":{"Content-Security-Policy": "frame-ancestors * \'self\' colinjbrown.com:*"}}']
c.NotebookApp.tornado_settings = { 'headers': { 'Content-Security-Policy': "frame-ancestors * \'self\' colinjbrown.com:*"} }
c.JupyterHub.tornado_settings = { 'headers': { 'Content-Security-Policy': "frame-ancestors * \'self\' colinjbrown.com:*"} }

# https://github.com/jupyter/notebook/issues/3130
c.FileContentsManager.delete_to_trash = False

# Generate a self-signed certificate
if "GEN_CERT" in os.environ:
    dir_name = jupyter_data_dir()
    pem_file = os.path.join(dir_name, "notebook.pem")
    # Create the data directory if it does not already exist.
    try:
        os.makedirs(dir_name)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(dir_name):
            pass
        else:
            raise
    # Generate an openssl.cnf file to set the distinguished name
    cnf_file = os.path.join(os.getenv("CONDA_DIR", "/usr/lib"), "ssl", "openssl.cnf")
    if not os.path.isfile(cnf_file):
        with open(cnf_file, "w") as fh:
            fh.write(
                """\
[req]
distinguished_name = req_distinguished_name
[req_distinguished_name]
"""
            )
    # Generate a certificate if one doesn't exist on disk
    subprocess.check_call(
        [
            "openssl",
            "req",
            "-new",
            "-newkey=rsa:2048",
            "-days=365",
            "-nodes",
            "-x509",
            "-subj=/C=XX/ST=XX/L=XX/O=generated/CN=generated",
            f"-keyout={pem_file}",
            f"-out={pem_file}",
        ]
    )
    # Restrict access to the file
    os.chmod(pem_file, stat.S_IRUSR | stat.S_IWUSR)
    c.NotebookApp.certfile = pem_file

# Change default umask for all subprocesses of the notebook server if set in
# the environment (NB_UMASK is an octal string, e.g. "022").
if "NB_UMASK" in os.environ:
    os.umask(int(os.environ["NB_UMASK"], 8))
| jupyter_notebook_config.py | 2,231 | Copyright (c) Jupyter Development Team. Distributed under the terms of the Modified BSD License. noqa: F821 https://github.com/jupyter/notebook/issues/3130 Generate a self-signed certificate Python >2.5 Generate an openssl.cnf file to set the distinguished name Generate a certificate if one doesn't exist on disk Restrict access to the file Change default umask for all subprocesses of the notebook server if set in the environment | 432 | en | 0.818961 |
#!/usr/bin/env python
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""stack symbolizes native crash dumps."""
import getopt
import glob
import logging
import os
import sys
import stack_core
import stack_libs
import subprocess
import symbol
import sys
# Make build/android (for pylib) importable, located four directories up
# from this script.
sys.path.insert(0, os.path.join(os.path.dirname(__file__),
                                os.pardir, os.pardir, os.pardir, os.pardir,
                                'build', 'android'))
from pylib import constants

# Make tools/python (for llvm_symbolizer) importable as well.
sys.path.insert(0, os.path.join(os.path.dirname(__file__),
                                os.pardir, os.pardir, os.pardir, os.pardir,
                                'tools', 'python'))
import llvm_symbolizer

# Default directory for symbols — presumably used when unzipping a symbols
# zip file; confirm against UnzipSymbols/the rest of this module.
DEFAULT_SYMROOT='/tmp/symbols'
# From: https://source.android.com/source/build-numbers.html
_ANDROID_M_MAJOR_VERSION=6
def PrintUsage():
  """Print the command-line usage message and exit with status 1."""
  # Single-argument parenthesized prints behave identically on Python 2
  # (parenthesized expression statement) and Python 3 (function call),
  # making this file forward-compatible without changing output.
  # pylint: disable-msg=C6310
  print("")
  print(" usage: " + sys.argv[0] + " [options] [FILE]")
  print("")
  print(" --symbols-dir=path")
  print(" the path to a symbols dir, such as =/tmp/out/target/product/dream/symbols")
  print("")
  print(" --chrome-symbols-dir=path")
  print(" the path to a Chrome symbols dir (can be absolute or relative")
  print(" to src), such as =out/Debug/lib.unstripped")
  print("")
  print(" --output-directory=path")
  print(" the path to the build output directory, such as out/Debug.")
  print(" Ignored if --chrome-symbols-dir is passed.")
  print("")
  print(" --packed-relocation-adjustments")
  print(" --no-packed-relocation-adjustments")
  print(" turn packed relocation adjustment on and off (default is off)")
  print(" If running on pre-M Android and the stack trace appears to")
  print(" make no sense, try turning this feature on.")
  print("")
  print(" --symbols-zip=path")
  print(" the path to a symbols zip file, such as =dream-symbols-12345.zip")
  print("")
  print(" --more-info")
  print(" --less-info")
  print(" Change the level of detail in the output.")
  print(" --more-info is slower and more verbose, but more functions will")
  print(" be fully qualified with namespace/classname and have full")
  print(" argument information. Also, the 'stack data' section will be")
  print(" printed.")
  print("")
  print(" --arch=arm|arm64|x64|x86|mips")
  print(" the target architecture")
  print("")
  print(" --fallback-monochrome")
  print(" fallback to monochrome instead of chrome if fail to detect")
  print(" shared lib which is loaded from APK, this doesn't work for")
  print(" component build.")
  print("")
  print(" --verbose")
  print(" enable extra logging, particularly for debugging failed symbolization")
  print("")
  print(" FILE should contain a stack trace in it somewhere")
  print(" the tool will find that and re-print it with")
  print(" source files and line numbers. If you don't")
  print(" pass FILE, or if file is -, it reads from")
  print(" stdin.")
  print("")
  # pylint: enable-msg=C6310
  sys.exit(1)
def UnzipSymbols(symbolfile, symdir=None):
  """Unzips a file to DEFAULT_SYMROOT and returns the unzipped location.

  Args:
    symbolfile: The .zip file to unzip
    symdir: Optional temporary directory to use for extraction

  Returns:
    A tuple containing (the directory into which the zip file was unzipped,
    the path to the "symbols" directory in the unzipped file). To clean
    up, the caller can delete the first element of the tuple.

  Raises:
    SymbolDownloadException: When the unzip fails.
  """
  # NOTE(review): SymbolDownloadException is not defined or imported in this
  # file; if the raise below ever fires it becomes a NameError -- confirm the
  # exception class is provided elsewhere.
  if not symdir:
    # NOTE(review): str hash values are randomized per-process on Python 3,
    # so this directory name is not stable across runs -- confirm acceptable.
    symdir = "%s/%s" % (DEFAULT_SYMROOT, hash(symbolfile))
  if not os.path.exists(symdir):
    os.makedirs(symdir)
  # Parenthesized print works identically on Python 2 and Python 3.
  print("extracting %s..." % symbolfile)
  saveddir = os.getcwd()
  os.chdir(symdir)
  try:
    unzipcode = subprocess.call(["unzip", "-qq", "-o", symbolfile])
    if unzipcode > 0:
      os.remove(symbolfile)
      raise SymbolDownloadException("failed to extract symbol files (%s)."
                                    % symbolfile)
  finally:
    os.chdir(saveddir)
  android_symbols = glob.glob("%s/out/target/product/*/symbols" % symdir)
  if android_symbols:
    return (symdir, android_symbols[0])
  else:
    # This is a zip of Chrome symbols, so symbol.CHROME_SYMBOLS_DIR needs to be
    # updated to point here.
    symbol.CHROME_SYMBOLS_DIR = symdir
    return (symdir, symdir)
def main(argv):
  """Parse options, read a crash dump, and symbolize the stack trace.

  Args:
    argv: Command-line arguments, excluding the program name.

  Exits with status 1 (via PrintUsage) on bad usage.
  """
  try:
    options, arguments = getopt.getopt(argv, "",
                                       ["packed-relocation-adjustments",
                                        "no-packed-relocation-adjustments",
                                        "more-info",
                                        "less-info",
                                        "chrome-symbols-dir=",
                                        "output-directory=",
                                        "symbols-dir=",
                                        "symbols-zip=",
                                        "packed-lib=",
                                        "arch=",
                                        "fallback-monochrome",
                                        "verbose",
                                        "help"])
  except getopt.GetoptError:
    # Python 3 compatible spelling of "except GetoptError, unused_error";
    # the bound error value was never used.
    PrintUsage()
  zip_arg = None
  more_info = False
  fallback_monochrome = False
  arch_defined = False
  packed_libs = []
  for option, value in options:
    if option == "--help":
      PrintUsage()
    elif option == "--symbols-dir":
      symbol.SYMBOLS_DIR = os.path.expanduser(value)
    elif option == "--symbols-zip":
      zip_arg = os.path.expanduser(value)
    elif option == "--arch":
      symbol.ARCH = value
      arch_defined = True
    elif option == "--chrome-symbols-dir":
      symbol.CHROME_SYMBOLS_DIR = os.path.join(constants.DIR_SOURCE_ROOT,
                                               value)
    elif option == "--output-directory":
      constants.SetOutputDirectory(value)
    elif option == "--packed-lib":
      packed_libs.append(os.path.expanduser(value))
    elif option == "--more-info":
      more_info = True
    elif option == "--less-info":
      more_info = False
    elif option == "--fallback-monochrome":
      fallback_monochrome = True
    elif option == "--verbose":
      logging.basicConfig(level=logging.DEBUG)
    elif option in (
        '--packed-relocation-adjustments',
        '--no-packed-relocation-adjustments'):
      # Parenthesized prints are valid and identical under Python 2 and 3.
      print('--[no-]packed-relocation-adjustments options are deprecated. '
            'Specify packed libs directory instead.')
  if len(arguments) > 1:
    PrintUsage()
  # Do an up-front test that the output directory is known.
  if not symbol.CHROME_SYMBOLS_DIR:
    constants.CheckOutputDirectory()
  if not arguments or arguments[0] == "-":
    print("Reading native crash info from stdin")
    f = sys.stdin
  else:
    print("Searching for native crashes in: " + os.path.realpath(arguments[0]))
    f = open(arguments[0], "r")
  lines = f.readlines()
  f.close()
  rootdir = None
  if zip_arg:
    rootdir, symbol.SYMBOLS_DIR = UnzipSymbols(zip_arg)
  version = stack_libs.GetTargetAndroidVersionNumber(lines)
  if version is None:
    print("Unknown Android release, "
          "consider passing --packed-lib.")
  elif version < _ANDROID_M_MAJOR_VERSION and not packed_libs:
    print("Pre-M Android release detected, "
          "but --packed-lib not specified. Stack symbolization may fail.")
  # Packed-relocation adjustment is only needed on pre-M (or unknown) releases.
  if (version is None or version < _ANDROID_M_MAJOR_VERSION) and packed_libs:
    load_vaddrs = stack_libs.GetLoadVaddrs(stripped_libs=packed_libs)
  else:
    load_vaddrs = {}
  print("Reading Android symbols from: "
        + os.path.normpath(symbol.SYMBOLS_DIR))
  chrome_search_path = symbol.GetLibrarySearchPaths()
  with llvm_symbolizer.LLVMSymbolizer() as symbolizer:
    print("Searching for Chrome symbols from within: "
          + ':'.join((os.path.normpath(d) for d in chrome_search_path)))
    stack_core.ConvertTrace(lines, load_vaddrs, more_info, fallback_monochrome,
                            arch_defined, symbolizer)
  if rootdir:
    # be a good citizen and clean up...os.rmdir and os.removedirs() don't work
    cmd = "rm -rf \"%s\"" % rootdir
    print("\ncleaning up (%s)" % cmd)
    os.system(cmd)
if __name__ == "__main__":
  # Propagate main()'s return as the process exit code (None -> 0).
  sys.exit(main(sys.argv[1:]))
# vi: ts=2 sw=2
| third_party/android_platform/development/scripts/stack.py | 8,899 | !/usr/bin/env python Copyright (C) 2013 The Android Open Source Project Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. From: https://source.android.com/source/build-numbers.html pylint: disable-msg=C6310 pylint: enable-msg=C6310 This is a zip of Chrome symbols, so symbol.CHROME_SYMBOLS_DIR needs to be updated to point here. Do an up-front test that the output directory is known. be a good citizen and clean up...os.rmdir and os.removedirs() don't work vi: ts=2 sw=2 | 944 | en | 0.782622 |
# Online DQM client configuration ("Info" subsystem) for live running at P5.
import FWCore.ParameterSet.Config as cms
process = cms.Process("DQM")
# message logger
process.MessageLogger = cms.Service("MessageLogger",
    destinations = cms.untracked.vstring('cout'),
    cout = cms.untracked.PSet(threshold = cms.untracked.string('WARNING'))
)
#----------------------------
#### Event Source
#----------------------------
# for live online DQM in P5
process.load("DQM.Integration.config.inputsource_cfi")
# for testing in lxplus
#process.load("DQM.Integration.config.fileinputsource_cfi")
# Global tag - Condition for P5 cluster
process.load("DQM.Integration.config.FrontierCondition_GT_cfi")
#----------------------------
#### DQM Environment
#----------------------------
process.load("DQM.Integration.config.environment_cfi")
process.dqmEnv.subSystemFolder = 'Info'
process.dqmSaver.tag = 'Info'
#-----------------------------
# Digitisation: produce the Scalers digis containing DCS bits
process.load("EventFilter.ScalersRawToDigi.ScalersRawToDigi_cfi")
# Digitisation: produce the TCDS digis containing BST record
from EventFilter.Utilities.tcdsRawToDigi_cfi import *
process.tcdsDigis = tcdsRawToDigi.clone()
# OnlineMetaDataRawToDigi will put DCSRecord to an event
process.load('EventFilter.OnlineMetaDataRawToDigi.onlineMetaDataRawToDigi_cfi')
# NOTE(review): the path below schedules process.onlineMetaDataRawToDigi
# (presumably defined by the cfi loaded above); the onlineMetaDataDigis
# module created here does not appear in any path -- confirm it is needed.
process.onlineMetaDataDigis = cms.EDProducer('OnlineMetaDataRawToDigi')
# DQMProvInfo is the DQM module to be run
process.load("DQMServices.Components.DQMProvInfo_cfi")
# DQM Modules
process.dqmmodules = cms.Sequence(process.dqmEnv + process.dqmSaver)
process.evfDQMmodulesPath = cms.Path(
    process.scalersRawToDigi*
    process.tcdsDigis*
    process.onlineMetaDataRawToDigi*
    process.dqmProvInfo*
    process.dqmmodules
)
process.schedule = cms.Schedule(process.evfDQMmodulesPath)
process.dqmProvInfo.runType = process.runType.getRunTypeName()
# Heavy Ion Specific Fed Raw Data Collection Label
# Heavy-ion runs repack the FED raw data under a different collection name.
if (process.runType.getRunType() == process.runType.hi_run):
    process.scalersRawToDigi.scalersInputTag = cms.InputTag("rawDataRepacker")
    process.tcdsDigis.InputLabel = cms.InputTag("rawDataRepacker")
else:
    process.scalersRawToDigi.scalersInputTag = cms.InputTag("rawDataCollector")
    process.tcdsDigis.InputLabel = cms.InputTag("rawDataCollector")
# Process customizations included here
from DQM.Integration.config.online_customizations_cfi import *
process = customise(process)
| DQM/Integration/python/clients/info_dqm_sourceclient-live_cfg.py | 2,685 | message logger---------------------------- Event Source---------------------------- for live online DQM in P5 for testing in lxplusprocess.load("DQM.Integration.config.fileinputsource_cfi") Global tag - Condition for P5 cluster---------------------------- DQM Environment--------------------------------------------------------- Digitisation: produce the Scalers digis containing DCS bits Digitisation: produce the TCDS digis containing BST record OnlineMetaDataRawToDigi will put DCSRecord to an event DQMProvInfo is the DQM module to be run DQM Modules Heavy Ion Specific Fed Raw Data Collection Label Process customizations included here | 640 | en | 0.438895 |
"""CategoricalMLPPolicy."""
import akro
import tensorflow as tf
from metarl.tf.distributions import Categorical
from metarl.tf.models import MLPModel
from metarl.tf.policies import StochasticPolicy
class CategoricalMLPPolicy(StochasticPolicy):
    """CategoricalMLPPolicy

    A policy that contains a MLP to make prediction based on
    a categorical distribution.

    It only works with akro.Discrete action space.

    Args:
        env_spec (metarl.envs.env_spec.EnvSpec): Environment specification.
        name (str): Policy name, also the variable scope.
        hidden_sizes (list[int]): Output dimension of dense layer(s).
            For example, (32, 32) means the MLP of this policy consists of two
            hidden layers, each with 32 hidden units.
        hidden_nonlinearity (callable): Activation function for intermediate
            dense layer(s). It should return a tf.Tensor. Set it to
            None to maintain a linear activation.
        hidden_w_init (callable): Initializer function for the weight
            of intermediate dense layer(s). The function should return a
            tf.Tensor.
        hidden_b_init (callable): Initializer function for the bias
            of intermediate dense layer(s). The function should return a
            tf.Tensor.
        output_nonlinearity (callable): Activation function for output dense
            layer. It should return a tf.Tensor. Set it to None to
            maintain a linear activation.
        output_w_init (callable): Initializer function for the weight
            of output dense layer(s). The function should return a
            tf.Tensor.
        output_b_init (callable): Initializer function for the bias
            of output dense layer(s). The function should return a
            tf.Tensor.
        layer_normalization (bool): Bool for using layer normalization or not.
    """

    def __init__(self,
                 env_spec,
                 name='CategoricalMLPPolicy',
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.tanh,
                 hidden_w_init=tf.glorot_uniform_initializer(),
                 hidden_b_init=tf.zeros_initializer(),
                 output_nonlinearity=tf.nn.softmax,
                 output_w_init=tf.glorot_uniform_initializer(),
                 output_b_init=tf.zeros_initializer(),
                 layer_normalization=False):
        # The categorical distribution requires a discrete action space.
        assert isinstance(env_spec.action_space, akro.Discrete), (
            'CategoricalMLPPolicy only works with akro.Discrete action '
            'space.')
        super().__init__(name, env_spec)
        # Flattened observation size and number of discrete actions.
        self.obs_dim = env_spec.observation_space.flat_dim
        self.action_dim = env_spec.action_space.n
        # Softmax output of the MLP gives the per-action probabilities.
        self.model = MLPModel(output_dim=self.action_dim,
                              hidden_sizes=hidden_sizes,
                              hidden_nonlinearity=hidden_nonlinearity,
                              hidden_w_init=hidden_w_init,
                              hidden_b_init=hidden_b_init,
                              output_nonlinearity=output_nonlinearity,
                              output_w_init=output_w_init,
                              output_b_init=output_b_init,
                              layer_normalization=layer_normalization,
                              name='MLPModel')
        self._initialize()

    def _initialize(self):
        """Build the model graph and the fast probability callable.

        NOTE: relies on a default TF session already existing
        (tf.compat.v1.get_default_session()); make_callable produces a
        low-overhead feed-based sampler used by get_action(s).
        """
        state_input = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, self.obs_dim))
        with tf.compat.v1.variable_scope(self.name) as vs:
            self._variable_scope = vs
            self.model.build(state_input)
        self._f_prob = tf.compat.v1.get_default_session().make_callable(
            self.model.networks['default'].outputs,
            feed_list=[self.model.networks['default'].input])

    @property
    def vectorized(self):
        """Vectorized or not."""
        return True

    def dist_info_sym(self, obs_var, state_info_vars=None, name=None):
        """Symbolic graph of the distribution.

        Rebuilds the model on obs_var inside the policy's variable scope,
        sharing the already-created weights.
        """
        with tf.compat.v1.variable_scope(self._variable_scope):
            prob = self.model.build(obs_var, name=name)
        return dict(prob=prob)

    def dist_info(self, obs, state_infos=None):
        """Distribution info: action probabilities for a batch of obs."""
        prob = self._f_prob(obs)
        return dict(prob=prob)

    def get_action(self, observation):
        """Return a single action sampled from the predicted probabilities."""
        flat_obs = self.observation_space.flatten(observation)
        prob = self._f_prob([flat_obs])[0]
        action = self.action_space.weighted_sample(prob)
        return action, dict(prob=prob)

    def get_actions(self, observations):
        """Return multiple actions, one sampled per observation."""
        flat_obs = self.observation_space.flatten_n(observations)
        probs = self._f_prob(flat_obs)
        actions = list(map(self.action_space.weighted_sample, probs))
        return actions, dict(prob=probs)

    def get_regularizable_vars(self):
        """Get regularizable weight variables under the Policy scope.

        Only hidden-layer kernels are returned (biases and the output
        layer are excluded).
        """
        trainable = self.get_trainable_vars()
        return [
            var for var in trainable
            if 'hidden' in var.name and 'kernel' in var.name
        ]

    @property
    def distribution(self):
        """Policy distribution."""
        return Categorical(self.action_dim)

    def __getstate__(self):
        """Object.__getstate__.

        The session-bound callable cannot be pickled; it is rebuilt by
        __setstate__ via _initialize().
        """
        new_dict = super().__getstate__()
        del new_dict['_f_prob']
        return new_dict

    def __setstate__(self, state):
        """Object.__setstate__."""
        super().__setstate__(state)
        self._initialize()
| src/metarl/tf/policies/categorical_mlp_policy.py | 5,640 | CategoricalMLPPolicy
A policy that contains a MLP to make prediction based on
a categorical distribution.
It only works with akro.Discrete action space.
Args:
env_spec (metarl.envs.env_spec.EnvSpec): Environment specification.
name (str): Policy name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means the MLP of this policy consists of two
hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
Object.__getstate__.
Object.__setstate__.
Distribution info.
Symbolic graph of the distribution.
Policy distribution.
Return a single action.
Return multiple actions.
Get regularizable weight variables under the Policy scope.
Vectorized or not.
CategoricalMLPPolicy. | 1,777 | en | 0.638816 |
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as lib_constants
from oslo_config import cfg
from oslo_log import log
from oslo_utils import uuidutils
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3 import dvr_edge_ha_router as dvr_edge_ha_rtr
from neutron.agent.l3 import dvr_edge_router as dvr_edge_rtr
from neutron.agent.l3 import dvr_local_router as dvr_router
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import router_info
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.common import utils as common_utils
from neutron.conf.agent import common as agent_config
from neutron.conf.agent.l3 import config as l3_config
from neutron.conf.agent.l3 import ha as ha_conf
from neutron.conf import common as base_config
from neutron.tests import base
from neutron.tests.common import l3_test_common
_uuid = uuidutils.generate_uuid
FIP_PRI = 32768
HOSTNAME = 'myhost'
class TestDvrRouterOperations(base.BaseTestCase):
    def setUp(self):
        """Build an isolated config and patch out every external dependency.

        Everything that would touch the host (ip commands, external
        processes, file writes, eventlet threads, RPC) is mocked so DVR
        router logic runs entirely in-process.
        """
        super(TestDvrRouterOperations, self).setUp()
        # Prevent the agent from spawning real greenthreads.
        mock.patch('eventlet.spawn').start()
        # Register every option group the L3 agent expects on a fresh conf.
        self.conf = agent_config.setup_conf()
        self.conf.register_opts(base_config.core_opts)
        log.register_options(self.conf)
        self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT')
        l3_config.register_l3_agent_config_opts(l3_config.OPTS, self.conf)
        ha_conf.register_l3_agent_ha_opts(self.conf)
        agent_config.register_interface_driver_opts_helper(self.conf)
        agent_config.register_process_monitor_opts(self.conf)
        agent_config.register_interface_opts(self.conf)
        agent_config.register_external_process_opts(self.conf)
        self.conf.set_override('interface_driver',
                               'neutron.agent.linux.interface.NullDriver')
        self.conf.set_override('state_path', cfg.CONF.state_path)
        # Filesystem / process helpers are patched to no-ops.
        self.device_exists_p = mock.patch(
            'neutron.agent.linux.ip_lib.device_exists')
        self.device_exists = self.device_exists_p.start()
        self.ensure_dir = mock.patch(
            'oslo_utils.fileutils.ensure_tree').start()
        mock.patch('neutron.agent.linux.keepalived.KeepalivedManager'
                   '.get_full_config_file_path').start()
        self.utils_exec_p = mock.patch(
            'neutron.agent.linux.utils.execute')
        self.utils_exec = self.utils_exec_p.start()
        self.utils_replace_file_p = mock.patch(
            'neutron_lib.utils.file.replace_file')
        self.utils_replace_file = self.utils_replace_file_p.start()
        self.external_process_p = mock.patch(
            'neutron.agent.linux.external_process.ProcessManager')
        self.external_process = self.external_process_p.start()
        self.process_monitor = mock.patch(
            'neutron.agent.linux.external_process.ProcessMonitor').start()
        self.send_adv_notif_p = mock.patch(
            'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif')
        self.send_adv_notif = self.send_adv_notif_p.start()
        # Interface driver and ip_lib wrappers return shared MagicMocks so
        # individual tests can assert on their calls.
        self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
        driver_cls = self.dvr_cls_p.start()
        self.mock_driver = mock.MagicMock()
        self.mock_driver.DEV_NAME_LEN = (
            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
        driver_cls.return_value = self.mock_driver
        self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
        ip_cls = self.ip_cls_p.start()
        self.mock_ip = mock.MagicMock()
        ip_cls.return_value = self.mock_ip
        self.mock_delete_ip_rule = mock.patch.object(ip_lib,
                                                     'delete_ip_rule').start()
        ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start()
        self.mock_ip_dev = mock.MagicMock()
        ip_dev.return_value = self.mock_ip_dev
        # The RPC plugin API is fully mocked; no server is contacted.
        self.l3pluginApi_cls_p = mock.patch(
            'neutron.agent.l3.agent.L3PluginApi')
        l3pluginApi_cls = self.l3pluginApi_cls_p.start()
        self.plugin_api = mock.MagicMock()
        l3pluginApi_cls.return_value = self.plugin_api
        self.looping_call_p = mock.patch(
            'oslo_service.loopingcall.FixedIntervalLoopingCall')
        self.looping_call_p.start()
        # Two pre-built SNAT ports (one per subnet) reused by the tests.
        subnet_id_1 = _uuid()
        subnet_id_2 = _uuid()
        self.snat_ports = [{'subnets': [{'cidr': '152.2.0.0/16',
                                         'gateway_ip': '152.2.0.1',
                                         'id': subnet_id_1}],
                            'network_id': _uuid(),
                            'device_owner':
                            lib_constants.DEVICE_OWNER_ROUTER_SNAT,
                            'mac_address': 'fa:16:3e:80:8d:80',
                            'fixed_ips': [{'subnet_id': subnet_id_1,
                                           'ip_address': '152.2.0.13',
                                           'prefixlen': 16}],
                            'id': _uuid(), 'device_id': _uuid()},
                           {'subnets': [{'cidr': '152.10.0.0/16',
                                         'gateway_ip': '152.10.0.1',
                                         'id': subnet_id_2}],
                            'network_id': _uuid(),
                            'device_owner':
                            lib_constants.DEVICE_OWNER_ROUTER_SNAT,
                            'mac_address': 'fa:16:3e:80:8d:80',
                            'fixed_ips': [{'subnet_id': subnet_id_2,
                                           'ip_address': '152.10.0.13',
                                           'prefixlen': 16}],
                            'id': _uuid(), 'device_id': _uuid()}]
        # Base kwargs for building RouterInfo objects in the tests.
        self.ri_kwargs = {'agent_conf': self.conf,
                          'interface_driver': self.mock_driver}
def _create_router(self, router=None, **kwargs):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.router_id = _uuid()
if not router:
router = mock.MagicMock()
kwargs['agent'] = agent
kwargs['router_id'] = self.router_id
kwargs['router'] = router
kwargs['agent_conf'] = self.conf
kwargs['interface_driver'] = mock.Mock()
return dvr_router.DvrLocalRouter(HOSTNAME, **kwargs)
def _set_ri_kwargs(self, agent, router_id, router):
self.ri_kwargs['agent'] = agent
self.ri_kwargs['router_id'] = router_id
self.ri_kwargs['router'] = router
def test_gw_ns_name(self):
ri = self._create_router()
self.assertEqual(ri.ns_name, ri.get_gw_ns_name())
def test_create_dvr_fip_interfaces_update(self):
ri = self._create_router()
fip_agent_port = {'subnets': []}
ri.get_floating_agent_gw_interface = mock.Mock(
return_value=fip_agent_port)
ri.get_floating_ips = mock.Mock(return_value=True)
ri.fip_ns = mock.Mock()
ri.fip_ns.subscribe.return_value = False
ri.rtr_fip_connect = True
ex_gw_port = {'network_id': 'fake_net_id'}
ri.create_dvr_external_gateway_on_agent(ex_gw_port)
ri.fip_ns.create_or_update_gateway_port.assert_called_once_with(
fip_agent_port)
    def test_create_dvr_fip_interfaces_with_matching_address_scope(self):
        # Matching address scopes: routing rules must be installed.
        self._setup_create_dvr_fip_interfaces_for_setting_routing_rules(
            address_scopes_match=True)
    def test_create_dvr_fip_interfaces_with_address_scope_mismatch(self):
        # Mismatched scopes (default): only the rtr-2-fip link is created.
        self._setup_create_dvr_fip_interfaces_for_setting_routing_rules()
    def _setup_create_dvr_fip_interfaces_for_setting_routing_rules(
            self, address_scopes_match=False):
        """Shared body for the address-scope match/mismatch tests.

        Connects the router to the FIP namespace and checks which routing
        hooks fire depending on address_scopes_match.
        """
        ri = self._create_router()
        ri.get_floating_agent_gw_interface = mock.Mock()
        ri.fip_ns = mock.Mock()
        ri._add_interface_routing_rule_to_router_ns = mock.Mock()
        ri._add_interface_route_to_fip_ns = mock.Mock()
        ri.fip_ns._create_rtr_2_fip_link = mock.Mock()
        ri.internal_ports = ['moke_port_1', 'moke_port_2']
        # Stub the scope check to force the branch under test.
        if address_scopes_match:
            ri._check_if_address_scopes_match = mock.Mock(
                return_value=True)
        else:
            ri._check_if_address_scopes_match = mock.Mock(
                return_value=False)
        ri.rtr_fip_connect = False
        ex_gw_port = {'network_id': 'fake_net_id'}
        ri.create_dvr_external_gateway_on_agent(ex_gw_port)
        ri._check_rtr_2_fip_connect = mock.Mock()
        ri.connect_rtr_2_fip()
        self.assertTrue(ri._check_if_address_scopes_match.called)
        if address_scopes_match:
            # Matching scopes: link created and per-interface routes/rules
            # installed in both namespaces.
            self.assertTrue(
                ri.fip_ns.create_rtr_2_fip_link.called)
            self.assertTrue(
                ri._add_interface_routing_rule_to_router_ns.called)
            self.assertTrue(
                ri._add_interface_route_to_fip_ns.called)
        else:
            # Mismatched scopes: only the rtr-2-fip link is created.
            self.assertFalse(
                ri._add_interface_routing_rule_to_router_ns.called)
            self.assertFalse(
                ri._add_interface_route_to_fip_ns.called)
            self.assertTrue(
                ri.fip_ns.create_rtr_2_fip_link.called)
def test_get_floating_ips_dvr(self):
router = mock.MagicMock()
router.get.return_value = [{'host': HOSTNAME},
{'host': mock.sentinel.otherhost}]
ri = self._create_router(router)
fips = ri.get_floating_ips()
self.assertEqual(
[{'host': HOSTNAME}, {'host': mock.sentinel.otherhost}], fips)
def test_floating_forward_rules_no_fip_ns(self):
router = mock.MagicMock()
router.get.return_value = [{'host': HOSTNAME},
{'host': mock.sentinel.otherhost}]
fip = {'id': _uuid()}
ri = self._create_router(router)
self.assertFalse(ri.floating_forward_rules(fip))
    def test_floating_forward_rules(self):
        """DNAT/SNAT iptables rules are built for a floating IP pair."""
        router = mock.MagicMock()
        router.get.return_value = [{'host': HOSTNAME},
                                   {'host': mock.sentinel.otherhost}]
        ri = self._create_router(router)
        floating_ip = '15.1.2.3'
        rtr_2_fip_name = 'fake_router'
        fixed_ip = '192.168.0.1'
        fip = {'id': _uuid(),
               'fixed_ip_address': '192.168.0.1',
               'floating_ip_address': '15.1.2.3'}
        instance = mock.Mock()
        instance.get_rtr_ext_device_name = mock.Mock(
            return_value=rtr_2_fip_name)
        ri.fip_ns = instance
        # Inbound: DNAT traffic for the floating IP to the fixed IP.
        dnat_from_floatingip_to_fixedip = (
            'PREROUTING', '-d %s/32 -i %s -j DNAT --to-destination %s' % (
                floating_ip, rtr_2_fip_name, fixed_ip))
        # Outbound: SNAT the fixed IP to the floating IP (optionally with
        # --random-fully when the iptables manager supports it).
        to_source = '-s %s/32 -j SNAT --to-source %s' % (fixed_ip, floating_ip)
        if ri.iptables_manager.random_fully:
            to_source += ' --random-fully'
        snat_from_fixedip_to_floatingip = ('float-snat', to_source)
        actual = ri.floating_forward_rules(fip)
        expected = [dnat_from_floatingip_to_fixedip,
                    snat_from_fixedip_to_floatingip]
        self.assertEqual(expected, actual)
def test_floating_mangle_rules_no_fip_ns(self):
router = mock.MagicMock()
router.get.return_value = [{'host': HOSTNAME},
{'host': mock.sentinel.otherhost}]
ri = self._create_router(router)
floating_ip = mock.Mock()
fixed_ip = mock.Mock()
internal_mark = mock.Mock()
self.assertFalse(ri.floating_mangle_rules(floating_ip, fixed_ip,
internal_mark))
    def test_floating_mangle_rules(self):
        """Mangle rules mark FIP traffic and route fixed-IP source traffic."""
        router = mock.MagicMock()
        router.get.return_value = [{'host': HOSTNAME},
                                   {'host': mock.sentinel.otherhost}]
        ri = self._create_router(router)
        floating_ip = '15.1.2.3'
        fixed_ip = '192.168.0.1'
        internal_mark = 'fake_mark'
        rtr_2_fip_name = 'fake_router'
        instance = mock.Mock()
        instance.get_rtr_ext_device_name = mock.Mock(
            return_value=rtr_2_fip_name)
        ri.fip_ns = instance
        # Inbound FIP traffic gets the internal mark for policy routing.
        mark_traffic_to_floating_ip = (
            'floatingip', '-d %s/32 -i %s -j MARK --set-xmark %s' % (
                floating_ip, rtr_2_fip_name, internal_mark))
        # Traffic sourced from the fixed IP is diverted to the float-snat
        # chain.
        mark_traffic_from_fixed_ip = (
            'FORWARD', '-s %s/32 -j $float-snat' % fixed_ip)
        actual = ri.floating_mangle_rules(floating_ip, fixed_ip, internal_mark)
        expected = [mark_traffic_to_floating_ip, mark_traffic_from_fixed_ip]
        self.assertEqual(expected, actual)
    @mock.patch.object(ip_lib, 'send_ip_addr_adv_notif')
    @mock.patch.object(ip_lib, 'IPDevice')
    @mock.patch.object(ip_lib, 'add_ip_rule')
    def test_floating_ip_added_dist(self, mock_add_ip_rule, mIPDevice,
                                    mock_adv_notif):
        """Adding a FIP installs an ip rule; a rtr-fip subnet is allocated
        only when one is not already cached on the router."""
        router = mock.MagicMock()
        ri = self._create_router(router)
        ri.ex_gw_port = ri.router['gw_port']
        ext_net_id = _uuid()
        subnet_id = _uuid()
        agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
                                        'prefixlen': 24,
                                        'subnet_id': subnet_id}],
                         'subnets': [{'id': subnet_id,
                                      'cidr': '20.0.0.0/24',
                                      'gateway_ip': '20.0.0.1'}],
                         'id': _uuid(),
                         'network_id': ext_net_id,
                         'mac_address': 'ca:fe:de:ad:be:ef'}
        fip = {'id': _uuid(),
               'host': HOSTNAME,
               'floating_ip_address': '15.1.2.3',
               'fixed_ip_address': '192.168.0.1',
               'floating_network_id': ext_net_id,
               'port_id': _uuid()}
        ri.fip_ns = mock.Mock()
        ri.fip_ns.agent_gateway_port = agent_gw_port
        ri.create_dvr_external_gateway_on_agent(ri.ex_gw_port)
        ri._check_rtr_2_fip_connect = mock.Mock()
        ri.connect_rtr_2_fip()
        self.assertTrue(ri.rtr_fip_connect)
        ri.fip_ns.allocate_rule_priority.return_value = FIP_PRI
        subnet = lla.LinkLocalAddressPair('169.254.30.42/31')
        ri.rtr_fip_subnet = subnet
        ri.fip_ns.local_subnets = mock.Mock()
        ri.fip_ns.local_subnets.allocate.return_value = subnet
        ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
        # First add: cached rtr_fip_subnet is reused; no allocation.
        ri.floating_ip_added_dist(fip, ip_cidr)
        mock_add_ip_rule.assert_called_with(
            namespace=ri.router_namespace.name, ip='192.168.0.1',
            table=16, priority=FIP_PRI)
        ri.fip_ns.local_subnets.allocate.assert_not_called()
        # Validate that fip_ns.local_subnets is called when
        # ri.rtr_fip_subnet is None
        ri.rtr_fip_subnet = None
        ri.floating_ip_added_dist(fip, ip_cidr)
        mock_add_ip_rule.assert_called_with(
            namespace=ri.router_namespace.name, ip='192.168.0.1',
            table=16, priority=FIP_PRI)
        ri.fip_ns.local_subnets.allocate.assert_called_once_with(ri.router_id)
        # TODO(mrsmith): add more asserts
    @mock.patch.object(ip_lib, 'IPWrapper')
    @mock.patch.object(ip_lib, 'IPDevice')
    def test_floating_ip_removed_dist(self, mIPDevice, mIPWrapper):
        """Removing a FIP deletes its ip rule and its route in the FIP ns,
        without releasing the cached rtr-fip link-local subnet."""
        router = mock.MagicMock()
        ri = self._create_router(router)
        ri.ex_gw_port = ri.router['gw_port']
        subnet_id = _uuid()
        fixed_ip = '20.0.0.30'
        agent_gw_port = {'fixed_ips': [{'ip_address': fixed_ip,
                                        'prefixlen': 24,
                                        'subnet_id': subnet_id}],
                         'subnets': [{'id': subnet_id,
                                      'cidr': '20.0.0.0/24',
                                      'gateway_ip': '20.0.0.1'}],
                         'id': _uuid(),
                         'network_id': _uuid(),
                         'mac_address': 'ca:fe:de:ad:be:ef'}
        fip_cidr = '11.22.33.44/24'
        ri.fip_ns = mock.Mock()
        ri.fip_ns.get_name.return_value = 'fip_ns_name'
        # Pretend the FIP was previously installed with this rule priority.
        ri.floating_ips_dict['11.22.33.44'] = (fixed_ip, FIP_PRI)
        ri.fip_2_rtr = '11.22.33.42'
        ri.rtr_2_fip = '11.22.33.40'
        ri.fip_ns.agent_gateway_port = agent_gw_port
        s = lla.LinkLocalAddressPair('169.254.30.42/31')
        ri.rtr_fip_subnet = s
        ri.fip_ns.local_subnets = mock.Mock()
        ri.floating_ip_removed_dist(fip_cidr)
        self.mock_delete_ip_rule.assert_called_with(
            ri.router_namespace.name, ip=fixed_ip, table=16, priority=FIP_PRI)
        mIPDevice().route.delete_route.assert_called_with(fip_cidr,
                                                          via=str(s.ip))
        ri.fip_ns.local_subnets.allocate.assert_not_called()
    @mock.patch.object(ip_lib, 'add_ip_rule')
    def test_floating_ip_moved_dist(self, mock_add_ip_rule):
        """Moving a FIP re-points its ip rule: the old rule and priority are
        released and a fresh priority is allocated and installed."""
        router = mock.MagicMock()
        ri = self._create_router(router)
        floating_ip_address = '15.1.2.3'
        fixed_ip = '192.168.0.1'
        fip = {'floating_ip_address': floating_ip_address,
               'fixed_ip_address': fixed_ip}
        ri.floating_ips_dict['15.1.2.3'] = (fixed_ip, FIP_PRI)
        ri.fip_ns = mock.Mock()
        ri.fip_ns.allocate_rule_priority.return_value = FIP_PRI
        ri.floating_ip_moved_dist(fip)
        # Old rule removed, its priority released...
        self.mock_delete_ip_rule.assert_called_once_with(
            ri.router_namespace.name, ip=fixed_ip, table=16, priority=FIP_PRI)
        ri.fip_ns.deallocate_rule_priority.assert_called_once_with(
            floating_ip_address)
        # ...then a new priority allocated and the rule re-added.
        ri.fip_ns.allocate_rule_priority.assert_called_once_with(
            floating_ip_address)
        mock_add_ip_rule.assert_called_with(
            namespace=ri.router_namespace.name, ip=fixed_ip,
            table=16, priority=FIP_PRI)
def _test_add_floating_ip(self, ri, fip, is_failure=False):
if not is_failure:
ri.floating_ip_added_dist = mock.Mock(
return_value=lib_constants.FLOATINGIP_STATUS_ACTIVE)
else:
ri.floating_ip_added_dist = mock.Mock(
return_value=lib_constants.FLOATINGIP_STATUS_ERROR)
result = ri.add_floating_ip(fip,
mock.sentinel.interface_name,
mock.sentinel.device)
ri.floating_ip_added_dist.assert_called_once_with(
fip, mock.ANY)
return result
def test_add_floating_ip(self):
ri = self._create_router(mock.MagicMock())
ip = '15.1.2.3'
fip = {'floating_ip_address': ip}
result = self._test_add_floating_ip(ri, fip)
ri.floating_ip_added_dist.assert_called_once_with(fip, ip + '/32')
self.assertEqual(lib_constants.FLOATINGIP_STATUS_ACTIVE, result)
def test_add_floating_ip_failure(self):
ri = self._create_router(mock.MagicMock())
ip = '15.1.2.3'
fip = {'floating_ip_address': ip}
result = self._test_add_floating_ip(ri, fip, True)
ri.floating_ip_added_dist.assert_called_once_with(fip, ip + '/32')
self.assertEqual(lib_constants.FLOATINGIP_STATUS_ERROR, result)
@mock.patch.object(router_info.RouterInfo, 'remove_floating_ip')
def test_remove_floating_ip(self, super_remove_floating_ip):
ri = self._create_router(mock.MagicMock())
ri.floating_ip_removed_dist = mock.Mock()
ri.remove_floating_ip(mock.sentinel.device, mock.sentinel.ip_cidr)
self.assertFalse(super_remove_floating_ip.called)
ri.floating_ip_removed_dist.assert_called_once_with(
mock.sentinel.ip_cidr)
def test__get_internal_port(self):
ri = self._create_router()
port = {'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]}
router_ports = [port]
ri.router.get.return_value = router_ports
self.assertEqual(port, ri._get_internal_port(mock.sentinel.subnet_id))
def test__get_internal_port_not_found(self):
ri = self._create_router()
port = {'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]}
router_ports = [port]
ri.router.get.return_value = router_ports
self.assertIsNone(ri._get_internal_port(mock.sentinel.subnet_id2))
def test__get_snat_idx_ipv4(self):
ip_cidr = '101.12.13.00/24'
ri = self._create_router(mock.MagicMock())
snat_idx = ri._get_snat_idx(ip_cidr)
# 0x650C0D00 is numerical value of 101.12.13.00
self.assertEqual(0x650C0D00, snat_idx)
def test__get_snat_idx_ipv6(self):
ip_cidr = '2620:0:a03:e100::/64'
ri = self._create_router(mock.MagicMock())
snat_idx = ri._get_snat_idx(ip_cidr)
# 0x3D345705 is 30 bit xor folded crc32 of the ip_cidr
self.assertEqual(0x3D345705, snat_idx)
def test__get_snat_idx_ipv6_below_32768(self):
ip_cidr = 'd488::/30'
# crc32 of this ip_cidr is 0x1BD7
ri = self._create_router(mock.MagicMock())
snat_idx = ri._get_snat_idx(ip_cidr)
# 0x1BD7 + 0x3FFFFFFF = 0x40001BD6
self.assertEqual(0x40001BD6, snat_idx)
def test__set_subnet_arp_info(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs)
ports = ri.router.get(lib_constants.INTERFACE_KEY, [])
subnet_id = l3_test_common.get_subnet_id(ports[0])
test_ports = [{'mac_address': '00:11:22:33:44:55',
'device_owner': lib_constants.DEVICE_OWNER_DHCP,
'fixed_ips': [{'ip_address': '1.2.3.4',
'prefixlen': 24,
'subnet_id': subnet_id}]},
{'mac_address': '11:22:33:44:55:66',
'device_owner': lib_constants.DEVICE_OWNER_LOADBALANCER,
'fixed_ips': [{'ip_address': '1.2.3.5',
'prefixlen': 24,
'subnet_id': subnet_id}]},
{'mac_address': '22:33:44:55:66:77',
'device_owner':
lib_constants.DEVICE_OWNER_LOADBALANCERV2,
'fixed_ips': [{'ip_address': '1.2.3.6',
'prefixlen': 24,
'subnet_id': subnet_id}]}]
self.plugin_api.get_ports_by_subnet.return_value = test_ports
# Test basic case
ports[0]['subnets'] = [{'id': subnet_id,
'cidr': '1.2.3.0/24'}]
with mock.patch.object(ri,
'_process_arp_cache_for_internal_port') as parp:
ri._set_subnet_arp_info(subnet_id)
self.assertEqual(1, parp.call_count)
self.mock_ip_dev.neigh.add.assert_called_once_with(
'1.2.3.4', '00:11:22:33:44:55')
# Test negative case
router['distributed'] = False
ri._set_subnet_arp_info(subnet_id)
self.mock_ip_dev.neigh.add.never_called()
def test_add_arp_entry(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
subnet_id = l3_test_common.get_subnet_id(
router[lib_constants.INTERFACE_KEY][0])
arp_table = {'ip_address': '1.7.23.11',
'mac_address': '00:11:22:33:44:55',
'subnet_id': subnet_id}
payload = {'arp_table': arp_table, 'router_id': router['id']}
agent._router_added(router['id'], router)
agent.add_arp_entry(None, payload)
agent.router_deleted(None, router['id'])
self.mock_ip_dev.neigh.add.assert_called_once_with(
'1.7.23.11', '00:11:22:33:44:55')
def test_add_arp_entry_no_routerinfo(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
subnet_id = l3_test_common.get_subnet_id(
router[lib_constants.INTERFACE_KEY][0])
arp_table = {'ip_address': '1.7.23.11',
'mac_address': '00:11:22:33:44:55',
'subnet_id': subnet_id}
payload = {'arp_table': arp_table, 'router_id': router['id']}
agent.add_arp_entry(None, payload)
def test__update_arp_entry_with_no_subnet(self):
self._set_ri_kwargs(mock.sentinel.agent,
'foo_router_id',
{'distributed': True, 'gw_port_host': HOSTNAME})
ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs)
ri.get_internal_device_name = mock.Mock()
ri._update_arp_entry(mock.ANY, mock.ANY, 'foo_subnet_id', 'add')
self.assertFalse(ri.get_internal_device_name.call_count)
def _setup_test_for_arp_entry_cache(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs)
subnet_id = l3_test_common.get_subnet_id(
ri.router[lib_constants.INTERFACE_KEY][0])
return ri, subnet_id
def test__update_arp_entry_calls_arp_cache_with_no_device(self):
ri, subnet_id = self._setup_test_for_arp_entry_cache()
state = True
with mock.patch('neutron.agent.linux.ip_lib.IPDevice') as rtrdev,\
mock.patch.object(ri, '_cache_arp_entry') as arp_cache:
rtrdev.return_value.exists.return_value = False
state = ri._update_arp_entry(
mock.ANY, mock.ANY, subnet_id, 'add')
self.assertFalse(state)
self.assertTrue(arp_cache.called)
arp_cache.assert_called_once_with(mock.ANY, mock.ANY,
subnet_id, 'add')
self.assertFalse(rtrdev.neigh.add.called)
def test__process_arp_cache_for_internal_port(self):
ri, subnet_id = self._setup_test_for_arp_entry_cache()
ri._cache_arp_entry('1.7.23.11', '00:11:22:33:44:55',
subnet_id, 'add')
self.assertEqual(1, len(ri._pending_arp_set))
with mock.patch.object(ri, '_update_arp_entry') as update_arp:
update_arp.return_value = True
ri._process_arp_cache_for_internal_port(subnet_id)
self.assertEqual(0, len(ri._pending_arp_set))
def test__delete_arp_cache_for_internal_port(self):
ri, subnet_id = self._setup_test_for_arp_entry_cache()
ri._cache_arp_entry('1.7.23.11', '00:11:22:33:44:55',
subnet_id, 'add')
self.assertEqual(1, len(ri._pending_arp_set))
ri._delete_arp_cache_for_internal_port(subnet_id)
self.assertEqual(0, len(ri._pending_arp_set))
def test_del_arp_entry(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
subnet_id = l3_test_common.get_subnet_id(
router[lib_constants.INTERFACE_KEY][0])
arp_table = {'ip_address': '1.5.25.15',
'mac_address': '00:44:33:22:11:55',
'subnet_id': subnet_id}
payload = {'arp_table': arp_table, 'router_id': router['id']}
agent._router_added(router['id'], router)
# first add the entry
agent.add_arp_entry(None, payload)
# now delete it
agent.del_arp_entry(None, payload)
self.mock_ip_dev.neigh.delete.assert_called_once_with(
'1.5.25.15', '00:44:33:22:11:55')
agent.router_deleted(None, router['id'])
def test_get_floating_agent_gw_interfaces(self):
fake_network_id = _uuid()
subnet_id = _uuid()
agent_gateway_port = (
[{'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
portbindings.HOST_ID: 'myhost',
'device_owner': lib_constants.DEVICE_OWNER_AGENT_GW,
'network_id': fake_network_id,
'mac_address': 'ca:fe:de:ad:be:ef'}]
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[lib_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs)
self.assertEqual(
agent_gateway_port[0],
ri.get_floating_agent_gw_interface(fake_network_id))
def test_process_router_dist_floating_ip_add(self):
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'host': HOSTNAME,
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': mock.sentinel.ext_net_id,
'port_id': _uuid()},
{'id': _uuid(),
'host': 'some-other-host',
'floating_ip_address': '15.1.2.4',
'fixed_ip_address': '192.168.0.10',
'floating_network_id': mock.sentinel.ext_net_id,
'port_id': _uuid()}]}
router = l3_test_common.prepare_router_data(enable_snat=True)
router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs)
ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
fip_ns = agent.get_fip_ns(mock.sentinel.ext_net_id)
subnet_id = _uuid()
fip_ns.agent_gateway_port = (
{'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef'}
)
def _test_ext_gw_updated_dvr_agent_mode(self, host,
agent_mode, expected_call_count):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs)
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self,
ri)
ri._external_gateway_added = mock.Mock()
# test agent mode = dvr (compute node)
router['gw_port_host'] = host
agent.conf.agent_mode = agent_mode
ri.external_gateway_updated(ex_gw_port, interface_name)
# no gateway should be added on dvr node
self.assertEqual(expected_call_count,
ri._external_gateway_added.call_count)
def test_ext_gw_updated_dvr_agent_mode(self):
# no gateway should be added on dvr node
self._test_ext_gw_updated_dvr_agent_mode('any-foo', 'dvr', 0)
def test_ext_gw_updated_dvr_agent_mode_host(self):
# no gateway should be added on dvr node
self._test_ext_gw_updated_dvr_agent_mode(HOSTNAME,
'dvr', 0)
def test_external_gateway_removed_ext_gw_port_and_fip(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['gw_port_host'] = HOSTNAME
self.mock_driver.unplug.reset_mock()
external_net_id = router['gw_port']['network_id']
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_edge_rtr.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
ri.remove_floating_ip = mock.Mock()
agent._fetch_external_net_id = mock.Mock(return_value=external_net_id)
ri.ex_gw_port = ri.router['gw_port']
del ri.router['gw_port']
ri.external_gateway_added(
ri.ex_gw_port,
ri.get_external_device_name(ri.ex_gw_port['id']))
ri.fip_ns = None
nat = ri.iptables_manager.ipv4['nat']
nat.clear_rules_by_tag = mock.Mock()
nat.add_rule = mock.Mock()
ri.fip_ns = agent.get_fip_ns(external_net_id)
subnet_id = _uuid()
ri.fip_ns.agent_gateway_port = {
'fixed_ips': [{
'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id
}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': external_net_id,
'mac_address': 'ca:fe:de:ad:be:ef'}
vm_floating_ip = '19.4.4.2'
ri.floating_ips_dict[vm_floating_ip] = FIP_PRI
ri.rtr_fip_subnet = ri.fip_ns.local_subnets.allocate(ri.router_id)
_, fip_to_rtr = ri.rtr_fip_subnet.get_pair()
self.mock_ip.get_devices.return_value = [
l3_test_common.FakeDev(ri.fip_ns.get_ext_device_name(_uuid()))]
ri.get_router_cidrs = mock.Mock(
return_value={vm_floating_ip + '/32', '19.4.4.1/24'})
self.device_exists.return_value = True
ri.external_gateway_removed(
ri.ex_gw_port,
ri.get_external_device_name(ri.ex_gw_port['id']))
ri.remove_floating_ip.assert_called_once_with(self.mock_ip_dev,
'19.4.4.2/32')
def test_get_router_cidrs_no_fip_ns(self):
router = mock.MagicMock()
router.get.return_value = [{'host': HOSTNAME},
{'host': mock.sentinel.otherhost}]
ri = self._create_router(router)
device = mock.Mock()
self.assertFalse(ri.get_router_cidrs(device))
def test_get_router_cidrs_no_device_exists(self):
router = mock.MagicMock()
router.get.return_value = [{'host': HOSTNAME},
{'host': mock.sentinel.otherhost}]
ri = self._create_router(router)
fake_fip_ns = mock.Mock(return_value=True)
fake_fip_ns.get_name = mock.Mock(return_value=None)
fake_fip_ns.get_int_device_name = mock.Mock(return_value=None)
ri.fip_ns = fake_fip_ns
device = mock.Mock()
device.exists = mock.Mock(return_value=False)
with mock.patch.object(ip_lib, 'IPDevice', return_value=device):
self.assertFalse(ri.get_router_cidrs(device))
@mock.patch.object(router_info.RouterInfo, '_add_snat_rules')
@mock.patch.object(dvr_router.DvrLocalRouter, '_handle_router_snat_rules')
def test_handle_snat_rule_for_centralized_fip(
self, _add_snat_rules, _handle_router_snat_rules):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT
self.mock_driver.unplug.reset_mock()
router = l3_test_common.prepare_router_data(enable_floating_ip=True)
router['gw_port_host'] = HOSTNAME
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_edge_rtr.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
ri.snat_iptables_manager = mock.MagicMock()
ipv4_nat = ri.snat_iptables_manager.ipv4['nat']
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self,
ri)
ri._handle_router_snat_rules(ex_gw_port, interface_name)
ipv4_nat.add_rule.assert_called_once_with('snat', '-j $float-snat')
@mock.patch.object(dvr_edge_rtr.DvrEdgeRouter,
'add_centralized_floatingip')
def test_add_centralized_floatingip_dvr_ha(
self,
super_add_centralized_floatingip):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT
router = l3_test_common.prepare_router_data(
num_internal_ports=2, enable_ha=True)
router['gw_port_host'] = HOSTNAME
self.mock_driver.unplug.reset_mock()
self._set_ri_kwargs(agent, router['id'], router)
fip = {'id': _uuid()}
fip_cidr = '11.22.33.44/24'
ri = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs)
ri.is_router_master = mock.Mock(return_value=False)
ri._add_vip = mock.Mock()
interface_name = ri.get_snat_external_device_interface_name(
ri.get_ex_gw_port())
ri.add_centralized_floatingip(fip, fip_cidr)
ri._add_vip.assert_called_once_with(fip_cidr, interface_name)
super_add_centralized_floatingip.assert_not_called()
router[lib_constants.HA_INTERFACE_KEY]['status'] = 'DOWN'
self._set_ri_kwargs(agent, router['id'], router)
ri_1 = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs)
ri_1.is_router_master = mock.Mock(return_value=True)
ri_1._add_vip = mock.Mock()
interface_name = ri_1.get_snat_external_device_interface_name(
ri_1.get_ex_gw_port())
ri_1.add_centralized_floatingip(fip, fip_cidr)
ri_1._add_vip.assert_called_once_with(fip_cidr, interface_name)
super_add_centralized_floatingip.assert_not_called()
router[lib_constants.HA_INTERFACE_KEY]['status'] = 'ACTIVE'
self._set_ri_kwargs(agent, router['id'], router)
ri_2 = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs)
ri_2.is_router_master = mock.Mock(return_value=True)
ri_2._add_vip = mock.Mock()
interface_name = ri_2.get_snat_external_device_interface_name(
ri_2.get_ex_gw_port())
ri_2.add_centralized_floatingip(fip, fip_cidr)
ri_2._add_vip.assert_called_once_with(fip_cidr, interface_name)
super_add_centralized_floatingip.assert_called_once_with(fip,
fip_cidr)
@mock.patch.object(dvr_edge_rtr.DvrEdgeRouter,
'remove_centralized_floatingip')
def test_remove_centralized_floatingip(self,
super_remove_centralized_floatingip):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['gw_port_host'] = HOSTNAME
self.mock_driver.unplug.reset_mock()
self._set_ri_kwargs(agent, router['id'], router)
fip_cidr = '11.22.33.44/24'
ri = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs)
ri.is_router_master = mock.Mock(return_value=False)
ri._remove_vip = mock.Mock()
ri.remove_centralized_floatingip(fip_cidr)
ri._remove_vip.assert_called_once_with(fip_cidr)
super_remove_centralized_floatingip.assert_not_called()
ri1 = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs)
ri1.is_router_master = mock.Mock(return_value=True)
ri1._remove_vip = mock.Mock()
ri1.remove_centralized_floatingip(fip_cidr)
ri1._remove_vip.assert_called_once_with(fip_cidr)
super_remove_centralized_floatingip.assert_called_once_with(fip_cidr)
| neutron/tests/unit/agent/l3/test_dvr_local_router.py | 41,077 | Copyright (c) 2015 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Validate that fip_ns.local_subnets is called when ri.rtr_fip_subnet is None TODO(mrsmith): add more asserts 0x650C0D00 is numerical value of 101.12.13.00 0x3D345705 is 30 bit xor folded crc32 of the ip_cidr crc32 of this ip_cidr is 0x1BD7 0x1BD7 + 0x3FFFFFFF = 0x40001BD6 Test basic case Test negative case first add the entry now delete it test agent mode = dvr (compute node) no gateway should be added on dvr node no gateway should be added on dvr node no gateway should be added on dvr node | 1,084 | en | 0.837708 |
import os
import pickle
import string
import time
import logging
import numpy as np
def get_logger(name=__file__, level=logging.INFO):
logger = logging.getLogger(name)
if getattr(logger, "_init_done__", None):
logger.setLevel(level)
return logger
logger._init_done__ = True
logger.propagate = False
logger.setLevel(level)
formatter = logging.Formatter("%(asctime)s:%(levelname)s::%(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.setLevel(0)
del logger.handlers[:]
logger.addHandler(handler)
return logger
## Utils
def load_jets():
root_dir = "data/"
filename = os.path.join(root_dir, "TruthBS_10")
with open(filename + ".pkl", "rb") as fd:
Truth10, BS10 = pickle.load(fd, encoding='latin-1')
return Truth10, BS10
def sumLogLH(jetList):
for jet in jetList:
jet["totLogLH"] = np.sum(jet["logLH"])
def getConstituents(jet, node_id, outers_list):
"""
Recursive function to get a list of the tree leaves
"""
if jet["tree"][node_id, 0] == -1:
outers_list.append(jet["content"][node_id])
else:
getConstituents(
jet,
jet["tree"][node_id, 0],
outers_list,)
getConstituents(
jet,
jet["tree"][node_id, 1],
outers_list,)
return outers_list
def get_leaves(jet):
return getConstituents(jet, jet["root_id"], []) | src/ClusterTrellis/utils.py | 1,452 | Recursive function to get a list of the tree leaves
Utils | 59 | en | 0.799789 |
"""
sphinx.util.cfamily
~~~~~~~~~~~~~~~~~~~
Utility functions common to the C and C++ domains.
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import warnings
from copy import deepcopy
from typing import (
Any, Callable, List, Match, Pattern, Tuple, Union
)
from docutils import nodes
from docutils.nodes import TextElement
from sphinx.config import Config
from sphinx.deprecation import RemovedInSphinx40Warning
from sphinx.util import logging
logger = logging.getLogger(__name__)
StringifyTransform = Callable[[Any], str]
_whitespace_re = re.compile(r'(?u)\s+')
anon_identifier_re = re.compile(r'(@[a-zA-Z0-9_])[a-zA-Z0-9_]*\b')
identifier_re = re.compile(r'''(?x)
( # This 'extends' _anon_identifier_re with the ordinary identifiers,
# make sure they are in sync.
(~?\b[a-zA-Z_]) # ordinary identifiers
| (@[a-zA-Z0-9_]) # our extension for names of anonymous entities
)
[a-zA-Z0-9_]*\b
''')
integer_literal_re = re.compile(r'[1-9][0-9]*')
octal_literal_re = re.compile(r'0[0-7]*')
hex_literal_re = re.compile(r'0[xX][0-9a-fA-F][0-9a-fA-F]*')
binary_literal_re = re.compile(r'0[bB][01][01]*')
float_literal_re = re.compile(r'''(?x)
[+-]?(
# decimal
([0-9]+[eE][+-]?[0-9]+)
| ([0-9]*\.[0-9]+([eE][+-]?[0-9]+)?)
| ([0-9]+\.([eE][+-]?[0-9]+)?)
# hex
| (0[xX][0-9a-fA-F]+[pP][+-]?[0-9a-fA-F]+)
| (0[xX][0-9a-fA-F]*\.[0-9a-fA-F]+([pP][+-]?[0-9a-fA-F]+)?)
| (0[xX][0-9a-fA-F]+\.([pP][+-]?[0-9a-fA-F]+)?)
)
''')
char_literal_re = re.compile(r'''(?x)
((?:u8)|u|U|L)?
'(
(?:[^\\'])
| (\\(
(?:['"?\\abfnrtv])
| (?:[0-7]{1,3})
| (?:x[0-9a-fA-F]{2})
| (?:u[0-9a-fA-F]{4})
| (?:U[0-9a-fA-F]{8})
))
)'
''')
def verify_description_mode(mode: str) -> None:
if mode not in ('lastIsName', 'noneIsName', 'markType', 'markName', 'param'):
raise Exception("Description mode '%s' is invalid." % mode)
class NoOldIdError(Exception):
# Used to avoid implementing unneeded id generation for old id schemes.
@property
def description(self) -> str:
warnings.warn('%s.description is deprecated. '
'Coerce the instance to a string instead.' % self.__class__.__name__,
RemovedInSphinx40Warning, stacklevel=2)
return str(self)
class ASTBaseBase:
def __eq__(self, other: Any) -> bool:
if type(self) is not type(other):
return False
try:
for key, value in self.__dict__.items():
if value != getattr(other, key):
return False
except AttributeError:
return False
return True
__hash__ = None # type: Callable[[], int]
def clone(self) -> Any:
"""Clone a definition expression node."""
return deepcopy(self)
def _stringify(self, transform: StringifyTransform) -> str:
raise NotImplementedError(repr(self))
def __str__(self) -> str:
return self._stringify(lambda ast: str(ast))
def get_display_string(self) -> str:
return self._stringify(lambda ast: ast.get_display_string())
def __repr__(self) -> str:
return '<%s>' % self.__class__.__name__
################################################################################
# Attributes
################################################################################
class ASTAttribute(ASTBaseBase):
def describe_signature(self, signode: TextElement) -> None:
raise NotImplementedError(repr(self))
class ASTCPPAttribute(ASTAttribute):
def __init__(self, arg: str) -> None:
self.arg = arg
def _stringify(self, transform: StringifyTransform) -> str:
return "[[" + self.arg + "]]"
def describe_signature(self, signode: TextElement) -> None:
txt = str(self)
signode.append(nodes.Text(txt, txt))
class ASTGnuAttribute(ASTBaseBase):
def __init__(self, name: str, args: Any) -> None:
self.name = name
self.args = args
def _stringify(self, transform: StringifyTransform) -> str:
res = [self.name]
if self.args:
res.append('(')
res.append(transform(self.args))
res.append(')')
return ''.join(res)
class ASTGnuAttributeList(ASTAttribute):
def __init__(self, attrs: List[ASTGnuAttribute]) -> None:
self.attrs = attrs
def _stringify(self, transform: StringifyTransform) -> str:
res = ['__attribute__((']
first = True
for attr in self.attrs:
if not first:
res.append(', ')
first = False
res.append(transform(attr))
res.append('))')
return ''.join(res)
def describe_signature(self, signode: TextElement) -> None:
txt = str(self)
signode.append(nodes.Text(txt, txt))
class ASTIdAttribute(ASTAttribute):
"""For simple attributes defined by the user."""
def __init__(self, id: str) -> None:
self.id = id
def _stringify(self, transform: StringifyTransform) -> str:
return self.id
def describe_signature(self, signode: TextElement) -> None:
signode.append(nodes.Text(self.id, self.id))
class ASTParenAttribute(ASTAttribute):
"""For paren attributes defined by the user."""
def __init__(self, id: str, arg: str) -> None:
self.id = id
self.arg = arg
def _stringify(self, transform: StringifyTransform) -> str:
return self.id + '(' + self.arg + ')'
def describe_signature(self, signode: TextElement) -> None:
txt = str(self)
signode.append(nodes.Text(txt, txt))
################################################################################
class UnsupportedMultiCharacterCharLiteral(Exception):
@property
def decoded(self) -> str:
warnings.warn('%s.decoded is deprecated. '
'Coerce the instance to a string instead.' % self.__class__.__name__,
RemovedInSphinx40Warning, stacklevel=2)
return str(self)
class DefinitionError(Exception):
@property
def description(self) -> str:
warnings.warn('%s.description is deprecated. '
'Coerce the instance to a string instead.' % self.__class__.__name__,
RemovedInSphinx40Warning, stacklevel=2)
return str(self)
class BaseParser:
def __init__(self, definition: str, *,
location: Union[nodes.Node, Tuple[str, int]],
config: "Config") -> None:
self.definition = definition.strip()
self.location = location # for warnings
self.config = config
self.pos = 0
self.end = len(self.definition)
self.last_match = None # type: Match
self._previous_state = (0, None) # type: Tuple[int, Match]
self.otherErrors = [] # type: List[DefinitionError]
# in our tests the following is set to False to capture bad parsing
self.allowFallbackExpressionParsing = True
def _make_multi_error(self, errors: List[Any], header: str) -> DefinitionError:
if len(errors) == 1:
if len(header) > 0:
return DefinitionError(header + '\n' + str(errors[0][0]))
else:
return DefinitionError(str(errors[0][0]))
result = [header, '\n']
for e in errors:
if len(e[1]) > 0:
indent = ' '
result.append(e[1])
result.append(':\n')
for line in str(e[0]).split('\n'):
if len(line) == 0:
continue
result.append(indent)
result.append(line)
result.append('\n')
else:
result.append(str(e[0]))
return DefinitionError(''.join(result))
@property
def language(self) -> str:
raise NotImplementedError
def status(self, msg: str) -> None:
# for debugging
indicator = '-' * self.pos + '^'
print("%s\n%s\n%s" % (msg, self.definition, indicator))
def fail(self, msg: str) -> None:
errors = []
indicator = '-' * self.pos + '^'
exMain = DefinitionError(
'Invalid %s declaration: %s [error at %d]\n %s\n %s' %
(self.language, msg, self.pos, self.definition, indicator))
errors.append((exMain, "Main error"))
for err in self.otherErrors:
errors.append((err, "Potential other error"))
self.otherErrors = []
raise self._make_multi_error(errors, '')
def warn(self, msg: str) -> None:
logger.warning(msg, location=self.location)
def match(self, regex: Pattern) -> bool:
match = regex.match(self.definition, self.pos)
if match is not None:
self._previous_state = (self.pos, self.last_match)
self.pos = match.end()
self.last_match = match
return True
return False
def skip_string(self, string: str) -> bool:
strlen = len(string)
if self.definition[self.pos:self.pos + strlen] == string:
self.pos += strlen
return True
return False
def skip_word(self, word: str) -> bool:
return self.match(re.compile(r'\b%s\b' % re.escape(word)))
def skip_ws(self) -> bool:
return self.match(_whitespace_re)
def skip_word_and_ws(self, word: str) -> bool:
if self.skip_word(word):
self.skip_ws()
return True
return False
def skip_string_and_ws(self, string: str) -> bool:
if self.skip_string(string):
self.skip_ws()
return True
return False
@property
def eof(self) -> bool:
return self.pos >= self.end
@property
def current_char(self) -> str:
try:
return self.definition[self.pos]
except IndexError:
return 'EOF'
@property
def matched_text(self) -> str:
if self.last_match is not None:
return self.last_match.group()
else:
return None
def read_rest(self) -> str:
rv = self.definition[self.pos:]
self.pos = self.end
return rv
def assert_end(self, *, allowSemicolon: bool = False) -> None:
self.skip_ws()
if allowSemicolon:
if not self.eof and self.definition[self.pos:] != ';':
self.fail('Expected end of definition or ;.')
else:
if not self.eof:
self.fail('Expected end of definition.')
################################################################################
@property
def id_attributes(self):
raise NotImplementedError
@property
def paren_attributes(self):
raise NotImplementedError
def _parse_balanced_token_seq(self, end: List[str]) -> str:
# TODO: add handling of string literals and similar
brackets = {'(': ')', '[': ']', '{': '}'}
startPos = self.pos
symbols = [] # type: List[str]
while not self.eof:
if len(symbols) == 0 and self.current_char in end:
break
if self.current_char in brackets.keys():
symbols.append(brackets[self.current_char])
elif len(symbols) > 0 and self.current_char == symbols[-1]:
symbols.pop()
elif self.current_char in ")]}":
self.fail("Unexpected '%s' in balanced-token-seq." % self.current_char)
self.pos += 1
if self.eof:
self.fail("Could not find end of balanced-token-seq starting at %d."
% startPos)
return self.definition[startPos:self.pos]
def _parse_attribute(self) -> ASTAttribute:
self.skip_ws()
# try C++11 style
startPos = self.pos
if self.skip_string_and_ws('['):
if not self.skip_string('['):
self.pos = startPos
else:
# TODO: actually implement the correct grammar
arg = self._parse_balanced_token_seq(end=[']'])
if not self.skip_string_and_ws(']'):
self.fail("Expected ']' in end of attribute.")
if not self.skip_string_and_ws(']'):
self.fail("Expected ']' in end of attribute after [[...]")
return ASTCPPAttribute(arg)
# try GNU style
if self.skip_word_and_ws('__attribute__'):
if not self.skip_string_and_ws('('):
self.fail("Expected '(' after '__attribute__'.")
if not self.skip_string_and_ws('('):
self.fail("Expected '(' after '__attribute__('.")
attrs = []
while 1:
if self.match(identifier_re):
name = self.matched_text
self.skip_ws()
if self.skip_string_and_ws('('):
self.fail('Parameterized GNU style attribute not yet supported.')
attrs.append(ASTGnuAttribute(name, None))
# TODO: parse arguments for the attribute
if self.skip_string_and_ws(','):
continue
elif self.skip_string_and_ws(')'):
break
else:
self.fail("Expected identifier, ')', or ',' in __attribute__.")
if not self.skip_string_and_ws(')'):
self.fail("Expected ')' after '__attribute__((...)'")
return ASTGnuAttributeList(attrs)
# try the simple id attributes defined by the user
for id in self.id_attributes:
if self.skip_word_and_ws(id):
return ASTIdAttribute(id)
# try the paren attributes defined by the user
for id in self.paren_attributes:
if not self.skip_string_and_ws(id):
continue
if not self.skip_string('('):
self.fail("Expected '(' after user-defined paren-attribute.")
arg = self._parse_balanced_token_seq(end=[')'])
if not self.skip_string(')'):
self.fail("Expected ')' to end user-defined paren-attribute.")
return ASTParenAttribute(id, arg)
return None
| sphinx/util/cfamily.py | 14,476 | For simple attributes defined by the user.
For paren attributes defined by the user.
Clone a definition expression node.
sphinx.util.cfamily
~~~~~~~~~~~~~~~~~~~
Utility functions common to the C and C++ domains.
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
Used to avoid implementing unneeded id generation for old id schemes. type: Callable[[], int] Attributes for warnings type: Match type: Tuple[int, Match] type: List[DefinitionError] in our tests the following is set to False to capture bad parsing for debugging TODO: add handling of string literals and similar type: List[str] try C++11 style TODO: actually implement the correct grammar try GNU style TODO: parse arguments for the attribute try the simple id attributes defined by the user try the paren attributes defined by the user | 857 | en | 0.676079 |
#!/usr/bin/env python
"""
Generate sequences from a PDB file and modify those sequences.
Author: {0} ({1})
This module is part of CADEE, the framework for
Computer-Aided Directed Evolution of Enzymes.
"""
from __future__ import print_function

import itertools
import logging
import os
import sys
import time

import config
__author__ = "Beat Amrein"
__email__ = "beat.amrein@gmail.com"
logger = logging.getLogger('prep.genseqs')  # module-level logger for this prep step
# ERROR/EXIT CODES
# Distinct process exit codes so callers/scripts can tell failure modes apart.
ERR_USAGE = 1                  # bad command-line usage
ERR_OUTPUTFOLDER_EXISTS = 2    # refusing to overwrite an existing output folder
ERR_TOPO_GENERATION_WT = 3     # topology generation for the wild type failed
ERR_QPREP5_INEXISTENT = 4      # qprep5 executable not found
ERR_MKTOP_INEXISTENT = 5       # mktop executable not found
ERR_NO_BABEL = 6               # (open)babel not available
# CONSTANTS
NLC = '\n'  # newline character
def genseq2(wtseq, mutations, keepdupes=False):
    """Generate a library of sequences derived from wtseq.

    The wild-type sequence is always the first entry of the result; every
    mutation library is applied combinatorially on top of it.

    :param wtseq: wild-type sequence (string or list of 1-letter codes)
    :param mutations: list of tuples [(resid, library), ...]; resid is 1-based,
                      library is an iterable of 1-letter amino-acid codes.
                      The list is NOT modified by this function.
    :param keepdupes: if True, duplicate sequences are kept in the result
    :returns: list of sequences, each a list of 1-letter codes
    :raises ValueError: if a resid is < 1 or a library entry is not 1 letter
    """
    def estimator(muts):
        """Upper-bound estimate of the number of generated sequences."""
        est = 1
        for _, lib in muts:
            est *= (len(lib) + 1)
        return est

    logger.info('will mutate wtseq %s and create about %s mutations',
                wtseq, estimator(mutations))

    sequences = [list(wtseq)]
    # Iterate in reverse to reproduce the ordering of the historical
    # pop()-based loop, but without destructively emptying the caller's list.
    for res, lib in reversed(mutations):
        res = int(res)
        if res < 1:
            raise ValueError('Impossible: resid < 1!', res)
        pos = res - 1  # convert 1-based resid to 0-based index
        newseqs = sequences[:]
        for seqo in sequences:
            for aa in lib:
                if len(aa) != 1:
                    raise ValueError('Impossible 1-letter aminoacid',
                                     aa, 'in lib', lib)
                seqn = seqo[:]
                seqn[pos] = aa
                if keepdupes or seqn not in newseqs:
                    newseqs.append(seqn)
        sequences = newseqs
    return sequences
def combine(lib, pos):
    """Generate all combinations of library letters for the given positions.

    :param lib: list of libraries (one iterable of 1-letter codes per
        position); only the first len(pos) libraries are used
    :param pos: positions to mutate (only its length matters here)
    :returns: list of combinations, each a list of len(pos) letters,
        with the last position varying fastest

    The original hand-rolled nesting was limited to 7 positions and was
    broken for exactly 4 positions (it appended `every4` twice, yielding a
    malformed 5-element combination); itertools.product fixes both and
    keeps the original iteration order.
    """
    import itertools

    # Estimate kept identical to the original log output.
    numseqs = 1
    for each in lib:
        numseqs *= len(each)
    log = logging.getLogger('prep.genseqs')
    log.info('Generating %s %s', numseqs, 'sequeces. Please wait.')
    log.info('Library %s, Positions %s', lib, pos)
    return [list(combo) for combo in itertools.product(*lib[:len(pos)])]
def gen_seqlib(sequence, pos, lib):
    """
    Generate sequences, mutating at pos[x] to every amino acid in lib[x].

    If only one library is given, the same library is reused for every
    position.

    :param sequence: template sequence (string of 1-letter codes)
    :param pos: 0-based positions to mutate
    :param lib: a library (string) or a list of libraries, one per position
    :returns: dict mapping each generated sequence (str) to 1
    :raises ValueError: if the number of libraries does not match the
        number of positions
    """
    import itertools

    # is lib a string?
    if isinstance(lib, str):
        lib = [lib]
    # when only 1 library is given, reuse it for every position
    # (the original `while range(1, len(pos)):` looped forever for
    # len(pos) > 1 because a non-empty range object is always truthy;
    # multiplying also avoids mutating the caller's list)
    if len(lib) == 1 and len(pos) > 1:
        lib = lib * len(pos)
    if len(pos) != len(lib):
        msg = 'Bad Input: Dimensions of pos and lib must be equal: '
        msg += 'found: #pos: {0}, #lib {1}'.format(len(pos), len(lib))
        # the original `raise (Exception, msg)` is itself a TypeError under
        # Python 3; raise a real exception instead
        raise ValueError(msg)
    # all combinations, last position varying fastest -- same order as the
    # nested-loop combine() helper, but without its 7-position limit or its
    # broken 4-position branch
    seqlib = [list(combo) for combo in itertools.product(*lib)]
    # insert combinations into sequence
    sequences_1d = {}
    for combo in seqlib:
        nfa = list(sequence)
        # the original `for j, posj in pos:` raised TypeError because pos
        # holds plain ints; enumerate() yields the intended (index, position)
        for j, posj in enumerate(pos):
            # only substitute when the letter actually differs
            # (case-insensitive), preserving the template's original case
            if nfa[posj].upper() != combo[j].upper():
                nfa[posj] = combo[j]
        modseq = ''.join(nfa)
        sequences_1d[modseq] = 1
    return sequences_1d
def get_fasta(wtpdb):
    """Return the fasta sequence of wtpdb (lower-case, comments stripped).

    Runs babel on the pdb file first; the sequence is then read from the
    'proper.fasta' file that the conversion writes to the working directory.
    """
    # preparations
    from pyscwrl import babel_pdb_for_scwrl
    babel_pdb_for_scwrl(wtpdb)
    # read fasta; use a context manager so the handle is closed (the
    # original left the file open), and strip the newline explicitly
    # instead of chopping the last character, which corrupted a final line
    # lacking a trailing newline
    fasta = ''
    with open('proper.fasta') as fasta_file:
        for line in fasta_file:
            line = line.rstrip('\n')
            if not line or line[0] == '>':
                # blank line or fasta-comment: ignore
                continue
            fasta += line.lower()
    return fasta
def get_sequences(wtpdb, resids, library):
    """Return dict of sequences for resids, created with library.

    :param wtpdb: path to the (qprep'd) wild-type pdb file
    :param resids: 1-based residue numbers to mutate
    :param library: amino-acid library applied to every position
    :returns: dict mapping each generated sequence to 1 (see gen_seqlib)
    """
    # Get the fasta sequence from the pdb file.
    fasta = get_fasta(wtpdb)
    # residue ids are 1-based, sequence positions are 0-based
    posids = [int(resid) - 1 for resid in resids]
    # generate sequences (the stray debug print of the arguments is gone):
    return gen_seqlib(fasta, posids, [library])
if __name__ == "__main__":
    # Parse Command Line
    LIB = config.SatLibs.ALL

    def usage():
        """Print usage and exit with ERR_USAGE."""
        print('')
        print('Usage:')
        print(' ' + sys.argv[0] + ' qprep-wt.pdb res1 [ res2 ...] ]')
        print('')
        sys.exit(ERR_USAGE)

    def get_resnumbers(args):
        """Return residue-numbers as list-of-integers (at most 7)."""
        resids = []
        for resid in args:
            try:
                resids.append(int(resid))
            except ValueError:
                print('ValueError with ', resid, ' expected: Integer')
                usage()
        if len(resids) > 7:
            print('FATAL:')
            print('You ask me to mutate more than 7 residues at one time.')
            print('This is NOT IMPLEMENTED... ...probably a BAD IDEA :')
            print('This is a bad idea, because we grow with LIBRARY^{#RES}!')
            # report LIBRARY ^ #RES (the original printed len(LIB) twice)
            print('In your case ', len(LIB), '^', len(resids), '=',
                  len(LIB)**len(resids), '!')
            usage()
        return resids

    START = time.time()
    if len(sys.argv) < 3:
        usage()
    # get_resnumbers() itself enforces the 7-residue limit; the original
    # `if len(get_resnumbers) > 7:` applied len() to the function object,
    # which always raised TypeError.
    RESIDS = get_resnumbers(sys.argv[2:])
    get_sequences(os.path.abspath(sys.argv[1]), RESIDS, LIB)
    print('time', round(time.time()-START, 2), 's')
| cadee/prep/genseqs.py | 7,898 | generate combinations of up to 7.
@param lib: library
@param pos: positions to mutate
# TODO: implement in readable (recursively)
Generates sequences, mutating at pos[x] to all as in lib[x]
Generates sequences, mutating at pos[x] if len(lib)==1,
the same lib will be used for all
Return sequences
generate a sequences library based of wtseq
@param: list of tupel, [ (resid, library), (resid, library), ...]
@returns: list of sequences
Return fasta code of wtpdb
Return residue-numbers as list-of-integers
Return list of sequences for resids, created with library
Print Usage and exit
Generate Sequence from a pdbfile and to modify the squences.
Author: {0} ({1})
This module is part of CADEE, the framework for
Computer-Aided Directed Evolution of Enzymes.
!/usr/bin/env python ERROR/EXIT CODES CONSTANTS is lib a string? when only 1 library is given, reuse it insert combinations into sequence preparations read fasta fasta-comment, ignore line Get the fasta sequence from pdbfile position - ids start from 0 (not 1), so we have to convert generate sequences: Parse Command Line | 1,083 | en | 0.819523 |
from sklearn.cluster import MiniBatchKMeans
import numpy as np
import torch
from models import TransformerModel, Seq2SeqTransformer, generate_square_subsequent_mask
from models import LM_NAME, MLM_NAME, MT_NAME, NLAYERS, NUM2WORD
import os
from data_preprocessing import DATA_DIR_DEV, SAVE_DATA_MT_TRAIN
from data_preprocessing import SAVE_VOCAB_SRC, SAVE_VOCAB_TRG, PAD_WORD
import pickle
from torchtext.legacy.data import Dataset, BucketIterator
import pandas as pd
from analytics_helper import MostFreqToken, GetInter, GetMI, GetInterValues
from analytics_helper import MIN_SAMPLE_SIZE_DEV, MIN_SAMPLE_SIZE_FULL
from analytics_helper import N_FREQUENT_DEV, N_FREQUENT_FULL
from analytics_helper import N_CLUSTER_DEV, N_CLUSTER_FULL
from data_preprocessing import SAVE_MODEL_PATH, DEVELOPMENT_MODE
from MT_helpers import patch_trg, create_mask
# Run on CUDA when available; every model/tensor below is moved here.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Pick small hyper-parameters in development mode, full-size otherwise.
if DEVELOPMENT_MODE:
    min_sample_size=MIN_SAMPLE_SIZE_DEV
    N_frequent=N_FREQUENT_DEV
    N_cluster=N_CLUSTER_DEV
    data_dir=DATA_DIR_DEV
else:
    min_sample_size=MIN_SAMPLE_SIZE_FULL
    N_frequent=N_FREQUENT_FULL
    N_cluster=N_CLUSTER_FULL
    # NOTE(review): DATA_DIR_FULL is never imported (only DATA_DIR_DEV is),
    # so this branch raises NameError when DEVELOPMENT_MODE is False --
    # confirm and import it from data_preprocessing.
    data_dir=DATA_DIR_FULL
# Mutual-information results per model variant, keyed by the model file
# stem (e.g. "LM", "MLM_SAME", "MLM_DIFF", "MT"); one entry per layer is
# appended by GetMI at the end of the script.
MI_results_INP={LM_NAME.split('.')[0]:[],
                f"{MLM_NAME.split('.')[0]}_SAME":[],
                f"{MLM_NAME.split('.')[0]}_DIFF":[],
                MT_NAME.split('.')[0]:[]}
MI_results_OUT={LM_NAME.split('.')[0]:[],
                MLM_NAME.split('.')[0]:[]}
# Models whose representations are analysed (saved model file names).
MODELS_INP=[LM_NAME, MLM_NAME, MT_NAME]
# Load the pickled source/target vocabularies and the tokenised MT
# training examples produced by data_preprocessing.
vocab_pkl_src = os.path.join(data_dir, SAVE_VOCAB_SRC)
vocab_pkl_trg = os.path.join(data_dir, SAVE_VOCAB_TRG)
train_pkl = os.path.join(data_dir, SAVE_DATA_MT_TRAIN)
field_src = pickle.load(open(vocab_pkl_src, 'rb'))
field_trg = pickle.load(open(vocab_pkl_trg, 'rb'))
# Integer ids of the padding token on each side.
src_pad_idx = field_src.vocab.stoi[PAD_WORD]
trg_pad_idx = field_trg.vocab.stoi[PAD_WORD]
train_examples = pickle.load(open(train_pkl, 'rb'))
fields = {'src':field_src , 'trg':field_trg}
train = Dataset(examples=train_examples, fields=fields)
# batch_size=1 and shuffle=False so that every model sees the exact same
# sentences in the exact same order.
train_iter = BucketIterator(train, batch_size=1, device=device, train=True, shuffle=False)
frequent_vocab = MostFreqToken(field_src, N_frequent, min_sample_size)

def _fresh_reps():
    """Return NLAYERS dicts; the i-th maps token id -> list of layer-i reps."""
    return [{token_id: [] for token_id in frequent_vocab} for _ in range(NLAYERS)]

# token_reps_model_* map a model (variant) name to NLAYERS dicts: for the
# i-th dict, the key is the token ID and the value is the list of
# representations of that token collected from layer i.
# BUGFIX: every entry gets its OWN structure. The original code assigned a
# single shared token_reps_list to the MLM "_SAME", "_DIFF" and OUT entries
# (and likewise to the LM INP and OUT entries), so samples collected for
# one variant silently appeared in all of them.
token_reps_model_INP = {}
token_reps_model_OUT = {}
for this_model_name in MODELS_INP:
    base = this_model_name.split('.')[0]
    if this_model_name.startswith("MLM"):
        token_reps_model_INP[f"{base}_SAME"] = _fresh_reps()
        token_reps_model_INP[f"{base}_DIFF"] = _fresh_reps()
        token_reps_model_OUT[base] = _fresh_reps()
    elif this_model_name.startswith("LM"):
        token_reps_model_INP[base] = _fresh_reps()
        token_reps_model_OUT[base] = _fresh_reps()
    elif this_model_name.startswith("MT"):
        token_reps_model_INP[base] = _fresh_reps()
# Per-variant counters of how many representation samples have been
# collected so far for each frequent token (input side / output side).
sample_size_dict_INP = {}
sample_size_dict_OUT = {}
for this_model_name in MODELS_INP:
    base = this_model_name.split('.')[0]
    if this_model_name.startswith("MLM"):
        sample_size_dict_INP[f"{base}_SAME"] = {tid: 0 for tid in frequent_vocab}
        sample_size_dict_INP[f"{base}_DIFF"] = {tid: 0 for tid in frequent_vocab}
        sample_size_dict_OUT[base] = {tid: 0 for tid in frequent_vocab}
    elif this_model_name.startswith("LM"):
        sample_size_dict_INP[base] = {tid: 0 for tid in frequent_vocab}
        sample_size_dict_OUT[base] = {tid: 0 for tid in frequent_vocab}
    elif this_model_name.startswith("MT"):
        sample_size_dict_INP[base] = {tid: 0 for tid in frequent_vocab}
# Iterate over the training sentences (batch_size == 1) and collect
# layer-wise hidden representations of the frequent tokens for every model
# variant, until min_sample_size samples exist per token.
for batch in train_iter:
    # ---- per-model inputs/targets derived from this batch ----
    src_seq_MT = batch.src.to(device)
    target_sample_INP_MT = GetInter(src_seq_MT.detach().numpy(), frequent_vocab)
    src_seq_MLM_SAME = batch.src.to(device)
    target_sample_INP_MLM_SAME = GetInter(src_seq_MLM_SAME.detach().numpy(), frequent_vocab)
    src_seq = batch.src.to(device)
    # MLM "DIFF" input: randomly mask roughly 15% of the non-padding tokens
    src_seq_MLM_DIFF = src_seq.clone()
    src_mask = generate_square_subsequent_mask(src_seq.size(0))
    rand_value = torch.rand(src_seq.shape)
    # BUGFIX: the original wrote `(input != src_pad_idx)`, comparing the
    # *builtin function* `input` with the pad index (always True), so
    # padding positions could be masked too; the intended operand is the
    # source sequence itself.
    rand_mask = (rand_value < 0.15) * (src_seq != src_pad_idx)
    mask_idx = (rand_mask.flatten() == True).nonzero().view(-1)
    src_seq_MLM_DIFF = src_seq_MLM_DIFF.flatten()
    # NOTE(review): 103 is presumably the [MASK] token id -- confirm vocab.
    src_seq_MLM_DIFF[mask_idx] = 103
    src_seq_MLM_DIFF = src_seq_MLM_DIFF.view(src_seq.size())
    target_sample_INP_MLM_DIFF = GetInter(src_seq_MLM_DIFF.detach().numpy(), frequent_vocab)
    src_seq_LM = batch.src[:-1]
    target_sample_INP_LM = GetInter(src_seq_LM.detach().numpy(), frequent_vocab)
    trg = batch.trg
    trg_seq_MT, gold = map(lambda x: x.to(device), patch_trg(trg, trg_pad_idx))
    trg_seq_MT = trg_seq_MT.to(device)
    # LM target: the input shifted left by one token.
    trg_seq_LM = src_seq[1:].to(device)
    target_sample_OUT_LM = GetInter(trg_seq_LM.detach().numpy(), frequent_vocab)
    # MLM target: reconstruct the original (unmasked) input.
    trg_seq_MLM = src_seq
    target_sample_OUT_MLM = GetInter(trg_seq_MLM.detach().numpy(), frequent_vocab)

    # ---- run each model and harvest intermediate representations ----
    for this_model_name in MODELS_INP:
        this_model = torch.load(os.path.join(SAVE_MODEL_PATH, this_model_name))
        this_model.eval()
        if this_model_name.startswith("MT") and len(target_sample_INP_MT) > 0:
            src_mask, trg_mask, src_padding_mask, trg_padding_mask = create_mask(src_seq_MT, trg_seq_MT, src_pad_idx, trg_pad_idx)
            _ = this_model(src=src_seq_MT,
                           src_mask=src_mask,
                           trg=trg_seq_MT,
                           tgt_mask=trg_mask,
                           src_padding_mask=src_padding_mask,
                           tgt_padding_mask=trg_padding_mask,
                           memory_key_padding_mask=src_padding_mask)
            token_reps_list = token_reps_model_INP[MT_NAME.split('.')[0]]
            this_sample_size_dict = sample_size_dict_INP[this_model_name.split('.')[0]]
            GetInterValues(this_model, target_sample_INP_MT, NUM2WORD, token_reps_list, this_sample_size_dict, min_sample_size, NLAYERS)
        elif this_model_name.startswith("MLM"):
            if len(target_sample_INP_MLM_SAME) > 0:
                src_mask = generate_square_subsequent_mask(src_seq_MLM_SAME.size(0))
                src_padding_mask = (src_seq_MLM_SAME == src_pad_idx).transpose(0, 1)
                _ = this_model(src_seq_MLM_SAME, src_mask.to(device), src_padding_mask.to(device))
                token_reps_list = token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_SAME"]
                this_sample_size_dict = sample_size_dict_INP[f"{this_model_name.split('.')[0]}_SAME"]
                GetInterValues(this_model, target_sample_INP_MLM_SAME, NUM2WORD, token_reps_list, this_sample_size_dict, min_sample_size, NLAYERS)
            if len(target_sample_INP_MLM_DIFF) > 0 and len(target_sample_OUT_MLM) > 0:
                src_mask = generate_square_subsequent_mask(src_seq_MLM_DIFF.size(0))
                src_padding_mask = (src_seq_MLM_DIFF == src_pad_idx).transpose(0, 1)
                _ = this_model(src_seq_MLM_DIFF.to(device), src_mask.to(device), src_padding_mask.to(device))
                token_reps_list_INP = token_reps_model_INP[f"{MLM_NAME.split('.')[0]}_DIFF"]
                this_sample_size_dict_INP = sample_size_dict_INP[f"{this_model_name.split('.')[0]}_DIFF"]
                token_reps_list_OUT = token_reps_model_OUT[MLM_NAME.split('.')[0]]
                this_sample_size_dict_OUT = sample_size_dict_OUT[this_model_name.split('.')[0]]
                GetInterValues(this_model, target_sample_INP_MLM_DIFF, NUM2WORD, token_reps_list_INP, this_sample_size_dict_INP, min_sample_size, NLAYERS)
                GetInterValues(this_model, target_sample_OUT_MLM, NUM2WORD, token_reps_list_OUT, this_sample_size_dict_OUT, min_sample_size, NLAYERS)
        elif this_model_name.startswith("LM") and len(target_sample_INP_LM) > 0 and len(target_sample_OUT_LM) > 0:
            src_mask = generate_square_subsequent_mask(src_seq_LM.size(0))
            src_padding_mask = (src_seq_LM == src_pad_idx).transpose(0, 1)
            _ = this_model(src_seq_LM, src_mask.to(device), src_padding_mask.to(device))
            token_reps_list_INP = token_reps_model_INP[this_model_name.split('.')[0]]
            token_reps_list_OUT = token_reps_model_OUT[this_model_name.split('.')[0]]
            this_sample_size_dict_INP = sample_size_dict_INP[this_model_name.split('.')[0]]
            this_sample_size_dict_OUT = sample_size_dict_OUT[this_model_name.split('.')[0]]
            GetInterValues(this_model, target_sample_INP_LM, NUM2WORD, token_reps_list_INP, this_sample_size_dict_INP, min_sample_size, NLAYERS)
            GetInterValues(this_model, target_sample_OUT_LM, NUM2WORD, token_reps_list_OUT, this_sample_size_dict_OUT, min_sample_size, NLAYERS)

    # ---- stop once every frequent token has min_sample_size samples ----
    # we only need to keep the minimum sample size that has been collected
    min_collected_inp = min(
        (size for counts in sample_size_dict_INP.values()
         for size in counts.values()), default=float('inf'))
    min_collected_out = min(
        (size for counts in sample_size_dict_OUT.values()
         for size in counts.values()), default=float('inf'))
    is_enough = (min_collected_inp >= min_sample_size
                 and min_collected_out >= min_sample_size)
    if is_enough:
        # Double-check the representation lists themselves. BUGFIX: the
        # original second loop iterated the stale `reps_dict` left over
        # from the INP loop, so the OUT structures were never checked.
        is_enough = all(
            len(rep_list) >= min_sample_size
            for reps_layers in (list(token_reps_model_INP.values())
                                + list(token_reps_model_OUT.values()))
            for layer_reps in reps_layers
            for rep_list in layer_reps.values())
    if is_enough:
        break
if is_enough is False:
    # Fail loudly: the original `assert 1==0` disappears under `python -O`.
    raise RuntimeError("We have not collected enough data!")
# Cluster the collected representations per layer and compute the
# mutual-information estimates for every model variant.
for this_model_name in MODELS_INP:
    base = this_model_name.split('.')[0]
    if this_model_name.startswith("MLM"):
        GetMI(token_reps_model_INP[f"{base}_SAME"], N_frequent, N_cluster,
              NLAYERS, MI_results_INP[f"{base}_SAME"])
        GetMI(token_reps_model_INP[f"{base}_DIFF"], N_frequent, N_cluster,
              NLAYERS, MI_results_INP[f"{base}_DIFF"])
        GetMI(token_reps_model_OUT[base], N_frequent, N_cluster,
              NLAYERS, MI_results_OUT[base])
    elif this_model_name.startswith("MT"):
        GetMI(token_reps_model_INP[base], N_frequent, N_cluster,
              NLAYERS, MI_results_INP[base])
    elif this_model_name.startswith("LM"):
        GetMI(token_reps_model_INP[base], N_frequent, N_cluster,
              NLAYERS, MI_results_INP[base])
        # BUGFIX: the original read token_reps_model_OUT[MLM_NAME...] here,
        # storing the MLM's output-side MI under the LM's results key; the
        # LM's own output-side representations are the intended input.
        GetMI(token_reps_model_OUT[base], N_frequent, N_cluster,
              NLAYERS, MI_results_OUT[base])
print("result", MI_results_INP)
print("result", MI_results_OUT)
| NLP/The_Bottom_up_Evolution_of_Representations_in_the_Transformer/analytics.py | 12,957 | token_reps_list saves NLAYERS dicts, for ith dict, the key is the token ID, the value is the representation of the ID in the ith layer. we only need to keep the minimum sample size that has been collected | 204 | en | 0.944934 |
# coding: utf-8
"""
ThingsBoard REST API
ThingsBoard Professional Edition IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3PAAS-RC1
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from tb_rest_client.api_client import ApiClient
class WidgetsBundleControllerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_widgets_bundle_using_delete(self, widgets_bundle_id, **kwargs): # noqa: E501
"""Delete widgets bundle (deleteWidgetsBundle) # noqa: E501
Deletes the widget bundle. Referencing non-existing Widget Bundle Id will cause an error. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_widgets_bundle_using_delete(widgets_bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_widgets_bundle_using_delete_with_http_info(widgets_bundle_id, **kwargs) # noqa: E501
else:
(data) = self.delete_widgets_bundle_using_delete_with_http_info(widgets_bundle_id, **kwargs) # noqa: E501
return data
    def delete_widgets_bundle_using_delete_with_http_info(self, widgets_bundle_id, **kwargs):  # noqa: E501
        """Delete widgets bundle (deleteWidgetsBundle)  # noqa: E501

        Deletes the widget bundle. Referencing non-existing Widget Bundle Id will cause an error. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_widgets_bundle_using_delete_with_http_info(widgets_bundle_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Keyword arguments accepted besides the positional parameter.
        all_params = ['widgets_bundle_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() snapshots the arguments here; unknown kwargs are rejected
        # and known ones are folded into the same `params` dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_widgets_bundle_using_delete" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'widgets_bundle_id' is set
        if ('widgets_bundle_id' not in params or
                params['widgets_bundle_id'] is None):
            raise ValueError("Missing the required parameter `widgets_bundle_id` when calling `delete_widgets_bundle_using_delete`")  # noqa: E501

        collection_formats = {}

        # The bundle id is substituted into the resource path template below.
        path_params = {}
        if 'widgets_bundle_id' in params:
            path_params['widgetsBundleId'] = params['widgets_bundle_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501

        # Delegate the HTTP call (and optional async dispatch) to ApiClient.
        return self.api_client.call_api(
            '/api/widgetsBundle/{widgetsBundleId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_widgets_bundle_by_id_using_get(self, widgets_bundle_id, **kwargs): # noqa: E501
"""Get Widget Bundle (getWidgetsBundleById) # noqa: E501
Get the Widget Bundle based on the provided Widget Bundle Id. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundle_by_id_using_get(widgets_bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: WidgetsBundle
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_widgets_bundle_by_id_using_get_with_http_info(widgets_bundle_id, **kwargs) # noqa: E501
else:
(data) = self.get_widgets_bundle_by_id_using_get_with_http_info(widgets_bundle_id, **kwargs) # noqa: E501
return data
    def get_widgets_bundle_by_id_using_get_with_http_info(self, widgets_bundle_id, **kwargs):  # noqa: E501
        """Get Widget Bundle (getWidgetsBundleById)  # noqa: E501

        Get the Widget Bundle based on the provided Widget Bundle Id. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. Available for any authorized user.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_widgets_bundle_by_id_using_get_with_http_info(widgets_bundle_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
        :return: WidgetsBundle
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Keyword arguments accepted besides the positional parameter.
        all_params = ['widgets_bundle_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() snapshots the arguments here; unknown kwargs are rejected
        # and known ones are folded into the same `params` dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_widgets_bundle_by_id_using_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'widgets_bundle_id' is set
        if ('widgets_bundle_id' not in params or
                params['widgets_bundle_id'] is None):
            raise ValueError("Missing the required parameter `widgets_bundle_id` when calling `get_widgets_bundle_by_id_using_get`")  # noqa: E501

        collection_formats = {}

        # The bundle id is substituted into the resource path template below.
        path_params = {}
        if 'widgets_bundle_id' in params:
            path_params['widgetsBundleId'] = params['widgets_bundle_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501

        # Delegate the HTTP call (and optional async dispatch) to ApiClient;
        # the response body is deserialized into a WidgetsBundle model.
        return self.api_client.call_api(
            '/api/widgetsBundle/{widgetsBundleId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='WidgetsBundle',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_widgets_bundles_using_get(self, **kwargs): # noqa: E501
"""Get all Widget Bundles (getWidgetsBundles) # noqa: E501
Returns an array of Widget Bundle objects that are available for current user.Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundles_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[WidgetsBundle]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_widgets_bundles_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_widgets_bundles_using_get_with_http_info(**kwargs) # noqa: E501
return data
    def get_widgets_bundles_using_get_with_http_info(self, **kwargs):  # noqa: E501
        """Get all Widget Bundles (getWidgetsBundles)  # noqa: E501

        Returns an array of Widget Bundle objects that are available for current user.Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. Available for any authorized user.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_widgets_bundles_using_get_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: list[WidgetsBundle]
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # This endpoint takes no API parameters; only the client-control
        # keyword arguments are accepted.
        all_params = []  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() snapshots the arguments here; unknown kwargs are rejected
        # and known ones are folded into the same `params` dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_widgets_bundles_using_get" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501

        # Delegate the HTTP call (and optional async dispatch) to ApiClient;
        # the response body is deserialized into a list of WidgetsBundle.
        return self.api_client.call_api(
            '/api/widgetsBundles', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[WidgetsBundle]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_widgets_bundles_using_get1(self, page_size, page, **kwargs): # noqa: E501
"""Get Widget Bundles (getWidgetsBundles) # noqa: E501
Returns a page of Widget Bundle objects available for current user. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundles_using_get1(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the widget bundle title.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataWidgetsBundle
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_widgets_bundles_using_get1_with_http_info(page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_widgets_bundles_using_get1_with_http_info(page_size, page, **kwargs) # noqa: E501
return data
    def get_widgets_bundles_using_get1_with_http_info(self, page_size, page, **kwargs):  # noqa: E501
        """Get Widget Bundles (getWidgetsBundles)  # noqa: E501

        Returns a page of Widget Bundle objects available for current user. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for any authorized user.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_widgets_bundles_using_get1_with_http_info(page_size, page, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int page_size: Maximum amount of entities in a one page (required)
        :param int page: Sequence number of page starting from 0 (required)
        :param str text_search: The case insensitive 'startsWith' filter based on the widget bundle title.
        :param str sort_property: Property of entity to sort by
        :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
        :return: PageDataWidgetsBundle
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Keyword arguments accepted besides the positional parameters.
        all_params = ['page_size', 'page', 'text_search', 'sort_property', 'sort_order']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # locals() snapshots the arguments here; unknown kwargs are rejected
        # and known ones are folded into the same `params` dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_widgets_bundles_using_get1" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'page_size' is set
        if ('page_size' not in params or
                params['page_size'] is None):
            raise ValueError("Missing the required parameter `page_size` when calling `get_widgets_bundles_using_get1`")  # noqa: E501
        # verify the required parameter 'page' is set
        if ('page' not in params or
                params['page'] is None):
            raise ValueError("Missing the required parameter `page` when calling `get_widgets_bundles_using_get1`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        # Optional filters are only sent when the caller supplied them.
        query_params = []
        if 'page_size' in params:
            query_params.append(('pageSize', params['page_size']))  # noqa: E501
        if 'page' in params:
            query_params.append(('page', params['page']))  # noqa: E501
        if 'text_search' in params:
            query_params.append(('textSearch', params['text_search']))  # noqa: E501
        if 'sort_property' in params:
            query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
        if 'sort_order' in params:
            query_params.append(('sortOrder', params['sort_order']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501

        # NOTE(review): the resource path keeps the RFC 6570 query template
        # '{?page,pageSize,...}' even though the query parameters are also
        # passed separately above; if ApiClient only substitutes plain path
        # parameters, the literal '{?...}' remains in the request URL --
        # confirm against the ApiClient implementation.
        return self.api_client.call_api(
            '/api/widgetsBundles{?page,pageSize,sortOrder,sortProperty,textSearch}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PageDataWidgetsBundle',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def save_widgets_bundle_using_post(self, **kwargs):  # noqa: E501
    """Create Or Update Widget Bundle (saveWidgetsBundle)  # noqa: E501

    Create or update the Widget Bundle. Widget Bundle represents a
    group (bundle) of widgets, grouped by type or use case. When
    creating a bundle the platform generates a time-based UUID for it;
    specify an existing Widget Bundle id to update instead. Available
    for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.save_widgets_bundle_using_post(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param WidgetsBundle body:
    :return: WidgetsBundle
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always requests the deserialized payload
    # only; callers needing status/headers use *_with_http_info directly.
    kwargs['_return_http_data_only'] = True
    # Whether synchronous (deserialized data) or asynchronous (request
    # thread), the delegate's return value is passed through unchanged.
    return self.save_widgets_bundle_using_post_with_http_info(**kwargs)  # noqa: E501
def save_widgets_bundle_using_post_with_http_info(self, **kwargs):  # noqa: E501
    """Create Or Update Widget Bundle (saveWidgetsBundle)  # noqa: E501

    Create or update the Widget Bundle. Widget Bundle represents a
    group (bundle) of widgets, grouped by type or use case. When
    creating a bundle the platform generates a time-based UUID for it;
    specify an existing Widget Bundle id to update instead. Available
    for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.save_widgets_bundle_using_post_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param WidgetsBundle body:
    :return: WidgetsBundle
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the framework-level options every
    # generated method accepts.
    all_params = ['body']  # noqa: E501
    all_params.extend([
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])

    # Reject any keyword this endpoint does not understand.
    params = {}
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method save_widgets_bundle_using_post" % key
            )
        params[key] = val

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}

    # The bundle is sent as the JSON request body.
    body_params = params.get('body')

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['X-Authorization']  # noqa: E501

    return self.api_client.call_api(
        '/api/widgetsBundle', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='WidgetsBundle',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| tb_rest_client/api/api_pe/widgets_bundle_controller_api.py | 24,781 | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
Delete widgets bundle (deleteWidgetsBundle) # noqa: E501
Deletes the widget bundle. Referencing non-existing Widget Bundle Id will cause an error. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_widgets_bundle_using_delete(widgets_bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
Delete widgets bundle (deleteWidgetsBundle) # noqa: E501
Deletes the widget bundle. Referencing non-existing Widget Bundle Id will cause an error. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_widgets_bundle_using_delete_with_http_info(widgets_bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
Get Widget Bundle (getWidgetsBundleById) # noqa: E501
Get the Widget Bundle based on the provided Widget Bundle Id. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundle_by_id_using_get(widgets_bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: WidgetsBundle
If the method is called asynchronously,
returns the request thread.
Get Widget Bundle (getWidgetsBundleById) # noqa: E501
Get the Widget Bundle based on the provided Widget Bundle Id. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundle_by_id_using_get_with_http_info(widgets_bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str widgets_bundle_id: A string value representing the widget bundle id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: WidgetsBundle
If the method is called asynchronously,
returns the request thread.
Get all Widget Bundles (getWidgetsBundles) # noqa: E501
Returns an array of Widget Bundle objects that are available for current user. Widget Bundle represents a group (bundle) of widgets. Widgets are grouped into bundles by type or use case. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundles_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[WidgetsBundle]
If the method is called asynchronously,
returns the request thread.
Get Widget Bundles (getWidgetsBundles) # noqa: E501
Returns a page of Widget Bundle objects available for current user. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundles_using_get1(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_size: Maximum number of entities in one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the widget bundle title.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataWidgetsBundle
If the method is called asynchronously,
returns the request thread.
Get Widget Bundles (getWidgetsBundles) # noqa: E501
Returns a page of Widget Bundle objects available for current user. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundles_using_get1_with_http_info(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_size: Maximum number of entities in one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the widget bundle title.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataWidgetsBundle
If the method is called asynchronously,
returns the request thread.
Get all Widget Bundles (getWidgetsBundles) # noqa: E501
Returns an array of Widget Bundle objects that are available for current user. Widget Bundle represents a group (bundle) of widgets. Widgets are grouped into bundles by type or use case. Available for any authorized user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_widgets_bundles_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[WidgetsBundle]
If the method is called asynchronously,
returns the request thread.
Create Or Update Widget Bundle (saveWidgetsBundle) # noqa: E501
Create or update the Widget Bundle. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. When creating the bundle, platform generates Widget Bundle Id as [time-based UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_1_(date-time_and_MAC_address)). The newly created Widget Bundle Id will be present in the response. Specify existing Widget Bundle id to update the Widget Bundle. Referencing non-existing Widget Bundle Id will cause 'Not Found' error. Widget Bundle alias is unique in the scope of tenant. Special Tenant Id '13814000-1dd2-11b2-8080-808080808080' is automatically used if the create bundle request is sent by user with 'SYS_ADMIN' authority. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_widgets_bundle_using_post(async_req=True)
>>> result = thread.get()
:param async_req bool
:param WidgetsBundle body:
:return: WidgetsBundle
If the method is called asynchronously,
returns the request thread.
Create Or Update Widget Bundle (saveWidgetsBundle) # noqa: E501
Create or update the Widget Bundle. Widget Bundle represents a group(bundle) of widgets. Widgets are grouped into bundle by type or use case. When creating the bundle, platform generates Widget Bundle Id as [time-based UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_1_(date-time_and_MAC_address)). The newly created Widget Bundle Id will be present in the response. Specify existing Widget Bundle id to update the Widget Bundle. Referencing non-existing Widget Bundle Id will cause 'Not Found' error. Widget Bundle alias is unique in the scope of tenant. Special Tenant Id '13814000-1dd2-11b2-8080-808080808080' is automatically used if the create bundle request is sent by user with 'SYS_ADMIN' authority. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_widgets_bundle_using_post_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param WidgetsBundle body:
:return: WidgetsBundle
If the method is called asynchronously,
returns the request thread.
ThingsBoard REST API
ThingsBoard Professional Edition IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3PAAS-RC1
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 noqa: F401 python 2 and python 3 compatibility library noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'widgets_bundle_id' is set noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'widgets_bundle_id' is set noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 verify the required parameter 'page_size' is set noqa: E501 verify the required parameter 'page' is set noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 Authentication setting noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 HTTP header `Accept` noqa: E501 HTTP header `Content-Type` noqa: E501 noqa: E501 Authentication setting noqa: E501 noqa: E501 | 10,875 | en | 0.637281 |
#!/usr/bin/env python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from collections import defaultdict
import in_generator
import template_expander
import name_utilities
from make_qualified_names import MakeQualifiedNamesWriter
class MakeElementFactoryWriter(MakeQualifiedNamesWriter):
    """Writer used to generate element-factory sources.

    All behaviour is inherited unchanged from ``MakeQualifiedNamesWriter``;
    only the class name differs.
    """
    pass
if __name__ == "__main__":
    # Script entry point: hand the command-line arguments to the
    # in_generator harness, which drives the writer.
    in_generator.Maker(MakeElementFactoryWriter).main(sys.argv)
| sky/engine/build/scripts/make_element_factory.py | 1,886 | !/usr/bin/env python Copyright (C) 2013 Google Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 1,495 | en | 0.884062 |
"""
Module to handle gamma matrices expressed as tensor objects.
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex
>>> from sympy.tensor.tensor import tensor_indices
>>> i = tensor_indices('i', LorentzIndex)
>>> G(i)
GammaMatrix(i)
Note that there is already an instance of GammaMatrixHead in four dimensions:
GammaMatrix, which is simply declared as
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix
>>> from sympy.tensor.tensor import tensor_indices
>>> i = tensor_indices('i', LorentzIndex)
>>> GammaMatrix(i)
GammaMatrix(i)
To access the metric tensor
>>> LorentzIndex.metric
metric(LorentzIndex,LorentzIndex)
"""
from sympy import S, Mul, eye, trace
from sympy.tensor.tensor import TensorIndexType, TensorIndex,\
TensMul, TensAdd, tensor_mul, Tensor, TensorHead, TensorSymmetry
from sympy.core.compatibility import range
# DiracSpinorIndex = TensorIndexType('DiracSpinorIndex', dim=4, dummy_fmt="S")
LorentzIndex = TensorIndexType('LorentzIndex', dim=4, dummy_fmt="L")
GammaMatrix = TensorHead("GammaMatrix", [LorentzIndex],
TensorSymmetry.no_symmetry(1), comm=None)
def extract_type_tens(expression, component):
    """
    Split a ``TensExpr`` into the factors built from `component` and the rest.

    Returns two tensor expressions:

    * the first is the product of all ``Tensor`` factors whose component
      is `component`;
    * the second is the product of all remaining factors.
    """
    if isinstance(expression, Tensor):
        factors = [expression]
    elif isinstance(expression, TensMul):
        factors = expression.args
    else:
        raise ValueError('wrong type')

    # Multiply each factor into exactly one of the two partitions.
    matched = S.One
    remainder = S.One
    for factor in factors:
        if isinstance(factor, Tensor) and factor.component == component:
            matched *= factor
        else:
            remainder *= factor
    return matched, remainder
def simplify_gamma_expression(expression):
    """Simplify the gamma-matrix part of `expression`, leaving the rest intact."""
    # Separate the gamma-matrix factors, simplify only those, and
    # reattach the untouched remainder.
    gamma_part, remainder = extract_type_tens(expression, GammaMatrix)
    return _simplify_single_line(gamma_part) * remainder
def simplify_gpgp(ex, sort=True):
    """
    simplify products ``G(i)*p(-i)*G(j)*p(-j) -> p(i)*p(-i)``

    Examples
    ========

    >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
        LorentzIndex, simplify_gpgp
    >>> from sympy.tensor.tensor import tensor_indices, tensor_heads
    >>> p, q = tensor_heads('p, q', [LorentzIndex])
    >>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
    >>> ps = p(i0)*G(-i0)
    >>> qs = q(i0)*G(-i0)
    >>> simplify_gpgp(ps*qs*qs)
    GammaMatrix(-L_0)*p(L_0)*q(L_1)*q(-L_1)
    """
    def _simplify_gpgp(ex):
        # One rewriting pass: returns either a simplified expression or
        # ``ex`` unchanged when no rewrite applies.
        components = ex.components
        a = []
        comp_map = []
        # comp_map[n] is the position in ``components`` of the tensor
        # owning the n-th index slot of the product.
        for i, comp in enumerate(components):
            comp_map.extend([i]*comp.rank)
        # Re-express each dummy pair from ex.dum as
        # (slot1, slot2, component_of_slot1, component_of_slot2).
        dum = [(i[0], i[1], comp_map[i[0]], comp_map[i[1]]) for i in ex.dum]
        for i in range(len(components)):
            if components[i] != GammaMatrix:
                continue
            for dx in dum:
                # Find the component position contracted with this gamma.
                if dx[2] == i:
                    p_pos1 = dx[3]
                elif dx[3] == i:
                    p_pos1 = dx[2]
                else:
                    continue
                comp1 = components[p_pos1]
                # Keep only contractions with rank-1 tensors whose
                # commutation group is 0 (momentum-like vectors).
                if comp1.comm == 0 and comp1.rank == 1:
                    a.append((i, p_pos1))
        if not a:
            return ex
        elim = set()   # positions of factors scheduled for removal
        tv = []        # scalar replacement factors, e.g. p(mu)*p(-mu)
        hit = True
        coeff = S.One
        ta = None
        while hit:
            hit = False
            for i, ai in enumerate(a[:-1]):
                if ai[0] in elim:
                    continue
                # The two gamma matrices must be adjacent in the product...
                if ai[0] != a[i + 1][0] - 1:
                    continue
                # ...and contracted with the same vector head.
                if components[ai[1]] != components[a[i + 1][1]]:
                    continue
                elim.add(ai[0])
                elim.add(ai[1])
                elim.add(a[i + 1][0])
                elim.add(a[i + 1][1])
                if not ta:
                    # Split the product lazily, only once a match is found.
                    ta = ex.split()
                    mu = TensorIndex('mu', LorentzIndex)
                hit = True
                if i == 0:
                    coeff = ex.coeff
                # G(mu)*p(-mu)*G(nu)*p(-nu) collapses to p(mu)*p(-mu).
                tx = components[ai[1]](mu)*components[ai[1]](-mu)
                if len(a) == 2:
                    tx *= 4  # eye(4)
                tv.append(tx)
                break
        if tv:
            # Rebuild the product from the surviving factors plus the
            # replacements collected in ``tv``.
            a = [x for j, x in enumerate(ta) if j not in elim]
            a.extend(tv)
            t = tensor_mul(*a)*coeff
            # t = t.replace(lambda x: x.is_Matrix, lambda x: 1)
            return t
        else:
            return ex

    if sort:
        ex = ex.sorted_components()
    # this would be better off with pattern matching
    # Iterate the single pass to a fixed point.
    while 1:
        t = _simplify_gpgp(ex)
        if t != ex:
            ex = t
        else:
            return t
def gamma_trace(t):
    """
    trace of a single line of gamma matrices

    Examples
    ========

    >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
        gamma_trace, LorentzIndex
    >>> from sympy.tensor.tensor import tensor_indices, tensor_heads
    >>> p, q = tensor_heads('p, q', [LorentzIndex])
    >>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
    >>> ps = p(i0)*G(-i0)
    >>> qs = q(i0)*G(-i0)
    >>> gamma_trace(G(i0)*G(i1))
    4*metric(i0, i1)
    >>> gamma_trace(ps*ps) - 4*p(i0)*p(-i0)
    0
    >>> gamma_trace(ps*qs + ps*ps) - 4*p(i0)*p(-i0) - 4*p(i0)*q(-i0)
    0
    """
    # A sum is traced term by term; a product is first simplified with
    # Kahane's algorithm and then traced as a single line.
    if isinstance(t, TensAdd):
        return TensAdd(*[_trace_single_line(term) for term in t.args])
    return _trace_single_line(_simplify_single_line(t))
def _simplify_single_line(expression):
    """
    Simplify single-line product of gamma matrices.

    Examples
    ========

    >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
        LorentzIndex, _simplify_single_line
    >>> from sympy.tensor.tensor import tensor_indices, TensorHead
    >>> p = TensorHead('p', [LorentzIndex])
    >>> i0,i1 = tensor_indices('i0:2', LorentzIndex)
    >>> _simplify_single_line(G(i0)*G(i1)*p(-i1)*G(-i0)) + 2*G(i0)*p(-i0)
    0
    """
    # Run Kahane's algorithm on the gamma-matrix factors only and
    # reattach whatever remains of the original product.
    gamma_part, remainder = extract_type_tens(expression, GammaMatrix)
    if gamma_part != 1:
        gamma_part = kahane_simplify(gamma_part)
    return gamma_part*remainder
def _trace_single_line(t):
    """
    Evaluate the trace of a single gamma matrix line inside a ``TensExpr``.

    Notes
    =====

    If there are ``DiracSpinorIndex.auto_left`` and ``DiracSpinorIndex.auto_right``
    indices trace over them; otherwise traces are not implied (explain)

    Examples
    ========

    >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
        LorentzIndex, _trace_single_line
    >>> from sympy.tensor.tensor import tensor_indices, TensorHead
    >>> p = TensorHead('p', [LorentzIndex])
    >>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
    >>> _trace_single_line(G(i0)*G(i1))
    4*metric(i0, i1)
    >>> _trace_single_line(G(i0)*p(-i0)*G(i1)*p(-i1)) - 4*p(i0)*p(-i0)
    0
    """
    def _trace_single_line1(t):
        # Trace of one monomial (no sums); the coefficient is handled
        # by the caller.
        t = t.sorted_components()
        components = t.components
        ncomps = len(components)
        g = LorentzIndex.metric
        # gamma matrices are in a[i:j]: after sorting they occupy a
        # contiguous run of component positions; find its bounds.
        hit = 0
        for i in range(ncomps):
            if components[i] == GammaMatrix:
                hit = 1
                break
        for j in range(i + hit, ncomps):
            if components[j] != GammaMatrix:
                break
        else:
            j = ncomps
        numG = j - i
        if numG == 0:
            # No gamma matrices present; return the monomial itself
            # (without its coefficient when one exists).
            tcoeff = t.coeff
            return t.nocoeff if tcoeff else t
        if numG % 2 == 1:
            # The trace of an odd number of gamma matrices is zero.
            return TensMul.from_data(S.Zero, [], [], [])
        elif numG > 4:
            # Recursive reduction: expand along the first gamma matrix,
            # contracting it in turn with each later one via the metric,
            # with alternating signs, then trace the shorter products.
            a = t.split()
            ind1 = a[i].get_indices()[0]
            ind2 = a[i + 1].get_indices()[0]
            aa = a[:i] + a[i + 2:]
            t1 = tensor_mul(*aa)*g(ind1, ind2)
            t1 = t1.contract_metric(g)
            args = [t1]
            sign = 1
            for k in range(i + 2, j):
                sign = -sign
                ind2 = a[k].get_indices()[0]
                aa = a[:i] + a[i + 1:k] + a[k + 1:]
                t2 = sign*tensor_mul(*aa)*g(ind1, ind2)
                t2 = t2.contract_metric(g)
                t2 = simplify_gpgp(t2, False)
                args.append(t2)
            t3 = TensAdd(*args)
            t3 = _trace_single_line(t3)
            return t3
        else:
            # Two or four gamma matrices: use the closed-form trace and
            # multiply back the non-gamma factors.
            a = t.split()
            t1 = _gamma_trace1(*a[i:j])
            a2 = a[:i] + a[j:]
            t2 = tensor_mul(*a2)
            t3 = t1*t2
            if not t3:
                return t3
            t3 = t3.contract_metric(g)
            return t3

    t = t.expand()
    if isinstance(t, TensAdd):
        # Trace each term, reattaching its coefficient.
        a = [_trace_single_line1(x)*x.coeff for x in t.args]
        return TensAdd(*a)
    elif isinstance(t, (Tensor, TensMul)):
        r = t.coeff*_trace_single_line1(t)
        return r
    else:
        # Not a tensor expression: fall back to the generic trace.
        return trace(t)
def _gamma_trace1(*a):
    """Closed-form trace of an explicit product of 0, 2 or 4 gamma matrices."""
    trace_dim = 4  # FIXME specific for d=4
    g = LorentzIndex.metric
    n = len(a)
    if n == 0:
        # Trace of the identity.
        return trace_dim
    if n % 2:
        # An odd number of gamma matrices has vanishing trace.
        return S.Zero
    indices = [gamma.get_indices()[0] for gamma in a]
    if n == 2:
        return trace_dim*g(indices[0], indices[1])
    if n == 4:
        i0, i1, i2, i3 = indices
        return trace_dim*(g(i0, i1)*g(i2, i3)
                          - g(i0, i2)*g(i1, i3)
                          + g(i0, i3)*g(i1, i2))
def kahane_simplify(expression):
    r"""
    This function cancels contracted elements in a product of four
    dimensional gamma matrices, resulting in an expression equal to the given
    one, without the contracted gamma matrices.

    Parameters
    ==========

    `expression` the tensor expression containing the gamma matrices to simplify.

    Notes
    =====

    If spinor indices are given, the matrices must be given in
    the order given in the product.

    Algorithm
    =========

    The idea behind the algorithm is to use some well-known identities,
    i.e., for contractions enclosing an even number of `\gamma` matrices

    `\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N}} \gamma_\mu = 2 (\gamma_{a_{2N}} \gamma_{a_1} \cdots \gamma_{a_{2N-1}} + \gamma_{a_{2N-1}} \cdots \gamma_{a_1} \gamma_{a_{2N}} )`

    for an odd number of `\gamma` matrices

    `\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N+1}} \gamma_\mu = -2 \gamma_{a_{2N+1}} \gamma_{a_{2N}} \cdots \gamma_{a_{1}}`

    Instead of repeatedly applying these identities to cancel out all contracted indices,
    it is possible to recognize the links that would result from such an operation,
    the problem is thus reduced to a simple rearrangement of free gamma matrices.

    Examples
    ========

    When using, always remember that the original expression coefficient
    has to be handled separately

    >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex
    >>> from sympy.physics.hep.gamma_matrices import kahane_simplify
    >>> from sympy.tensor.tensor import tensor_indices
    >>> i0, i1, i2 = tensor_indices('i0:3', LorentzIndex)
    >>> ta = G(i0)*G(-i0)
    >>> kahane_simplify(ta)
    Matrix([
    [4, 0, 0, 0],
    [0, 4, 0, 0],
    [0, 0, 4, 0],
    [0, 0, 0, 4]])
    >>> tb = G(i0)*G(i1)*G(-i0)
    >>> kahane_simplify(tb)
    -2*GammaMatrix(i1)
    >>> t = G(i0)*G(-i0)
    >>> kahane_simplify(t)
    Matrix([
    [4, 0, 0, 0],
    [0, 4, 0, 0],
    [0, 0, 4, 0],
    [0, 0, 0, 4]])
    >>> t = G(i0)*G(-i0)
    >>> kahane_simplify(t)
    Matrix([
    [4, 0, 0, 0],
    [0, 4, 0, 0],
    [0, 0, 4, 0],
    [0, 0, 0, 4]])

    If there are no contractions, the same expression is returned

    >>> tc = G(i0)*G(i1)
    >>> kahane_simplify(tc)
    GammaMatrix(i0)*GammaMatrix(i1)

    References
    ==========

    [1] Algorithm for Reducing Contracted Products of gamma Matrices,
    Joseph Kahane, Journal of Mathematical Physics, Vol. 9, No. 10, October 1968.
    """
    # Expressions that cannot contain contracted gamma matrices are returned
    # unchanged; sums are simplified term by term.
    if isinstance(expression, Mul):
        return expression
    if isinstance(expression, TensAdd):
        return TensAdd(*[kahane_simplify(arg) for arg in expression.args])
    if isinstance(expression, Tensor):
        return expression

    assert isinstance(expression, TensMul)

    gammas = expression.args

    for gamma in gammas:
        assert gamma.component == GammaMatrix

    free = expression.free

    # Keep only the Lorentz-index contractions; Dirac (spinor) index pairs
    # are not handled by this algorithm.
    dum = []
    for dum_pair in expression.dum:
        if expression.index_types[dum_pair[0]] == LorentzIndex:
            dum.append((dum_pair[0], dum_pair[1]))

    dum = sorted(dum)

    if len(dum) == 0:
        # no contractions in `expression`, just return it.
        return expression

    # find the `first_dum_pos`, i.e. the position of the first contracted
    # gamma matrix, Kahane's algorithm as described in his paper requires the
    # gamma matrix expression to start with a contracted gamma matrix, this is
    # a workaround which ignores possible initial free indices, and re-adds
    # them later.
    first_dum_pos = min(map(min, dum))

    total_number = len(free) + len(dum)*2
    number_of_contractions = len(dum)

    free_pos = [None]*total_number
    for i in free:
        free_pos[i[1]] = i[0]

    # `index_is_free` is a list of booleans, to identify index position
    # and whether that index is free or dummy.
    index_is_free = [False]*total_number

    for i, indx in enumerate(free):
        index_is_free[indx[1]] = True

    # `links` is a dictionary containing the graph described in Kahane's paper,
    # to every key correspond one or two values, representing the linked indices.
    # All values in `links` are integers, negative numbers are used in the case
    # where it is necessary to insert gamma matrices between free indices, in
    # order to make Kahane's algorithm work (see paper).
    links = {}
    for i in range(first_dum_pos, total_number):
        links[i] = []

    # `cum_sign` is a step variable to mark the sign of every index, see paper.
    cum_sign = -1
    # `cum_sign_list` keeps storage for all `cum_sign` (every index).
    cum_sign_list = [None]*total_number
    block_free_count = 0

    # multiply `resulting_coeff` by the coefficient parameter, the rest
    # of the algorithm ignores a scalar coefficient.
    resulting_coeff = S.One

    # initialize a list of lists of indices. The outer list will contain all
    # additive tensor expressions, while the inner list will contain the
    # free indices (rearranged according to the algorithm).
    resulting_indices = [[]]

    # start to count the `connected_components`, which together with the number
    # of contractions, determines a -1 or +1 factor to be multiplied.
    connected_components = 1

    # First loop: here we fill `cum_sign_list`, and draw the links
    # among consecutive indices (they are stored in `links`). Links among
    # non-consecutive indices will be drawn later.
    for i, is_free in enumerate(index_is_free):
        # if `expression` starts with free indices, they are ignored here;
        # they are later added as they are to the beginning of all
        # `resulting_indices` list of lists of indices.
        if i < first_dum_pos:
            continue

        if is_free:
            block_free_count += 1
            # if previous index was free as well, draw an arch in `links`.
            if block_free_count > 1:
                links[i - 1].append(i)
                links[i].append(i - 1)
        else:
            # Change the sign of the index (`cum_sign`) if the number of free
            # indices preceding it is even.
            cum_sign *= 1 if (block_free_count % 2) else -1
            if block_free_count == 0 and i != first_dum_pos:
                # check if there are two consecutive dummy indices:
                # in this case create virtual indices with negative position,
                # these "virtual" indices represent the insertion of two
                # gamma^0 matrices to separate consecutive dummy indices, as
                # Kahane's algorithm requires dummy indices to be separated by
                # free indices. The product of two gamma^0 matrices is unity,
                # so the new expression being examined is the same as the
                # original one.
                if cum_sign == -1:
                    links[-1-i] = [-1-i+1]
                    links[-1-i+1] = [-1-i]
            if (i - cum_sign) in links:
                if i != first_dum_pos:
                    links[i].append(i - cum_sign)
                if block_free_count != 0:
                    if i - cum_sign < len(index_is_free):
                        if index_is_free[i - cum_sign]:
                            links[i - cum_sign].append(i)
            block_free_count = 0

        cum_sign_list[i] = cum_sign

    # The previous loop has only created links between consecutive free indices,
    # it is necessary to properly create links among dummy (contracted) indices,
    # according to the rules described in Kahane's paper. There is only one exception
    # to Kahane's rules: the negative indices, which handle the case of some
    # consecutive free indices (Kahane's paper just describes dummy indices
    # separated by free indices, hinting that free indices can be added without
    # altering the expression result).
    for i in dum:
        # get the positions of the two contracted indices:
        pos1 = i[0]
        pos2 = i[1]

        # create Kahane's upper links, i.e. the upper arcs between dummy
        # (i.e. contracted) indices:
        links[pos1].append(pos2)
        links[pos2].append(pos1)

        # create Kahane's lower links, this corresponds to the arcs below
        # the line described in the paper:

        # first we move `pos1` and `pos2` according to the sign of the indices:
        linkpos1 = pos1 + cum_sign_list[pos1]
        linkpos2 = pos2 + cum_sign_list[pos2]

        # otherwise, perform some checks before creating the lower arcs:

        # make sure we are not exceeding the total number of indices:
        if linkpos1 >= total_number:
            continue

        if linkpos2 >= total_number:
            continue

        # make sure we are not below the first dummy index in `expression`:
        if linkpos1 < first_dum_pos:
            continue

        if linkpos2 < first_dum_pos:
            continue

        # check if the previous loop created "virtual" indices between dummy
        # indices, in such a case relink `linkpos1` and `linkpos2`:
        if (-1-linkpos1) in links:
            linkpos1 = -1-linkpos1
        if (-1-linkpos2) in links:
            linkpos2 = -1-linkpos2

        # move only if not next to free index:
        if linkpos1 >= 0 and not index_is_free[linkpos1]:
            linkpos1 = pos1
        if linkpos2 >= 0 and not index_is_free[linkpos2]:
            linkpos2 = pos2

        # create the lower arcs:
        if linkpos2 not in links[linkpos1]:
            links[linkpos1].append(linkpos2)
        if linkpos1 not in links[linkpos2]:
            links[linkpos2].append(linkpos1)

    # This loop starts from the `first_dum_pos` index (first dummy index)
    # walks through the graph deleting the visited indices from `links`,
    # it adds a gamma matrix for every free index in encounters, while it
    # completely ignores dummy indices and virtual indices.
    pointer = first_dum_pos
    previous_pointer = 0
    while True:
        if pointer in links:
            next_ones = links.pop(pointer)
        else:
            break

        if previous_pointer in next_ones:
            next_ones.remove(previous_pointer)

        previous_pointer = pointer

        if next_ones:
            pointer = next_ones[0]
        else:
            break

        if pointer == previous_pointer:
            break

        if pointer >= 0 and free_pos[pointer] is not None:
            for ri in resulting_indices:
                ri.append(free_pos[pointer])

    # The following loop removes the remaining connected components in `links`.
    # If there are free indices inside a connected component, it gives a
    # contribution to the resulting expression given by the factor
    # `gamma_a gamma_b ... gamma_z + gamma_z ... gamma_b gamma_a`, in Kahanes's
    # paper represented as {gamma_a, gamma_b, ... , gamma_z},
    # virtual indices are ignored. The variable `connected_components` is
    # increased by one for every connected component this loop encounters.
    # If the connected component has virtual and dummy indices only
    # (no free indices), it contributes to `resulting_indices` by a factor of two.
    # The multiplication by two is a result of the
    # factor {gamma^0, gamma^0} = 2 I, as it appears in Kahane's paper.
    # Note: curly brackets are meant as in the paper, as a generalized
    # multi-element anticommutator!
    while links:
        connected_components += 1
        pointer = min(links.keys())
        previous_pointer = pointer

        # the inner loop erases the visited indices from `links`, and it adds
        # all free indices to `prepend_indices` list, virtual indices are
        # ignored.
        prepend_indices = []
        while True:
            if pointer in links:
                next_ones = links.pop(pointer)
            else:
                break

            if previous_pointer in next_ones:
                if len(next_ones) > 1:
                    next_ones.remove(previous_pointer)

            previous_pointer = pointer

            if next_ones:
                pointer = next_ones[0]

            if pointer >= first_dum_pos and free_pos[pointer] is not None:
                prepend_indices.insert(0, free_pos[pointer])
        # if `prepend_indices` is void, it means there are no free indices
        # in the loop (and it can be shown that there must be a virtual index),
        # loops of virtual indices only contribute by a factor of two:
        if len(prepend_indices) == 0:
            resulting_coeff *= 2
        # otherwise, add the free indices in `prepend_indices` to
        # the `resulting_indices`:
        else:
            expr1 = prepend_indices
            expr2 = list(reversed(prepend_indices))
            resulting_indices = [expri + ri for ri in resulting_indices for expri in (expr1, expr2)]

    # sign correction, as described in Kahane's paper:
    resulting_coeff *= -1 if (number_of_contractions - connected_components + 1) % 2 else 1
    # power of two factor, as described in Kahane's paper:
    resulting_coeff *= 2**(number_of_contractions)

    # If `first_dum_pos` is not zero, there are leading free gamma matrices
    # in front of `expression`, so multiply by them, PRESERVING their
    # original order. BUG FIX: the previous implementation inserted
    # `free_pos[i]` at position 0 for i = 0..first_dum_pos-1, which reversed
    # the order of the leading uncontracted gamma matrices.
    resulting_indices = [free_pos[0:first_dum_pos] + ri for ri in resulting_indices]

    # Rebuild the tensor expression from the rearranged free indices.
    resulting_expr = S.Zero
    for i in resulting_indices:
        temp_expr = S.One
        for j in i:
            temp_expr *= GammaMatrix(j)
        resulting_expr += temp_expr

    t = resulting_coeff * resulting_expr
    t1 = None
    if isinstance(t, TensAdd):
        t1 = t.args[0]
    elif isinstance(t, TensMul):
        t1 = t
    if t1:
        pass
    else:
        # A purely scalar result stands for a multiple of the 4x4 identity.
        t = eye(4)*t
    return t
| venv/lib/python3.7/site-packages/sympy/physics/hep/gamma_matrices.py | 24,225 | Simplify single-line product of gamma matrices.
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex, _simplify_single_line
>>> from sympy.tensor.tensor import tensor_indices, TensorHead
>>> p = TensorHead('p', [LorentzIndex])
>>> i0,i1 = tensor_indices('i0:2', LorentzIndex)
>>> _simplify_single_line(G(i0)*G(i1)*p(-i1)*G(-i0)) + 2*G(i0)*p(-i0)
0
Evaluate the trace of a single gamma matrix line inside a ``TensExpr``.
Notes
=====
If there are ``DiracSpinorIndex.auto_left`` and ``DiracSpinorIndex.auto_right``
indices trace over them; otherwise traces are not implied (explain)
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex, _trace_single_line
>>> from sympy.tensor.tensor import tensor_indices, TensorHead
>>> p = TensorHead('p', [LorentzIndex])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
>>> _trace_single_line(G(i0)*G(i1))
4*metric(i0, i1)
>>> _trace_single_line(G(i0)*p(-i0)*G(i1)*p(-i1)) - 4*p(i0)*p(-i0)
0
Extract from a ``TensExpr`` all tensors with `component`.
Returns two tensor expressions:
* the first contains all ``Tensor`` of having `component`.
* the second contains all remaining.
trace of a single line of gamma matrices
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, gamma_trace, LorentzIndex
>>> from sympy.tensor.tensor import tensor_indices, tensor_heads
>>> p, q = tensor_heads('p, q', [LorentzIndex])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
>>> ps = p(i0)*G(-i0)
>>> qs = q(i0)*G(-i0)
>>> gamma_trace(G(i0)*G(i1))
4*metric(i0, i1)
>>> gamma_trace(ps*ps) - 4*p(i0)*p(-i0)
0
>>> gamma_trace(ps*qs + ps*ps) - 4*p(i0)*p(-i0) - 4*p(i0)*q(-i0)
0
This function cancels contracted elements in a product of four
dimensional gamma matrices, resulting in an expression equal to the given
one, without the contracted gamma matrices.
Parameters
==========
`expression` the tensor expression containing the gamma matrices to simplify.
Notes
=====
If spinor indices are given, the matrices must be given in
the order given in the product.
Algorithm
=========
The idea behind the algorithm is to use some well-known identities,
i.e., for contractions enclosing an even number of `\gamma` matrices
`\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N}} \gamma_\mu = 2 (\gamma_{a_{2N}} \gamma_{a_1} \cdots \gamma_{a_{2N-1}} + \gamma_{a_{2N-1}} \cdots \gamma_{a_1} \gamma_{a_{2N}} )`
for an odd number of `\gamma` matrices
`\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N+1}} \gamma_\mu = -2 \gamma_{a_{2N+1}} \gamma_{a_{2N}} \cdots \gamma_{a_{1}}`
Instead of repeatedly applying these identities to cancel out all contracted indices,
it is possible to recognize the links that would result from such an operation,
the problem is thus reduced to a simple rearrangement of free gamma matrices.
Examples
========
When using, always remember that the original expression coefficient
has to be handled separately
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex
>>> from sympy.physics.hep.gamma_matrices import kahane_simplify
>>> from sympy.tensor.tensor import tensor_indices
>>> i0, i1, i2 = tensor_indices('i0:3', LorentzIndex)
>>> ta = G(i0)*G(-i0)
>>> kahane_simplify(ta)
Matrix([
[4, 0, 0, 0],
[0, 4, 0, 0],
[0, 0, 4, 0],
[0, 0, 0, 4]])
>>> tb = G(i0)*G(i1)*G(-i0)
>>> kahane_simplify(tb)
-2*GammaMatrix(i1)
>>> t = G(i0)*G(-i0)
>>> kahane_simplify(t)
Matrix([
[4, 0, 0, 0],
[0, 4, 0, 0],
[0, 0, 4, 0],
[0, 0, 0, 4]])
>>> t = G(i0)*G(-i0)
>>> kahane_simplify(t)
Matrix([
[4, 0, 0, 0],
[0, 4, 0, 0],
[0, 0, 4, 0],
[0, 0, 0, 4]])
If there are no contractions, the same expression is returned
>>> tc = G(i0)*G(i1)
>>> kahane_simplify(tc)
GammaMatrix(i0)*GammaMatrix(i1)
References
==========
[1] Algorithm for Reducing Contracted Products of gamma Matrices,
Joseph Kahane, Journal of Mathematical Physics, Vol. 9, No. 10, October 1968.
simplify products ``G(i)*p(-i)*G(j)*p(-j) -> p(i)*p(-i)``
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex, simplify_gpgp
>>> from sympy.tensor.tensor import tensor_indices, tensor_heads
>>> p, q = tensor_heads('p, q', [LorentzIndex])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
>>> ps = p(i0)*G(-i0)
>>> qs = q(i0)*G(-i0)
>>> simplify_gpgp(ps*qs*qs)
GammaMatrix(-L_0)*p(L_0)*q(L_1)*q(-L_1)
Module to handle gamma matrices expressed as tensor objects.
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex
>>> from sympy.tensor.tensor import tensor_indices
>>> i = tensor_indices('i', LorentzIndex)
>>> G(i)
GammaMatrix(i)
Note that there is already an instance of GammaMatrixHead in four dimensions:
GammaMatrix, which is simply declare as
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix
>>> from sympy.tensor.tensor import tensor_indices
>>> i = tensor_indices('i', LorentzIndex)
>>> GammaMatrix(i)
GammaMatrix(i)
To access the metric tensor
>>> LorentzIndex.metric
metric(LorentzIndex,LorentzIndex)
DiracSpinorIndex = TensorIndexType('DiracSpinorIndex', dim=4, dummy_fmt="S") Collect all gamma matrices of the same dimension eye(4) t = t.replace(lambda x: x.is_Matrix, lambda x: 1) this would be better off with pattern matching gamma matirices are in a[i:j] find the open matrix indices and connect them: FIXME specific for d=4return TensMul.from_data(S.Zero, [], [], []) spinor_free = [_ for _ in expression.free_in_args if _[1] != 0] if len(spinor_free) == 2: spinor_free.sort(key=lambda x: x[2]) assert spinor_free[0][1] == 1 and spinor_free[-1][1] == 2 assert spinor_free[0][2] == 0 elif spinor_free: raise ValueError('spinor indices do not match') or GammaMatrixHead: no contractions in `expression`, just return it. find the `first_dum_pos`, i.e. the position of the first contracted gamma matrix, Kahane's algorithm as described in his paper requires the gamma matrix expression to start with a contracted gamma matrix, this is a workaround which ignores possible initial free indices, and re-adds them later. for p1, p2, a1, a2 in expression.dum_in_args: if p1 != 0 or p2 != 0: only Lorentz indices, skip Dirac indices: continue first_dum_pos = min(p1, p2) break `index_is_free` is a list of booleans, to identify index position and whether that index is free or dummy. `links` is a dictionary containing the graph described in Kahane's paper, to every key correspond one or two values, representing the linked indices. All values in `links` are integers, negative numbers are used in the case where it is necessary to insert gamma matrices between free indices, in order to make Kahane's algorithm work (see paper). `cum_sign` is a step variable to mark the sign of every index, see paper. `cum_sign_list` keeps storage for all `cum_sign` (every index). multiply `resulting_coeff` by the coefficient parameter, the rest of the algorithm ignores a scalar coefficient. initialize a list of lists of indices. 
The outer list will contain all additive tensor expressions, while the inner list will contain the free indices (rearranged according to the algorithm). start to count the `connected_components`, which together with the number of contractions, determines a -1 or +1 factor to be multiplied. First loop: here we fill `cum_sign_list`, and draw the links among consecutive indices (they are stored in `links`). Links among non-consecutive indices will be drawn later. if `expression` starts with free indices, they are ignored here; they are later added as they are to the beginning of all `resulting_indices` list of lists of indices. if previous index was free as well, draw an arch in `links`. Change the sign of the index (`cum_sign`) if the number of free indices preceding it is even. check if there are two consecutive dummy indices: in this case create virtual indices with negative position, these "virtual" indices represent the insertion of two gamma^0 matrices to separate consecutive dummy indices, as Kahane's algorithm requires dummy indices to be separated by free indices. The product of two gamma^0 matrices is unity, so the new expression being examined is the same as the original one. The previous loop has only created links between consecutive free indices, it is necessary to properly create links among dummy (contracted) indices, according to the rules described in Kahane's paper. There is only one exception to Kahane's rules: the negative indices, which handle the case of some consecutive free indices (Kahane's paper just describes dummy indices separated by free indices, hinting that free indices can be added without altering the expression result). get the positions of the two contracted indices: create Kahane's upper links, i.e. the upper arcs between dummy (i.e. 
contracted) indices: create Kahane's lower links, this corresponds to the arcs below the line described in the paper: first we move `pos1` and `pos2` according to the sign of the indices: otherwise, perform some checks before creating the lower arcs: make sure we are not exceeding the total number of indices: make sure we are not below the first dummy index in `expression`: check if the previous loop created "virtual" indices between dummy indices, in such a case relink `linkpos1` and `linkpos2`: move only if not next to free index: create the lower arcs: This loop starts from the `first_dum_pos` index (first dummy index) walks through the graph deleting the visited indices from `links`, it adds a gamma matrix for every free index in encounters, while it completely ignores dummy indices and virtual indices. The following loop removes the remaining connected components in `links`. If there are free indices inside a connected component, it gives a contribution to the resulting expression given by the factor `gamma_a gamma_b ... gamma_z + gamma_z ... gamma_b gamma_a`, in Kahanes's paper represented as {gamma_a, gamma_b, ... , gamma_z}, virtual indices are ignored. The variable `connected_components` is increased by one for every connected component this loop encounters. If the connected component has virtual and dummy indices only (no free indices), it contributes to `resulting_indices` by a factor of two. The multiplication by two is a result of the factor {gamma^0, gamma^0} = 2 I, as it appears in Kahane's paper. Note: curly brackets are meant as in the paper, as a generalized multi-element anticommutator! the inner loop erases the visited indices from `links`, and it adds all free indices to `prepend_indices` list, virtual indices are ignored. 
if `prepend_indices` is void, it means there are no free indices in the loop (and it can be shown that there must be a virtual index), loops of virtual indices only contribute by a factor of two: otherwise, add the free indices in `prepend_indices` to the `resulting_indices`: sign correction, as described in Kahane's paper: power of two factor, as described in Kahane's paper: If `first_dum_pos` is not zero, it means that there are trailing free gamma matrices in front of `expression`, so multiply by them: | 11,182 | en | 0.778061 |
"""
Test Contacts API Endpoint | Cannlytics API
Author: Keegan Skeate
Contact: <keegan@cannlytics.com>
Created: 7/19/2021
Updated: 7/19/2021
License: MIT License <https://opensource.org/licenses/MIT>
"""
import os
import requests
from dotenv import load_dotenv
# Test against the local development server.
BASE = 'http://127.0.0.1:8000/api'
# Uncomment to test with production server.
# BASE = 'https://console.cannlytics.com/api'

# Load your API key from the project's .env file.
load_dotenv('../../.env')
API_KEY = os.getenv('CANNLYTICS_API_KEY')

# Pass your API key through the authorization header as a bearer token.
HEADERS = {
    'Authorization': f'Bearer {API_KEY}',
    'Content-type': 'application/json',
}

# Identify the organization that you are working with.
ORG_ID = 'test-company'

# Define the endpoint.
ENDPOINT = 'contacts'
#------------------------------------------------------------------------------
# Create a contact.
#------------------------------------------------------------------------------

# Minimal contact record; unknown fields are left blank.
contact = {
    'address': '',
    'city': '',
    'contact_id': 'TEST',
    'county': '',
    'email': '',
    'latitude': '',
    'longitude': '',
    'organization': 'Cannlytics Test Contact',
    'phone': '',
    'state': '',
    'street': '',
    'website': '',
    'zip_code': ''
}
create_url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
create_response = requests.post(create_url, json=contact, headers=HEADERS)
assert create_response.status_code == 200
print('Created:', create_response.json()['data'])
#------------------------------------------------------------------------------
# Get contacts.
#------------------------------------------------------------------------------

# Retrieve all contacts for the organization. Note: a dead local
# `organization_id` assignment was removed here — the query string actually
# uses the module-level `ORG_ID`.
url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
response = requests.get(url, headers=HEADERS)
assert response.status_code == 200
data = response.json()['data']
print('Found:', len(data))
#------------------------------------------------------------------------------
# Update a contact.
#------------------------------------------------------------------------------

# Change the city and state of the test contact.
updates = {
    'contact_id': 'TEST',
    'city': 'Tulsa',
    'state': 'OK',
}
update_url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
update_response = requests.post(update_url, json=updates, headers=HEADERS)
assert update_response.status_code == 200
print('Updated:', update_response.json()['data'])
#------------------------------------------------------------------------------
# Delete a contact.
#------------------------------------------------------------------------------

# Remove the test contact by its ID.
payload = {'contact_id': 'TEST'}
delete_url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
delete_response = requests.delete(delete_url, json=payload, headers=HEADERS)
assert delete_response.status_code == 200
print('Deleted:', delete_response.json()['data'])
| tests/api/test_contacts_endpoint.py | 2,718 | Test Contacts API Endpoint | Cannlytics API
Author: Keegan Skeate
Contact: <keegan@cannlytics.com>
Created: 7/19/2021
Updated: 7/19/2021
License: MIT License <https://opensource.org/licenses/MIT>
Test using development server. Uncomment to test with production server. BASE = 'https://console.cannlytics.com/api' Load your API key. Pass your API key through the authorization header as a bearer token. Identify the organization that you are working with. Define the endpoint.------------------------------------------------------------------------------ Create a contact.------------------------------------------------------------------------------------------------------------------------------------------------------------ Get contacts.------------------------------------------------------------------------------------------------------------------------------------------------------------ Update a contact.------------------------------------------------------------------------------------------------------------------------------------------------------------ Delete a contact.------------------------------------------------------------------------------ | 1,170 | en | 0.412272 |
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysnmp/license.html
#
# ASN.1 source http://mibs.snmplabs.com:80/asn1/SNMPv2-TM
# Produced by pysmi-0.4.0 at Sun Feb 17 08:56:38 2019
#
# Parts of otherwise autogenerated MIB has been updated manually.
#
try:
    # Python 3 ships inet_ntop/inet_pton natively.
    from socket import inet_ntop, inet_pton, AF_INET
except ImportError:
    # Python 2 fallback: emulate the IPv4-only subset on top of
    # inet_ntoa/inet_aton; the address-family argument is ignored.
    from socket import inet_ntoa, inet_aton, AF_INET

    def inet_ntop(address_family, packed_ip):
        return inet_ntoa(packed_ip)

    def inet_pton(address_family, address_text):
        return inet_aton(address_text)
from pyasn1.compat.octets import int2oct
from pyasn1.compat.octets import oct2int
# This generated MIB module expects to be executed by pysnmp's MIB builder,
# which injects `mibBuilder` into the module namespace; when run directly
# there is nothing useful to do, so print the module doc and exit.
if 'mibBuilder' not in globals():
    import sys
    sys.stderr.write(__doc__)
    sys.exit(1)
# Base ASN.1 types, resolved through the MIB builder.
(Integer,
 OctetString,
 ObjectIdentifier) = mibBuilder.importSymbols(
    "ASN1",
    "Integer",
    "OctetString",
    "ObjectIdentifier")

# Named-value support for enumerated INTEGER types.
(NamedValues,) = mibBuilder.importSymbols(
    "ASN1-ENUMERATION",
    "NamedValues")

# Constraint classes used below to express SIZE refinements.
(ConstraintsIntersection,
 SingleValueConstraint,
 ValueRangeConstraint,
 ValueSizeConstraint,
 ConstraintsUnion) = mibBuilder.importSymbols(
    "ASN1-REFINEMENT",
    "ConstraintsIntersection",
    "SingleValueConstraint",
    "ValueRangeConstraint",
    "ValueSizeConstraint",
    "ConstraintsUnion")

# SMI base classes and well-known OID anchors (snmpDomains, snmpModules, ...).
(Counter64,
 iso,
 NotificationType,
 ObjectIdentity,
 Bits,
 ModuleIdentity,
 TimeTicks,
 Counter32,
 IpAddress,
 snmpProxys,
 MibScalar,
 MibTable,
 MibTableRow,
 MibTableColumn,
 Gauge32,
 Unsigned32,
 snmpDomains,
 Integer32,
 MibIdentifier,
 snmpModules) = mibBuilder.importSymbols(
    "SNMPv2-SMI",
    "Counter64",
    "iso",
    "NotificationType",
    "ObjectIdentity",
    "Bits",
    "ModuleIdentity",
    "TimeTicks",
    "Counter32",
    "IpAddress",
    "snmpProxys",
    "MibScalar",
    "MibTable",
    "MibTableRow",
    "MibTableColumn",
    "Gauge32",
    "Unsigned32",
    "snmpDomains",
    "Integer32",
    "MibIdentifier",
    "snmpModules")

# Base class for the textual conventions defined in this module.
(TextualConvention,) = mibBuilder.importSymbols(
    "SNMPv2-TC",
    "TextualConvention")
# Module identity for the SNMP transport-mappings MIB (part of RFC 3417,
# per the description text below).
snmpv2tm = ModuleIdentity(
    (1, 3, 6, 1, 6, 3, 19)
)
snmpv2tm.setRevisions(
        ("2002-10-16 00:00",
         "1996-01-01 00:00",
         "1993-04-01 00:00")
)
snmpv2tm.setLastUpdated("200210160000Z")
if mibBuilder.loadTexts:
    snmpv2tm.setOrganization("""\
IETF SNMPv3 Working Group
""")
snmpv2tm.setContactInfo("""\
WG-EMail: snmpv3@lists.tislabs.com Subscribe: snmpv3-request@lists.tislabs.com
Co-Chair: Russ Mundy Network Associates Laboratories postal: 15204 Omega Drive,
Suite 300 Rockville, MD 20850-4601 USA EMail: mundy@tislabs.com phone: +1 301
947-7107 Co-Chair: David Harrington Enterasys Networks postal: 35 Industrial
Way P. O. Box 5005 Rochester, NH 03866-5005 USA EMail: dbh@enterasys.com phone:
+1 603 337-2614 Editor: Randy Presuhn BMC Software, Inc. postal: 2141 North
First Street San Jose, CA 95131 USA EMail: randy_presuhn@bmc.com phone: +1 408
546-1006
""")
if mibBuilder.loadTexts:
    snmpv2tm.setDescription("""\
The MIB module for SNMP transport mappings. Copyright (C) The Internet Society
(2002). This version of this MIB module is part of RFC 3417; see the RFC itself
for full legal notices.
""")
class SnmpUDPAddress(TextualConvention, OctetString):
    # Textual convention for a UDP-over-IPv4 transport address, encoded as
    # six octets: 1-4 the IP address, 5-6 the UDP port (network byte order).
    status = "current"
    displayHint = "1d.1d.1d.1d/2d"
    subtypeSpec = OctetString.subtypeSpec
    subtypeSpec += ConstraintsUnion(
        ValueSizeConstraint(6, 6),
    )

    if mibBuilder.loadTexts:
        description = """\
Represents a UDP over IPv4 address: octets contents encoding 1-4 IP-address
network-byte order 5-6 UDP-port network-byte order
"""

    fixedLength = 6

    def prettyIn(self, value):
        # Accept an (ip-string, port) tuple in addition to raw octets.
        if isinstance(value, tuple):
            # Wild hack -- need to implement TextualConvention.prettyIn
            value = inet_pton(AF_INET, value[0]) + int2oct((value[1] >> 8) & 0xff) + int2oct(value[1] & 0xff)
        return OctetString.prettyIn(self, value)

    # Socket address syntax coercion
    def __asSocketAddress(self):
        # Decode the six octets into an (ip-string, port) tuple, caching the
        # result on the instance. BUG FIX: the previous implementation tested
        # hasattr(self, '__tuple_value') with the UNMANGLED name while the
        # assignment below stores the attribute under its name-mangled form,
        # so the cache never hit and the tuple was rebuilt on every call.
        # EAFP on the (automatically mangled) attribute fixes the caching.
        try:
            return self.__tuple_value
        except AttributeError:
            v = self.asOctets()
            self.__tuple_value = (
                inet_ntop(AF_INET, v[:4]),
                oct2int(v[4]) << 8 | oct2int(v[5])
            )
            return self.__tuple_value

    def __iter__(self):
        # Allow tuple-style unpacking: host, port = addr
        return iter(self.__asSocketAddress())

    def __getitem__(self, item):
        # Allow indexing like a socket address tuple: addr[0], addr[1]
        return self.__asSocketAddress()[item]
class SnmpOSIAddress(TextualConvention, OctetString):
    # Textual convention for an OSI transport address; the `description`
    # text below documents the length-prefixed NSAP + TSEL octet layout.
    status = "current"
    displayHint = "*1x:/1x:"
    subtypeSpec = OctetString.subtypeSpec
    subtypeSpec += ConstraintsUnion(
        ValueSizeConstraint(1, 1),
        ValueSizeConstraint(4, 85),
    )
    if mibBuilder.loadTexts:
        description = """\
Represents an OSI transport-address: octets contents encoding 1 length of NSAP
'n' as an unsigned-integer (either 0 or from 3 to 20) 2..(n+1) NSAP concrete
binary representation (n+2)..m TSEL string of (up to 64) octets
"""
class SnmpNBPAddress(TextualConvention, OctetString):
    # Textual convention for an NBP name; the `description` text below
    # documents the length-prefixed object/type/zone string layout.
    status = "current"
    subtypeSpec = OctetString.subtypeSpec
    subtypeSpec += ConstraintsUnion(
        ValueSizeConstraint(3, 99),
    )
    if mibBuilder.loadTexts:
        description = """\
Represents an NBP name: octets contents encoding 1 length of object 'n' as an
unsigned integer 2..(n+1) object string of (up to 32) octets n+2 length of type
'p' as an unsigned integer (n+3)..(n+2+p) type string of (up to 32) octets
n+3+p length of zone 'q' as an unsigned integer (n+4+p)..(n+3+p+q) zone string
of (up to 32) octets For comparison purposes, strings are case-insensitive. All
strings may contain any octet other than 255 (hex ff).
"""
class SnmpIPXAddress(TextualConvention, OctetString):
    """Textual convention for an IPX transport address (RFC 3417)."""
    status = "current"
    displayHint = "4x.1x:1x:1x:1x:1x:1x.2d"
    # Fixed 12 octets: network (4) + node (6) + socket (2).
    subtypeSpec = OctetString.subtypeSpec
    subtypeSpec += ConstraintsUnion(
        ValueSizeConstraint(12, 12),
    )
    if mibBuilder.loadTexts:
        description = """\
Represents an IPX address: octets contents encoding 1-4 network-number network-
byte order 5-10 physical-address network-byte order 11-12 socket-number
network-byte order
"""
    fixedLength = 12
# Transport domain OID registrations under snmpDomains (1.3.6.1.6.1) and
# snmpProxys (1.3.6.1.6.2) as defined by RFC 3417.  The loadTexts guards
# skip attaching human-readable text unless the MIB builder asked for it.
_SnmpUDPDomain_ObjectIdentity = ObjectIdentity
snmpUDPDomain = _SnmpUDPDomain_ObjectIdentity(
    (1, 3, 6, 1, 6, 1, 1)
)
if mibBuilder.loadTexts:
    snmpUDPDomain.setStatus("current")
if mibBuilder.loadTexts:
    snmpUDPDomain.setDescription("""\
The SNMP over UDP over IPv4 transport domain. The corresponding transport
address is of type SnmpUDPAddress.
""")
# SNMP over OSI connectionless-mode network service.
_SnmpCLNSDomain_ObjectIdentity = ObjectIdentity
snmpCLNSDomain = _SnmpCLNSDomain_ObjectIdentity(
    (1, 3, 6, 1, 6, 1, 2)
)
if mibBuilder.loadTexts:
    snmpCLNSDomain.setStatus("current")
if mibBuilder.loadTexts:
    snmpCLNSDomain.setDescription("""\
The SNMP over CLNS transport domain. The corresponding transport address is of
type SnmpOSIAddress.
""")
# SNMP over OSI connection-oriented network service.
_SnmpCONSDomain_ObjectIdentity = ObjectIdentity
snmpCONSDomain = _SnmpCONSDomain_ObjectIdentity(
    (1, 3, 6, 1, 6, 1, 3)
)
if mibBuilder.loadTexts:
    snmpCONSDomain.setStatus("current")
if mibBuilder.loadTexts:
    snmpCONSDomain.setDescription("""\
The SNMP over CONS transport domain. The corresponding transport address is of
type SnmpOSIAddress.
""")
# SNMP over AppleTalk DDP.
_SnmpDDPDomain_ObjectIdentity = ObjectIdentity
snmpDDPDomain = _SnmpDDPDomain_ObjectIdentity(
    (1, 3, 6, 1, 6, 1, 4)
)
if mibBuilder.loadTexts:
    snmpDDPDomain.setStatus("current")
if mibBuilder.loadTexts:
    snmpDDPDomain.setDescription("""\
The SNMP over DDP transport domain. The corresponding transport address is of
type SnmpNBPAddress.
""")
# SNMP over Novell IPX.
_SnmpIPXDomain_ObjectIdentity = ObjectIdentity
snmpIPXDomain = _SnmpIPXDomain_ObjectIdentity(
    (1, 3, 6, 1, 6, 1, 5)
)
if mibBuilder.loadTexts:
    snmpIPXDomain.setStatus("current")
if mibBuilder.loadTexts:
    snmpIPXDomain.setDescription("""\
The SNMP over IPX transport domain. The corresponding transport address is of
type SnmpIPXAddress.
""")
# Proxy subtree for the deprecated SNMPv1 (RFC 1157) transport domain.
_Rfc1157Proxy_ObjectIdentity = ObjectIdentity
rfc1157Proxy = _Rfc1157Proxy_ObjectIdentity(
    (1, 3, 6, 1, 6, 2, 1)
)
_Rfc1157Domain_ObjectIdentity = ObjectIdentity
rfc1157Domain = _Rfc1157Domain_ObjectIdentity(
    (1, 3, 6, 1, 6, 2, 1, 1)
)
if mibBuilder.loadTexts:
    rfc1157Domain.setStatus("deprecated")
if mibBuilder.loadTexts:
    rfc1157Domain.setDescription("""\
The transport domain for SNMPv1 over UDP over IPv4. The corresponding transport
address is of type SnmpUDPAddress.
""")
# Publish this module's symbols so other MIB modules can IMPORT them by
# name from "SNMPv2-TM".
mibBuilder.exportSymbols(
    "SNMPv2-TM",
    **{"SnmpUDPAddress": SnmpUDPAddress,
       "SnmpOSIAddress": SnmpOSIAddress,
       "SnmpNBPAddress": SnmpNBPAddress,
       "SnmpIPXAddress": SnmpIPXAddress,
       "snmpUDPDomain": snmpUDPDomain,
       "snmpCLNSDomain": snmpCLNSDomain,
       "snmpCONSDomain": snmpCONSDomain,
       "snmpDDPDomain": snmpDDPDomain,
       "snmpIPXDomain": snmpIPXDomain,
       "rfc1157Proxy": rfc1157Proxy,
       "rfc1157Domain": rfc1157Domain,
       "snmpv2tm": snmpv2tm}
)
| pysnmp/smi/mibs/SNMPv2-TM.py | 8,937 | This file is part of pysnmp software. Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com> License: http://snmplabs.com/pysnmp/license.html ASN.1 source http://mibs.snmplabs.com:80/asn1/SNMPv2-TM Produced by pysmi-0.4.0 at Sun Feb 17 08:56:38 2019 Parts of otherwise autogenerated MIB has been updated manually. Wild hack -- need to implement TextualConvention.prettyIn Socket address syntax coercion | 405 | en | 0.697784 |
# coding=utf-8
"""Tests for certbot._internal.main."""
# pylint: disable=too-many-lines
import datetime
from importlib import reload as reload_module
import io
import itertools
import json
import shutil
import sys
import tempfile
import traceback
import unittest
from typing import List
import josepy as jose
import pytz
from certbot import crypto_util
from certbot import errors
from certbot import interfaces # pylint: disable=unused-import
from certbot import util
from certbot._internal import account
from certbot._internal import cli
from certbot._internal import configuration
from certbot._internal import constants
from certbot._internal import main
from certbot._internal import updater
from certbot._internal.plugins import disco
from certbot._internal.plugins import manual
from certbot._internal.plugins import null
from certbot.compat import filesystem
from certbot.compat import os
from certbot.plugins import enhancements
import certbot.tests.util as test_util
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock
# Shared test fixtures: paths to key/cert vectors resolved via
# certbot.tests.util.
CERT_PATH = test_util.vector_path('cert_512.pem')
CERT = test_util.vector_path('cert_512.pem')  # alias of CERT_PATH
CSR = test_util.vector_path('csr_512.der')
KEY = test_util.vector_path('rsa256_key.pem')
# Loaded JWK object (not just a path) for account-related tests.
JWK = jose.JWKRSA.load(test_util.load_vector('rsa512_key.pem'))
RSA2048_KEY_PATH = test_util.vector_path('rsa2048_key.pem')
SS_CERT_PATH = test_util.vector_path('cert_2048.pem')
class TestHandleCerts(unittest.TestCase):
    """Test for certbot._internal.main._handle_* methods"""

    @mock.patch("certbot._internal.main._handle_unexpected_key_type_migration")
    def test_handle_identical_cert_request_pending(self, mock_handle_migration):
        """An identical cert that is not yet deployed is reinstalled."""
        mock_lineage = mock.Mock()
        mock_lineage.ensure_deployed.return_value = False
        # pylint: disable=protected-access
        ret = main._handle_identical_cert_request(mock.Mock(), mock_lineage)
        self.assertEqual(ret, ("reinstall", mock_lineage))
        self.assertTrue(mock_handle_migration.called)

    @mock.patch("certbot._internal.main._handle_unexpected_key_type_migration")
    def test_handle_subset_cert_request(self, mock_handle_migration):
        """Requesting a subset of the lineage's domains triggers a renewal."""
        mock_config = mock.Mock()
        mock_config.expand = True
        mock_lineage = mock.Mock()
        mock_lineage.names.return_value = ["dummy1", "dummy2"]
        ret = main._handle_subset_cert_request(mock_config, ["dummy1"], mock_lineage)
        self.assertEqual(ret, ("renew", mock_lineage))
        self.assertTrue(mock_handle_migration.called)

    @mock.patch("certbot._internal.main.cli.set_by_cli")
    def test_handle_unexpected_key_type_migration(self, mock_set):
        """Changing key type requires both --cert-name and --key-type flags."""
        config = mock.Mock()
        config.key_type = "rsa"
        cert = mock.Mock()
        cert.private_key_type = "ecdsa"

        # Both flags set explicitly on the CLI: migration is allowed.
        mock_set.return_value = True
        main._handle_unexpected_key_type_migration(config, cert)

        # assertIn gives clearer failure output than assertTrue(x in y).
        expected_error = "Please provide both --cert-name and --key-type"
        # Neither flag set.
        mock_set.return_value = False
        with self.assertRaises(errors.Error) as raised:
            main._handle_unexpected_key_type_migration(config, cert)
        self.assertIn(expected_error, str(raised.exception))
        # --key-type set but --cert-name missing.
        mock_set.side_effect = lambda var: var != "certname"
        with self.assertRaises(errors.Error) as raised:
            main._handle_unexpected_key_type_migration(config, cert)
        self.assertIn(expected_error, str(raised.exception))
        # --cert-name set but --key-type missing.
        mock_set.side_effect = lambda var: var != "key_type"
        with self.assertRaises(errors.Error) as raised:
            main._handle_unexpected_key_type_migration(config, cert)
        self.assertIn(expected_error, str(raised.exception))
class RunTest(test_util.ConfigTestCase):
    """Tests for certbot._internal.main.run."""

    def setUp(self):
        super().setUp()
        self.domain = 'example.org'
        # Attribute name -> patch target.  Patches start in declaration
        # order and are stopped automatically via addCleanup.
        targets = (
            ('mock_auth', 'certbot._internal.main._get_and_save_cert'),
            ('mock_success_installation',
             'certbot._internal.main.display_ops.success_installation'),
            ('mock_success_renewal',
             'certbot._internal.main.display_ops.success_renewal'),
            ('mock_init', 'certbot._internal.main._init_le_client'),
            ('mock_suggest_donation',
             'certbot._internal.main._suggest_donation_if_appropriate'),
            ('mock_report_cert', 'certbot._internal.main._report_new_cert'),
            ('mock_find_cert', 'certbot._internal.main._find_cert'),
            ('mock_subscription', 'certbot._internal.eff.handle_subscription'),
        )
        for attr, target in targets:
            patcher = mock.patch(target)
            setattr(self, attr, patcher.start())
            self.addCleanup(patcher.stop)

    def _call(self):
        """Invoke main.run() for self.domain with webroot/null plugins."""
        plugins = disco.PluginsRegistry.find_all()
        cli_args = ['-a', 'webroot', '-i', 'null', '-d', self.domain]
        config = configuration.NamespaceConfig(
            cli.prepare_and_parse_args(plugins, cli_args))
        from certbot._internal.main import run
        run(config, plugins)

    def test_newcert_success(self):
        """Issuing a brand-new certificate reports a successful installation."""
        self.mock_auth.return_value = mock.Mock()
        self.mock_find_cert.return_value = (True, None)
        self._call()
        self.mock_success_installation.assert_called_once_with([self.domain])

    def test_reinstall_success(self):
        """Reinstalling an existing certificate reports success."""
        self.mock_auth.return_value = mock.Mock()
        self.mock_find_cert.return_value = (False, mock.Mock())
        self._call()
        self.mock_success_installation.assert_called_once_with([self.domain])

    def test_renewal_success(self):
        """Renewing an existing lineage reports a successful renewal."""
        self.mock_auth.return_value = mock.Mock()
        self.mock_find_cert.return_value = (True, mock.Mock())
        self._call()
        self.mock_success_renewal.assert_called_once_with([self.domain])

    @mock.patch('certbot._internal.main.plug_sel.choose_configurator_plugins')
    def test_run_enhancement_not_supported(self, mock_choose):
        """run() rejects enhancements the chosen installer cannot provide."""
        mock_choose.return_value = (null.Installer(self.config, "null"), None)
        plugins = disco.PluginsRegistry.find_all()
        self.config.auto_hsts = True
        with self.assertRaises(errors.NotSupportedError):
            main.run(self.config, plugins)
class CertonlyTest(unittest.TestCase):
    """Tests for certbot._internal.main.certonly."""

    def setUp(self):
        patcher = test_util.patch_get_utility()
        self.get_utility_patch = patcher
        self.mock_get_utility = patcher.start()

    def tearDown(self):
        self.get_utility_patch.stop()

    def _call(self, args):
        """Run main.certonly() with client machinery mocked out."""
        plugins = disco.PluginsRegistry.find_all()
        config = configuration.NamespaceConfig(
            cli.prepare_and_parse_args(plugins, args))
        with mock.patch('certbot._internal.main._init_le_client') as mock_init, \
                mock.patch('certbot._internal.main._suggest_donation_if_appropriate'), \
                mock.patch('certbot._internal.eff.handle_subscription'):
            main.certonly(config, plugins)
        return mock_init()  # returns the client

    @mock.patch('certbot._internal.main._find_cert')
    @mock.patch('certbot._internal.main._get_and_save_cert')
    @mock.patch('certbot._internal.main._report_new_cert')
    def test_no_reinstall_text_pause(self, unused_report, mock_auth,
                                     mock_find_cert):
        """Issuing a new cert must not display a blocking pause."""
        notification = self.mock_get_utility().notification
        notification.side_effect = self._assert_no_pause
        mock_auth.return_value = mock.Mock()
        mock_find_cert.return_value = (False, None)
        self._call(['certonly', '--webroot', '-d', 'example.com'])

    def _assert_no_pause(self, message, pause=True):  # pylint: disable=unused-argument
        self.assertFalse(pause)

    @mock.patch('certbot._internal.cert_manager.lineage_for_certname')
    @mock.patch('certbot._internal.cert_manager.domains_for_certname')
    @mock.patch('certbot._internal.renewal.renew_cert')
    @mock.patch('certbot._internal.main._handle_unexpected_key_type_migration')
    @mock.patch('certbot._internal.main._report_new_cert')
    def test_find_lineage_for_domains_and_certname(self, mock_report_cert,
            mock_handle_type, mock_renew_cert, mock_domains, mock_lineage):
        """Existing lineages are renewed, or expanded after confirmation."""
        domains = ['example.com', 'test.org']
        mock_domains.return_value = domains
        mock_lineage.names.return_value = domains

        def check_call_counts(expected):
            for mocked in (mock_lineage, mock_domains, mock_renew_cert,
                           mock_report_cert, mock_handle_type):
                self.assertEqual(mocked.call_count, expected)

        self._call('certonly --webroot -d example.com -d test.org '
                   '--cert-name example.com'.split())
        check_call_counts(1)
        # user confirms updating lineage with new domains
        self._call('certonly --webroot -d example.com -d test.com '
                   '--cert-name example.com'.split())
        check_call_counts(2)
        # error in _ask_user_to_confirm_new_names
        self.mock_get_utility().yesno.return_value = False
        with self.assertRaises(errors.ConfigurationError):
            self._call('certonly --webroot -d example.com -d test.com '
                       '--cert-name example.com'.split())

    @mock.patch('certbot._internal.cert_manager.domains_for_certname')
    @mock.patch('certbot.display.ops.choose_names')
    @mock.patch('certbot._internal.cert_manager.lineage_for_certname')
    @mock.patch('certbot._internal.main._report_new_cert')
    def test_find_lineage_for_domains_new_certname(self, mock_report_cert,
            mock_lineage, mock_choose_names, mock_domains_for_certname):
        """An unknown cert name leads to creation of a fresh lineage."""
        mock_lineage.return_value = None
        # no lineage with this name but we specified domains so create a new cert
        self._call('certonly --webroot -d example.com -d test.com '
                   '--cert-name example.com'.split())
        self.assertEqual(mock_lineage.call_count, 1)
        self.assertEqual(mock_report_cert.call_count, 1)
        # no lineage with this name and we didn't give domains
        mock_choose_names.return_value = ["somename"]
        mock_domains_for_certname.return_value = None
        self._call('certonly --webroot --cert-name example.com'.split())
        self.assertIs(mock_choose_names.called, True)
class FindDomainsOrCertnameTest(unittest.TestCase):
    """Tests for certbot._internal.main._find_domains_or_certname."""

    @mock.patch('certbot.display.ops.choose_names')
    def test_display_ops(self, mock_choose_names):
        """With no domains or certname, the user is prompted for names."""
        cfg = mock.Mock(domains=None, certname=None)
        mock_choose_names.return_value = "domainname"
        # pylint: disable=protected-access
        result = main._find_domains_or_certname(cfg, None)
        self.assertEqual(result, ("domainname", None))

    @mock.patch('certbot.display.ops.choose_names')
    def test_no_results(self, mock_choose_names):
        """An empty name selection raises an error."""
        cfg = mock.Mock(domains=None, certname=None)
        mock_choose_names.return_value = []
        # pylint: disable=protected-access
        with self.assertRaises(errors.Error):
            main._find_domains_or_certname(cfg, None)

    @mock.patch('certbot._internal.cert_manager.domains_for_certname')
    def test_grab_domains(self, mock_domains):
        """A certname without explicit domains pulls the lineage's domains."""
        cfg = mock.Mock(domains=None, certname="one.com")
        mock_domains.return_value = ["one.com", "two.com"]
        # pylint: disable=protected-access
        result = main._find_domains_or_certname(cfg, None)
        self.assertEqual(result, (["one.com", "two.com"], "one.com"))
class RevokeTest(test_util.TempDirTestCase):
    """Tests for certbot._internal.main.revoke."""
    def setUp(self):
        """Copy a cert into the temp dir and mock out ACME/client plumbing."""
        super().setUp()
        shutil.copy(CERT_PATH, self.tempdir)
        self.tmp_cert_path = os.path.abspath(os.path.join(self.tempdir, 'cert_512.pem'))
        patches = [
            mock.patch('acme.client.BackwardsCompatibleClientV2'),
            mock.patch('certbot._internal.client.Client'),
            mock.patch('certbot._internal.main._determine_account'),
            mock.patch('certbot._internal.main.display_ops.success_revocation')
        ]
        # NOTE: the indices below must stay aligned with the list above.
        self.mock_acme_client = patches[0].start()
        patches[1].start()
        self.mock_determine_account = patches[2].start()
        self.mock_success_revoke = patches[3].start()
        for patch in patches:
            self.addCleanup(patch.stop)
        from certbot._internal.account import Account
        # A minimal registered account with a fixed creation timestamp.
        self.regr = mock.MagicMock()
        self.meta = Account.Meta(
            creation_host="test.certbot.org",
            creation_dt=datetime.datetime(
                2015, 7, 4, 14, 4, 10, tzinfo=pytz.UTC))
        self.acc = Account(self.regr, JWK, self.meta)
        self.mock_determine_account.return_value = (self.acc, None)
    def _call(self, args=None):
        """Run 'certbot revoke'; defaults to revoking self.tmp_cert_path."""
        if not args:
            args = 'revoke --cert-path={0} '
            args = args.format(self.tmp_cert_path).split()
        cli.set_by_cli.detector = None  # required to reset set_by_cli state
        plugins = disco.PluginsRegistry.find_all()
        config = configuration.NamespaceConfig(
            cli.prepare_and_parse_args(plugins, args))
        from certbot._internal.main import revoke
        revoke(config, plugins)
    @mock.patch('certbot._internal.main._delete_if_appropriate')
    @mock.patch('certbot._internal.main.client.acme_client')
    def test_revoke_with_reason(self, mock_acme_client,
            mock_delete_if_appropriate):
        """Every supported --reason (lower and upper case) maps to its code."""
        mock_delete_if_appropriate.return_value = False
        mock_revoke = mock_acme_client.BackwardsCompatibleClientV2().revoke
        expected = []
        for reason, code in constants.REVOCATION_REASONS.items():
            args = 'revoke --cert-path={0} --reason {1}'.format(self.tmp_cert_path, reason).split()
            self._call(args)
            expected.append(mock.call(mock.ANY, code))
            args = 'revoke --cert-path={0} --reason {1}'.format(self.tmp_cert_path,
                reason.upper()).split()
            self._call(args)
            expected.append(mock.call(mock.ANY, code))
        self.assertEqual(expected, mock_revoke.call_args_list)
    @mock.patch('certbot._internal.main._delete_if_appropriate')
    @mock.patch('certbot._internal.storage.RenewableCert')
    @mock.patch('certbot._internal.storage.renewal_file_for_certname')
    def test_revoke_by_certname(self, unused_mock_renewal_file_for_certname,
            mock_cert, mock_delete_if_appropriate):
        """Revoking by --cert-name uses the server stored in the lineage."""
        mock_cert.return_value = mock.MagicMock(cert_path=self.tmp_cert_path,
            server="https://acme.example")
        args = 'revoke --cert-name=example.com'.split()
        mock_delete_if_appropriate.return_value = False
        self._call(args)
        self.mock_acme_client.assert_called_once_with(mock.ANY, mock.ANY, 'https://acme.example')
        self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
    @mock.patch('certbot._internal.main._delete_if_appropriate')
    @mock.patch('certbot._internal.storage.RenewableCert')
    @mock.patch('certbot._internal.storage.renewal_file_for_certname')
    def test_revoke_by_certname_and_server(self, unused_mock_renewal_file_for_certname,
            mock_cert, mock_delete_if_appropriate):
        """Revoking with --server should use the server from the CLI"""
        mock_cert.return_value = mock.MagicMock(cert_path=self.tmp_cert_path,
            server="https://acme.example")
        args = 'revoke --cert-name=example.com --server https://other.example'.split()
        mock_delete_if_appropriate.return_value = False
        self._call(args)
        self.mock_acme_client.assert_called_once_with(mock.ANY, mock.ANY, 'https://other.example')
        self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
    @mock.patch('certbot._internal.main._delete_if_appropriate')
    @mock.patch('certbot._internal.storage.RenewableCert')
    @mock.patch('certbot._internal.storage.renewal_file_for_certname')
    def test_revoke_by_certname_empty_server(self, unused_mock_renewal_file_for_certname,
            mock_cert, mock_delete_if_appropriate):
        """Revoking with --cert-name where the lineage server is empty shouldn't crash """
        mock_cert.return_value = mock.MagicMock(cert_path=self.tmp_cert_path, server=None)
        args = 'revoke --cert-name=example.com'.split()
        mock_delete_if_appropriate.return_value = False
        self._call(args)
        # Falls back to the default ACME server when the lineage has none.
        self.mock_acme_client.assert_called_once_with(
            mock.ANY, mock.ANY, constants.CLI_DEFAULTS['server'])
        self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
    @mock.patch('certbot._internal.main._delete_if_appropriate')
    def test_revocation_success(self, mock_delete_if_appropriate):
        """A plain revocation succeeds and reports success once."""
        self._call()
        # NOTE(review): this return_value is set after _call(); it looks
        # like it has no effect here -- confirm intent before relying on it.
        mock_delete_if_appropriate.return_value = False
        self.mock_success_revoke.assert_called_once_with(self.tmp_cert_path)
    def test_revocation_error(self):
        """ACME client errors propagate and success is never reported."""
        from acme import errors as acme_errors
        self.mock_acme_client.side_effect = acme_errors.ClientError()
        self.assertRaises(acme_errors.ClientError, self._call)
        self.mock_success_revoke.assert_not_called()
    @mock.patch('certbot._internal.main._delete_if_appropriate')
    @mock.patch('certbot._internal.cert_manager.delete')
    @test_util.patch_get_utility()
    def test_revocation_with_prompt(self, mock_get_utility,
            mock_delete, mock_delete_if_appropriate):
        """Declining the deletion prompt leaves the certificate in place."""
        mock_get_utility().yesno.return_value = False
        mock_delete_if_appropriate.return_value = False
        self._call()
        self.assertFalse(mock_delete.called)
class DeleteIfAppropriateTest(test_util.ConfigTestCase):
    """Tests for certbot._internal.main._delete_if_appropriate """
    def _call(self, mock_config):
        """Invoke the function under test with the given config."""
        from certbot._internal.main import _delete_if_appropriate
        _delete_if_appropriate(mock_config)
    def _test_delete_opt_out_common(self):
        """Shared check: when the user opts out, nothing is deleted."""
        with mock.patch('certbot._internal.cert_manager.delete') as mock_delete:
            self._call(self.config)
        mock_delete.assert_not_called()
    @test_util.patch_get_utility()
    def test_delete_flag_opt_out(self, unused_mock_get_utility):
        """--no-delete-after-revoke skips deletion without prompting."""
        self.config.delete_after_revoke = False
        self._test_delete_opt_out_common()
    @test_util.patch_get_utility()
    def test_delete_prompt_opt_out(self, mock_get_utility):
        """Answering 'no' at the prompt skips deletion."""
        util_mock = mock_get_utility()
        util_mock.yesno.return_value = False
        self._test_delete_opt_out_common()
    # NOTE: mock arguments are in reverse order of the decorator stack
    # (bottom-most decorator supplies the first argument).
    @mock.patch("certbot._internal.main.logger.warning")
    @mock.patch('certbot._internal.storage.renewal_file_for_certname')
    @mock.patch('certbot._internal.cert_manager.delete')
    @mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
    @mock.patch('certbot._internal.storage.full_archive_path')
    @mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
    @test_util.patch_get_utility()
    def test_overlapping_archive_dirs(self, mock_get_utility,
            mock_cert_path_to_lineage, mock_archive,
            mock_match_and_check_overlaps, mock_delete,
            mock_renewal_file_for_certname, mock_warning):
        """An overlapping archive dir aborts deletion with a warning."""
        # pylint: disable = unused-argument
        config = self.config
        config.cert_path = "/some/reasonable/path"
        config.certname = ""
        mock_cert_path_to_lineage.return_value = "example.com"
        mock_match_and_check_overlaps.side_effect = errors.OverlappingMatchFound()
        self._call(config)
        mock_delete.assert_not_called()
        self.assertEqual(mock_warning.call_count, 1)
    @mock.patch('certbot._internal.storage.renewal_file_for_certname')
    @mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
    @mock.patch('certbot._internal.storage.full_archive_path')
    @mock.patch('certbot._internal.cert_manager.delete')
    @mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
    @test_util.patch_get_utility()
    def test_cert_path_only(self, mock_get_utility,
            mock_cert_path_to_lineage, mock_delete, mock_archive,
            mock_overlapping_archive_dirs, mock_renewal_file_for_certname):
        """Deletion proceeds when only --cert-path is given (no certname)."""
        # pylint: disable = unused-argument
        config = self.config
        config.cert_path = "/some/reasonable/path"
        config.certname = ""
        mock_cert_path_to_lineage.return_value = "example.com"
        mock_overlapping_archive_dirs.return_value = False
        self._call(config)
        self.assertEqual(mock_delete.call_count, 1)
    @mock.patch('certbot._internal.storage.renewal_file_for_certname')
    @mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
    @mock.patch('certbot._internal.storage.full_archive_path')
    @mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
    @mock.patch('certbot._internal.cert_manager.delete')
    @test_util.patch_get_utility()
    def test_noninteractive_deletion(self, mock_get_utility, mock_delete,
            mock_cert_path_to_lineage, mock_full_archive_dir,
            mock_match_and_check_overlaps, mock_renewal_file_for_certname):
        """In noninteractive mode deletion happens without a prompt."""
        # pylint: disable = unused-argument
        config = self.config
        config.namespace.noninteractive_mode = True
        config.cert_path = "/some/reasonable/path"
        config.certname = ""
        mock_cert_path_to_lineage.return_value = "example.com"
        mock_full_archive_dir.return_value = ""
        mock_match_and_check_overlaps.return_value = ""
        self._call(config)
        self.assertEqual(mock_delete.call_count, 1)
    @mock.patch('certbot._internal.storage.renewal_file_for_certname')
    @mock.patch('certbot._internal.cert_manager.match_and_check_overlaps')
    @mock.patch('certbot._internal.storage.full_archive_path')
    @mock.patch('certbot._internal.cert_manager.cert_path_to_lineage')
    @mock.patch('certbot._internal.cert_manager.delete')
    @test_util.patch_get_utility()
    def test_opt_in_deletion(self, mock_get_utility, mock_delete,
            mock_cert_path_to_lineage, mock_full_archive_dir,
            mock_match_and_check_overlaps, mock_renewal_file_for_certname):
        """--delete-after-revoke deletes without asking the user."""
        # pylint: disable = unused-argument
        config = self.config
        config.namespace.delete_after_revoke = True
        config.cert_path = "/some/reasonable/path"
        config.certname = ""
        mock_cert_path_to_lineage.return_value = "example.com"
        mock_full_archive_dir.return_value = ""
        mock_match_and_check_overlaps.return_value = ""
        self._call(config)
        self.assertEqual(mock_delete.call_count, 1)
        self.assertFalse(mock_get_utility().yesno.called)
class DetermineAccountTest(test_util.ConfigTestCase):
    """Tests for certbot._internal.main._determine_account."""
    def setUp(self):
        """Start with a clean config and an in-memory account store."""
        super().setUp()
        self.config.account = None
        self.config.email = None
        self.config.register_unsafely_without_email = False
        self.accs = [mock.MagicMock(id='x'), mock.MagicMock(id='y')]
        self.account_storage = account.AccountMemoryStorage()
        # For use in saving accounts: fake out the new_authz URL.
        self.mock_client = mock.MagicMock()
        self.mock_client.directory.new_authz = "hi"
    def _call(self):
        """Call _determine_account with file storage swapped for memory."""
        # pylint: disable=protected-access
        from certbot._internal.main import _determine_account
        with mock.patch('certbot._internal.main.account.AccountFileStorage') as mock_storage, \
                test_util.patch_get_utility():
            mock_storage.return_value = self.account_storage
            return _determine_account(self.config)
    def test_args_account_set(self):
        """An account id passed via config is selected directly."""
        self.account_storage.save(self.accs[1], self.mock_client)
        self.config.account = self.accs[1].id
        self.assertEqual((self.accs[1], None), self._call())
        self.assertEqual(self.accs[1].id, self.config.account)
        self.assertTrue(self.config.email is None)
    def test_single_account(self):
        """A lone stored account is used without prompting."""
        self.account_storage.save(self.accs[0], self.mock_client)
        self.assertEqual((self.accs[0], None), self._call())
        self.assertEqual(self.accs[0].id, self.config.account)
        self.assertTrue(self.config.email is None)
    @mock.patch('certbot._internal.client.display_ops.choose_account')
    def test_multiple_accounts(self, mock_choose_accounts):
        """With several stored accounts the user's choice wins."""
        for acc in self.accs:
            self.account_storage.save(acc, self.mock_client)
        mock_choose_accounts.return_value = self.accs[1]
        self.assertEqual((self.accs[1], None), self._call())
        self.assertEqual(
            set(mock_choose_accounts.call_args[0][0]), set(self.accs))
        self.assertEqual(self.accs[1].id, self.config.account)
        self.assertTrue(self.config.email is None)
    @mock.patch('certbot._internal.client.display_ops.get_email')
    @mock.patch('certbot._internal.main.display_util.notify')
    def test_no_accounts_no_email(self, mock_notify, mock_get_email):
        """With no accounts and no email, the user is prompted and registered."""
        mock_get_email.return_value = 'foo@bar.baz'
        with mock.patch('certbot._internal.main.client') as client:
            client.register.return_value = (
                self.accs[0], mock.sentinel.acme)
            self.assertEqual((self.accs[0], mock.sentinel.acme), self._call())
        client.register.assert_called_once_with(
            self.config, self.account_storage, tos_cb=mock.ANY)
        self.assertEqual(self.accs[0].id, self.config.account)
        self.assertEqual('foo@bar.baz', self.config.email)
        mock_notify.assert_called_once_with('Account registered.')
    def test_no_accounts_email(self):
        """A configured email is used as-is for registration."""
        self.config.email = 'other email'
        with mock.patch('certbot._internal.main.client') as client:
            client.register.return_value = (self.accs[1], mock.sentinel.acme)
            self._call()
        self.assertEqual(self.accs[1].id, self.config.account)
        self.assertEqual('other email', self.config.email)
class MainTest(test_util.ConfigTestCase):
"""Tests for different commands."""
def setUp(self):
super().setUp()
filesystem.mkdir(self.config.logs_dir)
self.standard_args = ['--config-dir', self.config.config_dir,
'--work-dir', self.config.work_dir,
'--logs-dir', self.config.logs_dir, '--text']
self.mock_sleep = mock.patch('time.sleep').start()
    def tearDown(self):
        """Reload the cli module so its module-level state is reset."""
        # Reset globals in cli
        reload_module(cli)
        super().tearDown()
def _call(self, args, stdout=None, mockisfile=False):
"""Run the cli with output streams, actual client and optionally
os.path.isfile() mocked out"""
if mockisfile:
orig_open = os.path.isfile
def mock_isfile(fn, *args, **kwargs): # pylint: disable=unused-argument
"""Mock os.path.isfile()"""
if (fn.endswith("cert") or
fn.endswith("chain") or
fn.endswith("privkey")):
return True
return orig_open(fn)
with mock.patch("certbot.compat.os.path.isfile") as mock_if:
mock_if.side_effect = mock_isfile
with mock.patch('certbot._internal.main.client') as client:
ret, stdout, stderr = self._call_no_clientmock(args, stdout)
return ret, stdout, stderr, client
else:
with mock.patch('certbot._internal.main.client') as client:
ret, stdout, stderr = self._call_no_clientmock(args, stdout)
return ret, stdout, stderr, client
    def _call_no_clientmock(self, args, stdout=None):
        """Run the client with output streams mocked out.

        Returns (ret, stdout, stderr) where stdout is the supplied stream
        or a fresh StringIO.
        """
        args = self.standard_args + args
        toy_stdout = stdout if stdout else io.StringIO()
        with mock.patch('certbot._internal.main.sys.stdout', new=toy_stdout):
            with mock.patch('certbot._internal.main.sys.stderr') as stderr:
                with mock.patch("certbot.util.atexit"):
                    ret = main.main(args[:])  # NOTE: parser can alter its args!
        return ret, toy_stdout, stderr
    def test_no_flags(self):
        """Invoking certbot with no subcommand dispatches to run()."""
        with mock.patch('certbot._internal.main.run') as mock_run:
            self._call([])
            self.assertEqual(1, mock_run.call_count)
def test_version_string_program_name(self):
toy_out = io.StringIO()
toy_err = io.StringIO()
with mock.patch('certbot._internal.main.sys.stdout', new=toy_out):
with mock.patch('certbot._internal.main.sys.stderr', new=toy_err):
try:
main.main(["--version"])
except SystemExit:
pass
finally:
output = toy_out.getvalue() or toy_err.getvalue()
self.assertTrue("certbot" in output, "Output is {0}".format(output))
def _cli_missing_flag(self, args, message):
"Ensure that a particular error raises a missing cli flag error containing message"
exc = None
try:
with mock.patch('certbot._internal.main.sys.stderr'):
main.main(self.standard_args + args[:]) # NOTE: parser can alter its args!
except errors.MissingCommandlineFlag as exc_:
exc = exc_
self.assertTrue(message in str(exc))
self.assertTrue(exc is not None)
    @mock.patch('certbot._internal.log.post_arg_parse_setup')
    def test_noninteractive(self, _):
        """Noninteractive mode fails fast with helpful missing-flag errors."""
        args = ['-n', 'certonly']
        self._cli_missing_flag(args, "specify a plugin")
        args.extend(['--standalone', '-d', 'eg.is'])
        self._cli_missing_flag(args, "register before running")
    # Mock args arrive in reverse decorator order: bottom-most decorator
    # (_get_and_save_cert) supplies the first argument (gsc).
    @mock.patch('certbot._internal.eff.handle_subscription')
    @mock.patch('certbot._internal.log.post_arg_parse_setup')
    @mock.patch('certbot._internal.main._report_new_cert')
    @mock.patch('certbot._internal.main.client.acme_client.Client')
    @mock.patch('certbot._internal.main._determine_account')
    @mock.patch('certbot._internal.main.client.Client.obtain_and_enroll_certificate')
    @mock.patch('certbot._internal.main._get_and_save_cert')
    def test_user_agent(self, gsc, _obt, det, _client, _, __, ___):
        """ClientNetwork is constructed with the OS- or user-supplied UA."""
        # Normally the client is totally mocked out, but here we need more
        # arguments to automate it...
        args = ["--standalone", "certonly", "-m", "none@none.com",
                "-d", "example.com", '--agree-tos'] + self.standard_args
        det.return_value = mock.MagicMock(), None
        gsc.return_value = mock.MagicMock()
        # Default user agent should embed the OS info string.
        with mock.patch('certbot._internal.main.client.acme_client.ClientNetwork') as acme_net:
            self._call_no_clientmock(args)
            os_ver = util.get_os_info_ua()
            ua = acme_net.call_args[1]["user_agent"]
            self.assertTrue(os_ver in ua)
            import platform
            plat = platform.platform()
            if "linux" in plat.lower():
                self.assertTrue(util.get_os_info_ua() in ua)
        # --user-agent overrides the default entirely.
        with mock.patch('certbot._internal.main.client.acme_client.ClientNetwork') as acme_net:
            ua = "bandersnatch"
            args += ["--user-agent", ua]
            self._call_no_clientmock(args)
            acme_net.assert_called_once_with(mock.ANY, account=mock.ANY, verify_ssl=True,
                                             user_agent=ua)
    @mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
    @mock.patch('certbot._internal.main.plug_sel.pick_installer')
    def test_installer_selection(self, mock_pick_installer, _rec):
        """'certbot install' consults installer selection exactly once."""
        self._call(['install', '--domains', 'foo.bar', '--cert-path', 'cert',
                    '--key-path', 'privkey', '--chain-path', 'chain'], mockisfile=True)
        self.assertEqual(mock_pick_installer.call_count, 1)
    @mock.patch('certbot._internal.main._install_cert')
    @mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
    @mock.patch('certbot._internal.main.plug_sel.pick_installer')
    def test_installer_certname(self, _inst, _rec, mock_install):
        """install --cert-name fills cert paths from the named lineage."""
        mock_lineage = mock.MagicMock(cert_path=test_util.temp_join('cert'),
                                      chain_path=test_util.temp_join('chain'),
                                      fullchain_path=test_util.temp_join('chain'),
                                      key_path=test_util.temp_join('privkey'))
        with mock.patch("certbot._internal.cert_manager.lineage_for_certname") as mock_getlin:
            mock_getlin.return_value = mock_lineage
            self._call(['install', '--cert-name', 'whatever'], mockisfile=True)
            call_config = mock_install.call_args[0][0]
            self.assertEqual(call_config.cert_path, test_util.temp_join('cert'))
            self.assertEqual(call_config.fullchain_path, test_util.temp_join('chain'))
            self.assertEqual(call_config.key_path, test_util.temp_join('privkey'))
@mock.patch('certbot._internal.log.post_arg_parse_setup')
@mock.patch('certbot._internal.main._install_cert')
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_installer_param_override(self, _inst, _rec, mock_install, _):
mock_lineage = mock.MagicMock(cert_path=test_util.temp_join('cert'),
chain_path=test_util.temp_join('chain'),
fullchain_path=test_util.temp_join('chain'),
key_path=test_util.temp_join('privkey'))
with mock.patch("certbot._internal.cert_manager.lineage_for_certname") as mock_getlin:
mock_getlin.return_value = mock_lineage
self._call(['install', '--cert-name', 'whatever',
'--key-path', test_util.temp_join('overriding_privkey')], mockisfile=True)
call_config = mock_install.call_args[0][0]
self.assertEqual(call_config.cert_path, test_util.temp_join('cert'))
self.assertEqual(call_config.fullchain_path, test_util.temp_join('chain'))
self.assertEqual(call_config.chain_path, test_util.temp_join('chain'))
self.assertEqual(call_config.key_path, test_util.temp_join('overriding_privkey'))
mock_install.reset()
self._call(['install', '--cert-name', 'whatever',
'--cert-path', test_util.temp_join('overriding_cert')], mockisfile=True)
call_config = mock_install.call_args[0][0]
self.assertEqual(call_config.cert_path, test_util.temp_join('overriding_cert'))
self.assertEqual(call_config.fullchain_path, test_util.temp_join('chain'))
self.assertEqual(call_config.key_path, test_util.temp_join('privkey'))
@mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
@mock.patch('certbot._internal.main.plug_sel.pick_installer')
def test_installer_param_error(self, _inst, _rec):
self.assertRaises(errors.ConfigurationError,
self._call,
['install', '--cert-name', 'notfound',
'--key-path', 'invalid'])
    @mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
    @mock.patch('certbot._internal.main.plug_sel.pick_installer')
    @mock.patch('certbot._internal.cert_manager.get_certnames')
    @mock.patch('certbot._internal.main._install_cert')
    def test_installer_select_cert(self, mock_inst, mock_getcert, _inst, _rec):
        """With no --cert-name, install asks for a certificate, then installs it."""
        mock_lineage = mock.MagicMock(cert_path=test_util.temp_join('cert'),
                                      chain_path=test_util.temp_join('chain'),
                                      fullchain_path=test_util.temp_join('chain'),
                                      key_path=test_util.temp_join('privkey'))
        with mock.patch("certbot._internal.cert_manager.lineage_for_certname") as mock_getlin:
            mock_getlin.return_value = mock_lineage
            self._call(['install'], mockisfile=True)
        # get_certnames provides the interactive selection; _install_cert follows.
        self.assertTrue(mock_getcert.called)
        self.assertTrue(mock_inst.called)
    @mock.patch('certbot._internal.eff.handle_subscription')
    @mock.patch('certbot._internal.log.post_arg_parse_setup')
    @mock.patch('certbot._internal.main._report_new_cert')
    @mock.patch('certbot.util.exe_exists')
    def test_configurator_selection(self, mock_exe_exists, _, __, ___):
        """Plugin selection paths: a misconfigured nginx fails loudly, bare
        --standalone needs a verb, --manual yields a manual.Authenticator,
        and the legacy "auth" verb routes to certonly."""
        mock_exe_exists.return_value = True
        real_plugins = disco.PluginsRegistry.find_all()
        args = ['--apache', '--authenticator', 'standalone']
        # This needed two calls to find_all(), which we're avoiding for now
        # because of possible side effects:
        # https://github.com/letsencrypt/letsencrypt/commit/51ed2b681f87b1eb29088dd48718a54f401e4855
        # with mock.patch('certbot._internal.cli.plugins_testable') as plugins:
        # plugins.return_value = {"apache": True, "nginx": True}
        # ret, _, _, _ = self._call(args)
        # self.assertTrue("Too many flags setting" in ret)
        args = ["install", "--nginx", "--cert-path",
                test_util.temp_join('blah'), "--key-path", test_util.temp_join('blah'),
                "--nginx-server-root", "/nonexistent/thing", "-d",
                "example.com", "--debug"]
        if "nginx" in real_plugins:
            # Sending nginx a non-existent conf dir will simulate misconfiguration
            # (we can only do that if certbot-nginx is actually present)
            ret, _, _, _ = self._call(args)
            self.assertTrue("The nginx plugin is not working" in ret)
            self.assertTrue("MisconfigurationError" in ret)
        self._cli_missing_flag(["--standalone"], "With the standalone plugin, you probably")
        with mock.patch("certbot._internal.main._init_le_client") as mock_init:
            with mock.patch("certbot._internal.main._get_and_save_cert") as mock_gsc:
                mock_gsc.return_value = mock.MagicMock()
                self._call(["certonly", "--manual", "-d", "foo.bar"])
                # _init_le_client(config, authenticator, installer)
                unused_config, auth, unused_installer = mock_init.call_args[0]
                self.assertTrue(isinstance(auth, manual.Authenticator))
        with mock.patch('certbot._internal.main.certonly') as mock_certonly:
            self._call(["auth", "--standalone"])
            self.assertEqual(1, mock_certonly.call_count)
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_rollback(self, _):
_, _, _, client = self._call(['rollback'])
self.assertEqual(1, client.rollback.call_count)
_, _, _, client = self._call(['rollback', '--checkpoints', '123'])
client.rollback.assert_called_once_with(
mock.ANY, 123, mock.ANY, mock.ANY)
@mock.patch('certbot._internal.cert_manager.update_live_symlinks')
def test_update_symlinks(self, mock_cert_manager):
self._call_no_clientmock(['update_symlinks'])
self.assertEqual(1, mock_cert_manager.call_count)
@mock.patch('certbot._internal.cert_manager.certificates')
def test_certificates(self, mock_cert_manager):
self._call_no_clientmock(['certificates'])
self.assertEqual(1, mock_cert_manager.call_count)
@mock.patch('certbot._internal.cert_manager.delete')
def test_delete(self, mock_cert_manager):
self._call_no_clientmock(['delete'])
self.assertEqual(1, mock_cert_manager.call_count)
@mock.patch('certbot._internal.main.plugins_disco')
@mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_plugins(self, _, _det, mock_disco):
flags = ['--init', '--prepare', '--authenticators', '--installers']
for args in itertools.chain(
*(itertools.combinations(flags, r)
for r in range(len(flags)))):
self._call(['plugins'] + list(args))
    @mock.patch('certbot._internal.main.plugins_disco')
    @mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
    def test_plugins_no_args(self, _det, mock_disco):
        """Bare `plugins` prints the visible, interface-filtered plugin list."""
        ifaces: List[interfaces.IPlugin] = []
        plugins = mock_disco.PluginsRegistry.find_all()
        stdout = io.StringIO()
        with test_util.patch_get_utility_with_stdout(stdout=stdout):
            _, stdout, _, _ = self._call(['plugins'], stdout)
        # main must filter through visible() then ifaces() exactly once each.
        plugins.visible.assert_called_once_with()
        plugins.visible().ifaces.assert_called_once_with(ifaces)
        filtered = plugins.visible().ifaces()
        self.assertEqual(stdout.getvalue().strip(), str(filtered))
    @mock.patch('certbot._internal.main.plugins_disco')
    @mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
    def test_plugins_no_args_unprivileged(self, _det, mock_disco):
        """Listing plugins still works when core dir setup fails (no privileges)."""
        ifaces: List[interfaces.IPlugin] = []
        plugins = mock_disco.PluginsRegistry.find_all()
        def throw_error(directory, mode, strict):
            """Raises error.Error."""
            _, _, _ = directory, mode, strict
            raise errors.Error()
        stdout = io.StringIO()
        with mock.patch('certbot.util.set_up_core_dir') as mock_set_up_core_dir:
            with test_util.patch_get_utility_with_stdout(stdout=stdout):
                # Simulate an unprivileged user who cannot create config dirs.
                mock_set_up_core_dir.side_effect = throw_error
                _, stdout, _, _ = self._call(['plugins'], stdout)
        plugins.visible.assert_called_once_with()
        plugins.visible().ifaces.assert_called_once_with(ifaces)
        filtered = plugins.visible().ifaces()
        self.assertEqual(stdout.getvalue().strip(), str(filtered))
    @mock.patch('certbot._internal.main.plugins_disco')
    @mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
    def test_plugins_init(self, _det, mock_disco):
        """`plugins --init` initializes and verifies the filtered plugins."""
        ifaces: List[interfaces.IPlugin] = []
        plugins = mock_disco.PluginsRegistry.find_all()
        stdout = io.StringIO()
        with test_util.patch_get_utility_with_stdout(stdout=stdout):
            _, stdout, _, _ = self._call(['plugins', '--init'], stdout)
        plugins.visible.assert_called_once_with()
        plugins.visible().ifaces.assert_called_once_with(ifaces)
        filtered = plugins.visible().ifaces()
        # --init additionally runs init() and verify() on the filtered set.
        self.assertEqual(filtered.init.call_count, 1)
        filtered.verify.assert_called_once_with(ifaces)
        verified = filtered.verify()
        self.assertEqual(stdout.getvalue().strip(), str(verified))
    @mock.patch('certbot._internal.main.plugins_disco')
    @mock.patch('certbot._internal.main.cli.HelpfulArgumentParser.determine_help_topics')
    def test_plugins_prepare(self, _det, mock_disco):
        """`plugins --init --prepare` also prepares and availability-filters."""
        ifaces: List[interfaces.IPlugin] = []
        plugins = mock_disco.PluginsRegistry.find_all()
        stdout = io.StringIO()
        with test_util.patch_get_utility_with_stdout(stdout=stdout):
            _, stdout, _, _ = self._call(['plugins', '--init', '--prepare'], stdout)
        plugins.visible.assert_called_once_with()
        plugins.visible().ifaces.assert_called_once_with(ifaces)
        filtered = plugins.visible().ifaces()
        self.assertEqual(filtered.init.call_count, 1)
        filtered.verify.assert_called_once_with(ifaces)
        verified = filtered.verify()
        # --prepare adds prepare() and an available() filter on top of --init.
        verified.prepare.assert_called_once_with()
        verified.available.assert_called_once_with()
        available = verified.available()
        self.assertEqual(stdout.getvalue().strip(), str(available))
def test_certonly_abspath(self):
cert = 'cert'
key = 'key'
chain = 'chain'
fullchain = 'fullchain'
with mock.patch('certbot._internal.main.certonly') as mock_certonly:
self._call(['certonly', '--cert-path', cert, '--key-path', 'key',
'--chain-path', 'chain',
'--fullchain-path', 'fullchain'])
config, unused_plugins = mock_certonly.call_args[0]
self.assertEqual(config.cert_path, os.path.abspath(cert))
self.assertEqual(config.key_path, os.path.abspath(key))
self.assertEqual(config.chain_path, os.path.abspath(chain))
self.assertEqual(config.fullchain_path, os.path.abspath(fullchain))
def test_certonly_bad_args(self):
try:
self._call(['-a', 'bad_auth', 'certonly'])
assert False, "Exception should have been raised"
except errors.PluginSelectionError as e:
self.assertTrue('The requested bad_auth plugin does not appear' in str(e))
def test_check_config_sanity_domain(self):
# FQDN
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', 'a' * 64])
# FQDN 2
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', (('a' * 50) + '.') * 10])
# Bare IP address (this is actually a different error message now)
self.assertRaises(errors.ConfigurationError,
self._call,
['-d', '204.11.231.35'])
def test_csr_with_besteffort(self):
self.assertRaises(
errors.Error, self._call,
'certonly --csr {0} --allow-subset-of-names'.format(CSR).split())
def test_run_with_csr(self):
# This is an error because you can only use --csr with certonly
try:
self._call(['--csr', CSR])
except errors.Error as e:
assert "Please try the certonly" in repr(e)
return
assert False, "Expected supplying --csr to fail with default verb"
def test_csr_with_no_domains(self):
self.assertRaises(
errors.Error, self._call,
'certonly --csr {0}'.format(
test_util.vector_path('csr-nonames_512.pem')).split())
def test_csr_with_inconsistent_domains(self):
self.assertRaises(
errors.Error, self._call,
'certonly -d example.org --csr {0}'.format(CSR).split())
    def _certonly_new_request_common(self, mock_client, args=None):
        """Run certonly for foo.bar with renewal lookup mocked to a new cert."""
        with mock.patch('certbot._internal.main._find_lineage_for_domains_and_certname') \
            as mock_renewal:
            # ("newcert", None) means: no existing lineage, issue a new cert.
            mock_renewal.return_value = ("newcert", None)
            with mock.patch('certbot._internal.main._init_le_client') as mock_init:
                mock_init.return_value = mock_client
                if args is None:
                    args = []
                # NOTE(review): `args +=` mutates a caller-supplied list in
                # place; current callers pass fresh literals, so it's benign.
                args += '-d foo.bar -a standalone certonly'.split()
                self._call(args)
    @test_util.patch_get_utility()
    def test_certonly_dry_run_new_request_success(self, mock_get_utility):
        """A dry-run issuance reports 'dry run' and skips the donation nag."""
        mock_client = mock.MagicMock()
        mock_client.obtain_and_enroll_certificate.return_value = None
        self._certonly_new_request_common(mock_client, ['--dry-run'])
        self.assertEqual(
            mock_client.obtain_and_enroll_certificate.call_count, 1)
        self.assertTrue(
            'dry run' in mock_get_utility().add_message.call_args[0][0])
        # Asserts we don't suggest donating after a successful dry run
        self.assertEqual(mock_get_utility().add_message.call_count, 1)
    @mock.patch('certbot._internal.eff.handle_subscription')
    @mock.patch('certbot.crypto_util.notAfter')
    @test_util.patch_get_utility()
    def test_certonly_new_request_success(self, mock_get_utility, mock_notAfter, mock_subscription):
        """A successful new issuance reports paths, expiry, donation, and EFF."""
        cert_path = os.path.normpath(os.path.join(self.config.config_dir, 'live/foo.bar'))
        key_path = os.path.normpath(os.path.join(self.config.config_dir, 'live/baz.qux'))
        date = '1970-01-01'
        mock_notAfter().date.return_value = date
        mock_lineage = mock.MagicMock(cert=cert_path, fullchain=cert_path,
                                      fullchain_path=cert_path, key_path=key_path)
        mock_client = mock.MagicMock()
        mock_client.obtain_and_enroll_certificate.return_value = mock_lineage
        self._certonly_new_request_common(mock_client)
        self.assertEqual(
            mock_client.obtain_and_enroll_certificate.call_count, 1)
        # First message shown to the user must mention paths and expiry date.
        cert_msg = mock_get_utility().add_message.call_args_list[0][0][0]
        self.assertTrue(cert_path in cert_msg)
        self.assertTrue(date in cert_msg)
        self.assertTrue(key_path in cert_msg)
        self.assertTrue(
            'donate' in mock_get_utility().add_message.call_args[0][0])
        self.assertTrue(mock_subscription.called)
@mock.patch('certbot._internal.eff.handle_subscription')
def test_certonly_new_request_failure(self, mock_subscription):
mock_client = mock.MagicMock()
mock_client.obtain_and_enroll_certificate.return_value = False
self.assertRaises(errors.Error,
self._certonly_new_request_common, mock_client)
self.assertFalse(mock_subscription.called)
def _test_renewal_common(self, due_for_renewal, extra_args, log_out=None,
args=None, should_renew=True, error_expected=False,
quiet_mode=False, expiry_date=datetime.datetime.now(),
reuse_key=False):
cert_path = test_util.vector_path('cert_512.pem')
chain_path = os.path.normpath(os.path.join(self.config.config_dir,
'live/foo.bar/fullchain.pem'))
mock_lineage = mock.MagicMock(cert=cert_path, fullchain=chain_path,
cert_path=cert_path, fullchain_path=chain_path)
mock_lineage.should_autorenew.return_value = due_for_renewal
mock_lineage.has_pending_deployment.return_value = False
mock_lineage.names.return_value = ['isnot.org']
mock_lineage.private_key_type = 'RSA'
mock_certr = mock.MagicMock()
mock_key = mock.MagicMock(pem='pem_key')
mock_client = mock.MagicMock()
stdout = io.StringIO()
mock_client.obtain_certificate.return_value = (mock_certr, 'chain',
mock_key, 'csr')
def write_msg(message, *args, **kwargs): # pylint: disable=unused-argument
"""Write message to stdout."""
stdout.write(message)
try:
with mock.patch('certbot._internal.cert_manager.find_duplicative_certs') as mock_fdc:
mock_fdc.return_value = (mock_lineage, None)
with mock.patch('certbot._internal.main._init_le_client') as mock_init:
mock_init.return_value = mock_client
with test_util.patch_get_utility() as mock_get_utility:
if not quiet_mode:
mock_get_utility().notification.side_effect = write_msg
with mock.patch('certbot._internal.main.renewal.OpenSSL') as mock_ssl:
mock_latest = mock.MagicMock()
mock_latest.get_issuer.return_value = "Artificial pretend"
mock_ssl.crypto.load_certificate.return_value = mock_latest
with mock.patch('certbot._internal.main.renewal.crypto_util') \
as mock_crypto_util:
mock_crypto_util.notAfter.return_value = expiry_date
with mock.patch('certbot._internal.eff.handle_subscription'):
if not args:
args = ['-d', 'isnot.org', '-a', 'standalone', 'certonly']
if extra_args:
args += extra_args
try:
ret, stdout, _, _ = self._call(args, stdout)
if ret:
print("Returned", ret)
raise AssertionError(ret)
assert not error_expected, "renewal should have errored"
except: # pylint: disable=bare-except
if not error_expected:
raise AssertionError(
"Unexpected renewal error:\n" +
traceback.format_exc())
if should_renew:
if reuse_key:
# The location of the previous live privkey.pem is passed
# to obtain_certificate
mock_client.obtain_certificate.assert_called_once_with(['isnot.org'],
os.path.normpath(os.path.join(
self.config.config_dir, "live/sample-renewal/privkey.pem")))
else:
mock_client.obtain_certificate.assert_called_once_with(['isnot.org'], None)
else:
self.assertEqual(mock_client.obtain_certificate.call_count, 0)
except:
self._dump_log()
raise
finally:
if log_out:
with open(os.path.join(self.config.logs_dir, "letsencrypt.log")) as lf:
self.assertTrue(log_out in lf.read())
return mock_lineage, mock_get_utility, stdout
    @mock.patch('certbot.crypto_util.notAfter')
    def test_certonly_renewal(self, _):
        """A due renewal saves a successor and updates the live symlinks."""
        lineage, get_utility, _ = self._test_renewal_common(True, [])
        self.assertEqual(lineage.save_successor.call_count, 1)
        lineage.update_all_links_to.assert_called_once_with(
            lineage.latest_common_version())
        # First user-visible message must reference the fullchain path.
        cert_msg = get_utility().add_message.call_args_list[0][0][0]
        self.assertTrue('fullchain.pem' in cert_msg)
        self.assertTrue('donate' in get_utility().add_message.call_args[0][0])
    @mock.patch('certbot._internal.log.logging.handlers.RotatingFileHandler.doRollover')
    @mock.patch('certbot.crypto_util.notAfter')
    def test_certonly_renewal_triggers(self, _, __):
        """--dry-run and --renew-by-default force renewal; --keep alone doesn't."""
        # --dry-run should force renewal
        _, get_utility, _ = self._test_renewal_common(False, ['--dry-run', '--keep'],
                                                      log_out="simulating renewal")
        self.assertEqual(get_utility().add_message.call_count, 1)
        self.assertTrue('dry run' in get_utility().add_message.call_args[0][0])
        self._test_renewal_common(False, ['--renew-by-default', '-tvv', '--debug'],
                                  log_out="Auto-renewal forced")
        # NOTE(review): this re-asserts on the get_utility mock returned by
        # the *first* run above; the second run's utility mock is discarded,
        # so the assertion does not observe the forced-renewal run — confirm
        # whether that is intended.
        self.assertEqual(get_utility().add_message.call_count, 1)
        self._test_renewal_common(False, ['-tvv', '--debug', '--keep'],
                                  log_out="not yet due", should_renew=False)
def _dump_log(self):
print("Logs:")
log_path = os.path.join(self.config.logs_dir, "letsencrypt.log")
if os.path.exists(log_path):
with open(log_path) as lf:
print(lf.read())
def test_renew_verb(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(True, [], args=args, should_renew=True)
def test_reuse_key(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "--reuse-key"]
self._test_renewal_common(True, [], args=args, should_renew=True, reuse_key=True)
@mock.patch('certbot._internal.storage.RenewableCert.save_successor')
def test_reuse_key_no_dry_run(self, unused_save_successor):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--reuse-key"]
self._test_renewal_common(True, [], args=args, should_renew=True, reuse_key=True)
    @mock.patch('sys.stdin')
    def test_noninteractive_renewal_delay(self, stdin):
        """Unattended renew sleeps once, for a random 1..480s, to spread load."""
        stdin.isatty.return_value = False
        test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
        args = ["renew", "--dry-run", "-tvv"]
        self._test_renewal_common(True, [], args=args, should_renew=True)
        self.assertEqual(self.mock_sleep.call_count, 1)
        # in main.py:
        # sleep_time = random.randint(1, 60*8)
        sleep_call_arg = self.mock_sleep.call_args[0][0]
        self.assertTrue(1 <= sleep_call_arg <= 60*8)
@mock.patch('sys.stdin')
def test_interactive_no_renewal_delay(self, stdin):
stdin.isatty.return_value = True
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(True, [], args=args, should_renew=True)
self.assertEqual(self.mock_sleep.call_count, 0)
@mock.patch('certbot._internal.renewal.should_renew')
def test_renew_skips_recent_certs(self, should_renew):
should_renew.return_value = False
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
expiry = datetime.datetime.now() + datetime.timedelta(days=90)
_, _, stdout = self._test_renewal_common(False, extra_args=None, should_renew=False,
args=['renew'], expiry_date=expiry)
self.assertTrue('No renewals were attempted.' in stdout.getvalue())
self.assertTrue('The following certificates are not due for renewal yet:' in stdout.getvalue())
@mock.patch('certbot._internal.log.post_arg_parse_setup')
def test_quiet_renew(self, _):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run"]
_, _, stdout = self._test_renewal_common(True, [], args=args, should_renew=True)
out = stdout.getvalue()
self.assertTrue("renew" in out)
args = ["renew", "--dry-run", "-q"]
_, _, stdout = self._test_renewal_common(True, [], args=args,
should_renew=True, quiet_mode=True)
out = stdout.getvalue()
self.assertEqual("", out)
def test_renew_hook_validation(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "--post-hook=no-such-command"]
self._test_renewal_common(True, [], args=args, should_renew=False,
error_expected=True)
def test_renew_no_hook_validation(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
args = ["renew", "--dry-run", "--post-hook=no-such-command",
"--disable-hook-validation"]
with mock.patch("certbot._internal.hooks.post_hook"):
self._test_renewal_common(True, [], args=args, should_renew=True,
error_expected=False)
def test_renew_verb_empty_config(self):
rd = os.path.join(self.config.config_dir, 'renewal')
if not os.path.exists(rd):
filesystem.makedirs(rd)
with open(os.path.join(rd, 'empty.conf'), 'w'):
pass # leave the file empty
args = ["renew", "--dry-run", "-tvv"]
self._test_renewal_common(False, [], args=args, should_renew=False, error_expected=True)
def test_renew_with_certname(self):
test_util.make_lineage(self.config.config_dir, 'sample-renewal.conf')
self._test_renewal_common(True, [], should_renew=True,
args=['renew', '--dry-run', '--cert-name', 'sample-renewal'])
def test_renew_with_bad_certname(self):
self._test_renewal_common(True, [], should_renew=False,
args=['renew', '--dry-run', '--cert-name', 'sample-renewal'],
error_expected=True)
def _make_dummy_renewal_config(self):
renewer_configs_dir = os.path.join(self.config.config_dir, 'renewal')
filesystem.makedirs(renewer_configs_dir)
with open(os.path.join(renewer_configs_dir, 'test.conf'), 'w') as f:
f.write("My contents don't matter")
    def _test_renew_common(self, renewalparams=None, names=None,
                           assert_oc_called=None, **kwargs):
        """Run `renew` against a dummy lineage with the given renewalparams.

        assert_oc_called: when not None, assert whether renew_cert was
        (True) or was not (False) invoked.  Extra kwargs are forwarded
        to _test_renewal_common.
        """
        self._make_dummy_renewal_config()
        with mock.patch('certbot._internal.storage.RenewableCert') as mock_rc:
            mock_lineage = mock.MagicMock()
            mock_lineage.fullchain = "somepath/fullchain.pem"
            if renewalparams is not None:
                mock_lineage.configuration = {'renewalparams': renewalparams}
            if names is not None:
                mock_lineage.names.return_value = names
            mock_rc.return_value = mock_lineage
            with mock.patch('certbot._internal.main.renew_cert') as mock_renew_cert:
                kwargs.setdefault('args', ['renew'])
                self._test_renewal_common(True, None, should_renew=False, **kwargs)
            if assert_oc_called is not None:
                if assert_oc_called:
                    self.assertTrue(mock_renew_cert.called)
                else:
                    self.assertFalse(mock_renew_cert.called)
def test_renew_no_renewalparams(self):
self._test_renew_common(assert_oc_called=False, error_expected=True)
def test_renew_no_authenticator(self):
self._test_renew_common(renewalparams={}, assert_oc_called=False,
error_expected=True)
def test_renew_with_bad_int(self):
renewalparams = {'authenticator': 'webroot',
'rsa_key_size': 'over 9000'}
self._test_renew_common(renewalparams=renewalparams, error_expected=True,
assert_oc_called=False)
def test_renew_with_nonetype_http01(self):
renewalparams = {'authenticator': 'webroot',
'http01_port': 'None'}
self._test_renew_common(renewalparams=renewalparams,
assert_oc_called=True)
def test_renew_with_bad_domain(self):
renewalparams = {'authenticator': 'webroot'}
names = ['uniçodé.com']
self._test_renew_common(renewalparams=renewalparams, error_expected=True,
names=names, assert_oc_called=False)
@mock.patch('certbot._internal.plugins.selection.choose_configurator_plugins')
def test_renew_with_configurator(self, mock_sel):
mock_sel.return_value = (mock.MagicMock(), mock.MagicMock())
renewalparams = {'authenticator': 'webroot'}
self._test_renew_common(
renewalparams=renewalparams, assert_oc_called=True,
args='renew --configurator apache'.split())
def test_renew_plugin_config_restoration(self):
renewalparams = {'authenticator': 'webroot',
'webroot_path': 'None',
'webroot_imaginary_flag': '42'}
self._test_renew_common(renewalparams=renewalparams,
assert_oc_called=True)
def test_renew_with_webroot_map(self):
renewalparams = {'authenticator': 'webroot'}
self._test_renew_common(
renewalparams=renewalparams, assert_oc_called=True,
args=['renew', '--webroot-map', json.dumps({'example.com': tempfile.gettempdir()})])
def test_renew_reconstitute_error(self):
# pylint: disable=protected-access
with mock.patch('certbot._internal.main.renewal._reconstitute') as mock_reconstitute:
mock_reconstitute.side_effect = Exception
self._test_renew_common(assert_oc_called=False, error_expected=True)
    def test_renew_obtain_cert_error(self):
        """An exception from renew_cert is reported as a renewal error."""
        self._make_dummy_renewal_config()
        with mock.patch('certbot._internal.storage.RenewableCert') as mock_rc:
            mock_lineage = mock.MagicMock()
            mock_lineage.fullchain = "somewhere/fullchain.pem"
            mock_rc.return_value = mock_lineage
            mock_lineage.configuration = {
                'renewalparams': {'authenticator': 'webroot'}}
            with mock.patch('certbot._internal.main.renew_cert') as mock_renew_cert:
                mock_renew_cert.side_effect = Exception
                self._test_renewal_common(True, None, error_expected=True,
                                          args=['renew'], should_renew=False)
def test_renew_with_bad_cli_args(self):
self._test_renewal_common(True, None, args='renew -d example.com'.split(),
should_renew=False, error_expected=True)
self._test_renewal_common(True, None, args='renew --csr {0}'.format(CSR).split(),
should_renew=False, error_expected=True)
def test_no_renewal_with_hooks(self):
_, _, stdout = self._test_renewal_common(
due_for_renewal=False, extra_args=None, should_renew=False,
args=['renew', '--post-hook',
'{0} -c "print(\'hello world\');"'
.format(sys.executable)])
self.assertTrue('No hooks were run.' in stdout.getvalue())
    @test_util.patch_get_utility()
    @mock.patch('certbot._internal.main._find_lineage_for_domains_and_certname')
    @mock.patch('certbot._internal.main._init_le_client')
    @mock.patch('certbot._internal.main._report_new_cert')
    def test_certonly_reinstall(self, mock_report_new_cert, mock_init,
                                mock_renewal, mock_get_utility):
        """Choosing 'reinstall' must skip issuance and all new-cert messaging."""
        mock_renewal.return_value = ('reinstall', mock.MagicMock())
        mock_init.return_value = mock_client = mock.MagicMock()
        self._call(['-d', 'foo.bar', '-a', 'standalone', 'certonly'])
        self.assertFalse(mock_client.obtain_certificate.called)
        self.assertFalse(mock_client.obtain_and_enroll_certificate.called)
        self.assertEqual(mock_get_utility().add_message.call_count, 0)
        mock_report_new_cert.assert_not_called()
        #self.assertTrue('donate' not in mock_get_utility().add_message.call_args[0][0])
    def _test_certonly_csr_common(self, extra_args=None):
        """Run certonly --csr with a mocked client; return the utility mock.

        Asserts save_certificate is called with the expected paths, except
        under --dry-run where it must not be called at all.
        """
        certr = 'certr'
        chain = 'chain'
        mock_client = mock.MagicMock()
        mock_client.obtain_certificate_from_csr.return_value = (certr, chain)
        cert_path = os.path.normpath(os.path.join(
            self.config.config_dir,
            'live/example.com/cert_512.pem'))
        full_path = os.path.normpath(os.path.join(
            self.config.config_dir,
            'live/example.com/fullchain.pem'))
        mock_client.save_certificate.return_value = cert_path, None, full_path
        with mock.patch('certbot._internal.main._init_le_client') as mock_init:
            mock_init.return_value = mock_client
            with test_util.patch_get_utility() as mock_get_utility:
                chain_path = os.path.normpath(os.path.join(
                    self.config.config_dir,
                    'live/example.com/chain.pem'))
                args = ('-a standalone certonly --csr {0} --cert-path {1} '
                        '--chain-path {2} --fullchain-path {3}').format(
                            CSR, cert_path, chain_path, full_path).split()
                if extra_args:
                    args += extra_args
                with mock.patch('certbot._internal.main.crypto_util'):
                    self._call(args)
        if '--dry-run' in args:
            self.assertFalse(mock_client.save_certificate.called)
        else:
            mock_client.save_certificate.assert_called_once_with(
                certr, chain, cert_path, chain_path, full_path)
        return mock_get_utility
    @mock.patch('certbot._internal.eff.handle_subscription')
    def test_certonly_csr(self, mock_subscription):
        """CSR issuance reports fullchain (not a key path), donation, and EFF."""
        mock_get_utility = self._test_certonly_csr_common()
        cert_msg = mock_get_utility().add_message.call_args_list[0][0][0]
        self.assertTrue('fullchain.pem' in cert_msg)
        # No key is generated for CSR issuance, so no key path is reported.
        self.assertFalse('Your key file has been saved at' in cert_msg)
        self.assertTrue(
            'donate' in mock_get_utility().add_message.call_args[0][0])
        self.assertTrue(mock_subscription.called)
def test_certonly_csr_dry_run(self):
mock_get_utility = self._test_certonly_csr_common(['--dry-run'])
self.assertEqual(mock_get_utility().add_message.call_count, 1)
self.assertTrue(
'dry run' in mock_get_utility().add_message.call_args[0][0])
    @mock.patch('certbot._internal.main._delete_if_appropriate')
    @mock.patch('certbot._internal.main.client.acme_client')
    def test_revoke_with_key(self, mock_acme_client,
                             mock_delete_if_appropriate):
        """revoke with --key-path authenticates via the cert's own key."""
        mock_delete_if_appropriate.return_value = False
        server = 'foo.bar'
        self._call_no_clientmock(['--cert-path', SS_CERT_PATH, '--key-path', RSA2048_KEY_PATH,
                                 '--server', server, 'revoke'])
        # The ACME client must be constructed with the supplied key, not an
        # account key.
        with open(RSA2048_KEY_PATH, 'rb') as f:
            mock_acme_client.BackwardsCompatibleClientV2.assert_called_once_with(
                mock.ANY, jose.JWK.load(f.read()), server)
        with open(SS_CERT_PATH, 'rb') as f:
            cert = crypto_util.pyopenssl_load_certificate(f.read())[0]
            mock_revoke = mock_acme_client.BackwardsCompatibleClientV2().revoke
            mock_revoke.assert_called_once_with(
                    jose.ComparableX509(cert),
                    mock.ANY)
def test_revoke_with_key_mismatch(self):
server = 'foo.bar'
self.assertRaises(errors.Error, self._call_no_clientmock,
['--cert-path', CERT, '--key-path', KEY,
'--server', server, 'revoke'])
    @mock.patch('certbot._internal.main._delete_if_appropriate')
    @mock.patch('certbot._internal.main._determine_account')
    def test_revoke_without_key(self, mock_determine_account,
                                mock_delete_if_appropriate):
        """revoke without --key-path falls back to the account key."""
        mock_delete_if_appropriate.return_value = False
        mock_determine_account.return_value = (mock.MagicMock(), None)
        _, _, _, client = self._call(['--cert-path', CERT, 'revoke'])
        with open(CERT) as f:
            cert = crypto_util.pyopenssl_load_certificate(f.read())[0]
            # Account-keyed ACME client is used when no cert key is given.
            mock_revoke = client.acme_from_config_key().revoke
            mock_revoke.assert_called_once_with(
                    jose.ComparableX509(cert),
                    mock.ANY)
    @mock.patch('certbot._internal.log.post_arg_parse_setup')
    def test_register(self, _):
        """register creates an account, or reports one that already exists."""
        with mock.patch('certbot._internal.main.client') as mocked_client:
            acc = mock.MagicMock()
            acc.id = "imaginary_account"
            mocked_client.register.return_value = (acc, "worked")
            self._call_no_clientmock(["register", "--email", "user@example.org"])
            # TODO: It would be more correct to explicitly check that
            #       _determine_account() gets called in the above case,
            #       but coverage statistics should also show that it did.
            with mock.patch('certbot._internal.main.account') as mocked_account:
                mocked_storage = mock.MagicMock()
                mocked_account.AccountFileStorage.return_value = mocked_storage
                mocked_storage.find_all.return_value = ["an account"]
                x = self._call_no_clientmock(["register", "--email", "user@example.org"])
                self.assertTrue("There is an existing account" in x[0])
    @mock.patch('certbot._internal.plugins.selection.choose_configurator_plugins')
    @mock.patch('certbot._internal.updater._run_updaters')
    def test_plugin_selection_error(self, mock_run, mock_choose):
        """renew_cert propagates PluginSelectionError; updaters then skip it."""
        mock_choose.side_effect = errors.PluginSelectionError
        self.assertRaises(errors.PluginSelectionError, main.renew_cert,
                          None, None, None)
        self.config.dry_run = False
        updater.run_generic_updaters(self.config, None, None)
        # Make sure we're returning None, and hence not trying to run the
        # without installer
        self.assertFalse(mock_run.called)
class UnregisterTest(unittest.TestCase):
    """Tests for certbot._internal.main.unregister."""
    def setUp(self):
        # Patch account handling, the client, and the display utility for
        # every test; mocks are exposed via self.mocks by patcher name.
        self.patchers = {
            '_determine_account': mock.patch('certbot._internal.main._determine_account'),
            'account': mock.patch('certbot._internal.main.account'),
            'client': mock.patch('certbot._internal.main.client'),
            'get_utility': test_util.patch_get_utility()}
        self.mocks = {k: v.start() for k, v in self.patchers.items()}
    def tearDown(self):
        for patch in self.patchers.values():
            patch.stop()
    def test_abort_unregister(self):
        """Answering 'no' to the confirmation aborts deactivation."""
        self.mocks['account'].AccountFileStorage.return_value = mock.Mock()
        util_mock = self.mocks['get_utility']()
        util_mock.yesno.return_value = False
        config = mock.Mock()
        unused_plugins = mock.Mock()
        res = main.unregister(config, unused_plugins)
        self.assertEqual(res, "Deactivation aborted.")
    @mock.patch("certbot._internal.main.display_util.notify")
    def test_unregister(self, mock_notify):
        """A confirmed unregister deactivates the account and notifies."""
        mocked_storage = mock.MagicMock()
        mocked_storage.find_all.return_value = ["an account"]
        self.mocks['account'].AccountFileStorage.return_value = mocked_storage
        self.mocks['_determine_account'].return_value = (mock.MagicMock(), "foo")
        cb_client = mock.MagicMock()
        self.mocks['client'].Client.return_value = cb_client
        config = mock.MagicMock()
        unused_plugins = mock.MagicMock()
        res = main.unregister(config, unused_plugins)
        self.assertTrue(res is None)
        mock_notify.assert_called_once_with("Account deactivated.")
    def test_unregister_no_account(self):
        """Unregistering with no stored accounts reports the condition."""
        mocked_storage = mock.MagicMock()
        mocked_storage.find_all.return_value = []
        self.mocks['account'].AccountFileStorage.return_value = mocked_storage
        cb_client = mock.MagicMock()
        self.mocks['client'].Client.return_value = cb_client
        config = mock.MagicMock()
        unused_plugins = mock.MagicMock()
        res = main.unregister(config, unused_plugins)
        m = "Could not find existing account to deactivate."
        self.assertEqual(res, m)
        self.assertFalse(cb_client.acme.deactivate_registration.called)
class MakeOrVerifyNeededDirs(test_util.ConfigTestCase):
    """Tests for certbot._internal.main.make_or_verify_needed_dirs."""

    @mock.patch("certbot._internal.main.util")
    def test_it(self, mock_util):
        """Core dirs get CONFIG_DIRS_MODE; hook dirs use the default mode."""
        main.make_or_verify_needed_dirs(self.config)
        core_dirs = (self.config.config_dir, self.config.work_dir)
        for directory in core_dirs:
            mock_util.set_up_core_dir.assert_any_call(
                directory, constants.CONFIG_DIRS_MODE,
                self.config.strict_permissions
            )
        hook_dirs = (self.config.renewal_pre_hooks_dir,
                     self.config.renewal_deploy_hooks_dir,
                     self.config.renewal_post_hooks_dir)
        for directory in hook_dirs:
            # No mode argument: the default of 755 is expected here.
            mock_util.make_or_verify_dir.assert_any_call(
                directory, strict=self.config.strict_permissions)
class EnhanceTest(test_util.ConfigTestCase):
    """Tests for certbot._internal.main.enhance."""

    def setUp(self):
        super().setUp()
        self.get_utility_patch = test_util.patch_get_utility()
        self.mock_get_utility = self.get_utility_patch.start()
        self.mockinstaller = mock.MagicMock(spec=enhancements.AutoHSTSEnhancement)

    def tearDown(self):
        self.get_utility_patch.stop()
        # Run the parent cleanup as well so the temporary config/work/log
        # directories created by ConfigTestCase.setUp are removed
        # (previously skipped, leaking a temp dir per test).
        super().tearDown()

    def _call(self, args):
        """Run main.enhance with ``args`` and return the mocked LE client."""
        plugins = disco.PluginsRegistry.find_all()
        config = configuration.NamespaceConfig(
            cli.prepare_and_parse_args(plugins, args))
        with mock.patch('certbot._internal.cert_manager.get_certnames') as mock_certs:
            mock_certs.return_value = ['example.com']
            with mock.patch('certbot._internal.cert_manager.domains_for_certname') as mock_dom:
                mock_dom.return_value = ['example.com']
                with mock.patch('certbot._internal.main._init_le_client') as mock_init:
                    mock_client = mock.MagicMock()
                    mock_client.config = config
                    mock_init.return_value = mock_client
                    main.enhance(config, plugins)
                    return mock_client  # returns the client

    @mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
    @mock.patch('certbot._internal.cert_manager.lineage_for_certname')
    @mock.patch('certbot._internal.main.display_ops.choose_values')
    @mock.patch('certbot._internal.main._find_domains_or_certname')
    def test_selection_question(self, mock_find, mock_choose, mock_lineage, _rec):
        mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
        mock_choose.return_value = ['example.com']
        mock_find.return_value = (None, None)
        with mock.patch('certbot._internal.main.plug_sel.pick_installer') as mock_pick:
            self._call(['enhance', '--redirect'])
            self.assertTrue(mock_pick.called)
            # Check that the message includes "enhancements"; assertIn
            # prints the actual message when it does not.
            self.assertIn("enhancements", mock_pick.call_args[0][3])

    @mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
    @mock.patch('certbot._internal.cert_manager.lineage_for_certname')
    @mock.patch('certbot._internal.main.display_ops.choose_values')
    @mock.patch('certbot._internal.main._find_domains_or_certname')
    def test_selection_auth_warning(self, mock_find, mock_choose, mock_lineage, _rec):
        mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
        mock_choose.return_value = ["example.com"]
        mock_find.return_value = (None, None)
        with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
            with mock.patch('certbot._internal.main.plug_sel.logger.warning') as mock_log:
                mock_client = self._call(['enhance', '-a', 'webroot', '--redirect'])
                self.assertTrue(mock_log.called)
                self.assertIn("make sense", mock_log.call_args[0][0])
                self.assertTrue(mock_client.enhance_config.called)

    @mock.patch('certbot._internal.cert_manager.lineage_for_certname')
    @mock.patch('certbot._internal.main.display_ops.choose_values')
    @mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
    def test_enhance_config_call(self, _rec, mock_choose, mock_lineage):
        mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
        mock_choose.return_value = ["example.com"]
        with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
            mock_client = self._call(['enhance', '--redirect', '--hsts'])
            req_enh = ["redirect", "hsts"]
            not_req_enh = ["uir"]
            self.assertTrue(mock_client.enhance_config.called)
            self.assertTrue(
                all(getattr(mock_client.config, e) for e in req_enh))
            self.assertFalse(
                any(getattr(mock_client.config, e) for e in not_req_enh))
            self.assertIn("example.com", mock_client.enhance_config.call_args[0][0])

    @mock.patch('certbot._internal.cert_manager.lineage_for_certname')
    @mock.patch('certbot._internal.main.display_ops.choose_values')
    @mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
    def test_enhance_noninteractive(self, _rec, mock_choose, mock_lineage):
        mock_lineage.return_value = mock.MagicMock(
            chain_path="/tmp/nonexistent")
        mock_choose.return_value = ["example.com"]
        with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
            mock_client = self._call(['enhance', '--redirect',
                                      '--hsts', '--non-interactive'])
            self.assertTrue(mock_client.enhance_config.called)
            self.assertFalse(mock_choose.called)

    @mock.patch('certbot._internal.main.display_ops.choose_values')
    @mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
    def test_user_abort_domains(self, _rec, mock_choose):
        mock_choose.return_value = []
        with mock.patch('certbot._internal.main.plug_sel.pick_installer'):
            self.assertRaises(errors.Error,
                              self._call,
                              ['enhance', '--redirect', '--hsts'])

    def test_no_enhancements_defined(self):
        self.assertRaises(errors.MisconfigurationError,
                          self._call, ['enhance', '-a', 'null'])

    @mock.patch('certbot._internal.main.plug_sel.choose_configurator_plugins')
    @mock.patch('certbot._internal.main.display_ops.choose_values')
    @mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
    def test_plugin_selection_error(self, _rec, mock_choose, mock_pick):
        mock_choose.return_value = ["example.com"]
        mock_pick.return_value = (None, None)
        mock_pick.side_effect = errors.PluginSelectionError()
        mock_client = self._call(['enhance', '--hsts'])
        self.assertFalse(mock_client.enhance_config.called)

    @mock.patch('certbot._internal.cert_manager.lineage_for_certname')
    @mock.patch('certbot._internal.main.display_ops.choose_values')
    @mock.patch('certbot._internal.main.plug_sel.pick_installer')
    @mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
    @test_util.patch_get_utility()
    def test_enhancement_enable(self, _, _rec, mock_inst, mock_choose, mock_lineage):
        mock_inst.return_value = self.mockinstaller
        mock_choose.return_value = ["example.com", "another.tld"]
        mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
        self._call(['enhance', '--auto-hsts'])
        self.assertTrue(self.mockinstaller.enable_autohsts.called)
        self.assertEqual(self.mockinstaller.enable_autohsts.call_args[0][1],
                         ["example.com", "another.tld"])

    @mock.patch('certbot._internal.cert_manager.lineage_for_certname')
    @mock.patch('certbot._internal.main.display_ops.choose_values')
    @mock.patch('certbot._internal.main.plug_sel.pick_installer')
    @mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
    @test_util.patch_get_utility()
    def test_enhancement_enable_not_supported(self, _, _rec, mock_inst, mock_choose, mock_lineage):
        mock_inst.return_value = null.Installer(self.config, "null")
        mock_choose.return_value = ["example.com", "another.tld"]
        mock_lineage.return_value = mock.MagicMock(chain_path="/tmp/nonexistent")
        self.assertRaises(
            errors.NotSupportedError,
            self._call, ['enhance', '--auto-hsts'])

    def test_enhancement_enable_conflict(self):
        self.assertRaises(
            errors.Error,
            self._call, ['enhance', '--auto-hsts', '--hsts'])
class InstallTest(test_util.ConfigTestCase):
    """Tests for certbot._internal.main.install."""

    def setUp(self):
        super().setUp()
        self.mockinstaller = mock.MagicMock(spec=enhancements.AutoHSTSEnhancement)

    @mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
    @mock.patch('certbot._internal.main.plug_sel.pick_installer')
    def test_install_enhancement_not_supported(self, mock_inst, _rec):
        """An installer lacking AutoHSTS support makes install fail."""
        mock_inst.return_value = null.Installer(self.config, "null")
        plugins = disco.PluginsRegistry.find_all()
        self.config.auto_hsts = True
        self.config.certname = "nonexistent"
        with self.assertRaises(errors.NotSupportedError):
            main.install(self.config, plugins)

    @mock.patch('certbot._internal.main.plug_sel.record_chosen_plugins')
    @mock.patch('certbot._internal.main.plug_sel.pick_installer')
    def test_install_enhancement_no_certname(self, mock_inst, _rec):
        """Enhancements require a certificate name; none given -> error."""
        mock_inst.return_value = self.mockinstaller
        plugins = disco.PluginsRegistry.find_all()
        self.config.auto_hsts = True
        self.config.certname = None
        self.config.key_path = "/tmp/nonexistent"
        self.config.cert_path = "/tmp/nonexistent"
        with self.assertRaises(errors.ConfigurationError):
            main.install(self.config, plugins)
class UpdateAccountTest(test_util.ConfigTestCase):
    """Tests for certbot._internal.main.update_account"""

    def setUp(self):
        patches = {
            'account': mock.patch('certbot._internal.main.account'),
            'atexit': mock.patch('certbot.util.atexit'),
            'client': mock.patch('certbot._internal.main.client'),
            'determine_account': mock.patch('certbot._internal.main._determine_account'),
            'notify': mock.patch('certbot._internal.main.display_util.notify'),
            'prepare_sub': mock.patch('certbot._internal.eff.prepare_subscription'),
            'util': test_util.patch_get_utility()
        }
        # Iterate items() instead of keys to avoid a redundant lookup
        # per entry; this is the idiomatic way to build a derived dict.
        self.mocks = {k: patcher.start() for k, patcher in patches.items()}
        for patch in patches.values():
            self.addCleanup(patch.stop)
        return super().setUp()

    def _call(self, args):
        """Run the certbot CLI with stdout/stderr silenced."""
        with mock.patch('certbot._internal.main.sys.stdout'), \
            mock.patch('certbot._internal.main.sys.stderr'):
            args = ['--config-dir', self.config.config_dir,
                    '--work-dir', self.config.work_dir,
                    '--logs-dir', self.config.logs_dir, '--text'] + args
            return main.main(args[:])  # NOTE: parser can alter its args!

    def _prepare_mock_account(self):
        """Wire the account storage mock to contain exactly one account."""
        mock_storage = mock.MagicMock()
        mock_account = mock.MagicMock()
        mock_regr = mock.MagicMock()
        mock_storage.find_all.return_value = [mock_account]
        self.mocks['account'].AccountFileStorage.return_value = mock_storage
        mock_account.regr.body = mock_regr.body
        self.mocks['determine_account'].return_value = (mock_account, mock.MagicMock())
        return (mock_account, mock_storage, mock_regr)

    def _test_update_no_contact(self, args):
        """Utility to assert that email removal is handled correctly"""
        _, mock_storage, mock_regr = self._prepare_mock_account()
        result = self._call(args)
        # When update succeeds, the return value of update_account() is None
        self.assertIsNone(result)
        # We submitted a registration to the server
        self.assertEqual(self.mocks['client'].Client().acme.update_registration.call_count, 1)
        mock_regr.body.update.assert_called_with(contact=())
        # We got an update from the server and persisted it
        self.assertEqual(mock_storage.update_regr.call_count, 1)
        # We should have notified the user
        self.mocks['notify'].assert_called_with(
            'Any contact information associated with this account has been removed.'
        )
        # We should not have called subscription because there's no email
        self.mocks['prepare_sub'].assert_not_called()

    def test_no_existing_accounts(self):
        """Test that no existing account is handled correctly"""
        mock_storage = mock.MagicMock()
        mock_storage.find_all.return_value = []
        self.mocks['account'].AccountFileStorage.return_value = mock_storage
        self.assertEqual(self._call(['update_account', '--email', 'user@example.org']),
                         'Could not find an existing account to update.')

    def test_update_account_remove_email(self):
        """Test that --register-unsafely-without-email is handled as no email"""
        self._test_update_no_contact(['update_account', '--register-unsafely-without-email'])

    def test_update_account_empty_email(self):
        """Test that providing an empty email is handled as no email"""
        self._test_update_no_contact(['update_account', '-m', ''])

    @mock.patch('certbot._internal.main.display_ops.get_email')
    def test_update_account_with_email(self, mock_email):
        """Test that updating with a singular email is handled correctly"""
        mock_email.return_value = 'user@example.com'
        _, mock_storage, _ = self._prepare_mock_account()
        mock_client = mock.MagicMock()
        self.mocks['client'].Client.return_value = mock_client
        result = self._call(['update_account'])
        # None if registration succeeds
        self.assertIsNone(result)
        # We should have updated the server
        self.assertEqual(mock_client.acme.update_registration.call_count, 1)
        # We should have updated the account on disk
        self.assertEqual(mock_storage.update_regr.call_count, 1)
        # Subscription should have been prompted
        self.assertEqual(self.mocks['prepare_sub'].call_count, 1)
        # Should have printed the email
        self.mocks['notify'].assert_called_with(
            'Your e-mail address was updated to user@example.com.')

    def test_update_account_with_multiple_emails(self):
        """Test that multiple email addresses are handled correctly"""
        _, mock_storage, mock_regr = self._prepare_mock_account()
        self.assertIsNone(
            self._call(['update_account', '-m', 'user@example.com,user@example.org'])
        )
        mock_regr.body.update.assert_called_with(
            contact=['mailto:user@example.com', 'mailto:user@example.org']
        )
        self.assertEqual(mock_storage.update_regr.call_count, 1)
        self.mocks['notify'].assert_called_with(
            'Your e-mail address was updated to user@example.com,user@example.org.')
if __name__ == '__main__':
unittest.main() # pragma: no cover
| certbot/tests/main_test.py | 90,467 | Tests for certbot._internal.main.certonly.
Tests for certbot._internal.main._delete_if_appropriate
Tests for certbot._internal.main._determine_account.
Tests for certbot._internal.main.enhance.
Tests for certbot._internal.main._find_domains_or_certname.
Tests for certbot._internal.main.install.
Tests for different commands.
Tests for certbot._internal.main.make_or_verify_needed_dirs.
Tests for certbot._internal.main.revoke.
Tests for certbot._internal.main.run.
Test for certbot._internal.main._handle_* methods
Tests for certbot._internal.main.update_account
Run the cli with output streams, actual client and optionally
os.path.isfile() mocked out
Run the client with output streams mocked out
Ensure that a particular error raises a missing cli flag error containing message
Utility to assert that email removal is handled correctly
Mock os.path.isfile()
Test that no existing account is handled correctly
Revoking with --server should use the server from the CLI
Revoking with --cert-name where the lineage server is empty shouldn't crash
Test that providing an empty email is handled as no email
Test that --register-unsafely-without-email is handled as no email
Test that updating with a singular email is handled correctly
Test that multiple email addresses are handled correctly
Raises error.Error.
Write message to stdout.
Tests for certbot._internal.main.
coding=utf-8 pylint: disable=too-many-lines pylint: disable=unused-import pragma: no cover pylint: disable=protected-access returns the client pylint: disable=unused-argument user confirms updating lineage with new domains error in _ask_user_to_confirm_new_names no lineage with this name but we specified domains so create a new cert no lineage with this name and we didn't give domains pylint: disable=protected-access pylint: disable=protected-access pylint: disable=protected-access required to reset set_by_cli state pylint: disable = unused-argument pylint: disable = unused-argument pylint: disable = unused-argument pylint: disable = unused-argument For use in saving accounts: fake out the new_authz URL. pylint: disable=protected-access Reset globals in cli pylint: disable=unused-argument NOTE: parser can alter its args! NOTE: parser can alter its args! Normally the client is totally mocked out, but here we need more arguments to automate it... This needed two calls to find_all(), which we're avoiding for now because of possible side effects: https://github.com/letsencrypt/letsencrypt/commit/51ed2b681f87b1eb29088dd48718a54f401e4855 with mock.patch('certbot._internal.cli.plugins_testable') as plugins: plugins.return_value = {"apache": True, "nginx": True} ret, _, _, _ = self._call(args) self.assertTrue("Too many flags setting" in ret) Sending nginx a non-existent conf dir will simulate misconfiguration (we can only do that if certbot-nginx is actually present) FQDN FQDN 2 Bare IP address (this is actually a different error message now) This is an error because you can only use --csr with certonly Asserts we don't suggest donating after a successful dry run pylint: disable=unused-argument pylint: disable=bare-except The location of the previous live privkey.pem is passed to obtain_certificate --dry-run should force renewal in main.py: sleep_time = random.randint(1, 60*8) leave the file empty pylint: disable=protected-accessself.assertTrue('donate' not in 
mock_get_utility().add_message.call_args[0][0]) TODO: It would be more correct to explicitly check that _determine_account() gets called in the above case, but coverage statistics should also show that it did. Make sure we're returning None, and hence not trying to run the without installer default mode of 755 is used returns the client Check that the message includes "enhancements" NOTE: parser can alter its args! When update succeeds, the return value of update_account() is None We submitted a registration to the server We got an update from the server and persisted it We should have notified the user We should not have called subscription because there's no email None if registration succeeds We should have updated the server We should have updated the account on disk Subscription should have been prompted Should have printed the email pragma: no cover | 4,253 | en | 0.711593 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""conv"""
import numpy as np
from mindspore import log as logger
from mindspore.ops import operations as P
from mindspore.ops.primitive import constexpr
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import initializer
from mindspore.common.tensor import Tensor
from mindspore._checkparam import ParamValidator as validator, Rel
from mindspore._checkparam import Validator
from mindspore._checkparam import check_bool, twice, check_int_positive
from mindspore._extends import cell_attr_register
from ..cell import Cell
__all__ = ['Conv2d', 'Conv2dTranspose', 'DepthwiseConv2d', 'Conv1d', 'Conv1dTranspose']
class _Conv(Cell):
    """
    Applies a N-D convolution over an input signal composed of several input planes.

    Base class shared by Conv2d/Conv1d and their transposed/depthwise
    variants: it validates the constructor arguments and creates the
    ``weight`` (and optional ``bias``) parameters. Subclasses supply the
    actual convolution primitive in ``construct``.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 pad_mode,
                 padding,
                 dilation,
                 group,
                 has_bias,
                 weight_init,
                 bias_init,
                 transposed=False):
        super(_Conv, self).__init__()
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        self.kernel_size = kernel_size
        self.stride = stride
        self.pad_mode = pad_mode
        self.weight_init = weight_init
        self.bias_init = bias_init
        # padding is either a single int (same pad everywhere) or a tuple of
        # per-side pads; each value must be >= 0.
        if isinstance(padding, int):
            Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
            self.padding = padding
        elif isinstance(padding, tuple):
            for pad in padding:
                Validator.check_integer('padding item', pad, 0, Rel.GE, self.cls_name)
            self.padding = padding
        else:
            # NOTE(review): message reads "int/tuple(int) cannot be" — a comma
            # or "and" appears to be missing; confirm before changing the text,
            # since tests may match it verbatim.
            raise TypeError("padding type must be int/tuple(int) cannot be {}!".format(type(padding)))
        self.dilation = dilation
        self.group = check_int_positive(group)
        self.has_bias = has_bias
        # kernel_size/stride/dilation arrive as 2-tuples (subclasses call
        # twice() before delegating here). bool is rejected explicitly
        # because bool is a subclass of int in Python.
        if (not isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or \
            isinstance(kernel_size[0], bool) or isinstance(kernel_size[1], bool) or \
            kernel_size[0] < 1 or kernel_size[1] < 1:
            raise ValueError("Attr 'kernel_size' of 'Conv2D' Op passed "
                             + str(self.kernel_size) + ", should be a int or tuple and equal to or greater than 1.")
        if (not isinstance(stride[0], int)) or (not isinstance(stride[1], int)) or \
            isinstance(stride[0], bool) or isinstance(stride[1], bool) or stride[0] < 1 or stride[1] < 1:
            raise ValueError("Attr 'stride' of 'Conv2D' Op passed "
                             + str(self.stride) + ", should be a int or tuple and equal to or greater than 1.")
        if (not isinstance(dilation[0], int)) or (not isinstance(dilation[1], int)) or \
            isinstance(dilation[0], bool) or isinstance(dilation[1], bool) or dilation[0] < 1 or dilation[1] < 1:
            raise ValueError("Attr 'dilation' of 'Conv2D' Op passed "
                             + str(self.dilation) + ", should be a int or tuple and equal to or greater than 1.")
        # Grouped convolution splits channels into `group` equal parts, so
        # both channel counts must divide evenly.
        if in_channels % group != 0:
            raise ValueError("Attr 'in_channels' of 'Conv2D' Op must be divisible by "
                             "attr 'group' of 'Conv2D' Op.")
        if out_channels % group != 0:
            raise ValueError("Attr 'out_channels' of 'Conv2D' Op must be divisible by "
                             "attr 'group' of 'Conv2D' Op.")
        # Transposed convolution stores the weight with in/out channels
        # swapped relative to a regular convolution.
        if transposed:
            shape = [in_channels, out_channels // group, *kernel_size]
        else:
            shape = [out_channels, in_channels // group, *kernel_size]
        self.weight = Parameter(initializer(self.weight_init, shape), name='weight')
        if check_bool(has_bias):
            self.bias = Parameter(initializer(self.bias_init, [out_channels]), name='bias')
        else:
            # A custom bias_init with has_bias=False is almost certainly a
            # user mistake; warn rather than fail.
            if self.bias_init != 'zeros':
                logger.warning("Value of 'has_bias' is False, value of 'bias_init' will be ignored.")
            self.bias = None

    def construct(self, *inputs):
        """Must be overridden by all subclasses."""
        raise NotImplementedError
class Conv2d(_Conv):
    r"""
    2D convolution layer.

    Applies a 2D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, H_{in}, W_{in})`,
    where :math:`N` is batch size, :math:`C_{in}` is channel number, and :math:`H_{in}, W_{in})` are height and width.
    For each batch of shape :math:`(C_{in}, H_{in}, W_{in})`, the formula is defined as:

    .. math::
        out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,

    where :math:`ccor` is the cross-correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
    from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
    filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
    of kernel and it has shape :math:`(\text{ks_h}, \text{ks_w})`, where :math:`\text{ks_h}` and
    :math:`\text{ks_w}` are the height and width of the convolution kernel. The full kernel has shape
    :math:`(C_{out}, C_{in} // \text{group}, \text{ks_h}, \text{ks_w})`, where group is the group number
    to split the input in the channel dimension.

    If the 'pad_mode' is set to be "valid", the output height and width will be
    :math:`\left \lfloor{1 + \frac{H_{in} + 2 \times \text{padding} - \text{ks_h} -
    (\text{ks_h} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` and
    :math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
    (\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.

    The first introduction can be found in paper `Gradient Based Learning Applied to Document Recognition
    <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.

    Args:
        in_channels (int): The number of input channel :math:`C_{in}`.
        out_channels (int): The number of output channel :math:`C_{out}`.
        kernel_size (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the height
            and width of the 2D convolution window. Single int means the value is for both the height and the width of
            the kernel. A tuple of 2 ints means the first value is for the height and the other is for the
            width of the kernel.
        stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
            the height and width of movement are both strides, or a tuple of two int numbers that
            represent height and width of movement respectively. Default: 1.
        pad_mode (str): Specifies padding mode. The optional values are
            "same", "valid", "pad". Default: "same".

            - same: Adopts the way of completion. The height and width of the output will be the same as
              the input. The total number of padding will be calculated in horizontal and vertical
              directions and evenly distributed to top and bottom, left and right if possible. Otherwise, the
              last extra padding will be done from the bottom and the right side. If this mode is set, `padding`
              must be 0.

            - valid: Adopts the way of discarding. The possible largest height and width of output will be returned
              without padding. Extra pixels will be discarded. If this mode is set, `padding`
              must be 0.

            - pad: Implicit paddings on both sides of the input. The number of `padding` will be padded to the input
              Tensor borders. `padding` should be greater than or equal to 0.
        padding (Union[int, tuple[int]]): Implicit paddings on both sides of the input. If `padding` is one integer,
            the paddings of top, bottom, left and right are the same, equal to padding. If `padding` is a tuple
            with four integers, the paddings of top, bottom, left and right will be equal to padding[0],
            padding[1], padding[2], and padding[3] accordingly. Default: 0.
        dilation (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the dilation rate
            to use for dilated convolution. If set to be :math:`k > 1`, there will
            be :math:`k - 1` pixels skipped for each sampling location. Its value should
            be greater or equal to 1 and bounded by the height and width of the
            input. Default: 1.
        group (int): Split filter into groups, `in_ channels` and `out_channels` should be
            divisible by the number of groups. Default: 1.
        has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
        weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
            It can be a Tensor, a string, an Initializer or a number. When a string is specified,
            values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
            as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
            and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
            Initializer for more details. Default: 'normal'.
        bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
            Initializer and string are the same as 'weight_init'. Refer to the values of
            Initializer for more details. Default: 'zeros'.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.

    Outputs:
        Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

    Examples:
        >>> net = nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal')
        >>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
        >>> net(input).shape
        (1, 240, 1024, 640)
    """

    @cell_attr_register
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):
        # twice() normalizes a scalar into an (h, w) pair; tuples pass through.
        kernel_size = twice(kernel_size)
        stride = twice(stride)
        dilation = twice(dilation)
        super(Conv2d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            pad_mode,
            padding,
            dilation,
            group,
            has_bias,
            weight_init,
            bias_init)
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group)
        self.bias_add = P.BiasAdd()
        # NOTE(review): pad_mode is validated only after P.Conv2D has already
        # been constructed with it — presumably the primitive also validates;
        # confirm the intended ordering.
        if pad_mode not in ('valid', 'same', 'pad'):
            raise ValueError('Attr \'pad_mode\' of \'Conv2d\' Op passed '
                             + str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')

    def construct(self, x):
        # Convolve, then add the bias only when the layer was built with one.
        output = self.conv2d(x, self.weight)
        if self.has_bias:
            output = self.bias_add(output, self.bias)
        return output

    def extend_repr(self):
        # NOTE(review): adjacent string literals join without a space, so the
        # output reads e.g. "kernel_size=(3, 3),stride=(1, 1)" — confirm
        # whether the inconsistent spacing is intentional before changing it.
        s = 'input_channels={}, output_channels={}, kernel_size={},' \
            'stride={}, pad_mode={}, padding={}, dilation={}, ' \
            'group={}, has_bias={},' \
            'weight_init={}, bias_init={}'.format(
                self.in_channels,
                self.out_channels,
                self.kernel_size,
                self.stride,
                self.pad_mode,
                self.padding,
                self.dilation,
                self.group,
                self.has_bias,
                self.weight_init,
                self.bias_init)
        return s
@constexpr
def _check_input_3d(input_shape):
    """Raise ValueError unless the given shape has exactly three dimensions."""
    if len(input_shape) == 3:
        return
    raise ValueError(f"Input should be 3d, but got shape {input_shape}")
class Conv1d(_Conv):
    r"""
    1D convolution layer.
    Applies a 1D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, W_{in})`,
    where :math:`N` is batch size and :math:`C_{in}` is channel number. For each batch of shape
    :math:`(C_{in}, W_{in})`, the formula is defined as:
    .. math::
        out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
    where :math:`ccor` is the cross correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
    from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
    filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
    of kernel and it has shape :math:`(\text{ks_w})`, where :math:`\text{ks_w}` is the width of the convolution kernel.
    The full kernel has shape :math:`(C_{out}, C_{in} // \text{group}, \text{ks_w})`, where group is the group number
    to split the input in the channel dimension.
    If the 'pad_mode' is set to be "valid", the output width will be
    :math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
    (\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
    The first introduction of convolution layer can be found in paper `Gradient Based Learning Applied to Document
    Recognition <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
    Args:
        in_channels (int): The number of input channel :math:`C_{in}`.
        out_channels (int): The number of output channel :math:`C_{out}`.
        kernel_size (int): The data type is int. Specifies the
            width of the 1D convolution window.
        stride (int): The distance of kernel moving, an int number that represents
            the width of movement. Default: 1.
        pad_mode (str): Specifies padding mode. The optional values are
            "same", "valid", "pad". Default: "same".
            - same: Adopts the way of completion. The output width will be the same as the input.
              The total number of padding will be calculated in the horizontal
              direction and evenly distributed to left and right if possible. Otherwise, the
              last extra padding will be done from the bottom and the right side. If this mode is set, `padding`
              must be 0.
            - valid: Adopts the way of discarding. The possible largest width of the output will be returned
              without padding. Extra pixels will be discarded. If this mode is set, `padding`
              must be 0.
            - pad: Implicit paddings on both sides of the input. The number of `padding` will be padded to the input
              Tensor borders. `padding` should be greater than or equal to 0.
        padding (int): Implicit paddings on both sides of the input. Default: 0.
        dilation (int): The data type is int. Specifies the dilation rate
            to use for dilated convolution. If set to be :math:`k > 1`, there will
            be :math:`k - 1` pixels skipped for each sampling location. Its value should
            be greater than or equal to 1 and bounded by the width of the
            input. Default: 1.
        group (int): Split filter into groups, `in_channels` and `out_channels` should be
            divisible by the number of groups. Default: 1.
        has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
        weight_init (Union[Tensor, str, Initializer, numbers.Number]): An initializer for the convolution kernel.
            It can be a Tensor, a string, an Initializer or a number. When a string is specified,
            values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
            as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
            and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
            Initializer for more details. Default: 'normal'.
        bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
            Initializer and string are the same as 'weight_init'. Refer to the values of
            Initializer for more details. Default: 'zeros'.
    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, W_{in})`.
    Outputs:
        Tensor of shape :math:`(N, C_{out}, W_{out})`.
    Examples:
        >>> net = nn.Conv1d(120, 240, 4, has_bias=False, weight_init='normal')
        >>> input = Tensor(np.ones([1, 120, 640]), mindspore.float32)
        >>> net(input).shape
        (1, 240, 640)
    """
    @cell_attr_register
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):
        # Fail fast: reject an invalid pad_mode before validation and op construction.
        # (Previously this check ran last, after P.Conv2D was already built.)
        if pad_mode not in ('valid', 'same', 'pad'):
            raise ValueError('Attr \'pad_mode\' of \'Conv1d\' Op passed '
                             + str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
        Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
        Validator.check_value_type("stride", stride, [int], self.cls_name)
        Validator.check_value_type("padding", padding, [int], self.cls_name)
        Validator.check_value_type("dilation", dilation, [int], self.cls_name)
        Validator.check_integer('kernel_size', kernel_size, 1, Rel.GE, self.cls_name)
        Validator.check_integer('stride', stride, 1, Rel.GE, self.cls_name)
        Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
        Validator.check_integer('dilation', dilation, 1, Rel.GE, self.cls_name)
        # The 1D convolution is implemented on top of the 2D primitive:
        # prepend a dummy height axis of size 1 to every spatial argument.
        kernel_size = (1, kernel_size)
        stride = (1, stride)
        dilation = (1, dilation)
        get_shape = P.Shape()
        get_dtype = P.DType()
        if isinstance(weight_init, Tensor):
            # A user-supplied 3D weight tensor must gain the dummy height axis too.
            weight_init_shape = get_shape(weight_init)
            Validator.check_integer('weight_init_shape', len(weight_init_shape), 3, Rel.EQ, self.cls_name)
            weight_init_dtype = get_dtype(weight_init)
            weight_init_value = weight_init.asnumpy()
            weight_init_value = np.expand_dims(weight_init_value, 2)
            weight_init = Tensor(weight_init_value, weight_init_dtype)
        super(Conv1d, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            pad_mode,
            padding,
            dilation,
            group,
            has_bias,
            weight_init,
            bias_init)
        # Pad only along the (real) width axis; the dummy height axis gets no padding.
        self.padding = (0, 0, padding, padding)
        self.conv2d = P.Conv2D(out_channel=self.out_channels,
                               kernel_size=self.kernel_size,
                               mode=1,
                               pad_mode=self.pad_mode,
                               pad=self.padding,
                               stride=self.stride,
                               dilation=self.dilation,
                               group=self.group)
        self.bias_add = P.BiasAdd()
        self.expand_dims = P.ExpandDims()
        self.squeeze = P.Squeeze(2)
        self.shape = P.Shape()
    def construct(self, x):
        """Run the 1D convolution: lift to 4D, convolve, then drop the dummy axis."""
        x_shape = self.shape(x)
        _check_input_3d(x_shape)
        x = self.expand_dims(x, 2)
        output = self.conv2d(x, self.weight)
        if self.has_bias:
            output = self.bias_add(output, self.bias)
        output = self.squeeze(output)
        return output
    def extend_repr(self):
        """Return the layer configuration string shown in repr() (with fixed spacing)."""
        s = 'input_channels={}, output_channels={}, kernel_size={}, ' \
            'stride={}, pad_mode={}, padding={}, dilation={}, ' \
            'group={}, has_bias={}, ' \
            'weight_init={}, bias_init={}'.format(
                self.in_channels,
                self.out_channels,
                self.kernel_size,
                self.stride,
                self.pad_mode,
                self.padding,
                self.dilation,
                self.group,
                self.has_bias,
                self.weight_init,
                self.bias_init)
        return s
class Conv2dTranspose(_Conv):
    r"""
    2D transposed convolution layer.
    Compute a 2D transposed convolution, which is also known as a deconvolution
    (although it is not an actual deconvolution).
    Input is typically of shape :math:`(N, C, H, W)`, where :math:`N` is batch size and :math:`C` is channel number.
    Args:
        in_channels (int): The number of channels in the input space.
        out_channels (int): The number of channels in the output space.
        kernel_size (Union[int, tuple]): int or a tuple of 2 integers, which specifies the height
            and width of the 2D convolution window. Single int means the value is for both the height and the width of
            the kernel. A tuple of 2 ints means the first value is for the height and the other is for the
            width of the kernel.
        stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
            the height and width of movement are both strides, or a tuple of two int numbers that
            represent height and width of movement respectively. Its value should be equal to or greater than 1.
            Default: 1.
        pad_mode (str): Select the mode of the pad. The optional values are
            "pad", "same", "valid". Default: "same".
            - pad: Implicit paddings on both sides of the input.
            - same: Adopted the way of completion.
            - valid: Adopted the way of discarding.
        padding (Union[int, tuple[int]]): Implicit paddings on both sides of the input. If `padding` is one integer,
            the paddings of top, bottom, left and right are the same, equal to padding. If `padding` is a tuple
            with four integers, the paddings of top, bottom, left and right will be equal to padding[0],
            padding[1], padding[2], and padding[3] accordingly. Default: 0.
        dilation (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the dilation rate
            to use for dilated convolution. If set to be :math:`k > 1`, there will
            be :math:`k - 1` pixels skipped for each sampling location. Its value should
            be greater than or equal to 1 and bounded by the height and width of the
            input. Default: 1.
        group (int): Split filter into groups, `in_channels` and `out_channels` should be
            divisible by the number of groups. This does not support for Davinci devices when group > 1. Default: 1.
        has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
        weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
            It can be a Tensor, a string, an Initializer or a number. When a string is specified,
            values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
            as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
            and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
            Initializer for more details. Default: 'normal'.
        bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
            Initializer and string are the same as 'weight_init'. Refer to the values of
            Initializer for more details. Default: 'zeros'.
    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
    Outputs:
        Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
    Examples:
        >>> net = nn.Conv2dTranspose(3, 64, 4, has_bias=False, weight_init='normal')
        >>> input = Tensor(np.ones([1, 3, 16, 50]), mindspore.float32)
        >>> net(input)
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):
        # Fail fast: reject an invalid pad_mode before validation and op construction.
        # (Previously this check ran after super().__init__.)
        if pad_mode not in ('valid', 'same', 'pad'):
            raise ValueError('Attr \'pad_mode\' of \'Conv2dTranspose\' Op passed '
                             + str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
        kernel_size = twice(kernel_size)
        stride = twice(stride)
        dilation = twice(dilation)
        Validator.check_value_type('padding', padding, (int, tuple), self.cls_name)
        if isinstance(padding, tuple):
            Validator.check_integer('padding size', len(padding), 4, Rel.EQ, self.cls_name)
        # out_channels and in_channels swap.
        # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
        # then Conv2dTranspose's out_channel refers to Conv2DBackpropInput's in_channel.
        super(Conv2dTranspose, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            pad_mode,
            padding,
            dilation,
            group,
            has_bias,
            weight_init,
            bias_init,
            transposed=True)
        # Restore the caller-facing channel ordering (the base class swapped them).
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.shape = P.Shape()
        self.is_valid = self.pad_mode == 'valid'
        self.is_same = self.pad_mode == 'same'
        self.is_pad = self.pad_mode == 'pad'
        if check_bool(has_bias):
            self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
        # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
        self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
                                                      kernel_size=kernel_size,
                                                      mode=1,
                                                      pad_mode=pad_mode,
                                                      pad=padding,
                                                      stride=stride,
                                                      dilation=dilation,
                                                      group=group)
        self.bias_add = P.BiasAdd()
        # Decompose padding into per-edge amounts for the output-size formula.
        if isinstance(self.padding, int):
            self.padding_top, self.padding_bottom, self.padding_left, self.padding_right = (self.padding,) * 4
        else:
            self.padding_top, self.padding_bottom, self.padding_left, self.padding_right = self.padding
    def set_strategy(self, strategy):
        """Set the parallel strategy on the underlying primitive; returns self for chaining."""
        self.conv2d_transpose.set_strategy(strategy)
        return self
    def _deconv_output_length(self, input_length, filter_size, stride_size, dilation_size, padding):
        """Calculate the width and height of output."""
        length = 0
        # Effective filter size once dilation gaps are accounted for.
        filter_size = filter_size + (filter_size - 1) * (dilation_size - 1)
        if self.is_valid:
            if filter_size - stride_size > 0:
                length = input_length * stride_size + filter_size - stride_size
            else:
                length = input_length * stride_size
        elif self.is_same:
            length = input_length * stride_size
        elif self.is_pad:
            length = input_length * stride_size - padding + filter_size - stride_size
        return length
    def construct(self, x):
        """Apply the transposed convolution (and optional bias) to `x`."""
        n, _, h, w = self.shape(x)
        h_out = self._deconv_output_length(h, self.kernel_size[0], self.stride[0], self.dilation[0],
                                           self.padding_top + self.padding_bottom)
        w_out = self._deconv_output_length(w, self.kernel_size[1], self.stride[1], self.dilation[1],
                                           self.padding_left + self.padding_right)
        if self.has_bias:
            return self.bias_add(self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out)),
                                 self.bias)
        return self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out))
    def extend_repr(self):
        """Return the layer configuration string shown in repr() (with fixed spacing)."""
        s = 'input_channels={}, output_channels={}, kernel_size={}, ' \
            'stride={}, pad_mode={}, padding={}, dilation={}, ' \
            'group={}, has_bias={}, ' \
            'weight_init={}, bias_init={}'.format(self.in_channels,
                                                  self.out_channels,
                                                  self.kernel_size,
                                                  self.stride,
                                                  self.pad_mode,
                                                  self.padding,
                                                  self.dilation,
                                                  self.group,
                                                  self.has_bias,
                                                  self.weight_init,
                                                  self.bias_init)
        return s
class Conv1dTranspose(_Conv):
    r"""
    1D transposed convolution layer.
    Compute a 1D transposed convolution, which is also known as a deconvolution
    (although it is not an actual deconvolution).
    Input is typically of shape :math:`(N, C, W)`, where :math:`N` is batch size and :math:`C` is channel number.
    Args:
        in_channels (int): The number of channels in the input space.
        out_channels (int): The number of channels in the output space.
        kernel_size (int): int, which specifies the width of the 1D convolution window.
        stride (int): The distance of kernel moving, an int number that represents
            the width of movement. Default: 1.
        pad_mode (str): Select the mode of the pad. The optional values are
            "pad", "same", "valid". Default: "same".
            - pad: Implicit paddings on both sides of the input.
            - same: Adopted the way of completion.
            - valid: Adopted the way of discarding.
        padding (int): Implicit paddings on both sides of the input. Default: 0.
        dilation (int): The data type is int. Specifies the dilation rate
            to use for dilated convolution. If set to be :math:`k > 1`, there will
            be :math:`k - 1` pixels skipped for each sampling location. Its value should
            be greater than or equal to 1 and bounded by the width of the
            input. Default: 1.
        group (int): Split filter into groups, `in_channels` and `out_channels` should be
            divisible by the number of groups. This is not supported for Davinci devices when group > 1. Default: 1.
        has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
        weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
            It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
            values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
            as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
            and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
            Initializer for more details. Default: 'normal'.
        bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
            Initializer and string are the same as 'weight_init'. Refer to the values of
            Initializer for more details. Default: 'zeros'.
    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, W_{in})`.
    Outputs:
        Tensor of shape :math:`(N, C_{out}, W_{out})`.
    Examples:
        >>> net = nn.Conv1dTranspose(3, 64, 4, has_bias=False, weight_init='normal')
        >>> input = Tensor(np.ones([1, 3, 50]), mindspore.float32)
        >>> net(input)
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):
        # Fail fast: reject an invalid pad_mode before validation and op construction.
        # (Previously this check ran after super().__init__.)
        if pad_mode not in ('valid', 'same', 'pad'):
            raise ValueError('Attr \'pad_mode\' of \'Conv1dTranspose\' Op passed '
                             + str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
        Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
        Validator.check_value_type("stride", stride, [int], self.cls_name)
        Validator.check_value_type("padding", padding, [int], self.cls_name)
        Validator.check_value_type("dilation", dilation, [int], self.cls_name)
        Validator.check_integer('kernel_size', kernel_size, 1, Rel.GE, self.cls_name)
        Validator.check_integer('stride', stride, 1, Rel.GE, self.cls_name)
        Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
        Validator.check_integer('dilation', dilation, 1, Rel.GE, self.cls_name)
        # Implemented on top of the 2D primitive: prepend a dummy height axis of size 1.
        kernel_size = (1, kernel_size)
        stride = (1, stride)
        dilation = (1, dilation)
        get_shape = P.Shape()
        get_dtype = P.DType()
        if isinstance(weight_init, Tensor):
            # A user-supplied 3D weight tensor must gain the dummy height axis too.
            weight_init_shape = get_shape(weight_init)
            Validator.check_integer('weight_init_shape', len(weight_init_shape), 3, Rel.EQ, self.cls_name)
            weight_init_dtype = get_dtype(weight_init)
            weight_init_value = weight_init.asnumpy()
            weight_init_value = np.expand_dims(weight_init_value, 2)
            weight_init = Tensor(weight_init_value, weight_init_dtype)
        # out_channels and in_channels swap.
        # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
        # then Conv1dTranspose's out_channel refers to Conv2DBackpropInput's in_channel.
        super(Conv1dTranspose, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            pad_mode,
            padding,
            dilation,
            group,
            has_bias,
            weight_init,
            bias_init,
            transposed=True)
        # Pad only along the (real) width axis; the dummy height axis gets no padding.
        self.padding = (0, 0, padding, padding)
        # Restore the caller-facing channel ordering (the base class swapped them).
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.shape = P.Shape()
        self.is_valid = self.pad_mode == 'valid'
        self.is_same = self.pad_mode == 'same'
        self.is_pad = self.pad_mode == 'pad'
        if check_bool(has_bias):
            self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
        # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
        self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
                                                      kernel_size=kernel_size,
                                                      mode=1,
                                                      pad_mode=pad_mode,
                                                      pad=self.padding,
                                                      stride=stride,
                                                      dilation=dilation,
                                                      group=group)
        self.bias_add = P.BiasAdd()
        self.expand_dims = P.ExpandDims()
        self.squeeze = P.Squeeze(2)
    def set_strategy(self, strategy):
        """Set the parallel strategy on the underlying primitive; returns self for chaining."""
        self.conv2d_transpose.set_strategy(strategy)
        return self
    def _deconv_output_length(self, input_length, filter_size, stride_size, dilation_size, padding):
        """Calculate the width and height of output."""
        length = 0
        # Effective filter size once dilation gaps are accounted for.
        filter_size = filter_size + (filter_size - 1) * (dilation_size - 1)
        if self.is_valid:
            if filter_size - stride_size > 0:
                length = input_length * stride_size + filter_size - stride_size
            else:
                length = input_length * stride_size
        elif self.is_same:
            length = input_length * stride_size
        elif self.is_pad:
            length = input_length * stride_size - padding + filter_size - stride_size
        return length
    def construct(self, x):
        """Run the 1D transposed convolution: lift to 4D, deconvolve, drop the dummy axis."""
        x_shape = self.shape(x)
        _check_input_3d(x_shape)
        x = self.expand_dims(x, 2)
        n, _, h, w = self.shape(x)
        h_out = self._deconv_output_length(h, self.kernel_size[0], self.stride[0], self.dilation[0],
                                           self.padding[0] + self.padding[1])
        w_out = self._deconv_output_length(w, self.kernel_size[1], self.stride[1], self.dilation[1],
                                           self.padding[2] + self.padding[3])
        output = self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out))
        if self.has_bias:
            output = self.bias_add(output, self.bias)
        output = self.squeeze(output)
        return output
    def extend_repr(self):
        """Return the layer configuration string shown in repr() (with fixed spacing)."""
        s = 'input_channels={}, output_channels={}, kernel_size={}, ' \
            'stride={}, pad_mode={}, padding={}, dilation={}, ' \
            'group={}, has_bias={}, ' \
            'weight_init={}, bias_init={}'.format(self.in_channels,
                                                  self.out_channels,
                                                  self.kernel_size,
                                                  self.stride,
                                                  self.pad_mode,
                                                  self.padding,
                                                  self.dilation,
                                                  self.group,
                                                  self.has_bias,
                                                  self.weight_init,
                                                  self.bias_init)
        return s
class DepthwiseConv2d(Cell):
    r"""
    2D depthwise convolution layer.
    Applies a 2D depthwise convolution over an input tensor which is typically of shape:
    math:`(N, C_{in}, H_{in}, W_{in})`, where :math:`N` is batch size and :math:`C_{in}` is channel number.
    For each batch of shape:math:`(C_{in}, H_{in}, W_{in})`, the formula is defined as:
    .. math::
        out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
    where :math:`ccor` is the cross correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
    from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
    filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
    of kernel and it has shape :math:`(\text{ks_h}, \text{ks_w})`, where :math:`\text{ks_h}` and
    :math:`\text{ks_w}` are the height and width of the convolution kernel. The full kernel has shape
    :math:`(C_{out}, C_{in} // \text{group}, \text{ks_h}, \text{ks_w})`, where group is the group number
    to split the input in the channel dimension.
    If the 'pad_mode' is set to be "valid", the output height and width will be
    :math:`\left \lfloor{1 + \frac{H_{in} + 2 \times \text{padding} - \text{ks_h} -
    (\text{ks_h} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` and
    :math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
    (\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
    The first introduction can be found in paper `Gradient Based Learning Applied to Document Recognition
    <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
    Args:
        in_channels (int): The number of input channel :math:`C_{in}`.
        out_channels (int): The number of output channel :math:`C_{out}`.
        kernel_size (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the height
            and width of the 2D convolution window. Single int means the value is for both the height and the width of
            the kernel. A tuple of 2 ints means the first value is for the height and the other is for the
            width of the kernel.
        group (int): Split filter into groups, `in_channels` and `out_channels` should be
            divisible by the number of groups. If 'group' is None, it will be set as the value of 'in_channels'
        stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
            the height and width of movement are both strides, or a tuple of two int numbers that
            represent height and width of movement respectively. Default: 1.
        pad_mode (str): Specifies padding mode. The optional values are
            "same", "valid", "pad". Default: "same".
            - same: Adopts the way of completion. The height and width of the output will be the same as
              the input. The total number of padding will be calculated in horizontal and vertical
              directions and evenly distributed to top and bottom, left and right if possible. Otherwise, the
              last extra padding will be done from the bottom and the right side. If this mode is set, `padding`
              must be 0.
            - valid: Adopts the way of discarding. The possible largest height and width of output will be returned
              without padding. Extra pixels will be discarded. If this mode is set, `padding`
              must be 0.
            - pad: Implicit paddings on both sides of the input. The number of `padding` will be padded to the input
              Tensor borders. `padding` should be greater than or equal to 0.
        padding (Union[int, tuple[int]]): Implicit paddings on both sides of the input. If `padding` is one integer,
            the paddings of top, bottom, left and right are the same, equal to padding. If `padding` is a tuple
            with four integers, the paddings of top, bottom, left and right will be equal to padding[0],
            padding[1], padding[2], and padding[3] accordingly. Default: 0.
        dilation (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the dilation rate
            to use for dilated convolution. If set to be :math:`k > 1`, there will
            be :math:`k - 1` pixels skipped for each sampling location. Its value should
            be greater than or equal to 1 and bounded by the height and width of the
            input. Default: 1.
        has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
        weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
            It can be a Tensor, a string, an Initializer or a number. When a string is specified,
            values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
            as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
            and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
            Initializer for more details. Default: 'normal'.
        bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
            Initializer and string are the same as 'weight_init'. Refer to the values of
            Initializer for more details. Default: 'zeros'.
    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
    Outputs:
        Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
    Examples:
        >>> net = nn.DepthwiseConv2d(120, 240, 4, has_bias=False, weight_init='normal')
        >>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
        >>> net(input).shape
        (1, 240, 1024, 640)
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 group,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):
        super(DepthwiseConv2d, self).__init__()
        self.kernel_size = twice(kernel_size)
        self.stride = twice(stride)
        self.in_channels = check_int_positive(in_channels)
        self.out_channels = check_int_positive(out_channels)
        if group is None:
            group = in_channels
        # NOTE(review): lowercase `validator` differs from `Validator` used elsewhere
        # in this file — confirm both names are imported at module level.
        validator.check_integer('group', group, in_channels, Rel.EQ)
        validator.check_integer('group', group, out_channels, Rel.EQ)
        validator.check_integer('group', group, 1, Rel.GE)
        self.pad_mode = pad_mode
        # Dilation is kept exactly as given (int or tuple); the previous
        # `self.dilation = twice(dilation)` assignment was a dead store that
        # this raw assignment immediately clobbered, so it has been removed.
        self.dilation = dilation
        self.group = group
        self.has_bias = has_bias
        self.weight_init = weight_init
        self.bias_init = bias_init
        Validator.check_value_type('padding', padding, (int, tuple), self.cls_name)
        if isinstance(padding, tuple):
            Validator.check_integer('padding size', len(padding), 4, Rel.EQ, self.cls_name)
        self.padding = padding
        # channel_multiplier=1: one filter per input channel (pure depthwise).
        self.conv = P.DepthwiseConv2dNative(channel_multiplier=1,
                                            kernel_size=self.kernel_size,
                                            pad_mode=self.pad_mode,
                                            pad=self.padding,
                                            stride=self.stride,
                                            dilation=self.dilation)
        self.bias_add = P.BiasAdd()
        # Depthwise weight layout: (channel_multiplier, in_channels, ks_h, ks_w).
        weight_shape = [1, in_channels, *self.kernel_size]
        self.weight = Parameter(initializer(weight_init, weight_shape), name='weight')
        if check_bool(has_bias):
            self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
        else:
            if bias_init != 'zeros':
                logger.warning("value of `has_bias` is False, value of `bias_init` will be ignored.")
            self.bias = None
    def construct(self, x):
        """Apply the depthwise convolution (and optional bias) to `x`."""
        out = self.conv(x, self.weight)
        if self.has_bias:
            out = self.bias_add(out, self.bias)
        return out
    def extend_repr(self):
        """Return the layer configuration string shown in repr()."""
        s = 'input_channels={}, output_channels={}, kernel_size={}, stride={}, ' \
            'pad_mode={}, padding={}, dilation={}, group={}, ' \
            'has_bias={}, weight_init={}, bias_init={}'.format(
                self.in_channels, self.out_channels, self.kernel_size, self.stride,
                self.pad_mode, self.padding, self.dilation, self.group,
                self.has_bias, self.weight_init, self.bias_init)
        if self.has_bias:
            s += ', bias={}'.format(self.bias)
        return s
| mindspore/nn/layer/conv.py | 49,477 | 1D convolution layer.
Applies a 1D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, W_{in})`,
where :math:`N` is batch size and :math:`C_{in}` is channel number. For each batch of shape
:math:`(C_{in}, W_{in})`, the formula is defined as:
.. math::
out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
where :math:`ccor` is the cross correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
of kernel and it has shape :math:`(\text{ks_w})`, where :math:`\text{ks_w}` is the width of the convolution kernel.
The full kernel has shape :math:`(C_{out}, C_{in} // \text{group}, \text{ks_w})`, where group is the group number
to split the input in the channel dimension.
If the 'pad_mode' is set to be "valid", the output width will be
:math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
(\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
The first introduction of convolution layer can be found in paper `Gradient Based Learning Applied to Document
Recognition <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
Args:
in_channels (int): The number of input channel :math:`C_{in}`.
out_channels (int): The number of output channel :math:`C_{out}`.
kernel_size (int): The data type is int. Specifies the
width of the 1D convolution window.
stride (int): The distance of kernel moving, an int number that represents
the width of movement. Default: 1.
pad_mode (str): Specifies padding mode. The optional values are
"same", "valid", "pad". Default: "same".
- same: Adopts the way of completion. The output width will be the same as the input.
The total number of padding will be calculated in the horizontal
direction and evenly distributed to left and right if possible. Otherwise, the
last extra padding will be done from the bottom and the right side. If this mode is set, `padding`
must be 0.
- valid: Adopts the way of discarding. The possible largest width of the output will be returned
without padding. Extra pixels will be discarded. If this mode is set, `padding`
must be 0.
- pad: Implicit paddings on both sides of the input. The number of `padding` will be padded to the input
Tensor borders. `padding` should be greater than or equal to 0.
padding (int): Implicit paddings on both sides of the input. Default: 0.
dilation (int): The data type is int. Specifies the dilation rate
to use for dilated convolution. If set to be :math:`k > 1`, there will
be :math:`k - 1` pixels skipped for each sampling location. Its value should
be greater than or equal to 1 and bounded by the width of the
input. Default: 1.
group (int): Split filter into groups, `in_channels` and `out_channels` should be
divisible by the number of groups. Default: 1.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): An initializer for the convolution kernel.
It can be a Tensor, a string, an Initializer or a number. When a string is specified,
values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
Initializer for more details. Default: 'normal'.
bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
Initializer and string are the same as 'weight_init'. Refer to the values of
Initializer for more details. Default: 'zeros'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, W_{out})`.
Examples:
>>> net = nn.Conv1d(120, 240, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 120, 640]), mindspore.float32)
>>> net(input).shape
(1, 240, 640)
1D transposed convolution layer.
Compute a 1D transposed convolution, which is also known as a deconvolution
(although it is not an actual deconvolution).
Input is typically of shape :math:`(N, C, W)`, where :math:`N` is batch size and :math:`C` is channel number.
Args:
in_channels (int): The number of channels in the input space.
out_channels (int): The number of channels in the output space.
kernel_size (int): int, which specifies the width of the 1D convolution window.
stride (int): The distance of kernel moving, an int number that represents
the width of movement. Default: 1.
pad_mode (str): Select the mode of the pad. The optional values are
"pad", "same", "valid". Default: "same".
- pad: Implicit paddings on both sides of the input.
- same: Adopted the way of completion.
- valid: Adopted the way of discarding.
padding (int): Implicit paddings on both sides of the input. Default: 0.
dilation (int): The data type is int. Specifies the dilation rate
to use for dilated convolution. If set to be :math:`k > 1`, there will
be :math:`k - 1` pixels skipped for each sampling location. Its value should
be greater or equal to 1 and bounded by the width of the
input. Default: 1.
group (int): Split filter into groups, `in_channels` and `out_channels` should be
divisible by the number of groups. This is not supported for Davinci devices when group > 1. Default: 1.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
Initializer for more details. Default: 'normal'.
bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
Initializer and string are the same as 'weight_init'. Refer to the values of
Initializer for more details. Default: 'zeros'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, W_{out})`.
Examples:
>>> net = nn.Conv1dTranspose(3, 64, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 3, 50]), mindspore.float32)
>>> net(input)
2D convolution layer.
Applies a 2D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, H_{in}, W_{in})`,
where :math:`N` is batch size, :math:`C_{in}` is channel number, and :math:`H_{in}, W_{in})` are height and width.
For each batch of shape :math:`(C_{in}, H_{in}, W_{in})`, the formula is defined as:
.. math::
out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
where :math:`ccor` is the cross-correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
of kernel and it has shape :math:`(\text{ks_h}, \text{ks_w})`, where :math:`\text{ks_h}` and
:math:`\text{ks_w}` are the height and width of the convolution kernel. The full kernel has shape
:math:`(C_{out}, C_{in} // \text{group}, \text{ks_h}, \text{ks_w})`, where group is the group number
to split the input in the channel dimension.
If the 'pad_mode' is set to be "valid", the output height and width will be
:math:`\left \lfloor{1 + \frac{H_{in} + 2 \times \text{padding} - \text{ks_h} -
(\text{ks_h} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` and
:math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
(\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
The first introduction can be found in paper `Gradient Based Learning Applied to Document Recognition
<http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
Args:
in_channels (int): The number of input channel :math:`C_{in}`.
out_channels (int): The number of output channel :math:`C_{out}`.
kernel_size (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the height
and width of the 2D convolution window. Single int means the value is for both the height and the width of
the kernel. A tuple of 2 ints means the first value is for the height and the other is for the
width of the kernel.
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
pad_mode (str): Specifies padding mode. The optional values are
"same", "valid", "pad". Default: "same".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible. Otherwise, the
last extra padding will be done from the bottom and the right side. If this mode is set, `padding`
must be 0.
- valid: Adopts the way of discarding. The possible largest height and width of output will be returned
without padding. Extra pixels will be discarded. If this mode is set, `padding`
must be 0.
- pad: Implicit paddings on both sides of the input. The number of `padding` will be padded to the input
Tensor borders. `padding` should be greater than or equal to 0.
padding (Union[int, tuple[int]]): Implicit paddings on both sides of the input. If `padding` is one integer,
the paddings of top, bottom, left and right are the same, equal to padding. If `padding` is a tuple
with four integers, the paddings of top, bottom, left and right will be equal to padding[0],
padding[1], padding[2], and padding[3] accordingly. Default: 0.
dilation (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the dilation rate
to use for dilated convolution. If set to be :math:`k > 1`, there will
be :math:`k - 1` pixels skipped for each sampling location. Its value should
be greater or equal to 1 and bounded by the height and width of the
input. Default: 1.
group (int): Split filter into groups, `in_channels` and `out_channels` should be
divisible by the number of groups. Default: 1.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
It can be a Tensor, a string, an Initializer or a number. When a string is specified,
values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
Initializer for more details. Default: 'normal'.
bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
Initializer and string are the same as 'weight_init'. Refer to the values of
Initializer for more details. Default: 'zeros'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
>>> net = nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
>>> net(input).shape
(1, 240, 1024, 640)
2D transposed convolution layer.
Compute a 2D transposed convolution, which is also known as a deconvolution
(although it is not an actual deconvolution).
Input is typically of shape :math:`(N, C, H, W)`, where :math:`N` is batch size and :math:`C` is channel number.
Args:
in_channels (int): The number of channels in the input space.
out_channels (int): The number of channels in the output space.
kernel_size (Union[int, tuple]): int or a tuple of 2 integers, which specifies the height
and width of the 2D convolution window. Single int means the value is for both the height and the width of
the kernel. A tuple of 2 ints means the first value is for the height and the other is for the
width of the kernel.
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Its value should be equal to or greater than 1.
Default: 1.
pad_mode (str): Select the mode of the pad. The optional values are
"pad", "same", "valid". Default: "same".
- pad: Implicit paddings on both sides of the input.
- same: Adopted the way of completion.
- valid: Adopted the way of discarding.
padding (Union[int, tuple[int]]): Implicit paddings on both sides of the input. If `padding` is one integer,
the paddings of top, bottom, left and right are the same, equal to padding. If `padding` is a tuple
with four integers, the paddings of top, bottom, left and right will be equal to padding[0],
padding[1], padding[2], and padding[3] accordingly. Default: 0.
dilation (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the dilation rate
to use for dilated convolution. If set to be :math:`k > 1`, there will
be :math:`k - 1` pixels skipped for each sampling location. Its value should
be greater than or equal to 1 and bounded by the height and width of the
input. Default: 1.
group (int): Split filter into groups, `in_channels` and `out_channels` should be
divisible by the number of groups. This is not supported for Davinci devices when group > 1. Default: 1.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
It can be a Tensor, a string, an Initializer or a number. When a string is specified,
values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
Initializer for more details. Default: 'normal'.
bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
Initializer and string are the same as 'weight_init'. Refer to the values of
Initializer for more details. Default: 'zeros'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
>>> net = nn.Conv2dTranspose(3, 64, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 3, 16, 50]), mindspore.float32)
>>> net(input)
2D depthwise convolution layer.
Applies a 2D depthwise convolution over an input tensor which is typically of shape
:math:`(N, C_{in}, H_{in}, W_{in})`, where :math:`N` is batch size and :math:`C_{in}` is channel number.
For each batch of shape :math:`(C_{in}, H_{in}, W_{in})`, the formula is defined as:
.. math::
out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
where :math:`ccor` is the cross correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
of kernel and it has shape :math:`(\text{ks_h}, \text{ks_w})`, where :math:`\text{ks_h}` and
:math:`\text{ks_w}` are the height and width of the convolution kernel. The full kernel has shape
:math:`(C_{out}, C_{in} // \text{group}, \text{ks_h}, \text{ks_w})`, where group is the group number
to split the input in the channel dimension.
If the 'pad_mode' is set to be "valid", the output height and width will be
:math:`\left \lfloor{1 + \frac{H_{in} + 2 \times \text{padding} - \text{ks_h} -
(\text{ks_h} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` and
:math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
(\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
The first introduction can be found in paper `Gradient Based Learning Applied to Document Recognition
<http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
Args:
in_channels (int): The number of input channel :math:`C_{in}`.
out_channels (int): The number of output channel :math:`C_{out}`.
kernel_size (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the height
and width of the 2D convolution window. Single int means the value is for both the height and the width of
the kernel. A tuple of 2 ints means the first value is for the height and the other is for the
width of the kernel.
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
pad_mode (str): Specifies padding mode. The optional values are
"same", "valid", "pad". Default: "same".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible. Otherwise, the
last extra padding will be done from the bottom and the right side. If this mode is set, `padding`
must be 0.
- valid: Adopts the way of discarding. The possible largest height and width of output will be returned
without padding. Extra pixels will be discarded. If this mode is set, `padding`
must be 0.
- pad: Implicit paddings on both sides of the input. The number of `padding` will be padded to the input
Tensor borders. `padding` should be greater than or equal to 0.
padding (Union[int, tuple[int]]): Implicit paddings on both sides of the input. If `padding` is one integer,
the paddings of top, bottom, left and right are the same, equal to padding. If `padding` is a tuple
with four integers, the paddings of top, bottom, left and right will be equal to padding[0],
padding[1], padding[2], and padding[3] accordingly. Default: 0.
dilation (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the dilation rate
to use for dilated convolution. If set to be :math:`k > 1`, there will
be :math:`k - 1` pixels skipped for each sampling location. Its value should
be greater than or equal to 1 and bounded by the height and width of the
input. Default: 1.
group (int): Split filter into groups, `in_channels` and `out_channels` should be
divisible by the number of groups. If 'group' is None, it will be set as the value of 'in_channels'.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
It can be a Tensor, a string, an Initializer or a number. When a string is specified,
values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
Initializer for more details. Default: 'normal'.
bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
Initializer and string are the same as 'weight_init'. Refer to the values of
Initializer for more details. Default: 'zeros'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
>>> net = nn.DepthwiseConv2d(120, 240, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
>>> net(input).shape
(1, 240, 1024, 640)
Applies a N-D convolution over an input signal composed of several input planes.
Calculate the width and height of output.
Calculate the width and height of output.
Must be overridden by all subclasses.
conv
Copyright 2020 Huawei Technologies Co., Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================ out_channels and in_channels swap. cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel, then Conv2dTranspose's out_channel refers to Conv2DBackpropInput's in_channel. cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel. out_channels and in_channels swap. cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel, then Conv1dTranspose's out_channel refers to Conv2DBackpropInput's in_channel. cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel. | 23,984 | en | 0.705282 |
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: encodings.cp1026
import codecs
class Codec(codecs.Codec):
    """Stateless cp1026 codec backed by the module's charmap tables."""

    def encode(self, input, errors='strict'):
        # Map each character through the cp1026 encoding table.
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        # Map each byte through the cp1026 decoding table.
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding needs no state between calls."""

    def encode(self, input, final=False):
        # [0] drops the "characters consumed" count from the result tuple.
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap decoding needs no state between calls."""

    def decode(self, input, final=False):
        # [0] drops the "bytes consumed" count from the result tuple.
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
# Stream wrapper: inherits encode() from Codec, buffering from StreamWriter.
class StreamWriter(Codec, codecs.StreamWriter):
    pass
# Stream wrapper: inherits decode() from Codec, buffering from StreamReader.
class StreamReader(Codec, codecs.StreamReader):
    pass
def getregentry():
    """Return the codecs registry entry for the cp1026 charmap codec."""
    # One shared stateless Codec instance serves both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1026',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
decoding_table = u'\x00\x01\x02\x03\x9c\t\x86\x7f\x97\x8d\x8e\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x9d\x85\x08\x87\x18\x19\x92\x8f\x1c\x1d\x1e\x1f\x80\x81\x82\x83\x84\n\x17\x1b\x88\x89\x8a\x8b\x8c\x05\x06\x07\x90\x91\x16\x93\x94\x95\x96\x04\x98\x99\x9a\x9b\x14\x15\x9e\x1a \xa0\xe2\xe4\xe0\xe1\xe3\xe5{\xf1\xc7.<(+!&\xe9\xea\xeb\xe8\xed\xee\xef\xec\xdf\u011e\u0130*);^-/\xc2\xc4\xc0\xc1\xc3\xc5[\xd1\u015f,%_>?\xf8\xc9\xca\xcb\xc8\xcd\xce\xcf\xcc\u0131:\xd6\u015e\'=\xdc\xd8abcdefghi\xab\xbb}`\xa6\xb1\xb0jklmnopqr\xaa\xba\xe6\xb8\xc6\xa4\xb5\xf6stuvwxyz\xa1\xbf]$@\xae\xa2\xa3\xa5\xb7\xa9\xa7\xb6\xbc\xbd\xbe\xac|\xaf\xa8\xb4\xd7\xe7ABCDEFGHI\xad\xf4~\xf2\xf3\xf5\u011fJKLMNOPQR\xb9\xfb\\\xf9\xfa\xff\xfc\xf7STUVWXYZ\xb2\xd4#\xd2\xd3\xd50123456789\xb3\xdb"\xd9\xda\x9f'
encoding_table = codecs.charmap_build(decoding_table) | encodings/cp1026.py | 2,004 | uncompyle6 version 3.2.4 Python bytecode 2.7 (62211) Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)] Embedded file name: encodings.cp1026 | 192 | en | 0.51824 |
"""
WSGI config for lacuna project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or set
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Wrap the WSGI app so unhandled exceptions are reported to Sentry via Raven.
application = Sentry(application)

# Apply further WSGI middleware here, e.g.:
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| config/wsgi.py | 1,548 | WSGI config for lacuna project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks if running multiple sites in the same mod_wsgi process. To fix this, use mod_wsgi daemon mode with each site in its own daemon process, or use os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production" This application object is used by any WSGI server configured to use this file. This includes Django's development server, if the WSGI_APPLICATION setting points here. Apply WSGI middleware here. from helloworld.wsgi import HelloWorldApplication application = HelloWorldApplication(application) | 1,235 | en | 0.846466 |
# Undirected Graph from demo represented as Adjacency List.
# Each key is a vertex; each value lists (neighbour, edge_weight) pairs.
# Because the graph is undirected, every edge is stored under both endpoints.
graph = {
    "a": [("b", 7), ("c", 9), ("f", 14)],
    "b": [("a", 7), ("c", 10), ("d", 15)],
    "c": [("a", 9), ("b", 10), ("d", 11), ("f", 2)],
    "d": [("b", 15), ("c", 11), ("e", 6)],
    "e": [("d", 6), ("f", 9)],
    "f": [("a", 14), ("c", 2), ("e", 9)],
}
def find_vertices():
    """Return the graph's vertices as a list.

    Returns a list snapshot rather than the live ``dict_keys`` view the
    original produced, so the result prints cleanly (no ``dict_keys([...])``
    wrapper) and does not change if the graph is mutated afterwards.
    Callers that only iterate the result are unaffected.
    """
    return list(graph)
def find_edges():
    """Return every edge as a (vertex, neighbour, weight) triple.

    Each undirected edge is reported twice, once from each endpoint,
    mirroring the adjacency-list storage.
    """
    return [(vertex, neighbour, weight)
            for vertex in graph
            for neighbour, weight in graph[vertex]]
# Demo output; note each undirected edge is listed twice (once per endpoint).
print("Vertices: {}".format(find_vertices()))
print("Edges: {}".format(find_edges()))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
from tensorlayer.models import Model
from tests.utils import CustomTestCase
class Laye_BatchNorm_Test(CustomTestCase):
    """Tests for BatchNorm and the dimension-specific BatchNorm1d/2d/3d.

    For each dimensionality the fixture builds three conv + batch-norm
    pipelines: one using the generic ``BatchNorm`` layer, one static
    ``Model`` using the dimension-specific layer, and one dynamic ``Model``
    subclass. (The class name keeps its original spelling so external test
    selection by name keeps working.)
    """

    @classmethod
    def setUpClass(cls):
        # Channels-last input shapes for the 1D, 2D and 3D cases.
        x_1_input_shape = [None, 100, 1]
        x_2_input_shape = [None, 100, 100, 3]
        x_3_input_shape = [None, 100, 100, 100, 3]
        batchsize = 2

        cls.x1 = tf.random.normal([batchsize] + x_1_input_shape[1:])
        cls.x2 = tf.random.normal([batchsize] + x_2_input_shape[1:])
        cls.x3 = tf.random.normal([batchsize] + x_3_input_shape[1:])

        ## Base: generic BatchNorm behind 1D/2D/3D convolutions =============
        ni_1 = Input(x_1_input_shape, name='test_ni1')
        nn_1 = Conv1d(
            n_filter=32, filter_size=5, stride=2, name='test_conv1d'
        )(ni_1)
        n1_b = BatchNorm(name='test_conv')(nn_1)
        cls.n1_b = n1_b
        cls.base_1d = Model(inputs=ni_1, outputs=n1_b, name='test_base_1d')

        ni_2 = Input(x_2_input_shape, name='test_ni2')
        nn_2 = Conv2d(
            n_filter=32, filter_size=(3, 3), strides=(2, 2), name='test_conv2d'
        )(ni_2)
        n2_b = BatchNorm2d(name='test_bn2d')(nn_2)
        cls.n2_b = n2_b
        cls.base_2d = Model(inputs=ni_2, outputs=n2_b, name='test_base_2d')

        # Fixed copy-paste slip: this input was named 'test_ni2', duplicating
        # the 2D input's name.
        ni_3 = Input(x_3_input_shape, name='test_ni3')
        nn_3 = Conv3d(
            n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), name='test_conv3d'
        )(ni_3)
        n3_b = BatchNorm3d(name='test_bn3d')(nn_3)
        cls.n3_b = n3_b
        cls.base_3d = Model(inputs=ni_3, outputs=n3_b, name='test_base_3d')

        ## 1D ===============================================================
        nin_1 = Input(x_1_input_shape, name='test_in1')
        n1 = Conv1d(
            n_filter=32, filter_size=5, stride=2, name='test_conv1d'
        )(nin_1)
        n1 = BatchNorm1d(name='test_bn1d')(n1)
        cls.n1 = n1
        cls.static_1d = Model(inputs=nin_1, outputs=n1)

        class bn_1d_model(Model):

            def __init__(self):
                super(bn_1d_model, self).__init__(name='test_bn_1d_model')
                self.conv = Conv1d(n_filter=32, filter_size=5, stride=2, name='test_conv1d', in_channels=1)
                self.bn = BatchNorm1d(num_features=32, name='test_bn1d')

            def forward(self, x):
                x = self.bn(self.conv(x))
                return x

        cls.dynamic_1d = bn_1d_model()

        print("Printing BatchNorm1d")
        print(cls.static_1d)
        print(cls.dynamic_1d)

        ## 2D ===============================================================
        nin_2 = Input(x_2_input_shape, name='test_in2')
        n2 = Conv2d(
            n_filter=32, filter_size=(3, 3), strides=(2, 2), name='test_conv2d'
        )(nin_2)
        n2 = BatchNorm2d(name='test_bn2d')(n2)
        cls.n2 = n2
        cls.static_2d = Model(inputs=nin_2, outputs=n2)

        class bn_2d_model(Model):

            def __init__(self):
                super(bn_2d_model, self).__init__(name='test_bn_2d_model')
                self.conv = Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), name='test_conv2d', in_channels=3)
                self.bn = BatchNorm2d(num_features=32, name='test_bn2d')

            def forward(self, x):
                x = self.bn(self.conv(x))
                return x

        cls.dynamic_2d = bn_2d_model()

        # Fixed log label: the original printed "Printing BatchNorm1d" here.
        print("Printing BatchNorm2d")
        print(cls.static_2d)
        print(cls.dynamic_2d)

        ## 3D ===============================================================
        nin_3 = Input(x_3_input_shape, name='test_in3')
        n3 = Conv3d(
            n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), name='test_conv3d'
        )(nin_3)
        n3 = BatchNorm3d(name='test_bn3d', act=tf.nn.relu)(n3)
        cls.n3 = n3
        cls.static_3d = Model(inputs=nin_3, outputs=n3)

        class bn_3d_model(Model):

            def __init__(self):
                super(bn_3d_model, self).__init__(name='test_bn_3d_model')
                self.conv = Conv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), name='test_conv3d', in_channels=3)
                self.bn = BatchNorm3d(num_features=32, name='test_bn3d')

            def forward(self, x):
                x = self.bn(self.conv(x))
                return x

        cls.dynamic_3d = bn_3d_model()

        # Fixed log label: the original printed "Printing BatchNorm1d" here.
        print("Printing BatchNorm3d")
        print(cls.static_3d)
        print(cls.dynamic_3d)

    @classmethod
    def tearDownClass(cls):
        # Nothing to clean up in TF2 eager mode.
        # tf.reset_default_graph()
        pass

    def test_BatchNorm(self):
        # Stride-2 convs halve the spatial extent: 100 -> 50 per dimension.
        self.assertEqual(self.n1_b.shape[1:], (50, 32))
        out = self.base_1d(self.x1, is_train=True)

        self.assertEqual(self.n2_b.shape[1:], (50, 50, 32))
        out = self.base_2d(self.x2, is_train=True)

        self.assertEqual(self.n3_b.shape[1:], (50, 50, 50, 32))
        out = self.base_3d(self.x3, is_train=True)

    def test_BatchNorm1d(self):
        self.assertEqual(self.n1.shape[1:], (50, 32))
        out = self.static_1d(self.x1, is_train=True)
        out = self.dynamic_1d(self.x1, is_train=True)

    def test_BatchNorm2d(self):
        self.assertEqual(self.n2.shape[1:], (50, 50, 32))
        out = self.static_2d(self.x2, is_train=True)
        out = self.dynamic_2d(self.x2, is_train=True)
        # Also exercise the inference path.
        out = self.dynamic_2d(self.x2, is_train=False)

    def test_BatchNorm3d(self):
        self.assertEqual(self.n3.shape[1:], (50, 50, 50, 32))
        out = self.static_3d(self.x3, is_train=True)
        out = self.dynamic_3d(self.x3, is_train=True)

    def test_dataformat(self):
        # channels_first must be accepted by every variant...
        bn1d = BatchNorm1d(data_format='channels_first', num_features=32)
        bn2d = BatchNorm2d(data_format='channels_first', num_features=32)
        bn3d = BatchNorm3d(data_format='channels_first', num_features=32)
        bn = BatchNorm(data_format='channels_first')

        # ...while an unknown data_format must raise ValueError.
        # NOTE(review): if no exception is raised, this block passes silently;
        # consider assertRaises instead.
        try:
            bn_fail = BatchNorm1d(data_format='xyz', num_features=32)
        except Exception as e:
            self.assertIsInstance(e, ValueError)
            print(e)

    def test_exception(self):
        # The generic BatchNorm rejects num_features...
        # NOTE(review): same silent-pass caveat as test_dataformat.
        try:
            bn = BatchNorm(num_features=32)
        except Exception as e:
            self.assertIsInstance(e, ValueError)
            print(e)

        # ...and decay outside [0, 1] is rejected.
        try:
            ni = Input([None, 100, 1], name='test_ni1')
            bn = BatchNorm(decay=1.5)(ni)
        except Exception as e:
            self.assertIsInstance(e, ValueError)
            print(e)
# Run the suite directly with verbose TensorLayer logging for debugging.
if __name__ == '__main__':

    tl.logging.set_verbosity(tl.logging.DEBUG)

    unittest.main()
| tests/layers/test_layers_normalization.py | 6,711 | !/usr/bin/env python -*- coding: utf-8 -*- Base 1D ======================================================================== 2D ======================================================================== 3D ======================================================================== tf.reset_default_graph() | 300 | fr | 0.32301 |
#!/usr/bin/env python3
#author markpurcell@ie.ibm.com
"""Pytest configuration: command-line options and shared fixtures for the test suite.
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
"""
IBM-Review-Requirement: Art30.3 - DO NOT TRANSFER OR EXCLUSIVELY LICENSE THE FOLLOWING CODE UNTIL 30/11/2025!
Please note that the following code was developed for the project MUSKETEER in DRL funded by the European Union
under the Horizon 2020 Program.
The project started on 01/12/2018 and was completed on 30/11/2021. Thus, in accordance with article 30.3 of the
Multi-Beneficiary General Model Grant Agreement of the Program, the above limitations are in force until 30/11/2025.
"""
import pytest
import json
def pytest_addoption(parser):
    """Register the command-line options consumed by the fixtures below."""
    options = (
        ("--credentials", True),   # path to the credentials JSON; mandatory
        ("--feed_queue", False),
        ("--reply_queue", False),
    )
    for flag, mandatory in options:
        parser.addoption(flag, required=mandatory)
@pytest.fixture
def credentials(request):
    """Path of the credentials JSON file given via --credentials.

    When the requesting test is class-based, the parsed JSON is also
    attached to the class as ``credentials``.
    """
    path = request.config.getoption('credentials')
    if request.cls:
        with open(path) as handle:
            request.cls.credentials = json.load(handle)
    return path
@pytest.fixture
def feed_queue(request):
    """Optional feed queue name from --feed_queue; mirrored onto the class."""
    queue = request.config.getoption('feed_queue')
    if request.cls:
        request.cls.feed_queue = queue
    return queue
@pytest.fixture
def reply_queue(request):
    """Optional reply queue name from --reply_queue; mirrored onto the class."""
    queue = request.config.getoption('reply_queue')
    if request.cls:
        request.cls.reply_queue = queue
    return queue
| tests/conftest.py | 2,170 | RabbitMQ helper class.
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
!/usr/bin/env python3author markpurcell@ie.ibm.com | 876 | en | 0.846737 |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands import CliCommandType
def load_command_table(self, _):
    """Register the `stack-hci cluster` command group and its commands."""
    from azext_stack_hci.generated._client_factory import cf_cluster

    # Command type routing to the vendored ClusterOperations SDK methods.
    cluster_command_type = CliCommandType(
        operations_tmpl='azext_stack_hci.vendored_sdks.azurestackhci.operations._cluster_operations#ClusterOperations.{}',
        client_factory=cf_cluster)

    with self.command_group('stack-hci cluster', cluster_command_type, client_factory=cf_cluster) as g:
        g.custom_command('list', 'stack_hci_cluster_list')
        g.custom_show_command('show', 'stack_hci_cluster_show')
        g.custom_command('create', 'stack_hci_cluster_create')
        g.custom_command('update', 'stack_hci_cluster_update')
        # Deletion is destructive, so require interactive confirmation.
        g.custom_command('delete', 'stack_hci_cluster_delete', confirmation=True)
| src/stack-hci/azext_stack_hci/generated/commands.py | 1,278 | -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- pylint: disable=line-too-long | 469 | en | 0.543515 |
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def index(request):
    """Landing view: point visitors at the Django admin site."""
    message = "Check URL => /admin"
    return HttpResponse(message)
# -*- coding: utf-8 -*-
#
# This document is free and open-source software, subject to the OSI-approved
# BSD license below.
#
# Copyright (c) 2011 - 2013 Alexis Petrounias <www.petrounias.org>,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the author nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Django CTE Trees - an experimental PostgreSQL Common Table Expressions (CTE)
implementation of Adjacency-Linked trees.
"""

# Version kept as a tuple for programmatic checks; the dotted string below is
# derived from it.
VERSION = (0, 2, 2)

__version__ = ".".join(str(part) for part in VERSION)
| cte_forest/__init__.py | 1,877 | Django CTE Trees - an experimental PostgreSQL Common Table Expressions (CTE)
implementation of of Adjacency-Linked trees.
-*- coding: utf-8 -*- This document is free and open-source software, subject to the OSI-approved BSD license below. Copyright (c) 2011 - 2013 Alexis Petrounias <www.petrounias.org>, All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 1,738 | en | 0.870109 |
"""CveException Class"""
import cloudpassage.sanity as sanity
from .halo_endpoint import HaloEndpoint
from .http_helper import HttpHelper
class CveExceptions(HaloEndpoint):
    """Interact with CVE exceptions in the Halo API.

    Args:
        session (:class:`cloudpassage.HaloSession`): This will define how you
            interact with the Halo API, including proxy settings and API keys
            used for authentication.

    Keyword args:
        endpoint_version (int): Endpoint version override.
    """

    object_name = "cve_exception"
    objects_name = "cve_exceptions"
    default_endpoint_version = 1

    def endpoint(self):
        """Return the endpoint for API requests."""
        return "/v{version}/{name}".format(
            version=self.endpoint_version, name=self.objects_name
        )

    @classmethod
    def object_key(cls):
        """Return the key used to pull the object from the json document."""
        return cls.object_name

    @classmethod
    def pagination_key(cls):
        """Return the pagination key for parsing paged results."""
        return cls.objects_name

    def create(self, package_name, package_version, scope="all", scope_id=''):
        """Create a CVE exception.

        Args:
            package_name (str): Name of the vulnerable package to be excepted.
            package_version (str): Version number of the vulnerable package.
            scope (str): Possible values are server, group and all.
            scope_id (str): Server ID when ``scope`` is ``server``; group ID
                when ``scope`` is ``group``. Ignored when ``scope`` is
                ``all``.

        Returns:
            str: ID of the newly-created cve exception
        """
        exception_details = {
            "package_name": package_name,
            "package_version": package_version,
            "scope": scope
        }
        if scope != "all":
            sanity.validate_cve_exception_scope_id(scope_id)
            # Map the scope to the request field carrying its identifier.
            # An unrecognized scope raises KeyError here.
            scope_field = {"server": "server_id", "group": "group_id"}[scope]
            exception_details[scope_field] = scope_id
        helper = HttpHelper(self.session)
        response = helper.post(
            self.endpoint(), {"cve_exception": exception_details}
        )
        return response["cve_exception"]["id"]

    def update(self, exception_id, **kwargs):
        """Update a CVE exception.

        Args:
            exception_id (str): Identifier for the CVE exception.

        Keyword Args:
            scope (str): Possible values are server, group and all.
            group_id (str): The ID of the server group containing the server
                to which this exception applies.
            server_id (str): The ID of the server to which this exception
                applies.
            cve_entries : List of CVEs

        Returns:
            True if successful, throws exception otherwise.
        """
        target = "/".join([self.endpoint(), exception_id])
        helper = HttpHelper(self.session)
        return helper.put(target, {"cve_exception": kwargs})
# The following class needs to live on only in name, and should absorb the
# functionality of the current CveExceptions class.
class CveException(HaloEndpoint):
    """Legacy access point for CVE exceptions.

    Args:
        session (:class:`cloudpassage.HaloSession`): This will define how you
            interact with the Halo API, including proxy settings and API keys
            used for authentication.
    """

    object_name = "cve_exception"
    objects_name = "cve_exceptions"
    default_endpoint_version = 1

    def endpoint(self):
        """Return the endpoint for API requests."""
        return "/v{version}/{name}".format(
            version=self.endpoint_version, name=self.objects_name
        )

    @classmethod
    def object_key(cls):
        """Return the key used to pull the object from the json document."""
        return cls.object_name

    @classmethod
    def pagination_key(cls):
        """Return the pagination key for parsing paged results."""
        return cls.objects_name
| cloudpassage/cve_exception.py | 4,240 | Initializing the CveException class:
Args:
session (:class:`cloudpassage.HaloSession`): This will define how you
interact with the Halo API, including proxy settings and API keys
used for authentication.
Initializing the CveException class:
Args:
session (:class:`cloudpassage.HaloSession`): This will define how you
interact with the Halo API, including proxy settings and API keys
used for authentication.
Keyword args:
endpoint_version (int): Endpoint version override.
This method allows user to create CVE exceptions.
Args:
package_name (str): The name of the vulnerable
package to be excepted.
package_version (str): The version number of the
vulnerable package.
scope (str): Possible values are server, group and all.
scope_id (str): If you pass the value server as scope, this field
will include server ID. If you pass the value group as scope,
this field will include group ID.
Returns:
str: ID of the newly-created cve exception
Return the endpoint for API requests.
Return the endpoint for API requests.
Return the key used to pull the object from the json document.
Return the key used to pull the object from the json document.
Return the pagination key for parsing paged results.
Return the pagination key for parsing paged results.
Update CVE Exceptions.
Args:
exception_id (str): Identifier for the CVE exception.
Keyword Args:
scope (str): Possible values are server, group and all.
group_id (str): The ID of the server group containing the server to
which this exception applies.
server_id (str): The ID of the server to which this exception
applies.
cve_entries : List of CVEs
Returns:
True if successful, throws exception otherwise.
CveException Class
The following class needs to live on only in name, and should absorb the functionality of the current CveExceptions class. | 1,975 | en | 0.712898 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserAccountBindingSyncModel(object):
    """Domain model for the Alipay user-account binding sync payload."""

    # Payload fields, in the order they are emitted by to_alipay_dict().
    _FIELD_NAMES = (
        "alipay_user_id",
        "create_time",
        "data_version",
        "havana_user_id",
        "modify_time",
        "realm",
        "status",
    )

    def __init__(self):
        # Backing attributes (one per field) all start out unset.
        for field in self._FIELD_NAMES:
            setattr(self, "_" + field, None)

    @property
    def alipay_user_id(self):
        return self._alipay_user_id

    @alipay_user_id.setter
    def alipay_user_id(self, new_value):
        self._alipay_user_id = new_value

    @property
    def create_time(self):
        return self._create_time

    @create_time.setter
    def create_time(self, new_value):
        self._create_time = new_value

    @property
    def data_version(self):
        return self._data_version

    @data_version.setter
    def data_version(self, new_value):
        self._data_version = new_value

    @property
    def havana_user_id(self):
        return self._havana_user_id

    @havana_user_id.setter
    def havana_user_id(self, new_value):
        self._havana_user_id = new_value

    @property
    def modify_time(self):
        return self._modify_time

    @modify_time.setter
    def modify_time(self, new_value):
        self._modify_time = new_value

    @property
    def realm(self):
        return self._realm

    @realm.setter
    def realm(self, new_value):
        self._realm = new_value

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, new_value):
        self._status = new_value

    def to_alipay_dict(self):
        """Serialize the truthy fields into a plain dict for the API.

        Nested objects that expose ``to_alipay_dict`` are serialized
        recursively; falsy fields are omitted entirely.
        """
        params = dict()
        for field in self._FIELD_NAMES:
            current = getattr(self, field)
            if not current:
                continue
            if hasattr(current, 'to_alipay_dict'):
                params[field] = current.to_alipay_dict()
            else:
                params[field] = current
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from an API dict; return None for empty input."""
        if not d:
            return None
        model = AlipayUserAccountBindingSyncModel()
        for field in AlipayUserAccountBindingSyncModel._FIELD_NAMES:
            if field in d:
                setattr(model, field, d[field])
        return model
| alipay/aop/api/domain/AlipayUserAccountBindingSyncModel.py | 3,962 | !/usr/bin/env python -*- coding: utf-8 -*- | 42 | en | 0.34282 |
"""
JunOSLikeDevice Class is abstract class for using in Juniper JunOS like devices
Connection Method are based upon AsyncSSH and should be running in asyncio loop
"""
import re
from netdev.logger import logger
from netdev.vendors.base import BaseDevice
class JunOSLikeDevice(BaseDevice):
    """
    JunOSLikeDevice Class for working with Juniper JunOS like devices

    Juniper JunOS like devices have several concepts:

    * shell mode (csh). This is the csh shell for FreeBSD. This mode is not covered by this Class.
    * cli mode (specific shell). The entire configuration is usually configured in this shell:

      * operation mode. This mode is used for getting information from the device
      * configuration mode. This mode is used for configuring the system
    """

    _delimiter_list = ["%", ">", "#"]
    """All these characters will stop reading from buffer. It means the end of device prompt"""

    _pattern = r"\w+(\@[\-\w]*)?[{delimiters}]"
    """Pattern for using in reading buffer. When it is found, processing ends"""

    _disable_paging_command = "set cli screen-length 0"
    """Command for disabling paging"""

    _config_enter = "configure"
    """Command for entering configuration mode"""

    _config_exit = "exit configuration-mode"
    """Command for exiting from configuration mode to privilege exec"""

    _config_check = "#"
    """Checking string in prompt. If it exists in prompt - we are in configuration mode"""

    _commit_command = "commit"
    """Command for committing changes"""

    _commit_comment_command = "commit comment {}"
    """Command for committing changes with comment"""

    async def _set_base_prompt(self):
        """
        Setting two important vars:

        * base_prompt - textual prompt in CLI (usually username or hostname)
        * base_pattern - regexp for finding the end of command. It's a platform specific parameter

        For JunOS devices base_pattern is "user(@[hostname])?[>|#]"
        """
        logger.info("Host {}: Setting base prompt".format(self._host))
        prompt = await self._find_prompt()
        # Strip off trailing terminator (%, > or #)
        prompt = prompt[:-1]
        # A JunOS prompt may look like "user@hostname"; keep the hostname part
        if "@" in prompt:
            prompt = prompt.split("@")[1]
        self._base_prompt = prompt
        # Build the end-of-output pattern from the escaped delimiter set.
        delimiters = map(re.escape, type(self)._delimiter_list)
        delimiters = r"|".join(delimiters)
        pattern = type(self)._pattern
        self._base_pattern = pattern.format(delimiters=delimiters)
        logger.debug("Host {}: Base Prompt: {}".format(self._host, self._base_prompt))
        logger.debug("Host {}: Base Pattern: {}".format(self._host, self._base_pattern))
        return self._base_prompt

    async def check_config_mode(self):
        """Check if we are in configuration mode. Return boolean"""
        logger.info("Host {}: Checking configuration mode".format(self._host))
        check_string = type(self)._config_check
        # Send a bare newline and inspect the prompt that comes back.
        self._stdin.write(self._normalize_cmd("\n"))
        output = await self._read_until_prompt()
        return check_string in output

    async def config_mode(self):
        """Enter configuration mode"""
        logger.info("Host {}: Entering to configuration mode".format(self._host))
        output = ""
        config_enter = type(self)._config_enter
        if not await self.check_config_mode():
            self._stdin.write(self._normalize_cmd(config_enter))
            output += await self._read_until_prompt()
            if not await self.check_config_mode():
                raise ValueError("Failed to enter to configuration mode")
        return output

    async def exit_config_mode(self):
        """Exit from configuration mode"""
        logger.info("Host {}: Exiting from configuration mode".format(self._host))
        output = ""
        config_exit = type(self)._config_exit
        if await self.check_config_mode():
            self._stdin.write(self._normalize_cmd(config_exit))
            output += await self._read_until_prompt()
            if await self.check_config_mode():
                raise ValueError("Failed to exit from configuration mode")
        return output

    async def send_config_set(
        self,
        config_commands=None,
        with_commit=True,
        commit_comment="",
        exit_config_mode=True,
    ):
        """
        Sending configuration commands to device
        By default automatically exits/enters configuration mode.

        :param list config_commands: iterable string list with commands for applying to network devices in system view
        :param bool with_commit: if true it commits all changes after applying all config_commands
        :param string commit_comment: message for configuration commit
        :param bool exit_config_mode: If true it will quit from configuration mode automatically
        :return: The output of these commands
        """
        if config_commands is None:
            return ""

        # Send config commands
        output = await self.config_mode()
        output += await super().send_config_set(config_commands=config_commands)
        if with_commit:
            commit = type(self)._commit_command
            if commit_comment:
                commit = type(self)._commit_comment_command.format(commit_comment)

            self._stdin.write(self._normalize_cmd(commit))
            output += await self._read_until_prompt()

        if exit_config_mode:
            output += await self.exit_config_mode()

        output = self._normalize_linefeeds(output)
        logger.debug(
            "Host {}: Config commands output: {}".format(self._host, repr(output))
        )
        return output
| netdev/vendors/junos_like.py | 5,855 | JunOSLikeDevice Class for working with Juniper JunOS like devices
Juniper JunOS like devices having several concepts:
* shell mode (csh). This is csh shell for FreeBSD. This mode is not covered by this Class.
* cli mode (specific shell). The entire configuration is usual configured in this shell:
* operation mode. This mode is using for getting information from device
* configuration mode. This mode is using for configuration system
JunOSLikeDevice Class is abstract class for using in Juniper JunOS like devices
Connection Method are based upon AsyncSSH and should be running in asyncio loop
Strip off trailing terminator Send config commands | 657 | en | 0.850379 |
"""
Finance-specific data cleaning functions.
"""
import json
from datetime import date
from functools import lru_cache
import pandas as pd
import pandas_flavor as pf
import requests
from janitor.errors import JanitorError
from .utils import check, deprecated_alias, is_connected
# ISO 4217 codes of the currencies accepted by the (currently disabled)
# `convert_currency` function.
currency_set = {
    "AUD",
    "BGN",
    "BRL",
    "CAD",
    "CHF",
    "CNY",
    "CZK",
    "DKK",
    "EUR",
    "GBP",
    "HKD",
    "HRK",
    "HUF",
    "IDR",
    "ILS",
    "INR",
    "ISK",
    "JPY",
    "KRW",
    "MXN",
    "MYR",
    "NOK",
    "NZD",
    "PHP",
    "PLN",
    "RON",
    "RUB",
    "SEK",
    "SGD",
    "THB",
    "TRY",
    "USD",
    "ZAR",
}

# Dictionary of recognized World Bank countries and their abbreviations:
# maps economy name -> three-letter World Bank code. Used by
# `_check_wb_country` and `_inflate_currency` to validate/translate the
# `country` argument. Includes aggregate "economies" (regions, income
# groups) as published by the World Bank, not only sovereign countries.
wb_country_dict = {
    "Aruba": "ABW",
    "Afghanistan": "AFG",
    "Angola": "AGO",
    "Albania": "ALB",
    "Andorra": "AND",
    "Arab World": "ARB",
    "United Arab Emirates": "ARE",
    "Argentina": "ARG",
    "Armenia": "ARM",
    "American Samoa": "ASM",
    "Antigua and Barbuda": "ATG",
    "Australia": "AUS",
    "Austria": "AUT",
    "Azerbaijan": "AZE",
    "Burundi": "BDI",
    "Belgium": "BEL",
    "Benin": "BEN",
    "Burkina Faso": "BFA",
    "Bangladesh": "BGD",
    "Bulgaria": "BGR",
    "Bahrain": "BHR",
    "Bahamas, The": "BHS",
    "Bosnia and Herzegovina": "BIH",
    "Belarus": "BLR",
    "Belize": "BLZ",
    "Bermuda": "BMU",
    "Bolivia": "BOL",
    "Brazil": "BRA",
    "Barbados": "BRB",
    "Brunei Darussalam": "BRN",
    "Bhutan": "BTN",
    "Botswana": "BWA",
    "Central African Republic": "CAF",
    "Canada": "CAN",
    "Central Europe and the Baltics": "CEB",
    "Switzerland": "CHE",
    "Channel Islands": "CHI",
    "Chile": "CHL",
    "China": "CHN",
    "Cote d'Ivoire": "CIV",
    "Cameroon": "CMR",
    "Congo, Dem. Rep.": "COD",
    "Congo, Rep.": "COG",
    "Colombia": "COL",
    "Comoros": "COM",
    "Cabo Verde": "CPV",
    "Costa Rica": "CRI",
    "Caribbean small states": "CSS",
    "Cuba": "CUB",
    "Curacao": "CUW",
    "Cayman Islands": "CYM",
    "Cyprus": "CYP",
    "Czech Republic": "CZE",
    "Germany": "DEU",
    "Djibouti": "DJI",
    "Dominica": "DMA",
    "Denmark": "DNK",
    "Dominican Republic": "DOM",
    "Algeria": "DZA",
    "East Asia & Pacific (excluding high income)": "EAP",
    "Early-demographic dividend": "EAR",
    "East Asia & Pacific": "EAS",
    "Europe & Central Asia (excluding high income)": "ECA",
    "Europe & Central Asia": "ECS",
    "Ecuador": "ECU",
    "Egypt, Arab Rep.": "EGY",
    "Euro area": "EMU",
    "Eritrea": "ERI",
    "Spain": "ESP",
    "Estonia": "EST",
    "Ethiopia": "ETH",
    "European Union": "EUU",
    "Fragile and conflict affected situations": "FCS",
    "Finland": "FIN",
    "Fiji": "FJI",
    "France": "FRA",
    "Faroe Islands": "FRO",
    "Micronesia, Fed. Sts.": "FSM",
    "Gabon": "GAB",
    "United Kingdom": "GBR",
    "Georgia": "GEO",
    "Ghana": "GHA",
    "Gibraltar": "GIB",
    "Guinea": "GIN",
    "Gambia, The": "GMB",
    "Guinea-Bissau": "GNB",
    "Equatorial Guinea": "GNQ",
    "Greece": "GRC",
    "Grenada": "GRD",
    "Greenland": "GRL",
    "Guatemala": "GTM",
    "Guam": "GUM",
    "Guyana": "GUY",
    "High income": "HIC",
    "Hong Kong SAR, China": "HKG",
    "Honduras": "HND",
    "Heavily indebted poor countries (HIPC)": "HPC",
    "Croatia": "HRV",
    "Haiti": "HTI",
    "Hungary": "HUN",
    "IBRD only": "IBD",
    "IDA & IBRD total": "IBT",
    "IDA total": "IDA",
    "IDA blend": "IDB",
    "Indonesia": "IDN",
    "IDA only": "IDX",
    "Isle of Man": "IMN",
    "India": "IND",
    "Not classified": "INX",
    "Ireland": "IRL",
    "Iran, Islamic Rep.": "IRN",
    "Iraq": "IRQ",
    "Iceland": "ISL",
    "Israel": "ISR",
    "Italy": "ITA",
    "Jamaica": "JAM",
    "Jordan": "JOR",
    "Japan": "JPN",
    "Kazakhstan": "KAZ",
    "Kenya": "KEN",
    "Kyrgyz Republic": "KGZ",
    "Cambodia": "KHM",
    "Kiribati": "KIR",
    "St. Kitts and Nevis": "KNA",
    "Korea, Rep.": "KOR",
    "Kuwait": "KWT",
    "Latin America & Caribbean (excluding high income)": "LAC",
    "Lao PDR": "LAO",
    "Lebanon": "LBN",
    "Liberia": "LBR",
    "Libya": "LBY",
    "St. Lucia": "LCA",
    "Latin America & Caribbean": "LCN",
    "Least developed countries: UN classification": "LDC",
    "Low income": "LIC",
    "Liechtenstein": "LIE",
    "Sri Lanka": "LKA",
    "Lower middle income": "LMC",
    "Low & middle income": "LMY",
    "Lesotho": "LSO",
    "Late-demographic dividend": "LTE",
    "Lithuania": "LTU",
    "Luxembourg": "LUX",
    "Latvia": "LVA",
    "Macao SAR, China": "MAC",
    "St. Martin (French part)": "MAF",
    "Morocco": "MAR",
    "Monaco": "MCO",
    "Moldova": "MDA",
    "Madagascar": "MDG",
    "Maldives": "MDV",
    "Middle East & North Africa": "MEA",
    "Mexico": "MEX",
    "Marshall Islands": "MHL",
    "Middle income": "MIC",
    "North Macedonia": "MKD",
    "Mali": "MLI",
    "Malta": "MLT",
    "Myanmar": "MMR",
    "Middle East & North Africa (excluding high income)": "MNA",
    "Montenegro": "MNE",
    "Mongolia": "MNG",
    "Northern Mariana Islands": "MNP",
    "Mozambique": "MOZ",
    "Mauritania": "MRT",
    "Mauritius": "MUS",
    "Malawi": "MWI",
    "Malaysia": "MYS",
    "North America": "NAC",
    "Namibia": "NAM",
    "New Caledonia": "NCL",
    "Niger": "NER",
    "Nigeria": "NGA",
    "Nicaragua": "NIC",
    "Netherlands": "NLD",
    "Norway": "NOR",
    "Nepal": "NPL",
    "Nauru": "NRU",
    "New Zealand": "NZL",
    "OECD members": "OED",
    "Oman": "OMN",
    "Other small states": "OSS",
    "Pakistan": "PAK",
    "Panama": "PAN",
    "Peru": "PER",
    "Philippines": "PHL",
    "Palau": "PLW",
    "Papua New Guinea": "PNG",
    "Poland": "POL",
    "Pre-demographic dividend": "PRE",
    "Puerto Rico": "PRI",
    "Korea, Dem. People's Rep.": "PRK",
    "Portugal": "PRT",
    "Paraguay": "PRY",
    "West Bank and Gaza": "PSE",
    "Pacific island small states": "PSS",
    "Post-demographic dividend": "PST",
    "French Polynesia": "PYF",
    "Qatar": "QAT",
    "Romania": "ROU",
    "Russian Federation": "RUS",
    "Rwanda": "RWA",
    "South Asia": "SAS",
    "Saudi Arabia": "SAU",
    "Sudan": "SDN",
    "Senegal": "SEN",
    "Singapore": "SGP",
    "Solomon Islands": "SLB",
    "Sierra Leone": "SLE",
    "El Salvador": "SLV",
    "San Marino": "SMR",
    "Somalia": "SOM",
    "Serbia": "SRB",
    "Sub-Saharan Africa (excluding high income)": "SSA",
    "South Sudan": "SSD",
    "Sub-Saharan Africa": "SSF",
    "Small states": "SST",
    "Sao Tome and Principe": "STP",
    "Suriname": "SUR",
    "Slovak Republic": "SVK",
    "Slovenia": "SVN",
    "Sweden": "SWE",
    "Eswatini": "SWZ",
    "Sint Maarten (Dutch part)": "SXM",
    "Seychelles": "SYC",
    "Syrian Arab Republic": "SYR",
    "Turks and Caicos Islands": "TCA",
    "Chad": "TCD",
    "East Asia & Pacific (IDA & IBRD countries)": "TEA",
    "Europe & Central Asia (IDA & IBRD countries)": "TEC",
    "Togo": "TGO",
    "Thailand": "THA",
    "Tajikistan": "TJK",
    "Turkmenistan": "TKM",
    "Latin America & the Caribbean (IDA & IBRD countries)": "TLA",
    "Timor-Leste": "TLS",
    "Middle East & North Africa (IDA & IBRD countries)": "TMN",
    "Tonga": "TON",
    "South Asia (IDA & IBRD)": "TSA",
    "Sub-Saharan Africa (IDA & IBRD countries)": "TSS",
    "Trinidad and Tobago": "TTO",
    "Tunisia": "TUN",
    "Turkey": "TUR",
    "Tuvalu": "TUV",
    "Tanzania": "TZA",
    "Uganda": "UGA",
    "Ukraine": "UKR",
    "Upper middle income": "UMC",
    "Uruguay": "URY",
    "United States": "USA",
    "Uzbekistan": "UZB",
    "St. Vincent and the Grenadines": "VCT",
    "Venezuela, RB": "VEN",
    "British Virgin Islands": "VGB",
    "Virgin Islands (U.S.)": "VIR",
    "Vietnam": "VNM",
    "Vanuatu": "VUT",
    "World": "WLD",
    "Samoa": "WSM",
    "Kosovo": "XKX",
    "Yemen, Rep.": "YEM",
    "South Africa": "ZAF",
    "Zambia": "ZMB",
    "Zimbabwe": "ZWE",
}
def _check_currency(currency: str):
    """Raise ValueError if *currency* is not a supported currency code."""
    if currency in currency_set:
        return
    raise ValueError(
        f"currency {currency} not in supported currency set, "
        f"{currency_set}"
    )
def _check_wb_country(country: str):
    """Check that world bank country is in supported set.

    Accepts either a full economy name (a key of ``wb_country_dict``)
    or a three-letter abbreviation (a value of ``wb_country_dict``).

    :param country: Economy name or abbreviation to validate.
    :raises ValueError: If *country* is neither a recognized name nor a
        recognized abbreviation.
    """
    # `and` (short-circuit) replaces the original bitwise `&` on booleans;
    # plain `in dict` replaces the redundant `.keys()` lookup.
    if country not in wb_country_dict and (
        country not in wb_country_dict.values()
    ):
        raise ValueError(
            f"country {country} not in supported World Bank country dict, "
            f"{wb_country_dict}"
        )
def _check_wb_years(year: int):
    """Raise ValueError unless *year* is within the World Bank data range."""
    if year >= 1960:
        return
    raise ValueError("year value must be 1960 or later")
# @lru_cache(maxsize=32)
# def _convert_currency(
# api_key: str,
# from_currency: str = None,
# to_currency: str = None,
# historical_date: Optional[date] = None,
# ) -> float:
# """
# Currency conversion for Pandas DataFrame column.
# Helper function for `convert_currency` method.
# The API used is https://exchangeratesapi.io/.
# """
# url = "http://api.exchangeratesapi.io"
# if historical_date:
# check("historical_date", historical_date, [datetime, date])
# if isinstance(historical_date, datetime):
# if historical_date < datetime(1999, 1, 4):
# raise ValueError(
# "historical_date:datetime must be later than 1999-01-04!"
# )
# string_date = str(historical_date)[:10]
# else:
# if historical_date < date(1999, 1, 4):
# raise ValueError(
# "historical_date:date must be later than 1999-01-04!"
# )
# string_date = str(historical_date)
# url = url + "/%s" % string_date
# else:
# url = url + "/latest"
# _check_currency(from_currency)
# _check_currency(to_currency)
# payload = {
# # "base": from_currency,
# "symbols": to_currency,
# "access_key": api_key,
# }
# result = requests.get(url, params=payload)
# if result.status_code != 200:
# raise ConnectionError(
# "Exchange Rate API failed to receive a 200 "
# "response from the server. "
# "Please try again later."
# )
# currency_dict = json.loads(result.text)
# rate = currency_dict["rates"][to_currency]
# return rate
@pf.register_dataframe_method
@deprecated_alias(colname="column_name")
def convert_currency(
    df: pd.DataFrame,
    api_key: str,
    column_name: str = None,
    from_currency: str = None,
    to_currency: str = None,
    historical_date: date = None,
    make_new_column: bool = False,
) -> pd.DataFrame:
    """Deprecated function. Always raises ``JanitorError``."""
    message = (
        "The `convert_currency` function has been temporarily disabled due to "
        "exchangeratesapi.io disallowing free pinging of its API. "
        "(Our tests started to fail due to this issue.) "
        "There is no easy way around this problem "
        "except to find a new API to call on."
        "Please comment on issue #829 "
        "(https://github.com/pyjanitor-devs/pyjanitor/issues/829) "
        "if you know of an alternative API that we can call on, "
        "otherwise the function will be removed in pyjanitor's 1.0 release."
    )
    raise JanitorError(message)
# @pf.register_dataframe_method
# @deprecated_alias(colname="column_name")
# def convert_currency(
# df: pd.DataFrame,
# api_key: str,
# column_name: str = None,
# from_currency: str = None,
# to_currency: str = None,
# historical_date: date = None,
# make_new_column: bool = False,
# ) -> pd.DataFrame:
# """
# Converts a column from one currency to another, with an option to
# convert based on historical exchange values.
# On April 10 2021,
# we discovered that there was no more free API available.
# Thus, an API key is required to perform currency conversion.
# API keys should be set as an environment variable,
# for example, `EXCHANGE_RATE_API_KEY``,
# and then passed into the function
# by calling on `os.getenv("EXCHANGE_RATE_APIKEY")``.
# :param df: A pandas dataframe.
# :param api_key: exchangeratesapi.io API key.
# :param column_name: Name of the new column. Should be a string, in order
# for the column name to be compatible with the Feather binary
# format (this is a useful thing to have).
# :param from_currency: The base currency to convert from.
# May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
# "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
# "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
# "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
# "ZAR"}
# :param to_currency: The target currency to convert to.
# May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
# "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
# "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
# "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
# "ZAR"}
# :param historical_date: If supplied,
# get exchange rate on a certain date.
# If not supplied, get the latest exchange rate.
# The exchange rates go back to Jan. 4, 1999.
# :param make_new_column: Generates new column
# for converted currency if True,
# otherwise, converts currency in place.
# :returns: The dataframe with converted currency column.
# .. code-block:: python
# import pandas as pd
# import janitor
# from datetime import date
# data_dict = {
# "a": [1.23452345, 2.456234, 3.2346125] * 3,
# "Bell__Chart": [1/3, 2/7, 3/2] * 3,
# "decorated-elephant": [1/234, 2/13, 3/167] * 3,
# "animals": ["rabbit", "leopard", "lion"] * 3,
# "cities": ["Cambridge", "Shanghai", "Basel"] * 3,
# }
# example_dataframe = pd.DataFrame(data_dict)
# Example: Converting a column from one currency to another
# using rates from 01/01/2018.
# .. code-block:: python
# example_dataframe.convert_currency('a', from_currency='USD',
# to_currency='EUR', historical_date=date(2018,1,1))
# Output:
# .. code-block:: python
# a Bell__Chart decorated-elephant animals cities
# 0 1.029370 0.333333 0.004274 rabbit Cambridge
# 1 2.048056 0.285714 0.153846 leopard Shanghai
# 2 2.697084 1.500000 0.017964 lion Basel
# 3 1.029370 0.333333 0.004274 rabbit Cambridge
# 4 2.048056 0.285714 0.153846 leopard Shanghai
# 5 2.697084 1.500000 0.017964 lion Basel
# 6 1.029370 0.333333 0.004274 rabbit Cambridge
# 7 2.048056 0.285714 0.153846 leopard Shanghai
# 8 2.697084 1.500000 0.017964 lion Basel
# """
# rate = _convert_currency(
# api_key, from_currency, to_currency, historical_date
# )
# if make_new_column:
# # new_column_name = column_name + "_" + to_currency
# column_name = column_name + "_" + to_currency
# df = df.assign(column_name=df[column_name] * rate)
# return df
@lru_cache(maxsize=32)
def _inflate_currency(
    country: str = None, currency_year: int = None, to_year: int = None
) -> float:
    """
    Currency inflation for Pandas DataFrame column.

    Helper function for `inflate_currency` method.

    The API used is the World Bank Indicator API:
    https://datahelpdesk.worldbank.org/knowledgebase/articles/889392-about-the-indicators-api-documentation

    :param country: World Bank economy name or three-letter abbreviation.
    :param currency_year: Year to inflate from (1960 or later).
    :param to_year: Year to inflate to (1960 or later).
    :returns: Multiplier converting an amount in ``currency_year`` money
        into ``to_year`` money.
    :raises ConnectionError: If the API does not return HTTP 200.
    :raises ValueError: If the API has no inflation data for the requested
        country/years.
    """
    # Check all inputs are correct data type
    check("country", country, [str])
    check("currency_year", currency_year, [int])
    check("to_year", to_year, [int])

    # Translate a full economy name into its World Bank abbreviation;
    # a value that is already an abbreviation passes through unchanged.
    _check_wb_country(country)
    country = wb_country_dict.get(country, country)

    _check_wb_years(currency_year)
    _check_wb_years(to_year)

    url = (
        "https://api.worldbank.org/v2/country/{country}"
        "/indicator/FP.CPI.TOTL?date={start}:{end}&format=json"
    ).format(
        country=country,
        start=min(currency_year, to_year),
        end=max(currency_year, to_year),
    )

    # NOTE(review): no timeout is set, so this call can hang indefinitely
    # if the API is unresponsive; consider requests.get(url, timeout=...).
    result = requests.get(url)

    if result.status_code != 200:
        raise ConnectionError(
            "WB Indicator API failed to receive a 200 "
            "response from the server. "
            "Please try again later."
        )

    # The API returns a list of two items;
    # the second item in the list is what we want
    inflation_dict = json.loads(result.text)[1]

    # Error checking
    if inflation_dict is None:
        raise ValueError(
            "The WB Indicator API returned nothing. "
            "This likely means the currency_year and "
            "to_year are outside of the year range for "
            "which the WB has inflation data for the "
            "specified country."
        )

    # Create new dict with only the year and inflation values,
    # skipping years the API reports as null.
    inflation_dict_ready = {
        int(entry["date"]): float(entry["value"])
        for entry in inflation_dict
        if entry["value"] is not None
    }

    # Error catching
    if currency_year not in inflation_dict_ready:
        raise ValueError(
            f"The WB Indicator API does not have inflation "
            f"data for {currency_year} for {country}."
        )
    if to_year not in inflation_dict_ready:
        raise ValueError(
            f"The WB Indicator API does not have inflation "
            f"data for {to_year} for {country}."
        )

    return inflation_dict_ready[to_year] / inflation_dict_ready[currency_year]
@pf.register_dataframe_method
def inflate_currency(
    df: pd.DataFrame,
    column_name: str = None,
    country: str = None,
    currency_year: int = None,
    to_year: int = None,
    make_new_column: bool = False,
) -> pd.DataFrame:
    """
    Inflate a column of monetary values from one year to another, based on
    the currency's country.

    The provided country can be any economy name or code from the World Bank
    [list of economies]
    (https://databank.worldbank.org/data/download/site-content/CLASS.xls).

    **Note**: This method mutates the original DataFrame.

    Method chaining usage example:

    >>> import pandas as pd
    >>> import janitor.finance
    >>> df = pd.DataFrame({"profit":[100.10, 200.20, 300.30, 400.40, 500.50]})
    >>> df
       profit
    0   100.1
    1   200.2
    2   300.3
    3   400.4
    4   500.5
    >>> df.inflate_currency(
    ...    column_name='profit',
    ...    country='USA',
    ...    currency_year=2015,
    ...    to_year=2018,
    ...    make_new_column=True
    ... )
       profit  profit_2018
    0   100.1   106.050596
    1   200.2   212.101191
    2   300.3   318.151787
    3   400.4   424.202382
    4   500.5   530.252978

    :param df: A pandas DataFrame.
    :param column_name: Name of the column containing monetary
        values to inflate.
    :param country: The country associated with the currency being inflated.
        May be any economy or code from the World Bank [List of economies]
        (https://databank.worldbank.org/data/download/site-content/CLASS.xls).
    :param currency_year: The currency year to inflate from.
        The year should be 1960 or later.
    :param to_year: The currency year to inflate to.
        The year should be 1960 or later.
    :param make_new_column: Generates new column for inflated currency if
        True, otherwise, inflates currency in place.
    :returns: The dataframe with inflated currency column.
    """
    multiplier = _inflate_currency(country, currency_year, to_year)
    # Write to a suffixed column when requested, otherwise inflate in place.
    target = (
        "{}_{}".format(column_name, to_year) if make_new_column else column_name
    )
    df[target] = df[column_name] * multiplier
    return df
def convert_stock(stock_symbol: str) -> str:
    """
    Look up the full company name for a stock ticker symbol.

    Functional usage example:

    ```python
    import janitor.finance

    janitor.finance.convert_stock("aapl")
    ```

    :param stock_symbol: Stock ticker Symbol
    :raises ConnectionError: Internet connection is not available
    :returns: Full company name
    """
    # Guard clause: bail out early when there is no internet connectivity.
    if not is_connected("www.google.com"):
        raise ConnectionError(
            "Connection Error: Client Not Connected to Internet"
        )
    return get_symbol(stock_symbol.upper())
def get_symbol(symbol: str):
    """
    Helper function to get a company's full name from its stock symbol,
    via the Yahoo Finance autocomplete API.

    Functional usage example:

    ```python
    import janitor.finance

    janitor.finance.get_symbol("aapl")
    ```

    :param symbol: Stock symbol used to query the API for the company's
        full name.
    :return: Company full name, or None when the symbol is not found.
    """
    # BUG FIX: the query string previously contained the mojibake "®ion"
    # (an HTML-entity corruption of "&reg") where "&region" was intended.
    result = requests.get(
        "http://d.yimg.com/autoc."
        + "finance.yahoo.com/autoc?query={}&region=1&lang=en".format(symbol)
    ).json()

    for entry in result["ResultSet"]["Result"]:
        if entry["symbol"] == symbol:
            return entry["name"]
    # No match found in the result set. (The original used `for/else` with
    # no `break`, which always fell through to this same return.)
    return None
| janitor/finance.py | 21,921 | Check that currency is in supported set.
Check that world bank country is in supported set.
Check that year is in world bank dataset years.
Currency inflation for Pandas DataFrame column.
Helper function for `inflate_currency` method.
The API used is the World Bank Indicator API:
https://datahelpdesk.worldbank.org/knowledgebase/articles/889392-about-the-indicators-api-documentation
Deprecated function.
This function takes in a stock symbol as a parameter,
queries an API for the companies full name and returns
it
Functional usage example:
```python
import janitor.finance
janitor.finance.convert_stock("aapl")
```
:param stock_symbol: Stock ticker Symbol
:raises ConnectionError: Internet connection is not available
:returns: Full company name
This is a helper function to get a companies full
name based on the stock symbol.
Functional usage example:
```python
import janitor.finance
janitor.finance.get_symbol("aapl")
```
:param symbol: This is our stock symbol that we use
to query the api for the companies full name.
:return: Company full name
Inflates a column of monetary values from one year to another, based on
the currency's country.
The provided country can be any economy name or code from the World Bank
[list of economies]
(https://databank.worldbank.org/data/download/site-content/CLASS.xls).
**Note**: This method mutates the original DataFrame.
Method chaining usage example:
>>> import pandas as pd
>>> import janitor.finance
>>> df = pd.DataFrame({"profit":[100.10, 200.20, 300.30, 400.40, 500.50]})
>>> df
profit
0 100.1
1 200.2
2 300.3
3 400.4
4 500.5
>>> df.inflate_currency(
... column_name='profit',
... country='USA',
... currency_year=2015,
... to_year=2018,
... make_new_column=True
... )
profit profit_2018
0 100.1 106.050596
1 200.2 212.101191
2 300.3 318.151787
3 400.4 424.202382
4 500.5 530.252978
:param df: A pandas DataFrame.
:param column_name: Name of the column containing monetary
values to inflate.
:param country: The country associated with the currency being inflated.
May be any economy or code from the World Bank [List of economies]
(https://databank.worldbank.org/data/download/site-content/CLASS.xls).
:param currency_year: The currency year to inflate from.
The year should be 1960 or later.
:param to_year: The currency year to inflate to.
The year should be 1960 or later.
:param make_new_column: Generates new column for inflated currency if
True, otherwise, inflates currency in place.
:returns: The dataframe with inflated currency column.
Finance-specific data cleaning functions.
Dictionary of recognized World Bank countries and their abbreviations noqa: PD011 @lru_cache(maxsize=32) def _convert_currency( api_key: str, from_currency: str = None, to_currency: str = None, historical_date: Optional[date] = None, ) -> float: """ Currency conversion for Pandas DataFrame column. Helper function for `convert_currency` method. The API used is https://exchangeratesapi.io/. """ url = "http://api.exchangeratesapi.io" if historical_date: check("historical_date", historical_date, [datetime, date]) if isinstance(historical_date, datetime): if historical_date < datetime(1999, 1, 4): raise ValueError( "historical_date:datetime must be later than 1999-01-04!" ) string_date = str(historical_date)[:10] else: if historical_date < date(1999, 1, 4): raise ValueError( "historical_date:date must be later than 1999-01-04!" ) string_date = str(historical_date) url = url + "/%s" % string_date else: url = url + "/latest" _check_currency(from_currency) _check_currency(to_currency) payload = { "base": from_currency, "symbols": to_currency, "access_key": api_key, } result = requests.get(url, params=payload) if result.status_code != 200: raise ConnectionError( "Exchange Rate API failed to receive a 200 " "response from the server. " "Please try again later." ) currency_dict = json.loads(result.text) rate = currency_dict["rates"][to_currency] return rate @pf.register_dataframe_method @deprecated_alias(colname="column_name") def convert_currency( df: pd.DataFrame, api_key: str, column_name: str = None, from_currency: str = None, to_currency: str = None, historical_date: date = None, make_new_column: bool = False, ) -> pd.DataFrame: """ Converts a column from one currency to another, with an option to convert based on historical exchange values. On April 10 2021, we discovered that there was no more free API available. Thus, an API key is required to perform currency conversion. 
API keys should be set as an environment variable, for example, `EXCHANGE_RATE_API_KEY``, and then passed into the function by calling on `os.getenv("EXCHANGE_RATE_APIKEY")``. :param df: A pandas dataframe. :param api_key: exchangeratesapi.io API key. :param column_name: Name of the new column. Should be a string, in order for the column name to be compatible with the Feather binary format (this is a useful thing to have). :param from_currency: The base currency to convert from. May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR", "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD", "ZAR"} :param to_currency: The target currency to convert to. May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR", "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD", "ZAR"} :param historical_date: If supplied, get exchange rate on a certain date. If not supplied, get the latest exchange rate. The exchange rates go back to Jan. 4, 1999. :param make_new_column: Generates new column for converted currency if True, otherwise, converts currency in place. :returns: The dataframe with converted currency column. .. code-block:: python import pandas as pd import janitor from datetime import date data_dict = { "a": [1.23452345, 2.456234, 3.2346125] * 3, "Bell__Chart": [1/3, 2/7, 3/2] * 3, "decorated-elephant": [1/234, 2/13, 3/167] * 3, "animals": ["rabbit", "leopard", "lion"] * 3, "cities": ["Cambridge", "Shanghai", "Basel"] * 3, } example_dataframe = pd.DataFrame(data_dict) Example: Converting a column from one currency to another using rates from 01/01/2018. .. 
code-block:: python example_dataframe.convert_currency('a', from_currency='USD', to_currency='EUR', historical_date=date(2018,1,1)) Output: .. code-block:: python a Bell__Chart decorated-elephant animals cities 0 1.029370 0.333333 0.004274 rabbit Cambridge 1 2.048056 0.285714 0.153846 leopard Shanghai 2 2.697084 1.500000 0.017964 lion Basel 3 1.029370 0.333333 0.004274 rabbit Cambridge 4 2.048056 0.285714 0.153846 leopard Shanghai 5 2.697084 1.500000 0.017964 lion Basel 6 1.029370 0.333333 0.004274 rabbit Cambridge 7 2.048056 0.285714 0.153846 leopard Shanghai 8 2.697084 1.500000 0.017964 lion Basel """ rate = _convert_currency( api_key, from_currency, to_currency, historical_date ) if make_new_column: new_column_name = column_name + "_" + to_currency column_name = column_name + "_" + to_currency df = df.assign(column_name=df[column_name] * rate) return df Check all inputs are correct data type Get WB country abbreviation `country` is already a correct abbreviation; do nothing The API returns a list of two items; the second item in the list is what we want Error checking Create new dict with only the year and inflation values Error catching | 8,631 | en | 0.586345 |
from datetime import datetime
import json
from unittest import TestCase
from celery.schedules import schedule, crontab
try: # celery 3.x
from celery.utils.timeutils import timezone
except ImportError: # celery 4.x
from celery.utils.time import timezone
from redbeat.decoder import RedBeatJSONDecoder, RedBeatJSONEncoder
from redbeat.schedules import rrule
class JSONTestCase(TestCase):
    """Base class: encode/decode helpers plus expected wire payloads."""

    def dumps(self, d):
        """Serialize *d* with the RedBeat JSON encoder."""
        return json.dumps(d, cls=RedBeatJSONEncoder)

    def loads(self, d):
        """Deserialize *d* with the RedBeat JSON decoder."""
        return json.loads(d, cls=RedBeatJSONDecoder)

    def datetime(self, **overrides):
        """Expected serialized form of a datetime, with optional overrides."""
        payload = {
            '__type__': 'datetime',
            'year': 2015,
            'month': 12,
            'day': 30,
            'hour': 12,
            'minute': 59,
            'second': 22,
            'microsecond': 333,
        }
        payload.update(overrides)
        return payload

    def schedule(self, **overrides):
        """Expected serialized form of a celery interval schedule."""
        payload = {
            '__type__': 'interval',
            'every': 60.0,
            'relative': False,
        }
        payload.update(overrides)
        return payload

    def crontab(self, **overrides):
        """Expected serialized form of a celery crontab schedule."""
        payload = {
            '__type__': 'crontab',
            'minute': '*',
            'hour': '*',
            'day_of_week': '*',
            'day_of_month': '*',
            'month_of_year': '*',
        }
        payload.update(overrides)
        return payload

    def rrule(self, **overrides):
        """Expected serialized form of an rrule schedule."""
        payload = {
            '__type__': 'rrule',
            'freq': 5,
            'dtstart': 1451480362,
            'interval': 1,
            'wkst': None,
            'count': 1,
            'until': None,
            'bysetpos': None,
            'bymonth': None,
            'bymonthday': None,
            'byyearday': None,
            'byeaster': None,
            'byweekno': None,
            'byweekday': None,
            'byhour': None,
            'byminute': None,
            'bysecond': None,
        }
        payload.update(overrides)
        return payload
class RedBeatJSONEncoderTestCase(JSONTestCase):
    """Checks that the encoder serializes each schedule type as expected."""

    def test_datetime(self):
        now = datetime.now()
        serialized = self.dumps(now)
        expected = self.datetime()
        # Copy every real attribute off the datetime; '__type__' stays fixed.
        for key in (k for k in expected if hasattr(now, k)):
            expected[key] = getattr(now, key)
        self.assertEqual(serialized, json.dumps(expected))

    def test_schedule(self):
        interval = schedule(run_every=60.0)
        self.assertEqual(self.dumps(interval), json.dumps(self.schedule(every=60.0)))

    def test_crontab(self):
        self.assertEqual(self.dumps(crontab()), json.dumps(self.crontab()))

    def test_rrule(self):
        rule = rrule('MINUTELY', dtstart=datetime(2015, 12, 30, 12, 59, 22, tzinfo=timezone.utc), count=1)
        self.assertEqual(self.dumps(rule), json.dumps(self.rrule()))

    def test_rrule_timezone(self):
        # The same instant expressed in two timezones must encode identically.
        eastern = timezone.get_timezone('US/Eastern')
        utc_start = datetime(2015, 12, 30, 12, 59, 22, tzinfo=timezone.utc)
        local_start = utc_start.astimezone(eastern)
        self.assertEqual(
            self.dumps(rrule('MINUTELY', dtstart=utc_start, count=1)),
            self.dumps(rrule('MINUTELY', dtstart=local_start, count=1)),
        )
class RedBeatJSONDecoderTestCase(JSONTestCase):
    """Checks that serialized payloads decode back to the right objects."""

    def test_datetime(self):
        payload = self.datetime()
        decoded = self.loads(json.dumps(payload))
        payload.pop('__type__')
        self.assertEqual(decoded, datetime(tzinfo=timezone.utc, **payload))

    def test_schedule(self):
        payload = self.schedule()
        decoded = self.loads(json.dumps(payload))
        payload.pop('__type__')
        self.assertEqual(decoded, schedule(run_every=60))

    def test_crontab(self):
        payload = self.crontab()
        decoded = self.loads(json.dumps(payload))
        payload.pop('__type__')
        self.assertEqual(decoded, crontab())

    def test_rrule(self):
        payload = self.rrule()
        decoded = self.loads(json.dumps(payload))
        payload.pop('__type__')
        self.assertEqual(
            decoded,
            rrule('MINUTELY', dtstart=datetime(2015, 12, 30, 12, 59, 22, tzinfo=timezone.utc), count=1),
        )
| tests/test_json.py | 4,083 | celery 3.x celery 4.x | 21 | en | 0.809109 |
from typing import Optional, Any, Dict, List, Text, Tuple
from collections import defaultdict
SUBJECT_WITH_BRANCH_TEMPLATE = u'{repo} / {branch}'
SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE = u'{repo} / {type} #{id} {title}'
EMPTY_SHA = '0000000000000000000000000000000000000000'
COMMITS_LIMIT = 20
COMMIT_ROW_TEMPLATE = u'* {commit_msg} ([{commit_short_sha}]({commit_url}))\n'
COMMITS_MORE_THAN_LIMIT_TEMPLATE = u"[and {commits_number} more commit(s)]"
COMMIT_OR_COMMITS = u"commit{}"
PUSH_PUSHED_TEXT_WITH_URL = u"[pushed]({compare_url}) {number_of_commits} {commit_or_commits}"
PUSH_PUSHED_TEXT_WITHOUT_URL = u"pushed {number_of_commits} {commit_or_commits}"
PUSH_COMMITS_MESSAGE_TEMPLATE_WITH_COMMITTERS = u"""{user_name} {pushed_text} to branch {branch_name}. {committers_details}.
{commits_data}
"""
PUSH_COMMITS_MESSAGE_TEMPLATE_WITHOUT_COMMITTERS = u"""{user_name} {pushed_text} to branch {branch_name}.
{commits_data}
"""
PUSH_COMMITS_MESSAGE_EXTENSION = u"Commits by {}"
PUSH_COMMITTERS_LIMIT_INFO = 3
FORCE_PUSH_COMMITS_MESSAGE_TEMPLATE = u"{user_name} [force pushed]({url}) to branch {branch_name}. Head is now {head}"
CREATE_BRANCH_MESSAGE_TEMPLATE = u"{user_name} created [{branch_name}]({url}) branch"
REMOVE_BRANCH_MESSAGE_TEMPLATE = u"{user_name} deleted branch {branch_name}"
PULL_REQUEST_OR_ISSUE_MESSAGE_TEMPLATE = u"{user_name} {action} [{type}{id}]({url})"
PULL_REQUEST_OR_ISSUE_ASSIGNEE_INFO_TEMPLATE = u"(assigned to {assignee})"
PULL_REQUEST_BRANCH_INFO_TEMPLATE = u"\nfrom `{target}` to `{base}`"
SETUP_MESSAGE_TEMPLATE = u"{integration} webhook has been successfully configured"
SETUP_MESSAGE_USER_PART = u" by {user_name}"
CONTENT_MESSAGE_TEMPLATE = u"\n~~~ quote\n{message}\n~~~"
COMMITS_COMMENT_MESSAGE_TEMPLATE = u"{user_name} {action} on [{sha}]({url})"
PUSH_TAGS_MESSAGE_TEMPLATE = u"""{user_name} {action} tag {tag}"""
TAG_WITH_URL_TEMPLATE = u"[{tag_name}]({tag_url})"
TAG_WITHOUT_URL_TEMPLATE = u"{tag_name}"
def get_push_commits_event_message(user_name, compare_url, branch_name, commits_data, is_truncated=False):
    # type: (Text, Optional[Text], Text, List[Dict[str, Any]], Optional[bool]) -> Text
    """Build the notification body for a push of one or more commits.

    If the pusher is the sole committer, the committers summary line is
    omitted; otherwise a "Commits by A (n), B (m) and C (k)" line is
    included before the commit list.
    """
    # Link the "pushed" verb to the compare view when a URL is available.
    pushed_message_template = PUSH_PUSHED_TEXT_WITH_URL if compare_url else PUSH_PUSHED_TEXT_WITHOUT_URL
    pushed_text_message = pushed_message_template.format(
        compare_url=compare_url,
        number_of_commits=len(commits_data),
        commit_or_commits=COMMIT_OR_COMMITS.format(u's' if len(commits_data) > 1 else u''))
    # (name, commit count) pairs, most commits first, long tail folded
    # into an 'others' entry by get_all_committers.
    committers_items = get_all_committers(commits_data)  # type: List[Tuple[str, int]]
    if len(committers_items) == 1 and user_name == committers_items[0][0]:
        # Sole committer is the pusher: skip the committers summary.
        return PUSH_COMMITS_MESSAGE_TEMPLATE_WITHOUT_COMMITTERS.format(
            user_name=user_name,
            pushed_text=pushed_text_message,
            branch_name=branch_name,
            commits_data=get_commits_content(commits_data, is_truncated),
        ).rstrip()
    else:
        # Render "A (2), B (1) and C (1)": commas between all entries
        # except the last, which is joined with "and".
        committers_details = "{} ({})".format(*committers_items[0])
        for name, number_of_commits in committers_items[1:-1]:
            committers_details = "{}, {} ({})".format(committers_details, name, number_of_commits)
        if len(committers_items) > 1:
            committers_details = "{} and {} ({})".format(committers_details, *committers_items[-1])
        return PUSH_COMMITS_MESSAGE_TEMPLATE_WITH_COMMITTERS.format(
            user_name=user_name,
            pushed_text=pushed_text_message,
            branch_name=branch_name,
            committers_details=PUSH_COMMITS_MESSAGE_EXTENSION.format(committers_details),
            commits_data=get_commits_content(commits_data, is_truncated),
        ).rstrip()
def get_force_push_commits_event_message(user_name, url, branch_name, head):
    # type: (Text, Text, Text, Text) -> Text
    """Build the message for a force push, reporting the new branch head."""
    return FORCE_PUSH_COMMITS_MESSAGE_TEMPLATE.format(
        user_name=user_name, url=url, branch_name=branch_name, head=head)
def get_create_branch_event_message(user_name, url, branch_name):
    # type: (Text, Text, Text) -> Text
    """Build the message announcing a newly created branch."""
    return CREATE_BRANCH_MESSAGE_TEMPLATE.format(
        user_name=user_name, url=url, branch_name=branch_name)
def get_remove_branch_event_message(user_name, branch_name):
    # type: (Text, Text) -> Text
    """Build the message announcing a deleted branch."""
    return REMOVE_BRANCH_MESSAGE_TEMPLATE.format(
        user_name=user_name, branch_name=branch_name)
def get_pull_request_event_message(
        user_name, action, url, number=None,
        target_branch=None, base_branch=None,
        message=None, assignee=None, type='PR'
):
    # type: (Text, Text, Text, Optional[int], Optional[Text], Optional[Text], Optional[Text], Optional[Text], Optional[Text]) -> Text
    """Build the message for a pull request (or issue) event.

    Optional fragments — assignee, branch direction, quoted message body —
    are appended only when the corresponding argument is provided.
    """
    id_part = " #{}".format(number) if number is not None else ''
    content = PULL_REQUEST_OR_ISSUE_MESSAGE_TEMPLATE.format(
        user_name=user_name, action=action, type=type, url=url, id=id_part)
    if assignee:
        content += PULL_REQUEST_OR_ISSUE_ASSIGNEE_INFO_TEMPLATE.format(assignee=assignee)
    if target_branch and base_branch:
        content += PULL_REQUEST_BRANCH_INFO_TEMPLATE.format(
            target=target_branch, base=base_branch)
    if message:
        content += '\n' + CONTENT_MESSAGE_TEMPLATE.format(message=message)
    return content.rstrip()
def get_setup_webhook_message(integration, user_name=None):
    # type: (Text, Optional[Text]) -> Text
    """Build the "webhook successfully configured" notification."""
    notice = SETUP_MESSAGE_TEMPLATE.format(integration=integration)
    if user_name:
        # Credit the configuring user when their name is known.
        notice += SETUP_MESSAGE_USER_PART.format(user_name=user_name)
    return notice
def get_issue_event_message(user_name, action, url, number=None, message=None, assignee=None):
    # type: (Text, Text, Text, Optional[int], Optional[Text], Optional[Text]) -> Text
    """Build an issue event message by delegating to the PR builder."""
    return get_pull_request_event_message(
        user_name, action, url, number,
        message=message, assignee=assignee, type='Issue')
def get_push_tag_event_message(user_name, tag_name, tag_url=None, action='pushed'):
    # type: (Text, Text, Optional[Text], Optional[Text]) -> Text
    """Build the message for a pushed (or removed) tag, linked if possible."""
    if tag_url:
        tag_part = TAG_WITH_URL_TEMPLATE.format(tag_name=tag_name, tag_url=tag_url)
    else:
        tag_part = TAG_WITHOUT_URL_TEMPLATE.format(tag_name=tag_name)
    return PUSH_TAGS_MESSAGE_TEMPLATE.format(
        user_name=user_name, action=action, tag=tag_part)
def get_commits_comment_action_message(user_name, action, commit_url, sha, message=None):
    # type: (Text, Text, Text, Text, Optional[Text]) -> Text
    """Build the message for a comment left on a commit."""
    content = COMMITS_COMMENT_MESSAGE_TEMPLATE.format(
        user_name=user_name,
        action=action,
        sha=get_short_sha(sha),
        url=commit_url,
    )
    if message is None:
        return content
    # Quote the comment body below the header line.
    return content + CONTENT_MESSAGE_TEMPLATE.format(message=message)
def get_commits_content(commits_data, is_truncated=False):
    # type: (List[Dict[str, Any]], Optional[bool]) -> Text
    """Render a bulleted list of commits, capped at COMMITS_LIMIT rows.

    When more commits exist than the cap, a "[and N more commit(s)]"
    footer is appended; when the caller already truncated the list
    (``is_truncated``), the footer is appended without a count.
    """
    commits_content = u''
    for commit in commits_data[:COMMITS_LIMIT]:
        commits_content += COMMIT_ROW_TEMPLATE.format(
            commit_short_sha=get_short_sha(commit.get('sha')),
            commit_url=commit.get('url'),
            # Only the first line of the commit message is shown.
            commit_msg=commit.get('message').partition('\n')[0]
        )
    if len(commits_data) > COMMITS_LIMIT:
        commits_content += COMMITS_MORE_THAN_LIMIT_TEMPLATE.format(
            commits_number=len(commits_data) - COMMITS_LIMIT
        )
    elif is_truncated:
        # Fixed: formatting with an empty commits_number leaves
        # "[and  more commit(s)]" with a double space; the previous
        # .replace(' ', ' ') was a no-op — collapse the double space.
        commits_content += COMMITS_MORE_THAN_LIMIT_TEMPLATE.format(
            commits_number=''
        ).replace('  ', ' ')
    return commits_content.rstrip()
def get_short_sha(sha):
    # type: (Text) -> Text
    """Return the abbreviated 7-character form of a commit SHA."""
    abbreviated_length = 7
    return sha[:abbreviated_length]
def get_all_committers(commits_data):
    # type: (List[Dict[str, Any]]) -> List[Tuple[str, int]]
    """Tally commits per committer, folding the long tail into 'others'.

    Returns (name, count) pairs sorted by count descending with ties
    broken alphabetically; at most PUSH_COMMITTERS_LIMIT_INFO named
    entries, followed by an ('others', total) entry when more exist.
    """
    commit_counts = defaultdict(int)  # type: Dict[str, int]
    for commit in commits_data:
        commit_counts[commit['name']] += 1

    # Most commits first; alphabetical among equals.
    ranked = sorted(commit_counts.items(),
                    key=lambda pair: (-pair[1], pair[0]))  # type: List[Tuple[str, int]]

    if len(ranked) > PUSH_COMMITTERS_LIMIT_INFO:
        overflow = sum(count for _, count in ranked[PUSH_COMMITTERS_LIMIT_INFO:])
        ranked = ranked[:PUSH_COMMITTERS_LIMIT_INFO]
        ranked.append(('others', overflow))

    return ranked
| zerver/lib/webhooks/git.py | 8,625 | type: (Text, Optional[Text], Text, List[Dict[str, Any]], Optional[bool]) -> Text type: List[Tuple[str, int]] type: (Text, Text, Text, Text) -> Text type: (Text, Text, Text) -> Text type: (Text, Text) -> Text type: (Text, Text, Text, Optional[int], Optional[Text], Optional[Text], Optional[Text], Optional[Text], Optional[Text]) -> Text type: (Text, Optional[Text]) -> Text type: (Text, Text, Text, Optional[int], Optional[Text], Optional[Text]) -> Text type: (Text, Text, Optional[Text], Optional[Text]) -> Text type: (Text, Text, Text, Text, Optional[Text]) -> Text type: (List[Dict[str, Any]], Optional[bool]) -> Text type: (Text) -> Text type: (List[Dict[str, Any]]) -> List[Tuple[str, int]] type: Dict[str, int] Sort by commit count, breaking ties alphabetically. type: List[Tuple[str, int]] type: List[int] | 811 | en | 0.076579 |
import random
import unittest
from typing import Tuple
import torch
import numpy as np
from src.utilities import set_random_seed
# Use a freshly drawn seed each run so the tests do not silently depend
# on one particular seed value.
_RANDOM_SEED: int = random.randint(0, 100)
# Shapes of the array/tensor drawn when checking reproducibility.
_TEST_ARRAY_SIZE: Tuple[int, int] = (2, 2)
_TEST_TENSOR_SIZE: Tuple[int, int] = (2, 2)
def _set_random_seed():
    """Seed all supported RNGs with the module-level test seed."""
    set_random_seed(random_seed=_RANDOM_SEED)
class TestSetRandomSeed(unittest.TestCase):
    """Unit test class for ``set_random_seed`` function.

    Seeding twice and drawing the same random number, array, or tensor
    each time proves the seed function controls Python's ``random``,
    NumPy, and PyTorch generators.
    """

    def test_random(self):
        _set_random_seed()
        expected = random.random()
        _set_random_seed()
        assert expected == random.random()

    def test_numpy(self):
        _set_random_seed()
        expected = np.random.random(size=_TEST_ARRAY_SIZE)
        _set_random_seed()
        assert (expected == np.random.random(size=_TEST_ARRAY_SIZE)).all()

    def test_torch(self):
        _set_random_seed()
        expected = torch.rand(size=_TEST_TENSOR_SIZE)
        _set_random_seed()
        assert (expected == torch.rand(size=_TEST_TENSOR_SIZE)).all()
if __name__ == '__main__':
unittest.main()
| tests/test_set_random_seed.py | 1,263 | Unit test class for ``set_random_seed`` function.
The test checks the random seed function for Python random,
NumPy, and PyTorch by asserting the first random number, array,
or tensor is always the same after seeding. | 218 | en | 0.701773 |
import re
from typing import Any
from typing import Awaitable
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
class CommandRouter:
    """Maps command regexes to async handler functions.

    Handlers are registered with :meth:`register_command`; routers can be
    composed by passing child routers as ``subrouters``, whose handler
    tables are merged into this router's table.
    """

    def __init__(self, subrouters: Optional[List["CommandRouter"]] = None) -> None:
        """
        :param subrouters: child routers whose handlers are merged in
            (later routers win on duplicate regexes).
        """
        # Fixed: the default used to be a shared mutable list ([]);
        # use the None-sentinel idiom instead.
        self.command_handlers: Dict[str, Callable[..., Awaitable[Any]]] = dict()
        for subrouter in subrouters or []:
            self.command_handlers.update(subrouter.command_handlers)

    def register_command(self, regex: str) -> Callable[[Callable], Callable]:
        """Return a decorator registering its target as the handler for *regex*."""
        def decorator(
            function: Callable[..., Awaitable[Any]]
        ) -> Callable[..., Awaitable[Any]]:
            self.command_handlers[regex] = function
            return function

        return decorator

    def find_commands(self, body: str) -> List[str]:
        """Find all commands in a comment.

        Each registered regex appears in the result once per match in *body*.
        """
        commands = []
        for regex in self.command_handlers.keys():
            for _ in re.findall(regex, body):
                commands.append(regex)
        return commands
| marvin/command_router.py | 1,006 | Find all commands in a comment. | 31 | en | 0.937337 |
# coding: utf-8
import sys
from collections import Counter
import numpy as np
import tensorflow.contrib.keras as kr
import tensorflow as tf
# Detect the interpreter's major version once at import time; the helper
# functions below use this flag to bridge Python 2/3 text handling.
if sys.version_info[0] > 2:
    is_py3 = True
else:
    # reload(sys)
    # Python 2 only: force UTF-8 as the default string encoding.
    # NOTE(review): sys.setdefaultencoding is removed by site.py, so this
    # likely needs the reload(sys) above to be uncommented — confirm on Py2.
    sys.setdefaultencoding("utf-8")
    is_py3 = False
def native_word(word, encoding='utf-8'):
    """Encode *word* on Python 2; pass through unchanged on Python 3.

    Useful when a model trained under Python 3 is loaded from Python 2.
    """
    return word if is_py3 else word.encode(encoding)
def native_content(content):
    """Decode bytes to text on Python 2; no-op on Python 3."""
    return content if is_py3 else content.decode('utf-8')
def open_file(filename, mode='r'):
    """Open *filename* portably across Python 2 and 3.

    mode: 'r' or 'w' for read or write
    """
    if not is_py3:
        return open(filename, mode)
    # Python 3: decode as UTF-8 and skip undecodable bytes.
    return open(filename, mode, encoding='utf-8', errors='ignore')
def read_file(filename):
    """Read a tab-separated ``label<TAB>content`` data file.

    :returns: ``(contents, labels)`` lists. Lines that do not split into
        exactly two tab-separated fields are skipped. Note: a label is
        only recorded when its content is truthy, so the two lists can
        differ in length for lines with empty content (behavior kept
        from the original implementation).
    """
    contents, labels = [], []
    with open_file(filename) as f:
        for line in f:
            # Fixed: the previous bare `except: pass` swallowed every
            # exception; only a failed two-field split is expected here.
            try:
                label, content = line.strip().split('\t')
            except ValueError:
                continue
            contents.append(content)
            if content:
                labels.append(native_content(label))
    return contents, labels
def build_vocab(train_dir, vocab_dir, vocab_size=5000):
    """Build a character vocabulary from the training set and save it.

    The ``vocab_size - 1`` most frequent characters are kept, with a
    leading '<PAD>' token so all texts can be padded to a common length.

    :param train_dir: path of the training data file.
    :param vocab_dir: path the vocabulary file is written to.
    :param vocab_size: total vocabulary size including the pad token.
    """
    data_train, _ = read_file(train_dir)

    # Flatten all texts into one character sequence.
    all_data = []
    for content in data_train:
        all_data.extend(content)

    counter = Counter(all_data)
    count_pairs = counter.most_common(vocab_size - 1)
    words, _ = list(zip(*count_pairs))
    # Reserve index 0 for the padding token.
    words = ['<PAD>'] + list(words)
    # Fixed: the file handle was previously leaked
    # (open_file(...).write(...) without close); close it deterministically.
    with open_file(vocab_dir, mode='w') as fp:
        fp.write('\n'.join(words) + '\n')
def read_vocab(vocab_dir):
    """Load the vocabulary file; return (words, word_to_id)."""
    with open_file(vocab_dir) as fp:
        # On Python 2 every entry is converted to unicode.
        words = [native_content(line.strip()) for line in fp.readlines()]
    word_to_id = {word: index for index, word in enumerate(words)}
    return words, word_to_id
def read_category():
    """Return the fixed list of news categories and their id mapping."""
    categories = ['体育', '财经', '房产', '家居', '教育', '科技', '时尚', '时政', '游戏', '娱乐']
    categories = [native_content(x) for x in categories]
    cat_to_id = {category: index for index, category in enumerate(categories)}
    return categories, cat_to_id
def to_words(content, words):
    """Map a sequence of vocabulary ids back to the text they encode."""
    return ''.join(map(words.__getitem__, content))
def process_file(filename, word_to_id, cat_to_id, max_length=600):
    """Convert a data file into (raw contents, one-hot label matrix).

    In this BERT variant the texts are returned as raw strings: the
    id-encoding and padding steps below are deliberately commented out,
    and ``word_to_id`` / ``max_length`` are currently unused.
    """
    contents, labels = read_file(filename)
    # np.save('./train_x.npy', contents)
    # np.savetxt('./train_x.txt', contents, fmt='%s')
    data_id, label_id = [], []
    for i in range(len(contents)):
        # data_id.append([word_to_id[x] for x in contents[i] if x in word_to_id])
        label_id.append(cat_to_id[labels[i]])
    # Keras pad_sequences would pad texts to a fixed length (disabled here).
    # x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length)
    y_pad = kr.utils.to_categorical(label_id, num_classes=len(cat_to_id))  # labels as one-hot rows
    return contents, y_pad
def batch_iter(x, y, batch_size=64):
    """Yield shuffled (x, y) mini-batches covering the whole dataset once."""
    data_len = len(x)
    num_batch = int((data_len - 1) / batch_size) + 1
    # np.random.permutation returns a NEW shuffled index array and leaves
    # its input untouched (unlike np.random.shuffle, which works in place).
    indices = np.random.permutation(np.arange(data_len))
    x_shuffle = np.array(x)[indices]
    y_shuffle = y[indices]
    for batch_index in range(num_batch):
        start = batch_index * batch_size
        end = min(start + batch_size, data_len)
        yield x_shuffle[start:end], y_shuffle[start:end]
def attention(inputs, attention_size, l2_reg_lambda):
    """
    Attention mechanism layer.

    :param inputs: outputs of RNN/Bi-RNN layer (not final state)
    :param attention_size: linear size of attention weights
    :param l2_reg_lambda: L2 regularization weight — currently unused;
        the regularization block at the bottom is commented out.
    :return: outputs of the passed RNN/Bi-RNN reduced with attention vector
    """
    # In case of Bi-RNN input we need to concatenate outputs of its forward and backward parts
    if isinstance(inputs, tuple):
        # NOTE(review): tf.concat(2, inputs) uses the legacy pre-TF-1.0
        # argument order (axis first) — confirm it matches the TF version
        # this file is run with.
        inputs = tf.concat(2, inputs)
    sequence_length = inputs.get_shape()[1].value  # the length of sequences processed in the antecedent RNN layer
    hidden_size = inputs.get_shape()[2].value  # hidden size of the RNN layer
    # W, b apply a nonlinear transform to the RNN outputs; the result is
    # then dotted with the learned context vector u (additive attention).
    W_omega = tf.get_variable("W_omega", initializer=tf.random_normal([hidden_size, attention_size], stddev=0.1))
    b_omega = tf.get_variable("b_omega", initializer=tf.random_normal([attention_size], stddev=0.1))
    u_omega = tf.get_variable("u_omega", initializer=tf.random_normal([attention_size], stddev=0.1))
    v = tf.tanh(tf.matmul(tf.reshape(inputs, [-1, hidden_size]), W_omega) + tf.reshape(b_omega, [1, -1]))
    vu = tf.matmul(v, tf.reshape(u_omega, [-1, 1]))
    # Manual softmax over the time axis to obtain attention weights.
    exps = tf.reshape(tf.exp(vu), [-1, sequence_length])
    alphas = exps / tf.reshape(tf.reduce_sum(exps, 1), [-1, 1])
    # Output of Bi-RNN is reduced with attention vector
    output = tf.reduce_sum(inputs * tf.reshape(alphas, [-1, sequence_length, 1]), 1)
    #if l2_reg_lambda > 0:
    #    l2_loss += tf.nn.l2_loss(W_omega)
    #    l2_loss += tf.nn.l2_loss(b_omega)
    #    l2_loss += tf.nn.l2_loss(u_omega)
    #    tf.add_to_collection('losses', l2_loss)
    return output
| data/cnews_loader_bert.py | 6,073 | Attention mechanism layer.
:param inputs: outputs of RNN/Bi-RNN layer (not final state)
:param attention_size: linear size of attention weights
:return: outputs of the passed RNN/Bi-RNN reduced with attention vector
生成批次数据
根据训练集构建词汇表,存储, x, y
如果在python2下面使用python3训练的模型,可考虑调用此函数转化一下字符编码
常用文件操作,可在python2和python3间切换.
mode: 'r' or 'w' for read or write
将文件转换为id表示
读取分类目录,固定
读取文件数据
读取词汇表
将id表示的内容转换为文字
coding: utf-8 reload(sys) while True: line = f.readline() contents.append(list(native_content(content))) if not line: break 添加一个 <PAD> 来将所有文本pad为同一长度 words = open_file(vocab_dir).read().strip().split('\n') 如果是py2 则每个值都转化为unicode np.save('./train_x.npy', contents) np.savetxt('./train_x.txt', contents, fmt='%s') data_id.append([word_to_id[x] for x in contents[i] if x in word_to_id]) 使用keras提供的pad_sequences来将文本pad为固定长度 x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length) 将标签转换为one-hot表示 区别在于shuffle直接在原来的数组上进行操作,改变原来数组的顺序,无返回值。 而permutation不直接在原来的数组上进行操作,而是返回一个新的打乱顺序的数组,并不改变原来的数组。 yield x[start_id:end_id], y[start_id:end_id] In case of Bi-RNN input we need to concatenate outputs of its forward and backward parts the length of sequences processed in the antecedent RNN layer hidden size of the RNN layer Attention mechanism W,b 相当于对RNN的输出做一个非线性变化,得到的结果在和u做内积 Output of Bi-RNN is reduced with attention vectorif l2_reg_lambda > 0: l2_loss += tf.nn.l2_loss(W_omega) l2_loss += tf.nn.l2_loss(b_omega) l2_loss += tf.nn.l2_loss(u_omega) tf.add_to_collection('losses', l2_loss) | 1,513 | zh | 0.289185 |
#
# This file is part of Orchid and related technologies.
#
# Copyright (c) 2017-2021 Reveal Energy Services. All Rights Reserved.
#
# LEGAL NOTICE:
# Orchid contains trade secrets and otherwise confidential information
# owned by Reveal Energy Services. Access to and use of this information is
# strictly limited and controlled by the Company. This file may not be copied,
# distributed, or otherwise disclosed outside of the Company's facilities
# except under appropriate precautions to maintain the confidentiality hereof,
# and may not be used in any way not expressly authorized by the Company.
#
import pathlib
def _stem_names():
"""Returns the sequence of example stem names."""
example_stems = ['completion_analysis', 'plot_time_series', 'plot_trajectories',
'plot_treatment', 'search_data_frames', 'volume_2_first_response']
return example_stems
def notebook_names():
    """Returns the sequence of example notebook names."""
    return [str(pathlib.Path(stem).with_suffix('.ipynb')) for stem in _stem_names()]
def ordered_script_names():
    """Return the example script names in the order they should be run."""
    run_order = [
        ('plot_trajectories.py', 0),
        ('plot_treatment.py', 1),
        ('plot_time_series.py', 2),
        ('completion_analysis.py', 3),
        ('volume_2_first_response.py', 4),
        ('search_data_frames.py', 5),
    ]
    ordered_names = [name for name, _ in sorted(run_order, key=lambda pair: pair[1])]
    # Sanity check: the ordered list must cover exactly the script names.
    difference = set(script_names()).difference(set(ordered_names))
    assert len(difference) == 0, f'Ordered set, {ordered_names},' \
                                 f' differs from, set {script_names()}' \
                                 f' by, {difference}.'
    return ordered_names
def script_names():
    """Returns the sequence of example script names."""
    return [str(pathlib.Path(stem).with_suffix('.py')) for stem in _stem_names()]
| examples.py | 1,951 | Returns the sequence of example stem names.
Returns the sequence of example notebook names.
Returns the sequence of example script names.
This file is part of Orchid and related technologies. Copyright (c) 2017-2021 Reveal Energy Services. All Rights Reserved. LEGAL NOTICE: Orchid contains trade secrets and otherwise confidential information owned by Reveal Energy Services. Access to and use of this information is strictly limited and controlled by the Company. This file may not be copied, distributed, or otherwise disclosed outside of the Company's facilities except under appropriate precautions to maintain the confidentiality hereof, and may not be used in any way not expressly authorized by the Company. | 721 | en | 0.864083 |
import os
import subprocess
from tempfile import NamedTemporaryFile
from jinja2 import Template
# This file designed in a way that is independent of Django
# in order to be easy (but changes are required) to be used
# outside Django in the future
# That's why is using jinja2 as a template language instead of
# Django's template language.
#
# Example of use:
# Make sure to have jinja2 template language:
# python3 -m venv venv
# pip3 install jinja2
#
# In a Python file:
# import json
# import main # or the name that this file is saved as...
#
# datapackage = json.load(open("datapackage.json"))
# main.datapackage_to_markdown(datapackage)
def datapackage_to_markdown(datapackage):
    """
    Render the datapackage through the Markdown Jinja2 template.

    datapackage: datapackage schema as a dictionary
    returns: UTF-8 encoded bytes of the Markdown documentation
    """
    markdown = Template(template_to_md).render(datapackage)
    return markdown.encode('utf-8')
def datapackage_to_pdf(datapackage):
    """
    Render the datapackage documentation as a PDF using pandoc.

    datapackage: datapackage schema as a dictionary
    returns: binary content with the PDF
    raises: OSError if pandoc is not installed,
            RuntimeError if pandoc exits with a non-zero status
    """
    markdown = datapackage_to_markdown(datapackage)
    # Temporary output file for pandoc; delete=False so pandoc can write to it
    # after we close it. We unlink it ourselves on every exit path.
    f = NamedTemporaryFile(suffix='.pdf', delete=False)
    f.close()
    command_line = ['pandoc', '--to=latex', f'--output={f.name}']
    try:
        pandoc_process = subprocess.run(command_line,
                                        input=markdown)
    except FileNotFoundError:
        os.unlink(f.name)
        raise OSError(f'FileNotFoundError trying to execute: {command_line}')
    except subprocess.CalledProcessError:
        # NOTE(review): only reachable if check=True is ever passed to
        # subprocess.run; kept as a defensive branch.
        os.unlink(f.name)
        raise RuntimeError(f'CalledProcessError trying to execute: {command_line}')
    if pandoc_process.returncode != 0:
        os.unlink(f.name)
        # Fixed message: the failure detected here is a non-zero exit status,
        # not a zero-size output file.
        raise RuntimeError(f'Command {command_line} exited with non-zero status '
                           f'{pandoc_process.returncode}')
    # Read the generated PDF; use a context manager so the handle is not leaked.
    with open(f.name, 'rb') as pdf_file:
        pdf_content = pdf_file.read()
    os.unlink(f.name)
    return pdf_content
# jinja2 template that turns a datapackage dictionary into Markdown.
# Fixes over the previous revision: "Desription" typo corrected to
# "Description", and the plural branch of the license section now reads
# "Dataset licenses" (it previously repeated the singular heading).
template_to_md = '''# {{ title }}
## Dataset description
{{ description }}
{% if contributors|length == 1 %}
## Contributor
{% else %}
## Contributors
{% endif %}{% for contributor in contributors %} * {{ contributor.title }} ({{ contributor.role }})
{% endfor %}
{% if keywords|length == 1 %}
## Keyword
{% else %}## Keywords
{% endif %}{% for keyword in keywords %} * {{ keyword }}
{% endfor %}
## Version
{{ version }}
## Homepage
[{{ homepage }}]({{ homepage }})
{% if licenses|length == 1 %}
## Dataset license
{% else %}
## Dataset licenses
{% endif %}{% for license in licenses %} * {{ license.title }} ([{{ license.name }}]({{ license.path }}))
{% endfor %}
## Resources
{% for resource in resources %}
### {{ resource.title }}
 * Name: {{ resource.name }}
 * Profile: {{ resource.profile }}
 * Path: {{ resource.path }}
{% if resource.format %} * Format: {{ resource.format }}{% endif %}
{% if resource.encoding %} * Encoding: {{ resource.encoding }}{% endif %}
{% if resource.description %} * Description: {{ resource.description }}{% endif %}
{% if resource.schema.fields %}
#### Fields
{% for field in resource.schema.fields %} * **{{ field.name }}** ({{ field.type }}): {{ field.description }}
{% endfor %}
{% endif %}
{% endfor %}
'''
| SchemaCollaboration/datapackage_to_documentation/main.py | 3,228 | datapackage: datapackage schema as a dictionary
returns: str with the Markdown documentation
datapackage: datapackage schema as a dictionary
returns: binary content with the PDF or None if the conversion failed.
This file designed in a way that is independent of Django in order to be easy (but changes are required) to be used outside Django in the future That's why is using jinja2 as a template language instead of Django's template language. Example of use: Make sure to have jinja2 template language: python3 -m venv venv pip3 install jinja2 In a Python file: import json import main or the name that this file is saved as... datapackage = json.load(open("datapackage.json")) main.datapackage_to_markdown(datapackage) | 725 | en | 0.752427 |
# ========================================================================= #
# Copyright 2018 National Technology & Engineering Solutions of Sandia,
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================= #
from ..instruction_parent_class import LogicalInstruction
from ...circuits.quantum_circuit import QuantumCircuit
from ..helper_functions import pos2qudit
class InstrSynExtraction(LogicalInstruction):
    """
    Instruction for a round of syndrome extraction.

    Builds the abstract circuit that measures every X-type and Z-type check
    of the (medial) surface-code patch once, then compiles it.

    Parent class sets self.qecc.
    """
    def __init__(self, qecc, symbol, **gate_params):
        super().__init__(qecc, symbol, **gate_params)
        # Tick-schedule defaults come from the QECC's parameters; each can be
        # overridden per instruction through gate_params.
        qecc_init_ticks = qecc.qecc_params.get('init_ticks', 0)
        qecc_meas_ticks = qecc.qecc_params.get('meas_ticks', 7)
        qecc_x_ticks = qecc.qecc_params.get('x_ticks', [2, 4, 3, 5])
        qecc_z_ticks = qecc.qecc_params.get('z_ticks', [2, 4, 3, 5])
        self.init_ticks = gate_params.get('init_ticks', qecc_init_ticks)
        self.meas_ticks = gate_params.get('meas_ticks', qecc_meas_ticks)
        self.x_ticks = gate_params.get('x_ticks', qecc_x_ticks)
        self.z_ticks = gate_params.get('z_ticks', qecc_z_ticks)
        self.abstract_circuit = QuantumCircuit(**gate_params)
        self.data_qudit_set = self.qecc.data_qudit_set
        self.ancilla_qudit_set = self.qecc.ancilla_qudit_set
        # Ancilla ids, partitioned by the check type they measure.
        self.ancilla_x_check = set([])
        self.ancilla_z_check = set([])
        # Go through the ancillas and grab the data qubits that are on either side of it.
        layout = qecc.layout  # qudit_id => (x, y)
        self.pos2qudit = pos2qudit(layout)
        for q, (x, y) in layout.items():
            # Even/even coordinates are ancilla sites in this layout.
            if x % 2 == 0 and y % 2 == 0:
                # Ancilla
                if x % 4 == y % 4:
                    # X check
                    self._create_x_check(q, x, y)
                else:
                    # Z check
                    self._create_z_check(q, x, y)
        # Determine the logical operations
        # --------------------------------
        # Logical Z runs along the top boundary, logical X along the left.
        z_qudits = set(qecc.sides['top'])
        x_qudits = set(qecc.sides['left'])
        logical_ops = [  # Each element in the list corresponds to a logical qubit
            # The keys label the type of logical operator
            {'X': QuantumCircuit([{'X': x_qudits}]), 'Z': QuantumCircuit([{'Z': z_qudits}])},
        ]
        self.initial_logical_ops = logical_ops
        # Syndrome extraction leaves the logical operators unchanged, so the
        # final operators are a fresh copy of the same circuits.
        logical_ops = [  # Each element in the list corresponds to a logical qubit
            # The keys label the type of logical operator
            {'X': QuantumCircuit([{'X': x_qudits}]), 'Z': QuantumCircuit([{'Z': z_qudits}])},
        ]
        self.final_logical_ops = logical_ops
        # No logical outcome is produced by a plain syndrome-extraction round.
        self.logical_signs = None
        self.logical_stabilizers = None
        # Must be called at the end of initiation.
        self._compile_circuit(self.abstract_circuit)
        # Lazily-built cache for the stabs_destabs property.
        self._stabs_destabs = {}
    def _create_x_check(self, ancilla, x, y):
        """
        Creates X-checks for circuit_extended.

        `ancilla` is the qudit id of the check's ancilla; (x, y) is its
        position in the layout.
        """
        # register the x syndrome ancillas
        self.ancilla_x_check.add(ancilla)
        # get where the position of where the data qubits should be relative to the ancilla
        data_pos = self._data_pos_x_check(x, y)
        # Get the actual, available data-qubits and their ticks that correspond to the possible data qubit positions
        datas, my_data_ticks = self._find_data(position_to_qudit=self.pos2qudit, positions=data_pos,
                                               ticks=self.x_ticks)
        # Now add the check to the extended circuit
        locations = set(datas)
        locations.add(ancilla)
        self.abstract_circuit.append('X check', locations=locations, datas=datas, ancillas=ancilla,
                                     ancilla_ticks=self.init_ticks, data_ticks=my_data_ticks,
                                     meas_ticks=self.meas_ticks)
    def _create_z_check(self, ancilla, x, y):
        """
        Creates Z-checks for circuit_extended.

        `ancilla` is the qudit id of the check's ancilla; (x, y) is its
        position in the layout.
        """
        # register the z syndrome ancillas
        self.ancilla_z_check.add(ancilla)
        # get where the position of where the data qubits should be relative to the ancilla
        data_pos = self._data_pos_z_check(x, y)
        # Get the actual, available data-qubits and their ticks that correspond to the possible data qubit positions
        datas, my_data_ticks = self._find_data(position_to_qudit=self.pos2qudit, positions=data_pos,
                                               ticks=self.z_ticks)
        # Now add the check to the extended circuit
        locations = set(datas)
        locations.add(ancilla)
        self.abstract_circuit.append('Z check', locations=locations, datas=datas, ancillas=ancilla,
                                     ancilla_ticks=self.init_ticks, data_ticks=my_data_ticks,
                                     meas_ticks=self.meas_ticks)
    @staticmethod
    def _find_data(position_to_qudit, positions, ticks):
        """
        From the positions given for possible data qudits, add the qudits and their corresponding ticks for each qudit
        that does exist.

        Positions with no qudit (e.g. off the boundary of the patch) are
        silently skipped, which is how weight-2 boundary checks arise.

        :param position_to_qudit: dict mapping (x, y) -> qudit id
        :param positions: candidate data positions, in tick order
        :param ticks: tick list parallel to `positions`
        :return: (data qudit ids, matching ticks) as two parallel lists
        """
        data_list = []
        tick_list = []
        for i, p in enumerate(positions):
            data = position_to_qudit.get(p, None)
            if data is not None:
                data_list.append(data)
                tick_list.append(ticks[i])
        return data_list, tick_list
    @staticmethod
    def _data_pos_z_check(x, y):
        """
        Determines the position of data qudits in a Z check in order of ticks.

        Check direction:   1 | 2
                             |
                          ---+---
                             |
                           3 | 4
        """
        data_pos = [
            (x - 1, y + 1),
            (x + 1, y + 1),
            (x - 1, y - 1),
            (x + 1, y - 1)
        ]
        return data_pos
    @staticmethod
    def _data_pos_x_check(x, y):
        """
        Determines the position of data qudits in an X check in order of ticks.

        Check direction:   1 | 3
                             |
                          ---+---
                             |
                           2 | 4
        """
        data_pos = [
            (x - 1, y + 1),
            (x - 1, y - 1),
            (x + 1, y + 1),
            (x + 1, y - 1)
        ]
        return data_pos
    @property
    def stabs_destabs(self):
        """
        Stabilizer/destabilizer generators for the state right after this
        instruction, as a dict with keys 'stabs_x', 'stabs_z', 'destabs_x',
        'destabs_z'; each value is a list of qudit-id sets (rows are parallel
        across the four lists). The result is cached after the first call.
        """
        if self._stabs_destabs:
            return self._stabs_destabs
        # The destabilizer construction below assumes a square patch.
        if self.qecc.height != self.qecc.width:
            raise Exception('This currently only works for square code blocks.')
        instr = self
        stabs_row_x = []
        stabs_row_z = []
        destabs_row_x = []
        destabs_row_z = []
        # Each ancilla starts in |0>: Z_a is a stabilizer, X_a its destabilizer.
        for a in self.ancilla_qudit_set:
            stabs_row_z.append({a})
            stabs_row_x.append(set([]))
            destabs_row_x.append({a})
            destabs_row_z.append(set([]))
        xdestabs = self.generate_xdestabs()
        zdestabs = self.generate_zdestabs()
        # Creating stabilizers
        for check_type, _, params in instr.abstract_circuit.items():
            if check_type == 'X check':
                # Pauli X-type stabilizers; the matching destabilizer is Z-type.
                stabs_row_x.append(set(params['datas']))
                stabs_row_z.append(set([]))
                destabs_row_x.append(set([]))
                destabs_row_z.append(zdestabs[params['ancillas']])
            else:
                # Pauli Z-type stabilizers; the matching destabilizer is X-type.
                stabs_row_z.append(set(params['datas']))
                stabs_row_x.append(set([]))
                destabs_row_z.append(set([]))
                destabs_row_x.append(xdestabs[params['ancillas']])
        output_dict = {
            'stabs_x': stabs_row_x,
            'stabs_z': stabs_row_z,
            'destabs_x': destabs_row_x,
            'destabs_z': destabs_row_z,
        }
        self._stabs_destabs = output_dict
        return output_dict
    def generate_xdestabs(self):
        """
        Build the X-type destabilizer for each Z-check ancilla.

        Works in "data coordinates" (the layout position of a data qudit at
        (x, y) is (2x+1, 2y+1)) and returns {ancilla_id: set of data qudit
        ids}. The geometric construction (diagonal chains plus a "ladder"
        along the x == 0 boundary) assumes a square patch of the given
        distance -- NOTE(review): verified only for square blocks, see the
        guard in stabs_destabs.
        """
        distance = self.qecc.distance
        # x-type destabilizers
        xdestabs_temp = []
        # Diagonal chains starting along the bottom edge; the starting parity
        # depends on whether the distance is even or odd.
        if distance % 2 == 0:
            b = 1
        else:
            b = 2
        for x in range(b, distance, 2):
            temp = []
            y = distance - 1
            for j in range(0, distance):
                new_point = (x + j, y - j)
                if new_point[1] <= 0:
                    break
                if new_point[0] > distance - 1:
                    break
                temp.append(new_point)
            xdestabs_temp.append(temp)
        # Expand each chain into all of its prefixes: each prefix is one
        # destabilizer (anchored at its last point).
        xdestabs = []
        for ds in xdestabs_temp:
            for i in range(len(ds)):
                temp = []
                for j in range(i + 1):
                    temp.append(ds[j])
                xdestabs.append(temp)
        # -----------------
        # ladder climb: prefixes of the x == 0 column, walked top to bottom.
        ladder = []
        x = 0
        for y in range(distance - 1, 0, -1):
            ladder.append((x, y))
        for i in range(len(ladder)):
            xdestabs.append(ladder[:i + 1])
        # Diagonal extensions branching off alternate rungs of the ladder.
        ladder_points = []
        for i in range((distance + 1) % 2, distance - 1, 2):
            ladder_points.append(i)
        ladder_temp = []
        for i in ladder_points:
            temp = list(ladder[:i + 1])
            x, y = ladder[i]
            for j in range(1, distance):
                if j != 1:
                    temp = list(ladder_temp[-1])
                new_point = (x + j, y - j)
                if new_point[1] <= 0:
                    break
                if new_point[0] >= distance - 1:
                    break
                temp.append(new_point)
                ladder_temp.append(temp)
        xdestabs.extend(ladder_temp)
        # Convert data coordinates back to qudit ids and anchor each
        # destabilizer to its Z-check ancilla.
        set_destabs = {}
        relayout = {v: k for k, v in self.qecc.layout.items()}
        for d in xdestabs:
            row = set([])
            # Find the associated ancilla location
            x, y = d[-1]
            a = relayout[(2 * x + 1 + 1, 2 * y + 1 - 1)]
            if a in self.ancilla_x_check:
                # Wrong check type: take the ancilla on the other side.
                a = relayout[(2 * x - 1 + 1, 2 * y + 1 - 1)]
            for x, y in d:
                row.add(relayout[(2 * x + 1, 2 * y + 1)])
            set_destabs[a] = set(row)
        return set_destabs
    def generate_zdestabs(self):
        """
        Build the Z-type destabilizer for each X-check ancilla.

        Mirror image of generate_xdestabs (x and y roles swapped); returns
        {ancilla_id: set of data qudit ids}.
        """
        distance = self.qecc.distance
        # z-type destabilizers (transposed version of the x construction)
        zdestabs_temp = []
        # Diagonal chains starting along the right edge.
        if distance % 2 == 0:
            b = 2
        else:
            b = 1
        for y in range(b, distance, 2):
            temp = []
            x = distance - 1
            for j in range(0, distance):
                new_point = (x - j, y + j)
                if new_point[0] <= 0:
                    break
                if new_point[1] > distance - 1:
                    break
                temp.append(new_point)
            zdestabs_temp.append(temp)
        # Expand each chain into all of its prefixes.
        zdestabs = []
        for ds in zdestabs_temp:
            for i in range(len(ds)):
                temp = []
                for j in range(i + 1):
                    temp.append(ds[j])
                zdestabs.append(temp)
        # -----------------
        # ladder climb: prefixes of the y == 0 row, walked right to left.
        ladder = []
        y = 0
        for x in range(distance - 1, 0, -1):
            ladder.append((x, y))
        for i in range(len(ladder)):
            zdestabs.append(ladder[:i + 1])
        # Diagonal extensions branching off alternate rungs of the ladder.
        ladder_points = []
        for i in range(distance % 2, distance - 1, 2):
            ladder_points.append(i)
        ladder_temp = []
        for i in ladder_points:
            temp = list(ladder[:i + 1])
            x, y = ladder[i]
            for j in range(1, distance):
                if j != 1:
                    temp = list(ladder_temp[-1])
                new_point = (x - j, y + j)
                if new_point[0] <= 0:
                    break
                if new_point[1] >= distance - 1:
                    break
                temp.append(new_point)
                ladder_temp.append(temp)
        zdestabs.extend(ladder_temp)
        # Convert data coordinates back to qudit ids and anchor each
        # destabilizer to its X-check ancilla.
        set_destabs = {}
        relayout = {v: k for k, v in self.qecc.layout.items()}
        for d in zdestabs:
            row = set([])
            # Find the associated ancilla location
            x, y = d[-1]
            a = relayout[(2 * x + 1 - 1, 2 * y + 1 + 1)]
            if a in self.ancilla_z_check:
                # Wrong check type: take the ancilla on the other side.
                a = relayout[(2 * x + 1 - 1, 2 * y + 1 - 1)]
            for x, y in d:
                row.add(relayout[(2 * x + 1, 2 * y + 1)])
            set_destabs[a] = row
        return set_destabs
class InstrInitZero(LogicalInstruction):
    """
    Instruction for initializing a logical zero.

    It is just like syndrome extraction except the data qubits are initialized in the zero state at tick = 0.

    `ideal_meas` == True will cause the measurements to be replaced with ideal measurements.

    Parent class sets self.qecc.
    """
    def __init__(self, qecc, symbol, **gate_params):
        super().__init__(qecc, symbol, **gate_params)
        self.symbol = 'instr_init_zero'
        self.data_qudit_set = self.qecc.data_qudit_set
        self.ancilla_qudit_set = self.qecc.ancilla_qudit_set
        # This is basically syndrome extraction round where all the data qubits are initialized to zero.
        syn_ext = qecc.instruction('instr_syn_extract', **gate_params)
        # Make a shallow copy of the abstract circuits.
        self.abstract_circuit = syn_ext.abstract_circuit.copy()
        self.abstract_circuit.params.update(gate_params)
        self.ancilla_x_check = syn_ext.ancilla_x_check
        self.ancilla_z_check = syn_ext.ancilla_z_check
        data_qudits = syn_ext.data_qudit_set
        # Prepend the data-qudit initialization at tick 0.
        self.abstract_circuit.append('init |0>', locations=data_qudits, tick=0)
        self.initial_logical_ops = [  # Each element in the list corresponds to a logical qubit
            # The keys label the type of logical operator
            {'X': None, 'Z': None},  # None => can be anything
        ]
        # Special for state initialization:
        # ---------------------------------
        # list of tuples of logical check and delogical stabilizer for each logical qudit.
        self.final_logical_ops = [
            {'Z': QuantumCircuit([{'Z': set(qecc.sides['top'])}]), 'X': QuantumCircuit([{'X': set(qecc.sides['left'])}])}
        ]
        # List of corresponding logical sign. (The logical sign if the instruction is preformed ideally.)
        self.logical_signs = [0]
        self.logical_stabilizers = ['Z']
        # ---------------------------------
        # Must be called at the end of initiation.
        self._compile_circuit(self.abstract_circuit)
        # Lazily-built cache for the stabs_destabs property.
        self._stabs_destabs = {}
    @property
    def stabs_destabs(self):
        """
        Stabilizer/destabilizer rows after logical-zero initialization: the
        syndrome-extraction generators plus logical Z as an extra stabilizer
        (with logical X as its destabilizer). Cached after the first call.
        """
        if self._stabs_destabs:
            return self._stabs_destabs
        gate_params = self.gate_params
        syn_ext = self.qecc.instruction('instr_syn_extract', **gate_params)
        # Copy each row into a fresh set so the appends below do not mutate
        # the syndrome-extraction instruction's cached rows.
        for name, rows in syn_ext.stabs_destabs.items():
            self._stabs_destabs[name] = []
            for row in rows:
                self._stabs_destabs[name].append(set(row))
        # |0> -> logical Z is a stabilizer
        self._stabs_destabs['stabs_z'].append(set(self.qecc.sides['top']))
        self._stabs_destabs['stabs_x'].append(set([]))
        self._stabs_destabs['destabs_x'].append(set(self.qecc.sides['left']))
        self._stabs_destabs['destabs_z'].append(set([]))
        return self._stabs_destabs
class InstrInitPlus(LogicalInstruction):
    """
    Instruction for initializing a logical plus.

    It is just like syndrome extraction except the data qubits are initialized in the plus state at tick = 0.

    `ideal_meas` == True will cause the measurements to be replaced with ideal measurements.

    Parent class sets self.qecc.
    """
    def __init__(self, qecc, symbol, **gate_params):
        super().__init__(qecc, symbol, **gate_params)
        self.symbol = 'instr_init_plus'
        self.data_qudit_set = self.qecc.data_qudit_set
        self.ancilla_qudit_set = self.qecc.ancilla_qudit_set
        # This is basically syndrome extraction round where all the data qubits are initialized to plus.
        syn_ext = qecc.instruction('instr_syn_extract', **gate_params)
        # Make a shallow copy of the abstract circuits.
        self.abstract_circuit = syn_ext.abstract_circuit.copy()
        self.abstract_circuit.params.update(gate_params)
        self.ancilla_x_check = syn_ext.ancilla_x_check
        self.ancilla_z_check = syn_ext.ancilla_z_check
        data_qudits = syn_ext.data_qudit_set
        # |+> is prepared as H|0>: initialize to |0> at tick 0, then apply H at tick 1.
        self.abstract_circuit.append('init |0>', locations=data_qudits, tick=0)
        self.abstract_circuit.append('H', locations=data_qudits, tick=1)
        self.initial_logical_ops = [  # Each element in the list corresponds to a logical qubit
            # The keys label the type of logical operator
            {'X': None, 'Z': None},  # None => can be anything
        ]
        # Special for state initialization:
        # ---------------------------------
        # list of tuples of logical check and delogical stabilizer for each logical qudit.
        self.final_logical_ops = [
            {'X': QuantumCircuit([{'X': set(qecc.sides['left'])}]), 'Z': QuantumCircuit([{'Z': set(qecc.sides['top'])}])}
        ]
        # List of corresponding logical sign. (The logical sign if the instruction is preformed ideally.)
        self.logical_signs = [0]
        self.logical_stabilizers = ['X']
        # ---------------------------------
        # Must be called at the end of initiation.
        self._compile_circuit(self.abstract_circuit)
        # Lazily-built cache for the stabs_destabs property.
        self._stabs_destabs = {}
    @property
    def stabs_destabs(self):
        """
        Stabilizer/destabilizer rows after logical-plus initialization: the
        syndrome-extraction generators plus logical X as an extra stabilizer
        (with logical Z as its destabilizer). Cached after the first call.
        """
        if self._stabs_destabs:
            return self._stabs_destabs
        gate_params = self.gate_params
        syn_ext = self.qecc.instruction('instr_syn_extract', **gate_params)
        # Copy each row into a fresh set so the appends below do not mutate
        # the syndrome-extraction instruction's cached rows.
        for name, rows in syn_ext.stabs_destabs.items():
            self._stabs_destabs[name] = []
            for row in rows:
                self._stabs_destabs[name].append(set(row))
        # |+> -> logical X is a stabilizer (comment fixed: previously said |0>/Z).
        self._stabs_destabs['stabs_x'].append(set(self.qecc.sides['left']))
        self._stabs_destabs['stabs_z'].append(set([]))
        self._stabs_destabs['destabs_z'].append(set(self.qecc.sides['top']))
        # Bug fix: the final row must be appended to 'destabs_x'. The previous
        # code appended to 'stabs_x' a second time, leaving 'destabs_x' one row
        # short and the four parallel row lists misaligned (compare
        # InstrInitZero.stabs_destabs, which appends exactly one row per key).
        self._stabs_destabs['destabs_x'].append(set([]))
        return self._stabs_destabs
| pecos/qeccs/surface_medial_4444/instructions.py | 19,796 | Instruction for initializing a logical plus.
It is just like syndrome extraction except the data qubits are initialized in the plus state at tick = 0.
`ideal_meas` == True will cause the measurements to be replace with ideal measurements.
Parent class sets self.qecc.
Instruction for initializing a logical zero.
It is just like syndrome extraction except the data qubits are initialized in the zero state at tick = 0.
`ideal_meas` == True will cause the measurements to be replace with ideal measurements.
Parent class sets self.qecc.
Instruction for a round of syndrome extraction.
Parent class sets self.qecc.
Creates X-checks for circuit_extended.
Creates Z-checks for circuit_extended.
Determines the position of data qudits in a Z check in order of ticks.
Check direction: 1 | 3
|
---+---
|
2 | 4
Determines the position of data qudits in a Z check in order of ticks.
Check direction: 1 | 2
|
---+---
|
3 | 4
From the positions given for possible data qudits, add the qudits and their corresponding ticks for each qudit
that does exist.
:param position_to_qudit:
:param positions:
:param ticks:
:return:
========================================================================= Copyright 2018 National Technology & Engineering Solutions of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights in this software. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================================================================= Go through the ancillas and grab the data qubits that are on either side of it. qudit_id => (x, y) Ancilla X check Z check Determine the logical operations -------------------------------- Each element in the list corresponds to a logical qubit The keys label the type of logical operator Each element in the list corresponds to a logical qubit The keys label the type of logical operator Must be called at the end of initiation. 
register the x syndrome ancillas get where the position of where the data qubits should be relative to the ancilla Get the actual, available data-qubits and their ticks that correspond to the possible data qubit positions Now add the check to the extended circuit register the z syndrome ancillas get where the position of where the data qubits should be relative to the ancilla Get the actual, available data-qubits and their ticks that correspond to the possible data qubit positions Now add the check to the extended circuit instr = self.instruction('instr_syn_extract') Creating stabilizers Ancillas initialized in |0> Pauli X-type stabilizers Ancillas initialized in |0> Pauli Z-type stabilizers x-type destabilizers going alone the bottom ---------------- print('-', i, j) ----------------- ladder climb Find the associated ancilla location x-type destabilizers going alone the bottom print(x, y) ---------------- print('-', i, j) ----------------- ladder climb Find the associated ancilla location This is basically syndrome extraction round where all the data qubits are initialized to zero. Make a shallow copy of the abstract circuits. Each element in the list corresponds to a logical qubit The keys label the type of logical operator None => can be anything Special for state initialization: --------------------------------- list of tuples of logical check and delogical stabilizer for each logical qudit. List of corresponding logical sign. (The logical sign if the instruction is preformed ideally.) --------------------------------- Must be called at the end of initiation. |0> -> logical Z is a stabilizer This is basically syndrome extraction round where all the data qubits are initialized to plus. Make a shallow copy of the abstract circuits. 
self.abstract_circuit.append('init |+>', qudits=data_qudits, tick=0) Each element in the list corresponds to a logical qubit The keys label the type of logical operator None => can be anything Special for state initialization: --------------------------------- list of tuples of logical check and delogical stabilizer for each logical qudit. List of corresponding logical sign. (The logical sign if the instruction is preformed ideally.) --------------------------------- Must be called at the end of initiation. |0> -> logical Z is a stabilizer | 4,934 | en | 0.7936 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.