max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
geo2_pipeline/preproc8/30_tokenize/pytok.py
|
brendano/twitter_geo_preproc
| 4
|
6626951
|
<gh_stars>1-10
# Used for experiment, not actually used in pipeline
# Reads TSV lines from stdin where the LAST tab-separated column is a tweet
# JSON blob; tokenizes the tweet text with twokenize and prints the tokens
# space-joined, encoded as UTF-8.
# NOTE(review): Python 2 syntax (`print` statement, `u''` literal) and an
# external `nlp.twokenize` dependency -- not runnable under Python 3 as-is.
import sys,json
from nlp import twokenize
for line in sys.stdin:
    # last tab-separated field is the tweet JSON
    tweet = json.loads(line.split('\t')[-1])
    print u' '.join(twokenize.tokenize(tweet['text'])).encode('utf8')
|
# Used for experiment, not actually used in pipeline
import sys,json
from nlp import twokenize
for line in sys.stdin:
tweet = json.loads(line.split('\t')[-1])
print u' '.join(twokenize.tokenize(tweet['text'])).encode('utf8')
|
en
| 0.865536
|
# Used for experiment, not actually used in pipeline
| 2.872611
| 3
|
cray/modules/aprun/cli.py
|
Cray-HPE/craycli
| 1
|
6626952
|
"""
cli.py - aprun PALS CLI
MIT License
(C) Copyright [2020-2022] Hewlett Packard Enterprise Development LP
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import base64
import os
import sys
import click
from cray import core
from cray.echo import echo, LOG_WARN
from cray.pals import PALSApp, split_mpmd_args, get_resource_limits, parse_hostfile
APRUN_ENV_ALIAS = {
"ALPS_APP_DEPTH": "PALS_DEPTH",
"ALPS_APP_ID": "PALS_APID",
"ALPS_APP_PE": "PALS_RANKID",
}
def parse_rangelist(rli):
    """Parse a comma-separated range list (e.g. "1-4,7,10-8") into integers.

    Each element is a single integer or a "start-end" range; when end is
    below start the range expands in descending order. Integers are parsed
    with base 0, so hex/octal prefixes are accepted.

    :raises click.ClickException: on any malformed element
    """
    nids = []
    try:
        for piece in rli.split(","):
            first, dash, last = piece.partition("-")
            lo = int(first, 0)
            if not dash:
                nids.append(lo)
                continue
            hi = int(last, 0)
            # Descending ranges count down, ascending ranges count up
            step = -1 if hi < lo else 1
            nids.extend(range(lo, hi + step, step))
    except ValueError:
        # pylint: disable=raise-missing-from
        raise click.ClickException("Invalid range list %s" % rli)
    return nids
def parse_rangelist_file(rlif):
    """Parse an open file of rangelists (one per line) into a list of ints.

    Blank lines and lines whose first character is '#' are skipped.
    """
    nids = []
    for rawline in rlif:
        stripped = rawline.strip()
        if not stripped or stripped.startswith("#"):
            continue
        nids.extend(parse_rangelist(stripped))
    return nids
def nids_to_hosts(nidlist):
    """Map integer nids to "nidXXXXXX" hostnames (zero-padded to 6 digits)."""
    return ["nid{0:06d}".format(nid) for nid in nidlist]
def get_hostlist(node_list, node_list_file, exclude_node_list, exclude_node_list_file):
    """Given command-line arguments, produce a host list.

    Include-list precedence: -L rangelist, then -l rangelist file, then
    $PBS_NODEFILE. The exclude list comes from -E / --exclude-node-list-file.

    :raises click.ClickException: if the resulting host list is empty
    """
    nodelist = []
    excllist = []
    # Build node list from command line arguments
    if node_list:
        nodelist = nids_to_hosts(parse_rangelist(node_list))
    elif node_list_file:
        nodelist = nids_to_hosts(parse_rangelist_file(node_list_file))
    elif "PBS_NODEFILE" in os.environ:
        with open(os.environ["PBS_NODEFILE"], encoding="utf-8") as nodefile:
            nodelist = parse_hostfile(nodefile)
    # Build exclude node list from command line arguments
    if exclude_node_list:
        excllist = nids_to_hosts(parse_rangelist(exclude_node_list))
    elif exclude_node_list_file:
        excllist = nids_to_hosts(parse_rangelist_file(exclude_node_list_file))
    # Remove excluded nodes from host list. A set makes each membership
    # test O(1) instead of scanning the exclude list per node.
    excluded = set(excllist)
    hostlist = [node for node in nodelist if node not in excluded]
    # Check list before returning
    if not hostlist:
        raise click.ClickException("No host list provided")
    return hostlist
def get_launch_env(environment_override, environ=None):
    """Build the "VAR=value" environment array for the launch request.

    :param environment_override: iterable of "VAR=value" override strings
    :param environ: base environment mapping; defaults to os.environ
    :raises click.ClickException: on an override missing '='
    """
    # Work on a copy so the caller's mapping is never mutated
    env = os.environ.copy() if environ is None else environ.copy()
    # Apply each override on top of the base environment
    for override in environment_override or ():
        name, sep, value = override.partition("=")
        if not sep:
            raise click.ClickException("Invalid environment variable %s" % override)
        env[name] = value
    # Format into array in the expected format
    return ["%s=%s" % item for item in env.items()]
def get_umask():
    """Return the process umask without permanently changing it."""
    # os.umask both sets and returns the previous value, so set a throwaway
    # mask to read the current one, then immediately restore it.
    current = os.umask(0)
    os.umask(current)
    return current
def get_wdir(wdir):
    """Return the working directory to use for the launch.

    A user-supplied value (argument or $APRUN_WDIR) wins; otherwise fall
    back to the process's current directory.

    :raises click.ClickException: if the cwd cannot be determined
    """
    if wdir:
        return wdir
    # aprun treated a failed getcwd as a fatal error, so we do too
    try:
        cwd = os.getcwd()
    except OSError as err:
        raise click.ClickException("getcwd failed: %s" % str(err))
    return cwd
def get_cpubind(cpu_binding):
    """Convert aprun-style CPU binding to PALS-style."""
    # Keyword translations; anything unrecognized is treated as a
    # colon-separated CPU range list.
    keyword_map = {
        "cpu": "thread",
        "depth": "depth",
        "numa_node": "numa",
        "none": "none",
        "core": "core",
    }
    if not cpu_binding:
        # Unset binding defaults to one thread per rank
        return "thread"
    if cpu_binding in keyword_map:
        return keyword_map[cpu_binding]
    return "list:%s" % cpu_binding
def get_membind(strict_memory_containment):
    """Return the memory binding: "local" under --ss, otherwise "none"."""
    return "local" if strict_memory_containment else "none"
def get_exclusive(access_mode):
    """Translate -F/--access-mode into an exclusive flag.

    Like aprun, only the first letter matters: e/E means exclusive (True),
    s/S means share (False). Returns None when no mode was given.

    :raises click.ClickException: on any other value
    """
    if not access_mode:
        return None
    initial = access_mode[0].lower()
    if initial in ("e", "s"):
        return initial == "e"
    raise click.ClickException("Invalid -F/--access-mode argument %s" % access_mode)
def print_output(params, a_file):
    """Print output from a stdout/stderr RPC to the given file.

    Does nothing when the RPC carried no content.
    """
    body = params.get("content")
    if not body:
        return
    # Content may arrive base64-encoded; decode it before printing
    if params.get("encoding") == "base64":
        body = base64.b64decode(body)
    click.echo(body, nl=False, file=a_file)
def get_argv(executable, args, bypass_app_transfer):
    """Build the application argv.

    When the binary is transferred to the compute nodes argv[0] is just its
    basename; with -b/--bypass-app-transfer the full path is kept.
    """
    name = executable if bypass_app_transfer else os.path.basename(executable)
    return [name] + list(args)
def posint(val):
    """argparse type callback: parse a string as a strictly positive int.

    :raises argparse.ArgumentTypeError: when the value is zero or negative
    """
    parsed = int(val)
    if parsed < 1:
        raise argparse.ArgumentTypeError("%s must be positive" % val)
    return parsed
def parse_mpmd(executable, args, pes, wdir, depth, ppn):
    """Parse MPMD commands from the given arguments.

    The first command comes straight from the main aprun arguments; the
    remaining commands (as split out by split_mpmd_args -- separator
    semantics live in cray.pals) are parsed with a small argparse parser
    accepting -n/-d/-N plus an executable and its trailing arguments.

    Returns a list of command dicts in the launch-request "cmds" format.
    """
    # Split into separate commands
    cmdargvs = split_mpmd_args(list(args))
    # Create first command; every command shares the caller's umask and wdir
    umask = get_umask()
    argv = [executable] + cmdargvs[0]
    cmds = [dict(argv=argv, nranks=pes, umask=umask, wdir=wdir, depth=depth, ppn=ppn)]
    # Create a parser for each other MPMD command
    parser = argparse.ArgumentParser(prog="", description="MPMD Command Definition")
    parser.add_argument(
        "-n", "--pes", default=1, type=posint, help="number of processes to start"
    )
    parser.add_argument("executable", help="executable to launch")
    # REMAINDER swallows everything after the executable, so -d/-N for a
    # command must appear before its executable to be recognized
    parser.add_argument(
        "args", nargs=argparse.REMAINDER, help="arguments to executable"
    )
    # -d and -N default to the first command's values
    parser.add_argument(
        "-d", "--cpus-per-pe", default=depth, type=posint, help="CPUs per process"
    )
    parser.add_argument(
        "-N", "--pes-per-node", default=ppn, type=posint, help="PEs per compute node"
    )
    # Add other commands
    for cmdargv in cmdargvs[1:]:
        # Parse args for this command
        cmdargs = parser.parse_args(cmdargv)
        # Create MPMD command dict
        argv = [cmdargs.executable] + list(cmdargs.args)
        cmds.append(
            dict(
                argv=argv,
                nranks=cmdargs.pes,
                umask=umask,
                wdir=wdir,
                depth=cmdargs.cpus_per_pe,
                ppn=cmdargs.pes_per_node,
            )
        )
    return cmds
def get_rlimits(memory_per_pe):
    """Get resource limits to transfer to the application.

    CORE and CPU are always sent. With APRUN_XFER_LIMITS=1 every supported
    limit is sent; otherwise RSS is added when -m/--memory-per-pe was given
    and STACK when APRUN_XFER_STACK_LIMIT=1.
    """
    # Check relevant environment variables
    send_all = int(os.environ.get("APRUN_XFER_LIMITS", 0))
    send_stack = int(os.environ.get("APRUN_XFER_STACK_LIMIT", 0))
    # Always send CORE, CPU limits
    limitnames = ["CORE", "CPU"]
    if send_all:
        limitnames += [
            "RSS",
            "STACK",
            "FSIZE",
            "DATA",
            "NPROC",
            "NOFILE",
            "MEMLOCK",
            "AS",
            "LOCKS",
            "SIGPENDING",
            "MSGQUEUE",
            "NICE",
            "RTPRIO",
        ]
    else:
        if memory_per_pe:
            limitnames.append("RSS")
        if send_stack:
            limitnames.append("STACK")
    return get_resource_limits(limitnames)
# aprun-compatible launch command: a thin CLI shim that maps aprun options
# onto a PALS launch request. Many legacy aprun options are accepted but
# ignored (marked "(ignored)" in their help text) for drop-in compatibility.
@core.command(
    name="aprun",
    context_settings={"ignore_unknown_options": True, "allow_interspersed_args": False},
    needs_globals=True,
)
@core.option("-a", "--architecture", help="compute node architecture (ignored)")
@core.option(
    "-b", "--bypass-app-transfer", is_flag=True, help="skip application binary transfer"
)
@core.option(
    "-B",
    "--batch-args",
    is_flag=True,
    help="reuse batch reservation arguments (ignored)",
)
@core.option(
    "-C", "--reconnect", is_flag=True, help="reconnect on node failure (ignored)"
)
@core.option("--cpu-binding", "--cc", help="CPU binding for application")
@core.option("--cpu-binding-file", "--cp", help="specify binding in a file (ignored)")
@core.option(
    "-d", "--cpus-per-pe", default=1, type=click.IntRange(1), help="CPUs per PE"
)
@core.option(
    "-D", "--debug", default=0, type=click.IntRange(0), help="debug level (ignored)"
)
@core.option(
    "-e",
    "--environment-override",
    multiple=True,
    help="set an application environment variable (use VARNAME=value format)",
)
@core.option("-E", "--exclude-node-list", help="exclude a list of nodes from placement")
@core.option(
    "--exclude-node-list-file",
    type=click.File(),
    help="file with list of nodes to exclude",
)
@core.option("-F", "--access-mode", help="exclusive/share access mode")
@core.option("-j", "--cpus-per-cu", help="CPUs per compute unit (ignored)")
@core.option("-L", "--node-list", help="list of nodes for placement")
@core.option(
    "-l",
    "--node-list-file",
    type=click.File(),
    help="file with list of nodes for placement",
)
@core.option(
    "-m", "--memory-per-pe", envvar="APRUN_DEFAULT_MEMORY", help="memory per PE"
)
@core.option(
    "--mpmd-env", multiple=True, help="set an MPMD environment variable (ignored)"
)
@core.option(
    "-n",
    "--pes",
    default=1,
    type=click.IntRange(1),
    help="number of processing elements (PEs)",
)
@core.option(
    "-N",
    "--pes-per-node",
    default=0,
    type=click.IntRange(0),
    help="PEs per compute node",
)
@core.option("-p", "--protection-domain", help="use protection domain (ignored)")
@core.option("--p-governor", help="compute node performance governor (ignored)")
@core.option(
    "--p-state", envvar="APRUN_PSTATE", help="compute node performance state (ignored)"
)
@core.option("-q", "--quiet", "--silent", is_flag=True, help="quiet mode")
@core.option("-r", "--specialized-cpus", help="number of system process CPUs (ignored)")
@core.option("-R", "--relaunch", help="relaunch with fewer ranks on failure (ignored)")
@core.option("-S", "--pes-per-numa-node", help="number of PEs per NUMA node (ignored)")
@core.option(
    "--strict-memory-containment",
    "--ss",
    is_flag=True,
    help="restrict memory to local NUMA node",
)
@core.option(
    "-T",
    "--sync-output",
    envvar="APRUN_SYNC_TTY",
    is_flag=True,
    default=False,
    help="synchronize output",
)
@core.option("--wdir", envvar="APRUN_WDIR", help="application working directory")
@core.option(
    "-z",
    "--zone-sort",
    envvar="APRUN_ZONE_SORT",
    is_flag=True,
    help="memory zone sort at launch (ignored)",
)
@core.option(
    "-Z",
    "--zone-sort-secs",
    envvar="APRUN_ZONE_SORT_SECS",
    help="periodic memory zone sort (ignored)",
)
@core.option(
    "--procinfo-file",
    envvar="APRUN_PROCINFO_FILE",
    help="write application process information to the given file",
)
@core.option(
    "--abort-on-failure/--no-abort-on-failure",
    envvar="APRUN_ABORT_ON_FAILURE",
    is_flag=True,
    default=True,
    help="abort/don't abort entire application if a rank exits with non-zero status",
)
@core.option(
    "--pmi",
    envvar="APRUN_PMI",
    type=click.Choice(["cray", "pmix", "none"], case_sensitive=False),
    default="cray",
    help="Application PMI wire-up method ('cray' default)",
)
@core.option(
    "--sstartup/--no-sstartup",
    default=False,
    help="enable/disable Scalable Start Up",
)
@core.argument("executable")
@core.argument("args", nargs=-1)
def cli(
    architecture,
    bypass_app_transfer,
    batch_args,
    reconnect,
    cpu_binding,
    cpu_binding_file,
    cpus_per_pe,
    debug,
    environment_override,
    exclude_node_list,
    exclude_node_list_file,
    access_mode,
    cpus_per_cu,
    node_list,
    node_list_file,
    memory_per_pe,
    mpmd_env,
    pes,
    pes_per_node,
    protection_domain,
    p_governor,
    p_state,
    quiet,
    specialized_cpus,
    relaunch,
    pes_per_numa_node,
    strict_memory_containment,
    sync_output,
    wdir,
    zone_sort,
    zone_sort_secs,
    procinfo_file,
    abort_on_failure,
    pmi,
    sstartup,
    executable,
    args,
):
    # pylint: disable=unused-argument, too-many-arguments, too-many-locals, redefined-builtin
    """
    Run an application using the Parallel Application Launch Service
    ARGUMENT PROCESSING
    Use -- to separate the executable and its arguments from aprun's arguments.
    For example, use 'cray aprun -n 4 -- a.out -n 2' to launch 4 copies of
    'a.out -n 2'.
    CPU BINDING
    The --cpu-binding option is formatted as <keyword>|<cpu list>.
    The cpu list consists of colon-separated range lists of CPU numbers.
    The first range list will be used for the first PE on each node in the
    application, second range list for the second PE, etc.
    \b
    Keywords:
    * none - No CPU binding.
    * cpu(default) - Bind ranks to a single thread.
    * depth - Bind ranks to -d/--cpus-per-pe threads.
    * numa_node - Bind ranks each thread in its assigned NUMA node.
    * core - Bind ranks to every thread on -d/--cpus-per-pe cores.
    ENVIRONMENT VARIABLES
    \b
    Input Environment Variables:
    * APRUN_WDIR - Default working directory
    * APRUN_SYNC_TTY - Synchronize output
    * APRUN_PROCINFO_FILE - Write application process information to the given file
    * APRUN_ABORT_ON_FAILURE - Whether to abort application on non-zero rank exit
    * APRUN_PMI - Application PMI wire-up setting (cray, pmix, none)
    * APRUN_XFER_LIMITS - If set to 1, transfer all resource limits
    * APRUN_XFER_STACK_LIMIT - If set to 1, transfer stack limit
    * APRUN_LABEL - If set to 1, label output with hostname and rank number
    \b
    Output Environment Variables:
    * ALPS_APP_DEPTH - CPUs per PE
    * ALPS_APP_ID - Application ID
    * ALPS_APP_PE - Rank ID
    """
    # Create a launch request from arguments
    launchreq = {
        "cmds": parse_mpmd(
            executable, args, pes, get_wdir(wdir), cpus_per_pe, pes_per_node
        ),
        "hosts": get_hostlist(
            node_list, node_list_file, exclude_node_list, exclude_node_list_file
        ),
        "ppn": pes_per_node,
        "environment": get_launch_env(environment_override),
        "cpubind": get_cpubind(cpu_binding),
        "membind": get_membind(strict_memory_containment),
        "envalias": APRUN_ENV_ALIAS,
        "abort_on_failure": abort_on_failure,
        "pmi": pmi,
        "rlimits": get_rlimits(memory_per_pe),
    }
    # Add optional settings
    if "PBS_JOBID" in os.environ:
        launchreq["jobid"] = os.environ["PBS_JOBID"]
    excl = get_exclusive(access_mode)
    # NOTE(review): truthiness test means -F share (excl=False) leaves
    # "exclusive" unset rather than sending False -- confirm the PALS
    # default makes these equivalent before changing to `is not None`.
    if excl:
        launchreq["exclusive"] = excl
    if sync_output:
        launchreq["line_buffered"] = True
    if sstartup:
        launchreq["sstartup"] = True
    # APRUN_LABEL=1 prefixes output lines with hostname/rank
    label = int(os.getenv("APRUN_LABEL", "0"))
    # Make the launch request
    try:
        app = PALSApp()
        exit_codes = app.launch(
            launchreq, not bypass_app_transfer, label, procinfo_file
        )
    except click.UsageError as err:
        # A usage error here usually means the system was switched to
        # direct launch; give the user a pointer before re-raising.
        echo(
            "Note: PALS may have been reconfigured for direct launch on this system.\n"
            "To switch, load the 'cray-pals' module and replace 'cray aprun' with 'aprun'\n",
            level=LOG_WARN,
        )
        raise err
    # Print exit code summary (4 highest nonzero exit codes)
    exit_codes.discard(0)
    if exit_codes:
        codelist = ", ".join([str(code) for code in sorted(exit_codes)[-4:]])
        click.echo("Application %s exit codes: %s" % (app.apid, codelist))
        exit_code = max(exit_codes)
    else:
        exit_code = 0
    sys.exit(exit_code)
# Since this API/CLI is deprecated, hide from the main help message
cli.hidden = True
|
"""
cli.py - aprun PALS CLI
MIT License
(C) Copyright [2020-2022] Hewlett Packard Enterprise Development LP
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import base64
import os
import sys
import click
from cray import core
from cray.echo import echo, LOG_WARN
from cray.pals import PALSApp, split_mpmd_args, get_resource_limits, parse_hostfile
APRUN_ENV_ALIAS = {
"ALPS_APP_DEPTH": "PALS_DEPTH",
"ALPS_APP_ID": "PALS_APID",
"ALPS_APP_PE": "PALS_RANKID",
}
def parse_rangelist(rli):
"""Parse a range list into a list of integers"""
try:
mylist = []
for nidrange in rli.split(","):
startstr, sep, endstr = nidrange.partition("-")
start = int(startstr, 0)
if sep:
end = int(endstr, 0)
if end < start:
mylist.extend(range(start, end - 1, -1))
else:
mylist.extend(range(start, end + 1))
else:
mylist.append(start)
except ValueError:
# pylint: disable=raise-missing-from
raise click.ClickException("Invalid range list %s" % rli)
return mylist
def parse_rangelist_file(rlif):
"""Parse a file containing rangelists into a list of integers"""
mylist = []
for line in rlif:
line = line.strip()
if line and line[0] != "#":
mylist.extend(parse_rangelist(line))
return mylist
def nids_to_hosts(nidlist):
"""Convert a list of integer nids to a list of hostnames"""
return ["nid%06d" % nid for nid in nidlist]
def get_hostlist(node_list, node_list_file, exclude_node_list, exclude_node_list_file):
"""Given command-line arguments, produce a host list"""
nodelist = []
excllist = []
# Build node list from command line arguments
if node_list:
nodelist = nids_to_hosts(parse_rangelist(node_list))
elif node_list_file:
nodelist = nids_to_hosts(parse_rangelist_file(node_list_file))
elif "PBS_NODEFILE" in os.environ:
with open(os.environ["PBS_NODEFILE"], encoding="utf-8") as nodefile:
nodelist = parse_hostfile(nodefile)
# Build exclude node list from command line arguments
if exclude_node_list:
excllist = nids_to_hosts(parse_rangelist(exclude_node_list))
elif exclude_node_list_file:
excllist = nids_to_hosts(parse_rangelist_file(exclude_node_list_file))
# Remove excluded nodes from host list
hostlist = [node for node in nodelist if node not in excllist]
# Check list before returning
if not hostlist:
raise click.ClickException("No host list provided")
return hostlist
def get_launch_env(environment_override, environ=None):
"""Given command line arguments, build up the environment array"""
# Copy the environment to avoid modifying the original
if environ is None:
environ = os.environ.copy()
else:
environ = environ.copy()
# Override specified environment variables
if environment_override:
for envvar in environment_override:
key, sep, val = envvar.partition("=")
if not sep:
raise click.ClickException("Invalid environment variable %s" % envvar)
environ[key] = val
# Format into array in the expected format
return ["%s=%s" % (key, val) for key, val in environ.items()]
def get_umask():
"""Return the current umask value"""
umask = os.umask(0)
os.umask(umask)
return umask
def get_wdir(wdir):
"""Get the current working directory to use for the launch"""
# If user provided a wdir through argument or env var, use that
if wdir:
return wdir
# Otherwise, attempt to get our cwd
# aprun treated this as a fatal error, so we do too
try:
return os.getcwd()
except OSError as err:
raise click.ClickException("getcwd failed: %s" % str(err))
def get_cpubind(cpu_binding):
"""Convert aprun-style CPU binding to PALS-style"""
# First check for keywords
if not cpu_binding or cpu_binding == "cpu":
return "thread"
if cpu_binding == "depth":
return "depth"
if cpu_binding == "numa_node":
return "numa"
if cpu_binding == "none":
return "none"
if cpu_binding == "core":
return "core"
# If not a keyword, it's colon-separated rangelists
return "list:%s" % cpu_binding
def get_membind(strict_memory_containment):
"""Get memory binding to use"""
if strict_memory_containment:
return "local"
return "none"
def get_exclusive(access_mode):
"""Get exclusive setting from -F [exclusive|share] option"""
# aprun only checked for e/E (exclusive) or s/S (share)
if not access_mode:
return None
if access_mode[0].lower() == "e":
return True
if access_mode[0].lower() == "s":
return False
raise click.ClickException("Invalid -F/--access-mode argument %s" % access_mode)
def print_output(params, a_file):
"""Print output from a stdout/stderr RPC to the given file"""
content = params.get("content")
if not content:
return
# If encoded in base64, decode it before printing
encoding = params.get("encoding")
if encoding == "base64":
content = base64.b64decode(content)
click.echo(content, nl=False, file=a_file)
def get_argv(executable, args, bypass_app_transfer):
"""
Get the application argv
"""
if bypass_app_transfer:
argv0 = executable
else:
argv0 = os.path.basename(executable)
return [argv0] + list(args)
def posint(val):
"""Parse a string into a positive integer"""
ival = int(val)
if ival <= 0:
raise argparse.ArgumentTypeError("%s must be positive" % val)
return ival
def parse_mpmd(executable, args, pes, wdir, depth, ppn):
"""Parse MPMD commands from the given arguments"""
# Split into separate commands
cmdargvs = split_mpmd_args(list(args))
# Create first command
umask = get_umask()
argv = [executable] + cmdargvs[0]
cmds = [dict(argv=argv, nranks=pes, umask=umask, wdir=wdir, depth=depth, ppn=ppn)]
# Create a parser for each other MPMD command
parser = argparse.ArgumentParser(prog="", description="MPMD Command Definition")
parser.add_argument(
"-n", "--pes", default=1, type=posint, help="number of processes to start"
)
parser.add_argument("executable", help="executable to launch")
parser.add_argument(
"args", nargs=argparse.REMAINDER, help="arguments to executable"
)
parser.add_argument(
"-d", "--cpus-per-pe", default=depth, type=posint, help="CPUs per process"
)
parser.add_argument(
"-N", "--pes-per-node", default=ppn, type=posint, help="PEs per compute node"
)
# Add other commands
for cmdargv in cmdargvs[1:]:
# Parse args for this command
cmdargs = parser.parse_args(cmdargv)
# Create MPMD command dict
argv = [cmdargs.executable] + list(cmdargs.args)
cmds.append(
dict(
argv=argv,
nranks=cmdargs.pes,
umask=umask,
wdir=wdir,
depth=cmdargs.cpus_per_pe,
ppn=cmdargs.pes_per_node,
)
)
return cmds
def get_rlimits(memory_per_pe):
"""Get resource limits to transfer to application"""
# Check relevant environment variables
send_limits = int(os.environ.get("APRUN_XFER_LIMITS", 0))
stack_limit = int(os.environ.get("APRUN_XFER_STACK_LIMIT", 0))
# Always send CORE, CPU limits
limitnames = ["CORE", "CPU"]
if send_limits:
limitnames.extend(
[
"RSS",
"STACK",
"FSIZE",
"DATA",
"NPROC",
"NOFILE",
"MEMLOCK",
"AS",
"LOCKS",
"SIGPENDING",
"MSGQUEUE",
"NICE",
"RTPRIO",
]
)
else:
if memory_per_pe:
limitnames.append("RSS")
if stack_limit:
limitnames.append("STACK")
return get_resource_limits(limitnames)
@core.command(
name="aprun",
context_settings={"ignore_unknown_options": True, "allow_interspersed_args": False},
needs_globals=True,
)
@core.option("-a", "--architecture", help="compute node architecture (ignored)")
@core.option(
"-b", "--bypass-app-transfer", is_flag=True, help="skip application binary transfer"
)
@core.option(
"-B",
"--batch-args",
is_flag=True,
help="reuse batch reservation arguments (ignored)",
)
@core.option(
"-C", "--reconnect", is_flag=True, help="reconnect on node failure (ignored)"
)
@core.option("--cpu-binding", "--cc", help="CPU binding for application")
@core.option("--cpu-binding-file", "--cp", help="specify binding in a file (ignored)")
@core.option(
"-d", "--cpus-per-pe", default=1, type=click.IntRange(1), help="CPUs per PE"
)
@core.option(
"-D", "--debug", default=0, type=click.IntRange(0), help="debug level (ignored)"
)
@core.option(
"-e",
"--environment-override",
multiple=True,
help="set an application environment variable (use VARNAME=value format)",
)
@core.option("-E", "--exclude-node-list", help="exclude a list of nodes from placement")
@core.option(
"--exclude-node-list-file",
type=click.File(),
help="file with list of nodes to exclude",
)
@core.option("-F", "--access-mode", help="exclusive/share access mode")
@core.option("-j", "--cpus-per-cu", help="CPUs per compute unit (ignored)")
@core.option("-L", "--node-list", help="list of nodes for placement")
@core.option(
"-l",
"--node-list-file",
type=click.File(),
help="file with list of nodes for placement",
)
@core.option(
"-m", "--memory-per-pe", envvar="APRUN_DEFAULT_MEMORY", help="memory per PE"
)
@core.option(
"--mpmd-env", multiple=True, help="set an MPMD environment variable (ignored)"
)
@core.option(
"-n",
"--pes",
default=1,
type=click.IntRange(1),
help="number of processing elements (PEs)",
)
@core.option(
"-N",
"--pes-per-node",
default=0,
type=click.IntRange(0),
help="PEs per compute node",
)
@core.option("-p", "--protection-domain", help="use protection domain (ignored)")
@core.option("--p-governor", help="compute node performance governor (ignored)")
@core.option(
"--p-state", envvar="APRUN_PSTATE", help="compute node performance state (ignored)"
)
@core.option("-q", "--quiet", "--silent", is_flag=True, help="quiet mode")
@core.option("-r", "--specialized-cpus", help="number of system process CPUs (ignored)")
@core.option("-R", "--relaunch", help="relaunch with fewer ranks on failure (ignored)")
@core.option("-S", "--pes-per-numa-node", help="number of PEs per NUMA node (ignored)")
@core.option(
"--strict-memory-containment",
"--ss",
is_flag=True,
help="restrict memory to local NUMA node",
)
@core.option(
"-T",
"--sync-output",
envvar="APRUN_SYNC_TTY",
is_flag=True,
default=False,
help="synchronize output",
)
@core.option("--wdir", envvar="APRUN_WDIR", help="application working directory")
@core.option(
"-z",
"--zone-sort",
envvar="APRUN_ZONE_SORT",
is_flag=True,
help="memory zone sort at launch (ignored)",
)
@core.option(
"-Z",
"--zone-sort-secs",
envvar="APRUN_ZONE_SORT_SECS",
help="periodic memory zone sort (ignored)",
)
@core.option(
"--procinfo-file",
envvar="APRUN_PROCINFO_FILE",
help="write application process information to the given file",
)
@core.option(
"--abort-on-failure/--no-abort-on-failure",
envvar="APRUN_ABORT_ON_FAILURE",
is_flag=True,
default=True,
help="abort/don't abort entire application if a rank exits with non-zero status",
)
@core.option(
"--pmi",
envvar="APRUN_PMI",
type=click.Choice(["cray", "pmix", "none"], case_sensitive=False),
default="cray",
help="Application PMI wire-up method ('cray' default)",
)
@core.option(
"--sstartup/--no-sstartup",
default=False,
help="enable/disable Scalable Start Up",
)
@core.argument("executable")
@core.argument("args", nargs=-1)
def cli(
architecture,
bypass_app_transfer,
batch_args,
reconnect,
cpu_binding,
cpu_binding_file,
cpus_per_pe,
debug,
environment_override,
exclude_node_list,
exclude_node_list_file,
access_mode,
cpus_per_cu,
node_list,
node_list_file,
memory_per_pe,
mpmd_env,
pes,
pes_per_node,
protection_domain,
p_governor,
p_state,
quiet,
specialized_cpus,
relaunch,
pes_per_numa_node,
strict_memory_containment,
sync_output,
wdir,
zone_sort,
zone_sort_secs,
procinfo_file,
abort_on_failure,
pmi,
sstartup,
executable,
args,
):
# pylint: disable=unused-argument, too-many-arguments, too-many-locals, redefined-builtin
"""
Run an application using the Parallel Application Launch Service
ARGUMENT PROCESSING
Use -- to separate the executable and its arguments from aprun's arguments.
For example, use 'cray aprun -n 4 -- a.out -n 2' to launch 4 copies of
'a.out -n 2'.
CPU BINDING
The --cpu-binding option is formatted as <keyword>|<cpu list>.
The cpu list consists of colon-separated range lists of CPU numbers.
The first range list will be used for the first PE on each node in the
application, second range list for the second PE, etc.
\b
Keywords:
* none - No CPU binding.
* cpu(default) - Bind ranks to a single thread.
* depth - Bind ranks to -d/--cpus-per-pe threads.
* numa_node - Bind ranks each thread in its assigned NUMA node.
* core - Bind ranks to every thread on -d/--cpus-per-pe cores.
ENVIRONMENT VARIABLES
\b
Input Environment Variables:
* APRUN_WDIR - Default working directory
* APRUN_SYNC_TTY - Synchronize output
* APRUN_PROCINFO_FILE - Write application process information to the given file
* APRUN_ABORT_ON_FAILURE - Whether to abort application on non-zero rank exit
* APRUN_PMI - Application PMI wire-up setting (cray, pmix, none)
* APRUN_XFER_LIMITS - If set to 1, transfer all resource limits
* APRUN_XFER_STACK_LIMIT - If set to 1, transfer stack limit
* APRUN_LABEL - If set to 1, label output with hostname and rank number
\b
Output Environment Variables:
* ALPS_APP_DEPTH - CPUs per PE
* ALPS_APP_ID - Application ID
* ALPS_APP_PE - Rank ID
"""
# Create a launch request from arguments
launchreq = {
"cmds": parse_mpmd(
executable, args, pes, get_wdir(wdir), cpus_per_pe, pes_per_node
),
"hosts": get_hostlist(
node_list, node_list_file, exclude_node_list, exclude_node_list_file
),
"ppn": pes_per_node,
"environment": get_launch_env(environment_override),
"cpubind": get_cpubind(cpu_binding),
"membind": get_membind(strict_memory_containment),
"envalias": APRUN_ENV_ALIAS,
"abort_on_failure": abort_on_failure,
"pmi": pmi,
"rlimits": get_rlimits(memory_per_pe),
}
# Add optional settings
if "PBS_JOBID" in os.environ:
launchreq["jobid"] = os.environ["PBS_JOBID"]
excl = get_exclusive(access_mode)
if excl:
launchreq["exclusive"] = excl
if sync_output:
launchreq["line_buffered"] = True
if sstartup:
launchreq["sstartup"] = True
label = int(os.getenv("APRUN_LABEL", "0"))
# Make the launch request
try:
app = PALSApp()
exit_codes = app.launch(
launchreq, not bypass_app_transfer, label, procinfo_file
)
except click.UsageError as err:
echo(
"Note: PALS may have been reconfigured for direct launch on this system.\n"
"To switch, load the 'cray-pals' module and replace 'cray aprun' with 'aprun'\n",
level=LOG_WARN,
)
raise err
# Print exit code summary (4 highest nonzero exit codes)
exit_codes.discard(0)
if exit_codes:
codelist = ", ".join([str(code) for code in sorted(exit_codes)[-4:]])
click.echo("Application %s exit codes: %s" % (app.apid, codelist))
exit_code = max(exit_codes)
else:
exit_code = 0
sys.exit(exit_code)
# Since this API/CLI is deprecated, hide from the main help message
cli.hidden = True
|
en
| 0.704792
|
cli.py - aprun PALS CLI MIT License (C) Copyright [2020-2022] Hewlett Packard Enterprise Development LP Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
Parse a range list into a list of integers # pylint: disable=raise-missing-from Parse a file containing rangelists into a list of integers Convert a list of integer nids to a list of hostnames Given command-line arguments, produce a host list # Build node list from command line arguments # Build exclude node list from command line arguments # Remove excluded nodes from host list # Check list before returning Given command line arguments, build up the environment array # Copy the environment to avoid modifying the original # Override specified environment variables # Format into array in the expected format Return the current umask value Get the current working directory to use for the launch # If user provided a wdir through argument or env var, use that # Otherwise, attempt to get our cwd # aprun treated this as a fatal error, so we do too Convert aprun-style CPU binding to PALS-style # First check for keywords # If not a keyword, it's colon-separated rangelists Get memory binding to use Get exclusive setting from -F [exclusive|share] option # aprun only checked for e/E (exclusive) or s/S (share) Print output from a stdout/stderr RPC to the given file # If encoded in base64, decode it before printing Get the application argv Parse a string into a positive integer Parse MPMD commands from the given arguments # Split into separate commands # Create first command # Create a parser for each other MPMD command # Add other commands # Parse args for this command # Create MPMD command dict Get resource limits to transfer to application # Check relevant environment variables # Always send CORE, CPU limits # pylint: disable=unused-argument, too-many-arguments, too-many-locals, redefined-builtin Run an application using the Parallel Application Launch Service ARGUMENT PROCESSING Use -- to separate the executable and its arguments from aprun's arguments. For example, use 'cray aprun -n 4 -- a.out -n 2' to launch 4 copies of 'a.out -n 2'. 
CPU BINDING The --cpu-binding option is formatted as <keyword>|<cpu list>. The cpu list consists of colon-separated range lists of CPU numbers. The first range list will be used for the first PE on each node in the application, second range list for the second PE, etc. \b Keywords: * none - No CPU binding. * cpu(default) - Bind ranks to a single thread. * depth - Bind ranks to -d/--cpus-per-pe threads. * numa_node - Bind ranks each thread in its assigned NUMA node. * core - Bind ranks to every thread on -d/--cpus-per-pe cores. ENVIRONMENT VARIABLES \b Input Environment Variables: * APRUN_WDIR - Default working directory * APRUN_SYNC_TTY - Synchronize output * APRUN_PROCINFO_FILE - Write application process information to the given file * APRUN_ABORT_ON_FAILURE - Whether to abort application on non-zero rank exit * APRUN_PMI - Application PMI wire-up setting (cray, pmix, none) * APRUN_XFER_LIMITS - If set to 1, transfer all resource limits * APRUN_XFER_STACK_LIMIT - If set to 1, transfer stack limit * APRUN_LABEL - If set to 1, label output with hostname and rank number \b Output Environment Variables: * ALPS_APP_DEPTH - CPUs per PE * ALPS_APP_ID - Application ID * ALPS_APP_PE - Rank ID # Create a launch request from arguments # Add optional settings # Make the launch request # Print exit code summary (4 highest nonzero exit codes) # Since this API/CLI is deprecated, hide from the main help message
| 2.154513
| 2
|
design_pattern/creational/abstract_factory-example1/__version__.py
|
kannandreams/python-advance-concepts
| 2
|
6626953
|
VERSION = (0,1,1)
__version__ = '.'.join(map(str, VERSION))
|
VERSION = (0,1,1)
__version__ = '.'.join(map(str, VERSION))
|
none
| 1
| 1.71085
| 2
|
|
src/spyd/punitive_effects/punitive_model.py
|
DanSeraf/spyd
| 0
|
6626954
|
<gh_stars>0
from spyd.utils.net import dottedQuadToLong, simpleMaskedIpToLongIpAndMask
class PunitiveModel(object):
    """Registry of punitive effects (e.g. bans) applied to masked IP ranges.

    Layout: effect_type -> {mask: {masked_ip: effect_info}}, with mask and
    masked_ip held as long integers so a lookup only needs to AND the client
    IP with each distinct netmask once per effect type.
    """
    def __init__(self):
        # Initialise a per-instance store via clear_effects(); keeping the
        # store off the class avoids instances sharing one mutable dict
        # (the original declared a class-level punitive_effects = {}).
        self.clear_effects()
    def clear_effects(self, effect_type=None):
        """Drop every stored effect, or only those of *effect_type*."""
        if effect_type is None:
            self.punitive_effects = {}
        else:
            self.punitive_effects[effect_type] = {}
    def get_effect(self, effect_type, client_ip):
        """Return the effect matching *client_ip* (dotted quad), or None."""
        client_ip = dottedQuadToLong(client_ip)
        effects_of_type = self.punitive_effects.get(effect_type, {})
        for mask, effects in effects_of_type.items():
            masked_ip = mask & client_ip
            if masked_ip in effects:
                return effects[masked_ip]
        return None
    def add_effect(self, effect_type, effect_desc, effect_info):
        """Store *effect_info* for the IP range described by *effect_desc*.

        *effect_desc* is either an (ip, mask) tuple -- dotted-quad strings
        or long integers -- or a simple masked-IP string handled by
        simpleMaskedIpToLongIpAndMask.
        """
        if isinstance(effect_desc, tuple):
            if isinstance(effect_desc[0], str):
                # Dotted-quad pair: convert both ip and mask to longs.
                effect_desc = list(map(dottedQuadToLong, effect_desc))
            long_ip, long_mask = effect_desc
        else:
            long_ip, long_mask = simpleMaskedIpToLongIpAndMask(effect_desc)
        # setdefault replaces the original two-step "if key not in" inits.
        by_mask = self.punitive_effects.setdefault(effect_type, {})
        by_mask.setdefault(long_mask, {})[long_ip & long_mask] = effect_info
|
from spyd.utils.net import dottedQuadToLong, simpleMaskedIpToLongIpAndMask
class PunitiveModel(object):
    """Keeps punitive effects indexed by type, then netmask, then masked IP."""
    # effect_type: {mask: {masked_ip: effect_info}}
    punitive_effects = {}
    def __init__(self):
        self.clear_effects()
    def clear_effects(self, effect_type=None):
        """Forget all effects, or just those registered under *effect_type*."""
        if effect_type is None:
            self.punitive_effects = {}
            return
        self.punitive_effects[effect_type] = {}
    def get_effect(self, effect_type, client_ip):
        """Look up the effect covering *client_ip* (dotted quad); None if absent."""
        ip_long = dottedQuadToLong(client_ip)
        per_mask = self.punitive_effects.get(effect_type, {})
        for netmask, bucket in per_mask.items():
            candidate = netmask & ip_long
            if candidate in bucket:
                return bucket[candidate]
        return None
    def add_effect(self, effect_type, effect_desc, effect_info):
        """Register *effect_info* for the IP range described by *effect_desc*."""
        if type(effect_desc) is tuple:
            if type(effect_desc[0]) == str:
                effect_desc = list(map(dottedQuadToLong, effect_desc))
            long_ip, long_mask = effect_desc
        else:
            long_ip, long_mask = simpleMaskedIpToLongIpAndMask(effect_desc)
        per_mask = self.punitive_effects.setdefault(effect_type, {})
        per_mask.setdefault(long_mask, {})[long_ip & long_mask] = effect_info
|
en
| 0.866416
|
# effect_type: {mask: {masked_ip: effect_info}}
| 2.261188
| 2
|
ingestion/src/metadata/ingestion/api/sink.py
|
rongfengliang/OpenMetadata
| 0
|
6626955
|
<gh_stars>0
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass, field
from typing import Any, List
from .closeable import Closeable
from .common import Record, WorkflowContext
from .status import Status
@dataclass
class SinkStatus(Status):
    """Running status of a Sink: written records plus collected warnings/failures."""
    # Identifiers of records the sink has written successfully.
    records: List[str] = field(default_factory=list)
    # Diagnostic payloads for non-fatal issues (shape defined by callers).
    warnings: List[Any] = field(default_factory=list)
    # Diagnostic payloads for failed writes (shape defined by callers).
    failures: List[Any] = field(default_factory=list)
    def records_written(self, record: str) -> None:
        """Record that *record* was written successfully."""
        self.records.append(record)
    def warning(self, info: Any) -> None:
        """Append a non-fatal warning payload."""
        self.warnings.append(info)
    def failure(self, info: Any) -> None:
        """Append a failure payload."""
        self.failures.append(info)
@dataclass  # type: ignore[misc]
class Sink(Closeable, metaclass=ABCMeta):
    """All Sinks must inherit this base class.

    A Sink receives Records from the ingestion workflow, persists them, and
    accumulates a SinkStatus reported via get_status().
    """
    # Workflow context shared with the other stages of the ingestion run.
    ctx: WorkflowContext
    @classmethod
    @abstractmethod
    def create(
        cls, config_dict: dict, metadata_config_dict: dict, ctx: WorkflowContext
    ) -> "Sink":
        """Build a Sink from raw config dictionaries and the workflow context."""
        pass
    @abstractmethod
    def write_record(self, record: Record) -> None:
        """Persist a single record."""
        # must call callback when done.
        pass
    @abstractmethod
    def get_status(self) -> SinkStatus:
        """Return the accumulated write status."""
        pass
    @abstractmethod
    def close(self) -> None:
        """Release any resources held by the sink."""
        pass
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass, field
from typing import Any, List
from .closeable import Closeable
from .common import Record, WorkflowContext
from .status import Status
@dataclass
class SinkStatus(Status):
records: List[str] = field(default_factory=list)
warnings: List[Any] = field(default_factory=list)
failures: List[Any] = field(default_factory=list)
def records_written(self, record: str) -> None:
self.records.append(record)
def warning(self, info: Any) -> None:
self.warnings.append(info)
def failure(self, info: Any) -> None:
self.failures.append(info)
@dataclass # type: ignore[misc]
class Sink(Closeable, metaclass=ABCMeta):
"""All Sinks must inherit this base class."""
ctx: WorkflowContext
@classmethod
@abstractmethod
def create(
cls, config_dict: dict, metadata_config_dict: dict, ctx: WorkflowContext
) -> "Sink":
pass
@abstractmethod
def write_record(self, record: Record) -> None:
# must call callback when done.
pass
@abstractmethod
def get_status(self) -> SinkStatus:
pass
@abstractmethod
def close(self) -> None:
pass
|
en
| 0.857232
|
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # type: ignore[misc] All Sinks must inherit this base class. # must call callback when done.
| 2.00289
| 2
|
aki/aki.py
|
Obi-Wan3/phen-cogs
| 0
|
6626956
|
"""
MIT License
Copyright (c) 2020-2021 phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import akinator
from akinator.async_aki import Akinator
import discord
from redbot.core import commands
from redbot.core.bot import Red
from redbot.core.config import Config
from redbot.vendored.discord.ext import menus
import logging
log = logging.getLogger("red.phenom4n4n.aki")
class AkiMenu(menus.Menu):
    """Reaction-driven menu running one Akinator session in a Discord channel.

    Each reaction button answers the current question; the menu edits its own
    message in place with the next question or the final guess.
    """
    def __init__(self, game: Akinator, color: discord.Color):
        # game: an already-started async Akinator session.
        self.aki = game
        self.color = color
        # 1-based question counter shown in the embed title.
        self.num = 1
        self.message = None
        super().__init__(timeout=60, delete_message_after=False, clear_reactions_after=True)
    async def send_initial_message(self, ctx: commands.Context, channel: discord.TextChannel):
        """Send the first question embed (menus.Menu hook)."""
        return await channel.send(embed=self.current_question_embed())
    @menus.button("✅")
    async def yes(self, payload: discord.RawReactionActionEvent):
        """Answer 'yes' to the current question."""
        self.num += 1
        await self.answer("yes")
        await self.send_current_question()
    @menus.button("❎")
    async def no(self, payload: discord.RawReactionActionEvent):
        """Answer 'no' to the current question."""
        self.num += 1
        await self.answer("no")
        await self.send_current_question()
    @menus.button("❔")
    async def idk(self, payload: discord.RawReactionActionEvent):
        """Answer 'i don't know' to the current question."""
        self.num += 1
        await self.answer("idk")
        await self.send_current_question()
    @menus.button("📉")
    async def probably(self, payload: discord.RawReactionActionEvent):
        """Answer 'probably' to the current question."""
        self.num += 1
        await self.answer("probably")
        await self.send_current_question()
    @menus.button("📈")
    async def probably_not(self, payload: discord.RawReactionActionEvent):
        """Answer 'probably not' to the current question."""
        self.num += 1
        await self.answer("probably not")
        await self.send_current_question()
    @menus.button("🔙")
    async def back(self, payload: discord.RawReactionActionEvent):
        """Undo the previous answer, if the session allows going back."""
        try:
            await self.aki.back()
        except akinator.CantGoBackAnyFurther:
            await self.ctx.send(
                "You can't go back on the first question, try a different option instead.",
                delete_after=10,
            )
        else:
            self.num -= 1
            await self.send_current_question()
    @menus.button("🏆")
    async def react_win(self, payload: discord.RawReactionActionEvent):
        """Force Akinator to guess now."""
        await self.win()
    @menus.button("🗑️")
    async def end(self, payload: discord.RawReactionActionEvent):
        """Cancel the game."""
        await self.cancel()
    def current_question_embed(self):
        """Build the embed for the current question and progress footer."""
        e = discord.Embed(
            color=self.color,
            title=f"Question #{self.num}",
            description=self.aki.question,
        )
        if self.aki.progression > 0:
            e.set_footer(text=f"{round(self.aki.progression, 2)}% guessed")
        return e
    async def win(self):
        """Show Akinator's best guess and stop the menu."""
        winner = await self.aki.win()
        win_embed = discord.Embed(
            color=self.color,
            title=f"I'm {round(float(winner['proba']) * 100)}% sure it's {winner['name']}!",
            description=winner["description"],
        )
        win_embed.set_image(url=winner["absolute_picture_path"])
        await self.edit_or_send(embed=win_embed)
        self.stop()
    # TODO allow for continuation of game
    async def send_current_question(self):
        """Edit the menu message with the next question, or guess at >= 80% progress."""
        if self.aki.progression < 80:
            try:
                await self.message.edit(embed=self.current_question_embed())
            except discord.HTTPException:
                await self.cancel()
        else:
            await self.win()
    async def finalize(self, timed_out: bool):
        """menus.Menu hook: announce a timeout when the menu expires."""
        if timed_out:
            await self.edit_or_send(content="Akinator game timed out.", embed=None)
    async def cancel(self):
        """Announce cancellation and stop the menu."""
        await self.edit_or_send(content="Akinator game cancelled.", embed=None)
        self.stop()
    async def edit_or_send(self, **kwargs):
        """Edit the menu message, falling back to a new message if it was deleted."""
        try:
            await self.message.edit(**kwargs)
        except discord.NotFound:
            await self.ctx.send(**kwargs)
        except discord.Forbidden:
            # Lost permission to speak in the channel; nothing we can do.
            pass
    async def answer(self, message: str):
        """Submit *message* as the answer; guess when out of questions, stop on error."""
        try:
            await self.aki.answer(message)
        except akinator.AkiNoQuestions:
            await self.win()
        except Exception as error:
            log.exception(
                f"Encountered an exception while answering with {message} during Akinator session",
                exc_info=True,
            )
            await self.edit_or_send(content=f"Akinator game errored out:\n`{error}`", embed=None)
            self.stop()
class Aki(commands.Cog):
    """
    Play Akinator in Discord!
    """
    def __init__(self, bot: Red) -> None:
        self.bot = bot
        # Config is registered but no schema is declared here; kept for
        # forward compatibility with future settings.
        self.config = Config.get_conf(
            self,
            identifier=8237578807127857,
            force_registration=True,
        )
    async def red_delete_data_for_user(self, *, requester: str, user_id: int) -> None:
        # This cog stores no end-user data, so there is nothing to delete.
        return
    @commands.max_concurrency(1, commands.BucketType.channel)
    @commands.bot_has_permissions(embed_links=True, add_reactions=True)
    @commands.command()
    async def aki(self, ctx: commands.Context, *, language: str.lower = "en"):
        """
        Start a game of Akinator!

        Controls:
        > ✅ : yes
        > ❎ : no
        > ❔ : i don't know
        > 📉 : probably
        > 📈 : probably not
        > 🔙 : back
        > 🏆 : win
        > 🗑️ : cancel
        """
        await ctx.trigger_typing()
        aki = Akinator()
        try:
            # Akinator expects underscores in multi-word language codes.
            await aki.start_game(language=language.replace(" ", "_"))
        except akinator.InvalidLanguageError:
            await ctx.send(
                "Invalid language. Refer here to view valid languages.\n<https://github.com/NinjaSnail1080/akinator.py#functions>"
            )
        except Exception:
            # Best-effort: any startup failure is reported as a server issue.
            await ctx.send("I encountered an error while connecting to the Akinator servers.")
        else:
            menu = AkiMenu(aki, await ctx.embed_color())
            await menu.start(ctx)
|
"""
MIT License
Copyright (c) 2020-2021 phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import akinator
from akinator.async_aki import Akinator
import discord
from redbot.core import commands
from redbot.core.bot import Red
from redbot.core.config import Config
from redbot.vendored.discord.ext import menus
import logging
log = logging.getLogger("red.phenom4n4n.aki")
class AkiMenu(menus.Menu):
def __init__(self, game: Akinator, color: discord.Color):
self.aki = game
self.color = color
self.num = 1
self.message = None
super().__init__(timeout=60, delete_message_after=False, clear_reactions_after=True)
async def send_initial_message(self, ctx: commands.Context, channel: discord.TextChannel):
return await channel.send(embed=self.current_question_embed())
@menus.button("✅")
async def yes(self, payload: discord.RawReactionActionEvent):
self.num += 1
await self.answer("yes")
await self.send_current_question()
@menus.button("❎")
async def no(self, payload: discord.RawReactionActionEvent):
self.num += 1
await self.answer("no")
await self.send_current_question()
@menus.button("❔")
async def idk(self, payload: discord.RawReactionActionEvent):
self.num += 1
await self.answer("idk")
await self.send_current_question()
@menus.button("📉")
async def probably(self, payload: discord.RawReactionActionEvent):
self.num += 1
await self.answer("probably")
await self.send_current_question()
@menus.button("📈")
async def probably_not(self, payload: discord.RawReactionActionEvent):
self.num += 1
await self.answer("probably not")
await self.send_current_question()
@menus.button("🔙")
async def back(self, payload: discord.RawReactionActionEvent):
try:
await self.aki.back()
except akinator.CantGoBackAnyFurther:
await self.ctx.send(
"You can't go back on the first question, try a different option instead.",
delete_after=10,
)
else:
self.num -= 1
await self.send_current_question()
@menus.button("🏆")
async def react_win(self, payload: discord.RawReactionActionEvent):
await self.win()
@menus.button("🗑️")
async def end(self, payload: discord.RawReactionActionEvent):
await self.cancel()
def current_question_embed(self):
e = discord.Embed(
color=self.color,
title=f"Question #{self.num}",
description=self.aki.question,
)
if self.aki.progression > 0:
e.set_footer(text=f"{round(self.aki.progression, 2)}% guessed")
return e
async def win(self):
winner = await self.aki.win()
win_embed = discord.Embed(
color=self.color,
title=f"I'm {round(float(winner['proba']) * 100)}% sure it's {winner['name']}!",
description=winner["description"],
)
win_embed.set_image(url=winner["absolute_picture_path"])
await self.edit_or_send(embed=win_embed)
self.stop()
# TODO allow for continuation of game
async def send_current_question(self):
if self.aki.progression < 80:
try:
await self.message.edit(embed=self.current_question_embed())
except discord.HTTPException:
await self.cancel()
else:
await self.win()
async def finalize(self, timed_out: bool):
if timed_out:
await self.edit_or_send(content="Akinator game timed out.", embed=None)
async def cancel(self):
await self.edit_or_send(content="Akinator game cancelled.", embed=None)
self.stop()
async def edit_or_send(self, **kwargs):
try:
await self.message.edit(**kwargs)
except discord.NotFound:
await self.ctx.send(**kwargs)
except discord.Forbidden:
pass
async def answer(self, message: str):
try:
await self.aki.answer(message)
except akinator.AkiNoQuestions:
await self.win()
except Exception as error:
log.exception(
f"Encountered an exception while answering with {message} during Akinator session",
exc_info=True,
)
await self.edit_or_send(content=f"Akinator game errored out:\n`{error}`", embed=None)
self.stop()
class Aki(commands.Cog):
"""
Play Akinator in Discord!
"""
def __init__(self, bot: Red) -> None:
self.bot = bot
self.config = Config.get_conf(
self,
identifier=8237578807127857,
force_registration=True,
)
async def red_delete_data_for_user(self, *, requester: str, user_id: int) -> None:
return
@commands.max_concurrency(1, commands.BucketType.channel)
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
@commands.command()
async def aki(self, ctx: commands.Context, *, language: str.lower = "en"):
"""
Start a game of Akinator!
Controls:
> ✅ : yes
> ❎ : no
> ❔ : i don't know
> 📉 : probably
> 📈 : probably not
> 🔙 : back
> 🏆 : win
> 🗑️ : cancel
"""
await ctx.trigger_typing()
aki = Akinator()
try:
await aki.start_game(language=language.replace(" ", "_"))
except akinator.InvalidLanguageError:
await ctx.send(
"Invalid language. Refer here to view valid languages.\n<https://github.com/NinjaSnail1080/akinator.py#functions>"
)
except Exception:
await ctx.send("I encountered an error while connecting to the Akinator servers.")
else:
menu = AkiMenu(aki, await ctx.embed_color())
await menu.start(ctx)
|
en
| 0.759501
|
MIT License Copyright (c) 2020-2021 phenom4n4n Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #{self.num}", # TODO allow for continuation of game Play Akinator in Discord! Start a game of Akinator! Controls: > ✅ : yes > ❎ : no > ❔ : i don't know > 📉 : probably > 📈 : probably not > 🔙 : back > 🏆 : win > 🗑️ : cancel #functions>"
| 1.873701
| 2
|
blender/arm/logicnode/animation/LN_on_action_marker.py
|
onelsonic/armory
| 2,583
|
6626957
|
<reponame>onelsonic/armory<filename>blender/arm/logicnode/animation/LN_on_action_marker.py<gh_stars>1000+
from arm.logicnode.arm_nodes import *
class OnActionMarkerNode(ArmLogicTreeNode):
    """Activates the output when the object action reaches the action marker."""
    # Blender registration identifier and UI label for this logic node.
    bl_idname = 'LNOnActionMarkerNode'
    bl_label = 'On Action Marker'
    arm_version = 1
    def arm_init(self, context):
        # Socket order defines the node's input indices:
        # 0 = target object, 1 = marker name; single action output.
        self.add_input('ArmNodeSocketObject', 'Object')
        self.add_input('ArmStringSocket', 'Marker')
        self.add_output('ArmNodeSocketAction', 'Out')
|
from arm.logicnode.arm_nodes import *
class OnActionMarkerNode(ArmLogicTreeNode):
"""Activates the output when the object action reaches the action marker."""
bl_idname = 'LNOnActionMarkerNode'
bl_label = 'On Action Marker'
arm_version = 1
def arm_init(self, context):
self.add_input('ArmNodeSocketObject', 'Object')
self.add_input('ArmStringSocket', 'Marker')
self.add_output('ArmNodeSocketAction', 'Out')
|
en
| 0.462204
|
Activates the output when the object action reaches the action marker.
| 2.528273
| 3
|
SciComputing with Python/lesson_04-24/2-integral.py
|
evtodorov/aerospace
| 0
|
6626958
|
<reponame>evtodorov/aerospace
# NOTE(review): Python 2 script (print statements, eval-style input()).
import math
print "*** Integration of exp(-x^2)dx from xmin to xmax ***"
# input() in Python 2 eval()s what the user types -- acceptable for a
# teaching script, unsafe for untrusted input.
xmin = input("Choose lower boundary: ")
xmax = input("Choose upper boundary: ")
n = input("Choose the number of subintervals: ")
# Running sums: s for the midpoint rule, sup/sd for the right/left
# endpoint values used below as a rough error indicator.
s = 0
sup = 0
sd= 0
x = float(xmin)
dx = (xmax-xmin)/float(n)
# NOTE(review): accumulating x in floats with the strict "< xmax-dx" bound
# can evaluate one subinterval fewer than n -- confirm against a
# "for i in range(n)" loop.
while (x<xmax-dx):
    s += math.exp(-(x+dx/2.)**2) #midpoint
    sup += math.exp(-(x+dx)**2) #endpoint
    sd += math.exp(-x**2) #startpoint
    x += dx
# Midpoint-rule estimate of the integral.
res = dx*s
# NOTE(review): |sd - sup| is the difference of the endpoint sums, not a
# standard error bound, and is not scaled by dx -- verify intent.
err = abs(sd-sup)
print "Integral of exp(-x^2)dx from "+str(xmin)+" to "+str(xmax)+" is "+str(res)+" +- "+str(err)
|
import math
print "*** Integration of exp(-x^2)dx from xmin to xmax ***"
xmin = input("Choose lower boundary: ")
xmax = input("Choose upper boundary: ")
n = input("Choose the number of subintervals: ")
s = 0
sup = 0
sd= 0
x = float(xmin)
dx = (xmax-xmin)/float(n)
while (x<xmax-dx):
s += math.exp(-(x+dx/2.)**2) #midpoint
sup += math.exp(-(x+dx)**2) #endpoint
sd += math.exp(-x**2) #startpoint
x += dx
res = dx*s
err = abs(sd-sup)
print "Integral of exp(-x^2)dx from "+str(xmin)+" to "+str(xmax)+" is "+str(res)+" +- "+str(err)
|
en
| 0.430996
|
#midpoint #endpoint #startpoint
| 4.072887
| 4
|
k-saap_pkg/src/auctioneer.py
|
joaoquintas/auction_methods_stack
| 2
|
6626959
|
# configuring PYTHONPATH (By default, this will add the src and lib directory for each of your dependencies to your PYTHONPATH)
import roslib; roslib.load_manifest('k-saap_pkg')
# import client library
import rospy
# import messages
import auction_msgs.msg
# import services
import auction_srvs.srv
# import services functions
import auction_common
# import auxiliar libraries
import random
import math
# "global" variables (to be referred as global under def fun(something))
winner_id = ''
winner_cost = 999999
#####################################################################################
## Auction Service (Server Callback)
#####################################################################################
def handle_auction_server_callback(auction_req):
    """Run one auction round: relay the request to neighbour nodes, collect
    their bids, and track the lowest-cost bidder.

    NOTE(review): Python 2 code (print statement, "except X, e" syntax).
    NOTE(review): winner_id/winner_cost are module globals and are never
    reset here, so state leaks across successive auctions -- confirm.
    """
    # define global variables
    global winner_id
    global winner_cost
    # update number of messages in parameter server
    # NOTE(review): +2 presumably counts request+response per round; verify.
    if rospy.has_param('/num_messages'):
        num_messages = rospy.get_param('/num_messages')
        num_messages += 2
        rospy.set_param('/num_messages', num_messages)
    # default bid
    bid = auction_msgs.msg.Bid()
    # obtain auctioneer_position
    auctioneer_position = {'auctioneer_position': rospy.get_param('~position')}
    # Obtain nodes list to relay information with k=1
    neighbour_nodes_relay_list = auction_common.create_neighbour_nodes_list(auction_req)
    print neighbour_nodes_relay_list
    # Prepare auction information
    if auction_req.auction_data.command == 'close_auction':
        auction_req.role = 'none'
    else:
        auction_req.role = "be_buyer"
        auction_req.sending_node = rospy.get_name()
    # updated nodes_collected
    if rospy.has_param('/nodes_collected'):
        auction_req.nodes_collected = rospy.get_param('/nodes_collected')+','+rospy.get_name()
        rospy.set_param('/nodes_collected',auction_req.nodes_collected)
    else:
        auction_req.nodes_collected = rospy.get_param('~neighbour_nodes_list')
    # Call the Auction Service from each neighbour node
    for node in neighbour_nodes_relay_list:
        # compose service name (to be changed)
        service_path = node+'/auction_server'
        # wait for the service in the neighbour node to be available
        rospy.wait_for_service(service_path)
        neighbour_node_auction_server = rospy.ServiceProxy(service_path,
                                                           auction_srvs.srv.AuctionService,headers=auctioneer_position)
        try:
            bid_response = neighbour_node_auction_server(auction_req)
            bid = bid_response.bid_data
            # Evaluate bids, Min(cost_distance)
            if winner_cost >= bid.cost_distance:
                if bid.buyer_id != '':
                    winner_cost = bid.cost_distance
                    winner_id = bid.buyer_id
            # log info for momentary winner
            # rospy.loginfo("(winning at the moment) %s with offer %d",winner_id, winner_cost)
        except rospy.ServiceException, e:
            rospy.loginfo("Service call failed: %s",e)
    # verbose for auction status (received all the bids)
    rospy.loginfo("winner was: %s with offer %d",winner_id, winner_cost)
    # return response
    # NOTE(review): "bid" holds the *last* neighbour's bid at this point,
    # not necessarily the winning bid -- confirm that is intended.
    # return auction_srvs.srv.AuctionServiceResponse(bid_response)
    return {'response_info': 'valid', 'bid_data': bid}
## End Auction Server (Server Callback)
|
# configuring PYTHONPATH (By default, this will add the src and lib directory for each of your dependencies to your PYTHONPATH)
import roslib; roslib.load_manifest('k-saap_pkg')
# import client library
import rospy
# import messages
import auction_msgs.msg
# import services
import auction_srvs.srv
# import services functions
import auction_common
# import auxiliar libraries
import random
import math
# "global" variables (to be referred as global under def fun(something))
winner_id = ''
winner_cost = 999999
#####################################################################################
## Auction Service (Server Callback)
#####################################################################################
def handle_auction_server_callback(auction_req):
# define global variables
global winner_id
global winner_cost
# update number of messages in parameter server
if rospy.has_param('/num_messages'):
num_messages = rospy.get_param('/num_messages')
num_messages += 2
rospy.set_param('/num_messages', num_messages)
# default bid
bid = auction_msgs.msg.Bid()
# obtain auctioneer_position
auctioneer_position = {'auctioneer_position': rospy.get_param('~position')}
# Obtain nodes list to relay information with k=1
neighbour_nodes_relay_list = auction_common.create_neighbour_nodes_list(auction_req)
print neighbour_nodes_relay_list
# Prepare auction information
if auction_req.auction_data.command == 'close_auction':
auction_req.role = 'none'
else:
auction_req.role = "be_buyer"
auction_req.sending_node = rospy.get_name()
# updated nodes_collected
if rospy.has_param('/nodes_collected'):
auction_req.nodes_collected = rospy.get_param('/nodes_collected')+','+rospy.get_name()
rospy.set_param('/nodes_collected',auction_req.nodes_collected)
else:
auction_req.nodes_collected = rospy.get_param('~neighbour_nodes_list')
# Call the Auction Service from each neighbour node
for node in neighbour_nodes_relay_list:
# compose service name (to be changed)
service_path = node+'/auction_server'
# wait for the service in the neighbour node to be available
rospy.wait_for_service(service_path)
neighbour_node_auction_server = rospy.ServiceProxy(service_path,
auction_srvs.srv.AuctionService,headers=auctioneer_position)
try:
bid_response = neighbour_node_auction_server(auction_req)
bid = bid_response.bid_data
# Evaluate bids, Min(cost_distance)
if winner_cost >= bid.cost_distance:
if bid.buyer_id != '':
winner_cost = bid.cost_distance
winner_id = bid.buyer_id
# log info for momentary winner
# rospy.loginfo("(winning at the moment) %s with offer %d",winner_id, winner_cost)
except rospy.ServiceException, e:
rospy.loginfo("Service call failed: %s",e)
# verbose for auction status (received all the bids)
rospy.loginfo("winner was: %s with offer %d",winner_id, winner_cost)
# return response
# return auction_srvs.srv.AuctionServiceResponse(bid_response)
return {'response_info': 'valid', 'bid_data': bid}
## End Auction Server (Server Callback)
|
en
| 0.55165
|
# configuring PYTHONPATH (By default, this will add the src and lib directory for each of your dependencies to your PYTHONPATH) # import client library # import messages # import services # import services functions # import auxiliar libraries # "global" variables (to be referred as global under def fun(something)) ##################################################################################### ## Auction Service (Server Callback) ##################################################################################### # define global variables # update number of messages in parameter server # default bid # obtain auctioneer_position # Obtain nodes list to relay information with k=1 # Prepare auction information # updated nodes_collected # Call the Auction Service from each neighbour node # compose service name (to be changed) # wait for the service in the neighbour node to be available # Evaluate bids, Min(cost_distance) # log info for momentary winner # rospy.loginfo("(winning at the moment) %s with offer %d",winner_id, winner_cost) # verbose for auction status (received all the bids) # return response # return auction_srvs.srv.AuctionServiceResponse(bid_response) ## End Auction Server (Server Callback)
| 2.270768
| 2
|
train_iqn.py
|
robinzixuan/IQN_Agent
| 0
|
6626960
|
<reponame>robinzixuan/IQN_Agent
import os
import yaml
import argparse
from datetime import datetime
from fqf_iqn_qrdqn.env import make_pytorch_env
from fqf_iqn_qrdqn.agent import IQNAgent
def run(args):
    """Load the YAML config, build environments, restore a trained IQN agent
    from disk, and run policy evaluation using it as the reference agent.

    Args:
        args: parsed CLI namespace with ``config``, ``env_id``, ``seed``,
            ``cuda`` and ``agent`` (checkpoint directory) attributes.
    """
    with open(args.config) as f:
        # safe_load is equivalent to yaml.load(f, Loader=yaml.SafeLoader).
        config = yaml.safe_load(f)

    # Create environments: one online env, plus a separate evaluation env
    # without episode-life / reward-clipping wrappers.
    env = make_pytorch_env(args.env_id)
    env_online = make_pytorch_env(args.env_id)
    test_env = make_pytorch_env(
        args.env_id, episode_life=False, clip_rewards=False)
    print("self.env_online 0:", env_online)

    # Specify the directory to log.
    # BUG FIX: the original used split('/')[-1].rstrip('.yaml'); rstrip()
    # strips a *character set*, not a suffix (e.g. "maya.yaml" -> ""),
    # so take the basename stem explicitly instead.
    name = os.path.splitext(os.path.basename(args.config))[0]
    timestamp = datetime.now().strftime("%Y%m%d-%H%M")
    log_dir = os.path.join(
        'logs', args.env_id, f'{name}-seed{args.seed}-{timestamp}')

    # Agent restored from a saved checkpoint; serves as the evaluation target.
    agent_evaluation = IQNAgent(
        env=env, test_env=test_env, log_dir=log_dir, seed=args.seed,
        cuda=args.cuda, **config)
    agent_evaluation.load_models(os.path.join(args.agent, "best"))
    print("Model Load done.", args.env_id)

    print("Start policy evaluation...")
    agent = IQNAgent(
        env=env, test_env=test_env, log_dir=log_dir, seed=args.seed,
        cuda=args.cuda, agent=agent_evaluation, env_online=env_online, **config)
    agent.run()
if __name__ == '__main__':
    def str2bool(value):
        """Parse a boolean CLI flag.

        argparse's ``type=bool`` is broken: any non-empty string (including
        "False") is truthy, so ``--cuda False`` would silently enable CUDA.
        """
        if value.lower() in ('true', '1', 'yes'):
            return True
        if value.lower() in ('false', '0', 'no'):
            return False
        raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--config', type=str, default=os.path.join('config', 'iqn.yaml'))
    parser.add_argument('--env_id', type=str, default='PongNoFrameskip-v4')
    # BUG FIX: was type=bool, which treats every non-empty string as True.
    parser.add_argument('--cuda', type=str2bool, default=True)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--agent', type=str, default="result/PongNoFrameskip-v4/iqn-seed0-20211007-0313/model")
    args = parser.parse_args()
    # Restrict visibility to the requested GPU before any CUDA initialization.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    run(args)
|
import os
import yaml
import argparse
from datetime import datetime
from fqf_iqn_qrdqn.env import make_pytorch_env
from fqf_iqn_qrdqn.agent import IQNAgent
def run(args):
with open(args.config) as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
# Create environments.
env = make_pytorch_env(args.env_id)
env_online = make_pytorch_env(args.env_id)
test_env = make_pytorch_env(
args.env_id, episode_life=False, clip_rewards=False)
print("self.env_online 0:", env_online)
# Specify the directory to log.
name = args.config.split('/')[-1].rstrip('.yaml')
time = datetime.now().strftime("%Y%m%d-%H%M")
log_dir = os.path.join(
'logs', args.env_id, f'{name}-seed{args.seed}-{time}')
# Create the agent and run.
agent_evaluation = IQNAgent(
env=env, test_env=test_env, log_dir=log_dir, seed=args.seed,
cuda=args.cuda, **config)
# load model
agent_evaluation.load_models(os.path.join(args.agent, "best"))
print("Model Load done.", args.env_id)
#
print("Start policy evaluation...")
agent = IQNAgent(
env=env, test_env=test_env, log_dir=log_dir, seed=args.seed,
cuda=args.cuda, agent=agent_evaluation, env_online=env_online, **config)
agent.run()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--config', type=str, default=os.path.join('config', 'iqn.yaml'))
parser.add_argument('--env_id', type=str, default='PongNoFrameskip-v4')
parser.add_argument('--cuda', type=bool, default=True)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--agent', type=str, default="result/PongNoFrameskip-v4/iqn-seed0-20211007-0313/model")
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
run(args)
|
en
| 0.725798
|
# Create environments. # Specify the directory to log. # Create the agent and run. # load model #
| 2.230283
| 2
|
run_utils/callbacks/base.py
|
shikhar-srivastava/hover_net
| 0
|
6626961
|
<reponame>shikhar-srivastava/hover_net
import operator
import json
import pickle
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from misc.utils import center_pad_to_shape, cropping_center
from scipy.stats import mode as major_value
from sklearn.metrics import confusion_matrix
####
class BaseCallbacks(object):
    """Interface for engine callbacks.

    Subclasses override ``run`` (invoked with the engine's shared ``state``
    and the triggering ``event``) and, optionally, ``reset``.
    """

    def __init__(self):
        # True only for callbacks that launch another engine (TriggerEngine).
        self.engine_trigger = False

    def reset(self):
        """Reset any per-run callback state; no-op by default."""
        return

    def run(self, state, event):
        """Callback hook invoked by the engine; no-op by default."""
        return
####
class TrackLr(BaseCallbacks):
    """Record each network's current learning rate into the tracked scalars."""

    def __init__(self, per_n_epoch=1, per_n_step=None):
        super().__init__()
        self.per_n_epoch = per_n_epoch
        self.per_n_step = per_n_step

    def run(self, state, event):
        # logging learning rate, decouple into another callback?
        scalars = state.tracked_step_output["scalar"]
        for net_name, net_info in state.run_info.items():
            optimizer = net_info["optimizer"]
            scalars["lr-%s" % net_name] = optimizer.param_groups[0]["lr"]
        return
####
class ScheduleLr(BaseCallbacks):
    """Advance every network's learning-rate scheduler by one step."""

    def __init__(self):
        super().__init__()

    def run(self, state, event):
        # logging learning rate, decouple into another callback?
        for net_info in state.run_info.values():
            net_info["lr_scheduler"].step()
        return
####
class TriggerEngine(BaseCallbacks):
    """Launch another engine (e.g. a validation pass) from the current one.

    Only the target engine's name is known at construction time; the
    framework is expected to attach ``triggered_engine`` before ``run``.
    """

    def __init__(self, triggered_engine_name, nr_epoch=1):
        self.engine_trigger = True
        self.triggered_engine_name = triggered_engine_name
        self.triggered_engine = None
        self.nr_epoch = nr_epoch

    def run(self, state, event):
        # Run chained so the spawned engine shares this engine's state.
        self.triggered_engine.run(
            chained=True, nr_epoch=self.nr_epoch, shared_state=state)
        return
####
class PeriodicSaver(BaseCallbacks):
    """Checkpoint every network once per ``per_n_epoch`` epochs.

    The attached engine's shared state must define ``log_dir`` beforehand.
    """

    def __init__(self, per_n_epoch=1, per_n_step=None):
        super().__init__()
        self.per_n_epoch = per_n_epoch
        self.per_n_step = per_n_step

    def run(self, state, event):
        if not state.logging:
            return
        # TODO: add switch so that only one of [per_n_epoch / per_n_step] can run
        if state.curr_epoch % self.per_n_epoch != 0:
            return
        for net_name, net_info in state.run_info.items():
            # Serialize everything except the non-stateful "extra_info" entry.
            net_checkpoint = {
                key: value.state_dict()
                for key, value in net_info.items()
                if key != "extra_info"
            }
            torch.save(
                net_checkpoint,
                "%s/%s_epoch=%d.tar" % (state.log_dir, net_name, state.curr_epoch),
            )
        return
####
class ConditionalSaver(BaseCallbacks):
    """Checkpoint every network only when the tracked metric is the best so far.

    The attached engine's shared state must define ``log_dir`` and a
    ``log_info["json_file"]`` stat log mapping epoch -> {metric: value}.

    Args:
        metric_name: key looked up in each epoch's stat dict.
        comparator: one of ">", "<", ">=", "<=" — direction of "better".
    """

    def __init__(self, metric_name, comparator=">="):
        super().__init__()
        self.metric_name = metric_name
        self.comparator = comparator

    def run(self, state, event):
        if not state.logging:
            return
        ops = {
            ">": operator.gt,
            "<": operator.lt,
            ">=": operator.ge,
            "<=": operator.le,
        }
        op_func = ops[self.comparator]
        if self.comparator == ">" or self.comparator == ">=":
            best_value = -float("inf")
        else:
            best_value = +float("inf")
        # json stat log file holds one entry per finished epoch
        with open(state.log_info["json_file"]) as json_file:
            json_data = json.load(json_file)
        curr_key = str(state.curr_epoch)
        for epoch, epoch_stat in json_data.items():
            # BUG FIX: skip the current epoch when computing the previous
            # best — including it made the strict comparators (">", "<")
            # unsatisfiable (x > x is never true), so nothing was ever saved.
            # For ">="/"<=" the resulting save condition is unchanged.
            if epoch == curr_key:
                continue
            epoch_value = epoch_stat[self.metric_name]
            if op_func(epoch_value, best_value):
                best_value = epoch_value
        current_value = json_data[curr_key][self.metric_name]
        if not op_func(current_value, best_value):
            return  # current epoch does not beat the previous best
        print(
            state.curr_epoch
        )  # TODO: better way to track which optimal epoch is saved
        for net_name, net_info in state.run_info.items():
            # Serialize everything except the non-stateful "extra_info" entry.
            net_checkpoint = {}
            for key, value in net_info.items():
                if key != "extra_info":
                    net_checkpoint[key] = value.state_dict()
            torch.save(
                net_checkpoint,
                "%s/%s_best=[%s].tar" % (state.log_dir, net_name, self.metric_name),
            )
        return
####
class AccumulateRawOutput(BaseCallbacks):
    """Append each step's raw outputs into per-key lists accumulated over the epoch."""

    def run(self, state, event):
        epoch_acc = state.epoch_accumulated_output
        for key, step_value in state.step_output["raw"].items():
            # Create the list on first sight of a key, then extend it.
            epoch_acc.setdefault(key, []).extend(list(step_value))
        return
####
class ScalarMovingAverage(BaseCallbacks):
    """Maintain an exponential moving average of every scalar output produced
    by each run-step of the attached RunEngine."""

    def __init__(self, alpha=0.95):
        super().__init__()
        self.alpha = alpha
        # key -> current EMA value; persists across steps.
        self.tracking_dict = {}

    def run(self, state, event):
        # TODO: protocol for dynamic key retrieval for EMA
        for key, current_value in state.step_output["EMA"].items():
            if key in self.tracking_dict:
                # Standard EMA update: new = old * a + (1 - a) * x
                previous = self.tracking_dict[key]
                self.tracking_dict[key] = (
                    previous * self.alpha + (1.0 - self.alpha) * current_value
                )
            else:
                # First observation of this key seeds the average.
                self.tracking_dict[key] = current_value
        state.tracked_step_output["scalar"] = self.tracking_dict
        return
####
class ProcessAccumulatedRawOutput(BaseCallbacks):
    """Run ``proc_func`` over the epoch's accumulated raw output and publish
    the resulting tracking dict to the shared state."""

    def __init__(self, proc_func, per_n_epoch=1):
        # TODO: allow dynamically attach specific procesing for `type`
        super().__init__()
        self.per_n_epoch = per_n_epoch
        self.proc_func = proc_func

    def run(self, state, event):
        current_epoch = state.curr_epoch
        # if current_epoch % self.per_n_epoch != 0: return
        # update global shared states
        state.tracked_step_output = self.proc_func(state.epoch_accumulated_output)
        return
class ProcessAccumulatedRawOutput_per_image(BaseCallbacks):
    """Compute per-image statistics from the epoch's accumulated raw output
    and optionally pickle them to the configured stats file."""

    def __init__(self, proc_func, _pickle=True):
        super().__init__()
        self.proc_func = proc_func
        self._pickle = _pickle

    def run(self, state, event):
        per_image_stat = self.proc_func(state.epoch_accumulated_output)
        state.per_image_stat = per_image_stat
        if self._pickle:
            # Persist the stats for offline analysis.
            with open(state.log_info["per_image_stat_file"], "wb") as stat_file:
                pickle.dump(per_image_stat, stat_file)
        return
####
class VisualizeOutput(BaseCallbacks):
    """Render the current step's raw output into an image via ``proc_func``
    and publish it under the tracked "image" outputs."""

    def __init__(self, proc_func, per_n_epoch=1):
        super().__init__()
        # TODO: option to dump viz per epoch or per n step
        self.per_n_epoch = per_n_epoch
        self.proc_func = proc_func

    def run(self, state, event):
        current_epoch = state.curr_epoch
        viz_image = self.proc_func(state.step_output["raw"])
        state.tracked_step_output["image"]["output"] = viz_image
        return
|
import operator
import json
import pickle
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from misc.utils import center_pad_to_shape, cropping_center
from scipy.stats import mode as major_value
from sklearn.metrics import confusion_matrix
####
class BaseCallbacks(object):
def __init__(self):
self.engine_trigger = False
def reset(self):
pass
def run(self, state, event):
pass
####
class TrackLr(BaseCallbacks):
"""
Add learning rate to tracking
"""
def __init__(self, per_n_epoch=1, per_n_step=None):
super().__init__()
self.per_n_epoch = per_n_epoch
self.per_n_step = per_n_step
def run(self, state, event):
# logging learning rate, decouple into another callback?
run_info = state.run_info
for net_name, net_info in run_info.items():
lr = net_info["optimizer"].param_groups[0]["lr"]
state.tracked_step_output["scalar"]["lr-%s" % net_name] = lr
return
####
class ScheduleLr(BaseCallbacks):
"""Trigger all scheduler."""
def __init__(self):
super().__init__()
def run(self, state, event):
# logging learning rate, decouple into another callback?
run_info = state.run_info
for net_name, net_info in run_info.items():
net_info["lr_scheduler"].step()
return
####
class TriggerEngine(BaseCallbacks):
def __init__(self, triggered_engine_name, nr_epoch=1):
self.engine_trigger = True
self.triggered_engine_name = triggered_engine_name
self.triggered_engine = None
self.nr_epoch = nr_epoch
def run(self, state, event):
self.triggered_engine.run(
chained=True, nr_epoch=self.nr_epoch, shared_state=state
)
return
####
class PeriodicSaver(BaseCallbacks):
"""Must declare save dir first in the shared global state of the attached engine."""
def __init__(self, per_n_epoch=1, per_n_step=None):
super().__init__()
self.per_n_epoch = per_n_epoch
self.per_n_step = per_n_step
def run(self, state, event):
if not state.logging:
return
# TODO: add switch so that only one of [per_n_epoch / per_n_step] can run
if state.curr_epoch % self.per_n_epoch != 0:
return
for net_name, net_info in state.run_info.items():
net_checkpoint = {}
for key, value in net_info.items():
if key != "extra_info":
net_checkpoint[key] = value.state_dict()
torch.save(
net_checkpoint,
"%s/%s_epoch=%d.tar" % (state.log_dir, net_name, state.curr_epoch),
)
return
####
class ConditionalSaver(BaseCallbacks):
"""Must declare save dir first in the shared global state of the attached engine."""
def __init__(self, metric_name, comparator=">="):
super().__init__()
self.metric_name = metric_name
self.comparator = comparator
def run(self, state, event):
if not state.logging:
return
ops = {
">": operator.gt,
"<": operator.lt,
">=": operator.ge,
"<=": operator.le,
}
op_func = ops[self.comparator]
if self.comparator == ">" or self.comparator == ">=":
best_value = -float("inf")
else:
best_value = +float("inf")
# json stat log file, update and overwrite
with open(state.log_info["json_file"]) as json_file:
json_data = json.load(json_file)
for epoch, epoch_stat in json_data.items():
epoch_value = epoch_stat[self.metric_name]
if op_func(epoch_value, best_value):
best_value = epoch_value
current_value = json_data[str(state.curr_epoch)][self.metric_name]
if not op_func(current_value, best_value):
return # simply return because not satisfy
print(
state.curr_epoch
) # TODO: better way to track which optimal epoch is saved
for net_name, net_info in state.run_info.items():
net_checkpoint = {}
for key, value in net_info.items():
if key != "extra_info":
net_checkpoint[key] = value.state_dict()
torch.save(
net_checkpoint,
"%s/%s_best=[%s].tar" % (state.log_dir, net_name, self.metric_name),
)
return
####
class AccumulateRawOutput(BaseCallbacks):
def run(self, state, event):
step_output = state.step_output["raw"]
accumulated_output = state.epoch_accumulated_output
for key, step_value in step_output.items():
if key in accumulated_output:
accumulated_output[key].extend(list(step_value))
else:
accumulated_output[key] = list(step_value)
return
####
class ScalarMovingAverage(BaseCallbacks):
"""Calculate the running average for all scalar output of
each runstep of the attached RunEngine."""
def __init__(self, alpha=0.95):
super().__init__()
self.alpha = alpha
self.tracking_dict = {}
def run(self, state, event):
# TODO: protocol for dynamic key retrieval for EMA
step_output = state.step_output["EMA"]
for key, current_value in step_output.items():
if key in self.tracking_dict:
old_ema_value = self.tracking_dict[key]
# calculate the exponential moving average
new_ema_value = (
old_ema_value * self.alpha + (1.0 - self.alpha) * current_value
)
self.tracking_dict[key] = new_ema_value
else: # init for variable which appear for the first time
new_ema_value = current_value
self.tracking_dict[key] = new_ema_value
state.tracked_step_output["scalar"] = self.tracking_dict
return
####
class ProcessAccumulatedRawOutput(BaseCallbacks):
def __init__(self, proc_func, per_n_epoch=1):
# TODO: allow dynamically attach specific procesing for `type`
super().__init__()
self.per_n_epoch = per_n_epoch
self.proc_func = proc_func
def run(self, state, event):
current_epoch = state.curr_epoch
# if current_epoch % self.per_n_epoch != 0: return
raw_data = state.epoch_accumulated_output
track_dict = self.proc_func(raw_data)
# update global shared states
state.tracked_step_output = track_dict
return
class ProcessAccumulatedRawOutput_per_image(BaseCallbacks):
def __init__(self, proc_func,_pickle=True):
super().__init__()
self.proc_func = proc_func
self._pickle = _pickle
def run(self, state, event):
raw_data = state.epoch_accumulated_output
per_image_stat = self.proc_func(raw_data)
state.per_image_stat = per_image_stat
if self._pickle:
with open(state.log_info["per_image_stat_file"], "wb") as f:
pickle.dump(per_image_stat, f)
return
####
class VisualizeOutput(BaseCallbacks):
def __init__(self, proc_func, per_n_epoch=1):
super().__init__()
# TODO: option to dump viz per epoch or per n step
self.per_n_epoch = per_n_epoch
self.proc_func = proc_func
def run(self, state, event):
current_epoch = state.curr_epoch
raw_output = state.step_output["raw"]
viz_image = self.proc_func(raw_output)
state.tracked_step_output["image"]["output"] = viz_image
return
|
en
| 0.711024
|
#### #### Add learning rate to tracking # logging learning rate, decouple into another callback? #### Trigger all scheduler. # logging learning rate, decouple into another callback? #### #### Must declare save dir first in the shared global state of the attached engine. # TODO: add switch so that only one of [per_n_epoch / per_n_step] can run #### Must declare save dir first in the shared global state of the attached engine. # json stat log file, update and overwrite # simply return because not satisfy # TODO: better way to track which optimal epoch is saved #### #### Calculate the running average for all scalar output of each runstep of the attached RunEngine. # TODO: protocol for dynamic key retrieval for EMA # calculate the exponential moving average # init for variable which appear for the first time #### # TODO: allow dynamically attach specific procesing for `type` # if current_epoch % self.per_n_epoch != 0: return # update global shared states #### # TODO: option to dump viz per epoch or per n step
| 2.252367
| 2
|
client/controller/autenticate.py
|
jurandirjdsilva/pycryptomail
| 1
|
6626962
|
<gh_stars>1-10
from client.data.message import Message
class Authentication:
    """Holds client session state and builds authentication requests.

    Attributes set in ``__init__``:
        user_token: session token (None until authenticated).
        user_name: authenticated user's name (None until authenticated).
    """

    def __init__(self):
        self.user_token = None
        self.user_name = None

    def request_authentication(self, email, password):
        """Build and return the authentication message for the credentials.

        BUG FIX: the original built ``msg`` and silently discarded it;
        returning it lets callers actually send the message. (Previously
        the method always returned None, so this is backward compatible.)
        """
        msg = Message.msg_autentication(email, password)
        return msg

    def connect(self):
        """Placeholder for establishing the server connection (not implemented)."""
        pass
|
from client.data.message import Message
class Authentication:
def __init__(self):
self.user_token = None
self.user_name = None
def request_authentication(self, email, password):
msg = Message.msg_autentication(email, password)
def connect(self):
pass
|
none
| 1
| 2.571547
| 3
|
|
tests/test_iland.py
|
jefftp/python-sdk
| 5
|
6626963
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import time
import unittest
import iland
import requests_mock
BASE_URL = 'http://example.com/ecs'
VALID_TOKEN_PAYLOAD = {'expires_in': 12,
'refresh_expires_in': 17,
'access_token': '<PASSWORD>',
'refresh_token': '<PASSWORD>'}
VALID_REFRESH_TOKEN_PAYLOAD = {'expires_in': 12,
'refresh_expires_in': 17,
'access_token': '<PASSWORD>',
'refresh_token': '<PASSWORD>'}
class TestIland(unittest.TestCase):
    """Unit tests for iland.Api login, token refresh and HTTP verb handling.

    All HTTP traffic is stubbed with requests_mock; no network access occurs.
    NOTE: the deprecated ``assertEquals`` alias (removed in Python 3.12) has
    been replaced with ``assertEqual``, and the heavily duplicated fixtures
    are factored into private helpers; every public test name and assertion
    is preserved.
    """

    session = None
    adapter = None

    # Shared fixture for the user-resource round-trip tests.
    RPATH = '/user/jchirac'
    USER_DATA = {'username': 'jchirac'}

    def setUp(self):
        self.api = iland.Api(client_id='fake',
                             client_secret='fake',
                             username='fake',
                             password='<PASSWORD>')
        self.api._base_url = BASE_URL
        self.api._access_token_url = iland.ACCESS_URL

    # ------------------------------------------------------------------ #
    # helpers
    # ------------------------------------------------------------------ #
    def _mock_login(self, m, status_code=200):
        """Stub the token endpoint; returns the matcher for call counting."""
        return m.post(iland.ACCESS_URL,
                      text=json.dumps(VALID_TOKEN_PAYLOAD),
                      status_code=status_code)

    def _mock_user(self, m, verb, status_code, base_url=BASE_URL, **kwargs):
        """Stub the user resource for the given HTTP verb ('get', 'post', ...)."""
        register = getattr(m, verb)
        return register(base_url + self.RPATH,
                        text=json.dumps(self.USER_DATA),
                        status_code=status_code, **kwargs)

    def _check_login_ok(self, status_code):
        with requests_mock.mock() as m:
            self._mock_login(m, status_code)
            self.api.login()
            self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())

    def _check_login_ko(self, status_code):
        with requests_mock.mock() as m:
            m.post(iland.ACCESS_URL,
                   text=json.dumps({'error': 'an error occured'}),
                   status_code=status_code)
            with self.assertRaises(iland.UnauthorizedException):
                self.api.login()

    def _check_get(self, status_code, ok):
        with requests_mock.mock() as m:
            self._mock_login(m)
            self._mock_user(m, 'get', status_code)
            if ok:
                self.assertEqual(self.USER_DATA, self.api.get(self.RPATH))
            else:
                with self.assertRaises(iland.ApiException):
                    self.api.get(self.RPATH)

    # ------------------------------------------------------------------ #
    # login
    # ------------------------------------------------------------------ #
    def test_login_ok_200(self):
        self._check_login_ok(200)

    def test_login_ok_201(self):
        self._check_login_ok(201)

    def test_login_ok_202(self):
        self._check_login_ok(202)

    def test_login_ko_500(self):
        self._check_login_ko(500)

    def test_login_ko_400(self):
        self._check_login_ko(400)

    # ------------------------------------------------------------------ #
    # token refresh
    # ------------------------------------------------------------------ #
    def test_refresh_token_ok(self):
        with requests_mock.mock() as m:
            self._mock_login(m)
            m.post(iland.REFRESH_URL,
                   text=json.dumps(VALID_REFRESH_TOKEN_PAYLOAD),
                   status_code=200)
            self.api.login()
            self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
            # manual refresh: token unchanged because it has not expired yet
            self.api.refresh_access_token()
            self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
            # wait for the access token to expire, then refresh for real
            time.sleep(5)
            self.api.refresh_access_token()
            self.assertEqual(VALID_REFRESH_TOKEN_PAYLOAD,
                             self.api.get_access_token())
            # drop the token entirely so a fresh access token is fetched
            self.api._token = None
            self.api.refresh_access_token()
            self.assertEqual(VALID_TOKEN_PAYLOAD,
                             self.api.get_access_token())

    def _check_refresh_ko(self, refresh_status):
        """A failing refresh endpoint (4xx/5xx) must trigger a fresh login."""
        with requests_mock.mock() as m:
            login_spy = self._mock_login(m)
            refresh_spy = m.post(iland.REFRESH_URL,
                                 text=json.dumps(VALID_REFRESH_TOKEN_PAYLOAD),
                                 status_code=refresh_status)
            self.api.login()
            self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
            # wait for access token expiration; since the refresh endpoint
            # fails, a new login must be initiated because the existing
            # session can no longer be refreshed
            time.sleep(5)
            self.assertEqual(VALID_REFRESH_TOKEN_PAYLOAD,
                             self.api.refresh_access_token())
            self.assertEqual(1, refresh_spy.call_count)
            self.assertEqual(2, login_spy.call_count)

    def test_refresh_token_ko_400(self):
        self._check_refresh_ko(400)

    def test_refresh_token_ko_500(self):
        self._check_refresh_ko(500)

    def test_refresh_token_expired(self):
        with requests_mock.mock() as m:
            login_spy = self._mock_login(m)
            self.api.login()
            self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
            # wait for the *refresh* token to expire; a new login must be
            # initiated because the session can no longer be refreshed
            time.sleep(8)
            self.assertEqual(VALID_TOKEN_PAYLOAD,
                             self.api.refresh_access_token())
            self.assertEqual(2, login_spy.call_count)

    # ------------------------------------------------------------------ #
    # GET
    # ------------------------------------------------------------------ #
    def test_get_ok_200(self):
        self._check_get(200, ok=True)

    def test_get_ok_201(self):
        self._check_get(201, ok=True)

    def test_get_ok_202(self):
        self._check_get(202, ok=True)

    def test_get_ok_204(self):
        self._check_get(204, ok=True)

    def test_get_ko_400(self):
        self._check_get(400, ok=False)

    def test_get_ko_500(self):
        self._check_get(500, ok=False)

    # ------------------------------------------------------------------ #
    # POST / PUT / DELETE
    # ------------------------------------------------------------------ #
    def test_post_ok(self):
        with requests_mock.mock() as m:
            self._mock_login(m)
            self._mock_user(m, 'post', 200)
            self.assertEqual(self.USER_DATA,
                             self.api.post(self.RPATH, form_data={'a': 'b'}))

    def test_post_ok_no_formdata(self):
        with requests_mock.mock() as m:
            self._mock_login(m)
            self._mock_user(m, 'post', 200)
            self.assertEqual(self.USER_DATA, self.api.post(self.RPATH))

    def test_put_ok(self):
        with requests_mock.mock() as m:
            self._mock_login(m)
            self._mock_user(m, 'put', 200)
            self.assertEqual(self.USER_DATA,
                             self.api.put(self.RPATH, form_data={'a': 'b'}))

    def test_put_ok_no_formdata(self):
        with requests_mock.mock() as m:
            self._mock_login(m)
            self._mock_user(m, 'put', 200)
            self.assertEqual(self.USER_DATA, self.api.put(self.RPATH))

    def test_delete_ok(self):
        with requests_mock.mock() as m:
            self._mock_login(m)
            self._mock_user(m, 'delete', 200)
            self.assertEqual(self.USER_DATA, self.api.delete(self.RPATH))

    def test_unknown_verb_internal(self):
        with requests_mock.mock() as m:
            self._mock_login(m)
            self._mock_user(m, 'delete', 200)
            # an unsupported verb must be rejected before any request is made
            with self.assertRaises(iland.ApiException):
                self.api._do_request(self.RPATH, verb='ACK')

    # ------------------------------------------------------------------ #
    # configuration variants
    # ------------------------------------------------------------------ #
    def test_with_default_base_url(self):
        # An Api without an overridden base URL must hit iland.BASE_URL.
        self.api = iland.Api(client_id='fake',
                             client_secret='fake',
                             username='fake',
                             password='<PASSWORD>')
        with requests_mock.mock() as m:
            self._mock_login(m)
            self._mock_user(m, 'get', 200, base_url=iland.BASE_URL)
            self.assertEqual(self.USER_DATA, self.api.get(self.RPATH))

    def test_with_proxies_set(self):
        with requests_mock.mock() as m:
            self._mock_login(m)
            self._mock_user(m, 'get', 200)
            self.api._proxies = {'https': 'https://10.10.10.10:3128'}
            self.assertEqual(self.USER_DATA, self.api.get(self.RPATH))

    def test_get_with_extra_header(self):
        with requests_mock.mock() as m:
            self._mock_login(m)
            self._mock_user(m, 'get', 200,
                            request_headers={'Host': 'api.ilandcloud.com'})
            self.assertEqual(
                self.USER_DATA,
                self.api.get(self.RPATH, headers={'Host': 'api.ilandcloud.com'}))

    def test_get_with_extra_disallowed_header(self):
        with requests_mock.mock() as m:
            self._mock_login(m)
            self._mock_user(m, 'get', 200)
            # Set Accept to text/csv but it's ignored by api, so we get json
            self.assertEqual(
                self.USER_DATA,
                self.api.get(self.RPATH, headers={'Accept': 'text/csv'}))

    def test_get_with_timeout(self):
        with requests_mock.mock() as m:
            self._mock_login(m)
            self._mock_user(m, 'get', 200)
            self.assertEqual(self.USER_DATA,
                             self.api.get(self.RPATH, timeout=5.0))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import time
import unittest
import iland
import requests_mock
BASE_URL = 'http://example.com/ecs'
VALID_TOKEN_PAYLOAD = {'expires_in': 12,
'refresh_expires_in': 17,
'access_token': '<PASSWORD>',
'refresh_token': '<PASSWORD>'}
VALID_REFRESH_TOKEN_PAYLOAD = {'expires_in': 12,
'refresh_expires_in': 17,
'access_token': '<PASSWORD>',
'refresh_token': '<PASSWORD>'}
class TestIland(unittest.TestCase):
session = None
adapter = None
def setUp(self):
self.api = iland.Api(client_id='fake',
client_secret='fake',
username='fake',
password='<PASSWORD>')
self.api._base_url = BASE_URL
self.api._access_token_url = iland.ACCESS_URL
def test_login_ok_200(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
self.api.login()
self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
def test_login_ok_201(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=201)
self.api.login()
self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
def test_login_ok_202(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=202)
self.api.login()
self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
def test_login_ko_500(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps({'error': 'an error occured'}),
status_code=500)
with self.assertRaises(iland.UnauthorizedException):
self.api.login()
def test_login_ko_400(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps({'error': 'an error occured'}),
status_code=400)
with self.assertRaises(iland.UnauthorizedException):
self.api.login()
def test_refresh_token_ok(self):
with requests_mock.mock() as m:
m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
m.post(iland.REFRESH_URL,
text=json.dumps(VALID_REFRESH_TOKEN_PAYLOAD),
status_code=200)
self.api.login()
self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
# manually refresh token
self.api.refresh_access_token()
# still the same since not expired therefore not renewed
self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
# let's wait for expiration
time.sleep(5)
self.api.refresh_access_token()
self.assertEqual(VALID_REFRESH_TOKEN_PAYLOAD,
self.api.get_access_token())
# manually remove the actual token so that we refetch an access
# token
self.api._token = None
self.api.refresh_access_token()
self.assertEqual(VALID_TOKEN_PAYLOAD,
self.api.get_access_token())
def test_refresh_token_ko_400(self):
with requests_mock.mock() as m:
login_spy = m.post(iland.ACCESS_URL,
text=json.dumps(VALID_TOKEN_PAYLOAD),
status_code=200)
refresh_spy = m.post(iland.REFRESH_URL,
text=json.dumps(VALID_REFRESH_TOKEN_PAYLOAD),
status_code=400)
self.api.login()
self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
# wait for access token expiration. since the refresh endpoint
# returns a 400, we expect that a new login will be initiated
# because the existing session can no longer be refreshed
time.sleep(5)
self.assertEqual(VALID_REFRESH_TOKEN_PAYLOAD,
self.api.refresh_access_token())
self.assertEqual(1, refresh_spy.call_count)
self.assertEqual(2, login_spy.call_count)
def test_refresh_token_ko_500(self):
    """A 500 from the refresh endpoint triggers a full re-login."""
    with requests_mock.mock() as m:
        login_spy = m.post(iland.ACCESS_URL,
                           text=json.dumps(VALID_TOKEN_PAYLOAD),
                           status_code=200)
        refresh_spy = m.post(iland.REFRESH_URL,
                             text=json.dumps(VALID_REFRESH_TOKEN_PAYLOAD),
                             status_code=500)
        self.api.login()
        self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
        # wait for access token expiration. since the refresh endpoint
        # returns a 500, we expect that a new login will be initiated
        # because the existing session can no longer be refreshed
        time.sleep(5)
        self.assertEqual(VALID_REFRESH_TOKEN_PAYLOAD,
                         self.api.refresh_access_token())
        # refresh was attempted exactly once, then the client fell back
        # to a second login
        self.assertEqual(1, refresh_spy.call_count)
        self.assertEqual(2, login_spy.call_count)
def test_refresh_token_expired(self):
    """An expired refresh token forces a brand new login."""
    with requests_mock.mock() as m:
        login_spy = m.post(iland.ACCESS_URL,
                           text=json.dumps(VALID_TOKEN_PAYLOAD),
                           status_code=200)
        self.api.login()
        self.assertEqual(VALID_TOKEN_PAYLOAD, self.api.get_access_token())
        # wait for refresh token expiration. since the refresh token
        # expired, we expect that a new login will be initiated because
        # the existing session can no longer be refreshed
        # NOTE(review): assumes the fixture refresh token lives < 8s.
        time.sleep(8)
        self.assertEqual(VALID_TOKEN_PAYLOAD,
                         self.api.refresh_access_token())
        self.assertEqual(2, login_spy.call_count)
def test_get_ok_200(self):
    """GET returns the decoded JSON body on a 200 response."""
    with requests_mock.mock() as m:
        m.post(iland.ACCESS_URL,
               text=json.dumps(VALID_TOKEN_PAYLOAD),
               status_code=200)
        rpath = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        m.get(BASE_URL + rpath, text=json.dumps(user_data),
              status_code=200)
        req = self.api.get(rpath)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(user_data, req)
def test_get_ok_201(self):
    """GET returns the decoded JSON body on a 201 response."""
    with requests_mock.mock() as m:
        m.post(iland.ACCESS_URL,
               text=json.dumps(VALID_TOKEN_PAYLOAD),
               status_code=200)
        rpath = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        m.get(BASE_URL + rpath, text=json.dumps(user_data),
              status_code=201)
        req = self.api.get(rpath)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(user_data, req)
def test_get_ok_202(self):
    """GET returns the decoded JSON body on a 202 response."""
    with requests_mock.mock() as m:
        m.post(iland.ACCESS_URL,
               text=json.dumps(VALID_TOKEN_PAYLOAD),
               status_code=200)
        rpath = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        m.get(BASE_URL + rpath, text=json.dumps(user_data),
              status_code=202)
        req = self.api.get(rpath)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(user_data, req)
def test_get_ok_204(self):
    """GET returns the decoded JSON body on a 204 response.

    NOTE(review): a real 204 carries no body; the mock supplies one
    anyway and the client is expected to decode it.
    """
    with requests_mock.mock() as m:
        m.post(iland.ACCESS_URL,
               text=json.dumps(VALID_TOKEN_PAYLOAD),
               status_code=200)
        rpath = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        m.get(BASE_URL + rpath, text=json.dumps(user_data),
              status_code=204)
        req = self.api.get(rpath)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(user_data, req)
def test_get_ko_400(self):
    """A 400 response to a GET surfaces as an ApiException."""
    with requests_mock.mock() as mock_http:
        mock_http.post(iland.ACCESS_URL,
                       status_code=200,
                       text=json.dumps(VALID_TOKEN_PAYLOAD))
        resource_path = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        mock_http.get(BASE_URL + resource_path,
                      status_code=400,
                      text=json.dumps(user_data))
        with self.assertRaises(iland.ApiException):
            self.api.get(resource_path)
def test_get_ko_500(self):
    """A 500 response to a GET surfaces as an ApiException."""
    with requests_mock.mock() as mock_http:
        mock_http.post(iland.ACCESS_URL,
                       status_code=200,
                       text=json.dumps(VALID_TOKEN_PAYLOAD))
        resource_path = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        mock_http.get(BASE_URL + resource_path,
                      status_code=500,
                      text=json.dumps(user_data))
        with self.assertRaises(iland.ApiException):
            self.api.get(resource_path)
def test_post_ok(self):
    """POST with form data returns the decoded JSON body."""
    with requests_mock.mock() as m:
        m.post(iland.ACCESS_URL,
               text=json.dumps(VALID_TOKEN_PAYLOAD),
               status_code=200)
        rpath = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        m.post(BASE_URL + rpath, text=json.dumps(user_data),
               status_code=200)
        req = self.api.post(rpath, form_data={'a': 'b'})
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(user_data, req)
def test_post_ok_no_formdata(self):
    """POST without form data returns the decoded JSON body."""
    with requests_mock.mock() as m:
        m.post(iland.ACCESS_URL,
               text=json.dumps(VALID_TOKEN_PAYLOAD),
               status_code=200)
        rpath = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        m.post(BASE_URL + rpath, text=json.dumps(user_data),
               status_code=200)
        req = self.api.post(rpath)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(user_data, req)
def test_put_ok(self):
    """PUT with form data returns the decoded JSON body."""
    with requests_mock.mock() as m:
        m.post(iland.ACCESS_URL,
               text=json.dumps(VALID_TOKEN_PAYLOAD),
               status_code=200)
        rpath = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        m.put(BASE_URL + rpath, text=json.dumps(user_data),
              status_code=200)
        req = self.api.put(rpath, form_data={'a': 'b'})
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(user_data, req)
def test_put_ok_no_formdata(self):
    """PUT without form data returns the decoded JSON body."""
    with requests_mock.mock() as m:
        m.post(iland.ACCESS_URL,
               text=json.dumps(VALID_TOKEN_PAYLOAD),
               status_code=200)
        rpath = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        m.put(BASE_URL + rpath, text=json.dumps(user_data),
              status_code=200)
        req = self.api.put(rpath)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(user_data, req)
def test_delete_ok(self):
    """DELETE returns the decoded JSON body."""
    with requests_mock.mock() as m:
        m.post(iland.ACCESS_URL,
               text=json.dumps(VALID_TOKEN_PAYLOAD),
               status_code=200)
        rpath = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        m.delete(BASE_URL + rpath, text=json.dumps(user_data),
                 status_code=200)
        req = self.api.delete(rpath)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(user_data, req)
def test_unknown_verb_internal(self):
    """The private _do_request dispatcher rejects unknown HTTP verbs."""
    with requests_mock.mock() as m:
        m.post(iland.ACCESS_URL,
               text=json.dumps(VALID_TOKEN_PAYLOAD),
               status_code=200)
        rpath = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        # a DELETE route is registered, but presumably never hit: the
        # unknown 'ACK' verb should be rejected before any request goes
        # out -- TODO confirm against _do_request's implementation.
        m.delete(BASE_URL + rpath, text=json.dumps(user_data),
                 status_code=200)
        with self.assertRaises(iland.ApiException):
            self.api._do_request(rpath, verb='ACK')
def test_with_default_base_url(self):
    """Omitting base_url makes the client target iland.BASE_URL."""
    # rebuild the client without an explicit base_url
    self.api = iland.Api(client_id='fake',
                         client_secret='fake',
                         username='fake',
                         password='<PASSWORD>')
    with requests_mock.mock() as m:
        m.post(iland.ACCESS_URL,
               text=json.dumps(VALID_TOKEN_PAYLOAD),
               status_code=200)
        rpath = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        # note: matched against the library default, not the test BASE_URL
        m.get(iland.BASE_URL + rpath, text=json.dumps(user_data),
              status_code=200)
        req = self.api.get(rpath)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(user_data, req)
def test_with_proxies_set(self):
    """Requests still succeed when a proxy configuration is set."""
    with requests_mock.mock() as m:
        m.post(iland.ACCESS_URL,
               text=json.dumps(VALID_TOKEN_PAYLOAD),
               status_code=200)
        rpath = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        m.get(BASE_URL + rpath, text=json.dumps(user_data),
              status_code=200)
        # poke the private proxy config directly; the mock adapter
        # ignores proxies, so this only checks nothing blows up
        self.api._proxies = {'https': 'https://10.10.10.10:3128'}
        req = self.api.get(rpath)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(user_data, req)
def test_get_with_extra_header(self):
    """Caller-supplied headers are forwarded with the request."""
    with requests_mock.mock() as m:
        m.post(iland.ACCESS_URL,
               text=json.dumps(VALID_TOKEN_PAYLOAD),
               status_code=200)
        rpath = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        # request_headers makes the mock match only if the header is sent
        m.get(BASE_URL + rpath, text=json.dumps(user_data),
              request_headers={'Host': 'api.ilandcloud.com'},
              status_code=200)
        req = self.api.get(rpath, headers={'Host': 'api.ilandcloud.com'})
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(user_data, req)
def test_get_with_extra_disallowed_header(self):
    """Headers the API reserves for itself are ignored when supplied."""
    with requests_mock.mock() as m:
        m.post(iland.ACCESS_URL,
               text=json.dumps(VALID_TOKEN_PAYLOAD),
               status_code=200)
        rpath = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        m.get(BASE_URL + rpath, text=json.dumps(user_data),
              status_code=200)
        # Set Accept to text/csv but it's ignored by api, so we get json
        req = self.api.get(rpath, headers={'Accept': 'text/csv'})
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(user_data, req)
def test_get_with_timeout(self):
    """A per-request timeout is accepted and the call still succeeds."""
    with requests_mock.mock() as m:
        m.post(iland.ACCESS_URL,
               text=json.dumps(VALID_TOKEN_PAYLOAD),
               status_code=200)
        rpath = '/user/jchirac'
        user_data = {'username': 'jchirac'}
        m.get(BASE_URL + rpath, text=json.dumps(user_data),
              status_code=200)
        req = self.api.get(rpath, timeout=5.0)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(user_data, req)
|
en
| 0.825432
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # manually refresh token # still the same since not expired therefore not renewed # let's wait for expiration # manually remove the actual token so that we refetch an access # token # wait for access token expiration. since the refresh endpoint # returns a 400, we expect that a new login will be initiated # because the existing session can no longer be refreshed # wait for access token expiration. since the refresh endpoint # returns a 400, we expect that a new login will be initiated # because the existing session can no longer be refreshed # wait for refresh token expiration. since the refresh token # expired, we expect that a new login will be initiated because # the existing session can no longer be refreshed # Set Accept to text/csv but it's ignored by api, so we get json
| 2.69646
| 3
|
tests/helpers/test_entity_component.py
|
billyburly/home-assistant
| 5
|
6626964
|
"""The tests for the Entity component helper."""
# pylint: disable=protected-access
from collections import OrderedDict
from datetime import timedelta
import logging
from unittest.mock import Mock, patch
import asynctest
import pytest
from homeassistant.const import ENTITY_MATCH_ALL
import homeassistant.core as ha
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import discovery
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
MockEntity,
MockModule,
MockPlatform,
async_fire_time_changed,
mock_coro,
mock_entity_platform,
mock_integration,
)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "test_domain"
async def test_setup_loads_platforms(hass):
    """Test the loading of the platforms."""
    component_setup = Mock(return_value=True)
    platform_setup = Mock(return_value=None)
    mock_integration(hass, MockModule("test_component", setup=component_setup))
    # mock the dependencies: mod2 declares a dependency on test_component
    mock_integration(hass, MockModule("mod2", dependencies=["test_component"]))
    mock_entity_platform(hass, "test_domain.mod2", MockPlatform(platform_setup))
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    # nothing runs before setup() is called
    assert not component_setup.called
    assert not platform_setup.called
    component.setup({DOMAIN: {"platform": "mod2"}})
    await hass.async_block_till_done()
    # both the dependency's setup and the platform setup were invoked
    assert component_setup.called
    assert platform_setup.called
async def test_setup_recovers_when_setup_raises(hass):
    """Test the setup if exceptions are happening."""
    # mod1 raises during setup; mod2 sets up cleanly
    platform1_setup = Mock(side_effect=Exception("Broken"))
    platform2_setup = Mock(return_value=None)
    mock_entity_platform(hass, "test_domain.mod1", MockPlatform(platform1_setup))
    mock_entity_platform(hass, "test_domain.mod2", MockPlatform(platform2_setup))
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    assert not platform1_setup.called
    assert not platform2_setup.called
    # three configs: a raising platform, a missing platform, a good one
    component.setup(
        OrderedDict(
            [
                (DOMAIN, {"platform": "mod1"}),
                (f"{DOMAIN} 2", {"platform": "non_exist"}),
                (f"{DOMAIN} 3", {"platform": "mod2"}),
            ]
        )
    )
    await hass.async_block_till_done()
    # mod1's failure did not prevent mod2 from being set up
    assert platform1_setup.called
    assert platform2_setup.called
@asynctest.patch(
    "homeassistant.helpers.entity_component.EntityComponent.async_setup_platform",
    return_value=mock_coro(),
)
@asynctest.patch(
    "homeassistant.setup.async_setup_component", return_value=mock_coro(True)
)
async def test_setup_does_discovery(mock_setup_component, mock_setup, hass):
    """Test setup for discovery.

    Decorators are applied bottom-up, so mock_setup_component patches
    async_setup_component and mock_setup patches async_setup_platform.
    """
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    component.setup({})
    discovery.load_platform(
        hass, DOMAIN, "platform_test", {"msg": "discovery_info"}, {DOMAIN: {}}
    )
    await hass.async_block_till_done()
    assert mock_setup.called
    # (platform name, platform config, discovery payload)
    assert ("platform_test", {}, {"msg": "discovery_info"}) == mock_setup.call_args[0]
@asynctest.patch("homeassistant.helpers.entity_platform.async_track_time_interval")
async def test_set_scan_interval_via_config(mock_track, hass):
    """Test the setting of the scan interval via configuration."""
    def platform_setup(hass, config, add_entities, discovery_info=None):
        """Test the platform setup."""
        # a polling entity is required for a poll timer to be scheduled
        add_entities([MockEntity(should_poll=True)])
    mock_entity_platform(hass, "test_domain.platform", MockPlatform(platform_setup))
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    component.setup(
        {DOMAIN: {"platform": "platform", "scan_interval": timedelta(seconds=30)}}
    )
    await hass.async_block_till_done()
    assert mock_track.called
    # third positional arg of async_track_time_interval is the interval
    assert timedelta(seconds=30) == mock_track.call_args[0][2]
async def test_set_entity_namespace_via_config(hass):
    """Test setting an entity namespace."""
    def platform_setup(hass, config, add_entities, discovery_info=None):
        """Test the platform setup."""
        add_entities([MockEntity(name="beer"), MockEntity(name=None)])
    platform = MockPlatform(platform_setup)
    mock_entity_platform(hass, "test_domain.platform", platform)
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    component.setup({DOMAIN: {"platform": "platform", "entity_namespace": "yummy"}})
    await hass.async_block_till_done()
    # the namespace prefixes every object id; the unnamed entity gets
    # the default "unnamed device" object id
    assert sorted(hass.states.async_entity_ids()) == [
        "test_domain.yummy_beer",
        "test_domain.yummy_unnamed_device",
    ]
async def test_extract_from_service_available_device(hass):
    """Test the extraction of entity from service and device is available."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    await component.async_add_entities(
        [
            MockEntity(name="test_1"),
            MockEntity(name="test_2", available=False),
            MockEntity(name="test_3"),
            MockEntity(name="test_4", available=False),
        ]
    )
    # "all" targets every entity, but unavailable ones are filtered out
    call_1 = ha.ServiceCall("test", "service", data={"entity_id": ENTITY_MATCH_ALL})
    assert ["test_domain.test_1", "test_domain.test_3"] == sorted(
        ent.entity_id for ent in (await component.async_extract_from_service(call_1))
    )
    # explicit ids: the unavailable test_4 is still filtered out
    call_2 = ha.ServiceCall(
        "test",
        "service",
        data={"entity_id": ["test_domain.test_3", "test_domain.test_4"]},
    )
    assert ["test_domain.test_3"] == sorted(
        ent.entity_id for ent in (await component.async_extract_from_service(call_2))
    )
async def test_platform_not_ready(hass):
    """Test that we retry when platform not ready.

    The platform raises PlatformNotReady twice, then succeeds; retries
    are driven by firing simulated time changes 30 seconds apart.
    """
    platform1_setup = Mock(side_effect=[PlatformNotReady, PlatformNotReady, None])
    mock_integration(hass, MockModule("mod1"))
    mock_entity_platform(hass, "test_domain.mod1", MockPlatform(platform1_setup))
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    await component.async_setup({DOMAIN: {"platform": "mod1"}})
    # first attempt failed, platform not registered yet
    assert len(platform1_setup.mock_calls) == 1
    assert "test_domain.mod1" not in hass.config.components
    utcnow = dt_util.utcnow()
    with patch("homeassistant.util.dt.utcnow", return_value=utcnow):
        # Should not trigger attempt 2
        async_fire_time_changed(hass, utcnow + timedelta(seconds=29))
        await hass.async_block_till_done()
        assert len(platform1_setup.mock_calls) == 1
        # Should trigger attempt 2
        async_fire_time_changed(hass, utcnow + timedelta(seconds=30))
        await hass.async_block_till_done()
        assert len(platform1_setup.mock_calls) == 2
        assert "test_domain.mod1" not in hass.config.components
        # This should not trigger attempt 3
        async_fire_time_changed(hass, utcnow + timedelta(seconds=59))
        await hass.async_block_till_done()
        assert len(platform1_setup.mock_calls) == 2
        # Trigger attempt 3, which succeeds
        async_fire_time_changed(hass, utcnow + timedelta(seconds=60))
        await hass.async_block_till_done()
        assert len(platform1_setup.mock_calls) == 3
        assert "test_domain.mod1" in hass.config.components
async def test_extract_from_service_fails_if_no_entity_id(hass):
    """A service call without an entity_id extracts no entities."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entities = [MockEntity(name="test_1"), MockEntity(name="test_2")]
    await component.async_add_entities(entities)
    service_call = ha.ServiceCall("test", "service")
    extracted = await component.async_extract_from_service(service_call)
    assert sorted(ent.entity_id for ent in extracted) == []
async def test_extract_from_service_filter_out_non_existing_entities(hass):
    """Test the extraction of non existing entities from service."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    await component.async_add_entities(
        [MockEntity(name="test_1"), MockEntity(name="test_2")]
    )
    # one known id and one unknown id are requested
    call = ha.ServiceCall(
        "test",
        "service",
        {"entity_id": ["test_domain.test_2", "test_domain.non_exist"]},
    )
    # only the entity that actually exists is extracted
    assert ["test_domain.test_2"] == [
        ent.entity_id for ent in await component.async_extract_from_service(call)
    ]
async def test_extract_from_service_no_group_expand(hass):
    """With expand_group=False a group entity is returned as-is."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    await component.async_add_entities([MockEntity(entity_id="group.test_group")])
    service_call = ha.ServiceCall(
        "test", "service", {"entity_id": ["group.test_group"]}
    )
    result = await component.async_extract_from_service(
        service_call, expand_group=False
    )
    assert [ent.entity_id for ent in result] == ["group.test_group"]
async def test_setup_dependencies_platform(hass):
    """Test we setup the dependencies of a platform.

    We're explicitly testing that we process dependencies even if a
    component with the same name has already been loaded.
    """
    mock_integration(
        hass, MockModule("test_component", dependencies=["test_component2"])
    )
    mock_integration(hass, MockModule("test_component2"))
    mock_entity_platform(hass, "test_domain.test_component", MockPlatform())
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    await component.async_setup({DOMAIN: {"platform": "test_component"}})
    # the platform's component, its dependency and the platform itself
    # are all registered
    assert "test_component" in hass.config.components
    assert "test_component2" in hass.config.components
    assert "test_domain.test_component" in hass.config.components
async def test_setup_entry(hass):
    """Test setup entry calls async_setup_entry on platform."""
    mock_setup_entry = Mock(return_value=mock_coro(True))
    mock_entity_platform(
        hass,
        "test_domain.entry_domain",
        MockPlatform(
            async_setup_entry=mock_setup_entry, scan_interval=timedelta(seconds=5)
        ),
    )
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entry = MockConfigEntry(domain="entry_domain")
    assert await component.async_setup_entry(entry)
    assert len(mock_setup_entry.mock_calls) == 1
    # async_setup_entry(hass, entry, async_add_entities)
    p_hass, p_entry, _ = mock_setup_entry.mock_calls[0][1]
    assert p_hass is hass
    assert p_entry is entry
    # reaches into the private platform registry to check the platform's
    # scan_interval was honoured
    assert component._platforms[entry.entry_id].scan_interval == timedelta(seconds=5)
async def test_setup_entry_platform_not_exist(hass):
    """Setting up a config entry for a missing platform reports failure."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    config_entry = MockConfigEntry(domain="non_existing")
    result = await component.async_setup_entry(config_entry)
    assert result is False
async def test_setup_entry_fails_duplicate(hass):
    """Test we don't allow setting up a config entry twice."""
    mock_setup_entry = Mock(return_value=mock_coro(True))
    mock_entity_platform(
        hass,
        "test_domain.entry_domain",
        MockPlatform(async_setup_entry=mock_setup_entry),
    )
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entry = MockConfigEntry(domain="entry_domain")
    assert await component.async_setup_entry(entry)
    # a second setup of the same entry is rejected outright
    with pytest.raises(ValueError):
        await component.async_setup_entry(entry)
async def test_unload_entry_resets_platform(hass):
    """Test unloading an entry removes all entities."""
    mock_setup_entry = Mock(return_value=mock_coro(True))
    mock_entity_platform(
        hass,
        "test_domain.entry_domain",
        MockPlatform(async_setup_entry=mock_setup_entry),
    )
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entry = MockConfigEntry(domain="entry_domain")
    assert await component.async_setup_entry(entry)
    assert len(mock_setup_entry.mock_calls) == 1
    # third positional argument passed to async_setup_entry is the
    # async_add_entities callback; use it to register one entity
    add_entities = mock_setup_entry.mock_calls[0][1][2]
    add_entities([MockEntity()])
    await hass.async_block_till_done()
    assert len(hass.states.async_entity_ids()) == 1
    # unloading the entry removes the entity's state
    assert await component.async_unload_entry(entry)
    assert len(hass.states.async_entity_ids()) == 0
async def test_unload_entry_fails_if_never_loaded(hass):
    """Unloading a config entry that was never set up raises ValueError."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entry = MockConfigEntry(domain="entry_domain")
    with pytest.raises(ValueError):
        await component.async_unload_entry(entry)
async def test_update_entity(hass):
    """Test that we can update an entity with the helper."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entity = MockEntity()
    entity.async_update_ha_state = Mock(return_value=mock_coro())
    await component.async_add_entities([entity])
    # Called as part of async_add_entities
    assert len(entity.async_update_ha_state.mock_calls) == 1
    await hass.helpers.entity_component.async_update_entity(entity.entity_id)
    assert len(entity.async_update_ha_state.mock_calls) == 2
    # the helper requests a forced refresh (first positional arg True)
    assert entity.async_update_ha_state.mock_calls[-1][1][0] is True
async def test_set_service_race(hass):
    """Test race condition on setting service.

    Two concurrent async_add_entities tasks must not raise inside the
    event loop; any loop-level exception is captured via the handler.
    """
    exception = False
    def async_loop_exception_handler(_, _2) -> None:
        """Handle all exception inside the core loop."""
        nonlocal exception
        exception = True
    hass.loop.set_exception_handler(async_loop_exception_handler)
    await async_setup_component(hass, "group", {})
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    # schedule two adds concurrently to provoke the race
    for _ in range(2):
        hass.async_create_task(component.async_add_entities([MockEntity()]))
    await hass.async_block_till_done()
    assert not exception
async def test_extract_all_omit_entity_id(hass, caplog):
    """Test extract all with None and *."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entities = [MockEntity(name="test_1"), MockEntity(name="test_2")]
    await component.async_add_entities(entities)
    service_call = ha.ServiceCall("test", "service")
    extracted = await component.async_extract_from_service(service_call)
    assert sorted(ent.entity_id for ent in extracted) == []
async def test_extract_all_use_match_all(hass, caplog):
    """Test extract all with None and *."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    await component.async_add_entities(
        [MockEntity(name="test_1"), MockEntity(name="test_2")]
    )
    # the literal "all" targets every registered entity
    call = ha.ServiceCall("test", "service", {"entity_id": "all"})
    assert ["test_domain.test_1", "test_domain.test_2"] == sorted(
        ent.entity_id for ent in await component.async_extract_from_service(call)
    )
    # using "all" explicitly must NOT emit the deprecation warning that
    # omitting the entity id does
    assert (
        "Not passing an entity ID to a service to target all entities is deprecated"
    ) not in caplog.text
|
"""The tests for the Entity component helper."""
# pylint: disable=protected-access
from collections import OrderedDict
from datetime import timedelta
import logging
from unittest.mock import Mock, patch
import asynctest
import pytest
from homeassistant.const import ENTITY_MATCH_ALL
import homeassistant.core as ha
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import discovery
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
MockEntity,
MockModule,
MockPlatform,
async_fire_time_changed,
mock_coro,
mock_entity_platform,
mock_integration,
)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "test_domain"
async def test_setup_loads_platforms(hass):
"""Test the loading of the platforms."""
component_setup = Mock(return_value=True)
platform_setup = Mock(return_value=None)
mock_integration(hass, MockModule("test_component", setup=component_setup))
# mock the dependencies
mock_integration(hass, MockModule("mod2", dependencies=["test_component"]))
mock_entity_platform(hass, "test_domain.mod2", MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
assert not component_setup.called
assert not platform_setup.called
component.setup({DOMAIN: {"platform": "mod2"}})
await hass.async_block_till_done()
assert component_setup.called
assert platform_setup.called
async def test_setup_recovers_when_setup_raises(hass):
"""Test the setup if exceptions are happening."""
platform1_setup = Mock(side_effect=Exception("Broken"))
platform2_setup = Mock(return_value=None)
mock_entity_platform(hass, "test_domain.mod1", MockPlatform(platform1_setup))
mock_entity_platform(hass, "test_domain.mod2", MockPlatform(platform2_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
assert not platform1_setup.called
assert not platform2_setup.called
component.setup(
OrderedDict(
[
(DOMAIN, {"platform": "mod1"}),
(f"{DOMAIN} 2", {"platform": "non_exist"}),
(f"{DOMAIN} 3", {"platform": "mod2"}),
]
)
)
await hass.async_block_till_done()
assert platform1_setup.called
assert platform2_setup.called
@asynctest.patch(
"homeassistant.helpers.entity_component.EntityComponent.async_setup_platform",
return_value=mock_coro(),
)
@asynctest.patch(
"homeassistant.setup.async_setup_component", return_value=mock_coro(True)
)
async def test_setup_does_discovery(mock_setup_component, mock_setup, hass):
"""Test setup for discovery."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
component.setup({})
discovery.load_platform(
hass, DOMAIN, "platform_test", {"msg": "discovery_info"}, {DOMAIN: {}}
)
await hass.async_block_till_done()
assert mock_setup.called
assert ("platform_test", {}, {"msg": "discovery_info"}) == mock_setup.call_args[0]
@asynctest.patch("homeassistant.helpers.entity_platform.async_track_time_interval")
async def test_set_scan_interval_via_config(mock_track, hass):
"""Test the setting of the scan interval via configuration."""
def platform_setup(hass, config, add_entities, discovery_info=None):
"""Test the platform setup."""
add_entities([MockEntity(should_poll=True)])
mock_entity_platform(hass, "test_domain.platform", MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
component.setup(
{DOMAIN: {"platform": "platform", "scan_interval": timedelta(seconds=30)}}
)
await hass.async_block_till_done()
assert mock_track.called
assert timedelta(seconds=30) == mock_track.call_args[0][2]
async def test_set_entity_namespace_via_config(hass):
"""Test setting an entity namespace."""
def platform_setup(hass, config, add_entities, discovery_info=None):
"""Test the platform setup."""
add_entities([MockEntity(name="beer"), MockEntity(name=None)])
platform = MockPlatform(platform_setup)
mock_entity_platform(hass, "test_domain.platform", platform)
component = EntityComponent(_LOGGER, DOMAIN, hass)
component.setup({DOMAIN: {"platform": "platform", "entity_namespace": "yummy"}})
await hass.async_block_till_done()
assert sorted(hass.states.async_entity_ids()) == [
"test_domain.yummy_beer",
"test_domain.yummy_unnamed_device",
]
async def test_extract_from_service_available_device(hass):
"""Test the extraction of entity from service and device is available."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities(
[
MockEntity(name="test_1"),
MockEntity(name="test_2", available=False),
MockEntity(name="test_3"),
MockEntity(name="test_4", available=False),
]
)
call_1 = ha.ServiceCall("test", "service", data={"entity_id": ENTITY_MATCH_ALL})
assert ["test_domain.test_1", "test_domain.test_3"] == sorted(
ent.entity_id for ent in (await component.async_extract_from_service(call_1))
)
call_2 = ha.ServiceCall(
"test",
"service",
data={"entity_id": ["test_domain.test_3", "test_domain.test_4"]},
)
assert ["test_domain.test_3"] == sorted(
ent.entity_id for ent in (await component.async_extract_from_service(call_2))
)
async def test_platform_not_ready(hass):
"""Test that we retry when platform not ready."""
platform1_setup = Mock(side_effect=[PlatformNotReady, PlatformNotReady, None])
mock_integration(hass, MockModule("mod1"))
mock_entity_platform(hass, "test_domain.mod1", MockPlatform(platform1_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_setup({DOMAIN: {"platform": "mod1"}})
assert len(platform1_setup.mock_calls) == 1
assert "test_domain.mod1" not in hass.config.components
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=utcnow):
# Should not trigger attempt 2
async_fire_time_changed(hass, utcnow + timedelta(seconds=29))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 1
# Should trigger attempt 2
async_fire_time_changed(hass, utcnow + timedelta(seconds=30))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 2
assert "test_domain.mod1" not in hass.config.components
# This should not trigger attempt 3
async_fire_time_changed(hass, utcnow + timedelta(seconds=59))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 2
# Trigger attempt 3, which succeeds
async_fire_time_changed(hass, utcnow + timedelta(seconds=60))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 3
assert "test_domain.mod1" in hass.config.components
async def test_extract_from_service_fails_if_no_entity_id(hass):
"""Test the extraction of everything from service."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities(
[MockEntity(name="test_1"), MockEntity(name="test_2")]
)
call = ha.ServiceCall("test", "service")
assert [] == sorted(
ent.entity_id for ent in (await component.async_extract_from_service(call))
)
async def test_extract_from_service_filter_out_non_existing_entities(hass):
"""Test the extraction of non existing entities from service."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities(
[MockEntity(name="test_1"), MockEntity(name="test_2")]
)
call = ha.ServiceCall(
"test",
"service",
{"entity_id": ["test_domain.test_2", "test_domain.non_exist"]},
)
assert ["test_domain.test_2"] == [
ent.entity_id for ent in await component.async_extract_from_service(call)
]
async def test_extract_from_service_no_group_expand(hass):
"""Test not expanding a group."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([MockEntity(entity_id="group.test_group")])
call = ha.ServiceCall("test", "service", {"entity_id": ["group.test_group"]})
extracted = await component.async_extract_from_service(call, expand_group=False)
assert len(extracted) == 1
assert extracted[0].entity_id == "group.test_group"
async def test_setup_dependencies_platform(hass):
"""Test we setup the dependencies of a platform.
We're explictely testing that we process dependencies even if a component
with the same name has already been loaded.
"""
mock_integration(
hass, MockModule("test_component", dependencies=["test_component2"])
)
mock_integration(hass, MockModule("test_component2"))
mock_entity_platform(hass, "test_domain.test_component", MockPlatform())
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_setup({DOMAIN: {"platform": "test_component"}})
assert "test_component" in hass.config.components
assert "test_component2" in hass.config.components
assert "test_domain.test_component" in hass.config.components
async def test_setup_entry(hass):
"""Test setup entry calls async_setup_entry on platform."""
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass,
"test_domain.entry_domain",
MockPlatform(
async_setup_entry=mock_setup_entry, scan_interval=timedelta(seconds=5)
),
)
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain="entry_domain")
assert await component.async_setup_entry(entry)
assert len(mock_setup_entry.mock_calls) == 1
p_hass, p_entry, _ = mock_setup_entry.mock_calls[0][1]
assert p_hass is hass
assert p_entry is entry
assert component._platforms[entry.entry_id].scan_interval == timedelta(seconds=5)
async def test_setup_entry_platform_not_exist(hass):
"""Test setup entry fails if platform doesnt exist."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain="non_existing")
assert (await component.async_setup_entry(entry)) is False
async def test_setup_entry_fails_duplicate(hass):
"""Test we don't allow setting up a config entry twice."""
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass,
"test_domain.entry_domain",
MockPlatform(async_setup_entry=mock_setup_entry),
)
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain="entry_domain")
assert await component.async_setup_entry(entry)
with pytest.raises(ValueError):
await component.async_setup_entry(entry)
async def test_unload_entry_resets_platform(hass):
"""Test unloading an entry removes all entities."""
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass,
"test_domain.entry_domain",
MockPlatform(async_setup_entry=mock_setup_entry),
)
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain="entry_domain")
assert await component.async_setup_entry(entry)
assert len(mock_setup_entry.mock_calls) == 1
add_entities = mock_setup_entry.mock_calls[0][1][2]
add_entities([MockEntity()])
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 1
assert await component.async_unload_entry(entry)
assert len(hass.states.async_entity_ids()) == 0
async def test_unload_entry_fails_if_never_loaded(hass):
"""."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain="entry_domain")
with pytest.raises(ValueError):
await component.async_unload_entry(entry)
async def test_update_entity(hass):
"""Test that we can update an entity with the helper."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entity = MockEntity()
entity.async_update_ha_state = Mock(return_value=mock_coro())
await component.async_add_entities([entity])
# Called as part of async_add_entities
assert len(entity.async_update_ha_state.mock_calls) == 1
await hass.helpers.entity_component.async_update_entity(entity.entity_id)
assert len(entity.async_update_ha_state.mock_calls) == 2
assert entity.async_update_ha_state.mock_calls[-1][1][0] is True
async def test_set_service_race(hass):
"""Test race condition on setting service."""
exception = False
def async_loop_exception_handler(_, _2) -> None:
"""Handle all exception inside the core loop."""
nonlocal exception
exception = True
hass.loop.set_exception_handler(async_loop_exception_handler)
await async_setup_component(hass, "group", {})
component = EntityComponent(_LOGGER, DOMAIN, hass)
for _ in range(2):
hass.async_create_task(component.async_add_entities([MockEntity()]))
await hass.async_block_till_done()
assert not exception
async def test_extract_all_omit_entity_id(hass, caplog):
"""Test extract all with None and *."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities(
[MockEntity(name="test_1"), MockEntity(name="test_2")]
)
call = ha.ServiceCall("test", "service")
assert [] == sorted(
ent.entity_id for ent in await component.async_extract_from_service(call)
)
async def test_extract_all_use_match_all(hass, caplog):
"""Test extract all with None and *."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities(
[MockEntity(name="test_1"), MockEntity(name="test_2")]
)
call = ha.ServiceCall("test", "service", {"entity_id": "all"})
assert ["test_domain.test_1", "test_domain.test_2"] == sorted(
ent.entity_id for ent in await component.async_extract_from_service(call)
)
assert (
"Not passing an entity ID to a service to target all entities is deprecated"
) not in caplog.text
|
en
| 0.85727
|
The tests for the Entity component helper. # pylint: disable=protected-access Test the loading of the platforms. # mock the dependencies Test the setup if exceptions are happening. Test setup for discovery. Test the setting of the scan interval via configuration. Test the platform setup. Test setting an entity namespace. Test the platform setup. Test the extraction of entity from service and device is available. Test that we retry when platform not ready. # Should not trigger attempt 2 # Should trigger attempt 2 # This should not trigger attempt 3 # Trigger attempt 3, which succeeds Test the extraction of everything from service. Test the extraction of non existing entities from service. Test not expanding a group. Test we setup the dependencies of a platform. We're explictely testing that we process dependencies even if a component with the same name has already been loaded. Test setup entry calls async_setup_entry on platform. Test setup entry fails if platform doesnt exist. Test we don't allow setting up a config entry twice. Test unloading an entry removes all entities. . Test that we can update an entity with the helper. # Called as part of async_add_entities Test race condition on setting service. Handle all exception inside the core loop. Test extract all with None and *. Test extract all with None and *.
| 2.212576
| 2
|
lib/galaxy/visualization/plugins/resource_parser.py
|
rikeshi/galaxy
| 4
|
6626965
|
<gh_stars>1-10
"""
Deserialize Galaxy resources (hdas, ldas, datasets, genomes, etc.) from
a dictionary of string data/ids (often from a query string).
"""
import json
import logging
import weakref
import galaxy.exceptions
import galaxy.util
from galaxy.managers import (
hdas as hda_manager,
visualizations as visualization_manager
)
from galaxy.util import bunch
log = logging.getLogger(__name__)
class ResourceParser:
"""
Given a parameter dictionary (often a converted query string) and a
configuration dictionary (curr. only VisualizationsRegistry uses this),
convert the entries in the parameter dictionary into resources (Galaxy
models, primitive types, lists of either, etc.) and return
in a new dictionary.
The keys used to store the new values can optionally be re-mapped to
new keys (e.g. dataset_id="NNN" -> hda=<HistoryDatasetAssociation>).
"""
primitive_parsers = {
'str' : lambda param: galaxy.util.sanitize_html.sanitize_html(param),
'bool' : lambda param: galaxy.util.string_as_bool(param),
'int' : int,
'float' : float,
# 'date' : lambda param: ,
'json' : (lambda param: json.loads(
galaxy.util.sanitize_html.sanitize_html(param))),
}
def __init__(self, app, *args, **kwargs):
self.app = weakref.ref(app)
self.managers = self._init_managers(app)
def _init_managers(self, app):
return bunch.Bunch(
visualization=visualization_manager.VisualizationManager(app),
hda=hda_manager.HDAManager(app)
)
def parse_parameter_dictionary(self, trans, param_config_dict, query_params, param_modifiers=None):
"""
Parse all expected params from the query dictionary `query_params`.
If param is required and not present, raises a `KeyError`.
"""
# log.debug( 'parse_parameter_dictionary, query_params:\n%s', query_params )
# parse the modifiers first since they modify the params coming next
# TODO: this is all really for hda_ldda - which we could replace with model polymorphism
params_that_modify_other_params = self.parse_parameter_modifiers(
trans, param_modifiers, query_params)
resources = {}
for param_name, param_config in param_config_dict.items():
# optionally rename the variable returned, defaulting to the original name
var_name_in_template = param_config.get('var_name_in_template', param_name)
# if the param is present, get its value, any param modifiers for that param, and parse it into a resource
# use try catch here and not caller to fall back on the default value or re-raise if required
resource = None
query_val = query_params.get(param_name, None)
if query_val is not None:
try:
target_param_modifiers = params_that_modify_other_params.get(param_name, None)
resource = self.parse_parameter(trans, param_config,
query_val, param_modifiers=target_param_modifiers)
except Exception as exception:
if trans.debug:
raise
else:
log.warning('Exception parsing visualization param from query: %s, %s, (%s) %s',
param_name, query_val, str(type(exception)), str(exception))
resource = None
# here - we've either had no value in the query_params or there was a failure to parse
# so: error if required, otherwise get a default (which itself defaults to None)
if resource is None:
if param_config['required']:
raise KeyError('required param %s not found in URL' % (param_name))
resource = self.parse_parameter_default(trans, param_config)
resources[var_name_in_template] = resource
return resources
def parse_config(self, trans, param_config_dict, query_params):
"""
Return `query_params` dict parsing only JSON serializable params.
Complex params such as models, etc. are left as the original query value.
Keys in `query_params` not found in the `param_config_dict` will not be
returned.
"""
# log.debug( 'parse_config, query_params:\n%s', query_params )
config = {}
for param_name, param_config in param_config_dict.items():
config_val = query_params.get(param_name, None)
if config_val is not None and param_config['type'] in self.primitive_parsers:
try:
config_val = self.parse_parameter(trans, param_config, config_val)
except Exception as exception:
log.warning('Exception parsing visualization param from query: ' +
'{}, {}, ({}) {}'.format(param_name, config_val, str(type(exception)), str(exception)))
config_val = None
# here - we've either had no value in the query_params or there was a failure to parse
# so: if there's a default and it's not None, add it to the config
if config_val is None:
if param_config.get('default', None) is None:
continue
config_val = self.parse_parameter_default(trans, param_config)
config[param_name] = config_val
return config
# TODO: I would LOVE to rip modifiers out completely
def parse_parameter_modifiers(self, trans, param_modifiers, query_params):
"""
Parse and return parameters that are meant to modify other parameters,
be grouped with them, or are needed to successfully parse other parameters.
"""
# only one level of modification - down that road lies madness
# parse the modifiers out of query_params first since they modify the other params coming next
parsed_modifiers = {}
if not param_modifiers:
return parsed_modifiers
# precondition: expects a two level dictionary
# { target_param_name -> { param_modifier_name -> { param_modifier_data }}}
for target_param_name, modifier_dict in param_modifiers.items():
parsed_modifiers[target_param_name] = target_modifiers = {}
for modifier_name, modifier_config in modifier_dict.items():
query_val = query_params.get(modifier_name, None)
if query_val is not None:
modifier = self.parse_parameter(trans, modifier_config, query_val)
target_modifiers[modifier_name] = modifier
else:
# TODO: required attr?
target_modifiers[modifier_name] = self.parse_parameter_default(trans, modifier_config)
return parsed_modifiers
def parse_parameter_default(self, trans, param_config):
"""
Parse any default values for the given param, defaulting the default
to `None`.
"""
# currently, *default* default is None, so this is quaranteed to be part of the dictionary
default = param_config['default']
# if default is None, do not attempt to parse it
if default is None:
return default
# otherwise, parse (currently param_config['default'] is a string just like query param and needs to be parsed)
# this saves us the trouble of parsing the default when the config file is read
# (and adding this code to the xml parser)
return self.parse_parameter(trans, param_config, default)
def parse_parameter(self, trans, expected_param_data, query_param,
recurse=True, param_modifiers=None):
"""
Use data in `expected_param_data` to parse `query_param` from a string into
a resource usable directly by a template.
"""
param_type = expected_param_data.get('type')
# constrain_to = expected_param_data.get( 'constrain_to' )
csv = expected_param_data.get('csv')
parsed_param = None
# handle recursion for csv values
if csv and recurse:
parsed_param = []
query_param_list = galaxy.util.listify(query_param)
for query_param in query_param_list:
parsed_param.append(self._parse_param(trans, expected_param_data, query_param, recurse=False))
return parsed_param
if param_type in self.primitive_parsers:
# TODO: what about param modifiers on primitives?
parsed_param = self.primitive_parsers[param_type](query_param)
# TODO: constrain_to: this gets complicated - remove?
# db models
elif param_type == 'visualization':
# ?: is this even used anymore/anywhere?
decoded_visualization_id = self._decode_id(query_param)
parsed_param = self.managers.visualization.get_accessible(decoded_visualization_id, trans.user)
elif param_type == 'dataset':
decoded_dataset_id = self._decode_id(query_param)
parsed_param = self.managers.hda.get_accessible(decoded_dataset_id, trans.user)
elif param_type == 'hda_or_ldda':
encoded_dataset_id = query_param
# needs info from another param...
hda_ldda = param_modifiers.get('hda_ldda')
if hda_ldda == 'hda':
decoded_dataset_id = self._decode_id(encoded_dataset_id)
parsed_param = self.managers.hda.get_accessible(decoded_dataset_id, trans.user)
else:
parsed_param = self.managers.ldda.get(trans, encoded_dataset_id)
# TODO: ideally this would check v. a list of valid dbkeys
elif param_type == 'dbkey':
dbkey = query_param
parsed_param = galaxy.util.sanitize_html.sanitize_html(dbkey)
return parsed_param
def _decode_id(self, id):
try:
return self.app().security.decode_id(str(id))
except (ValueError, TypeError):
raise galaxy.exceptions.MalformedId(
"Malformed id ( %s ) specified, unable to decode" % (str(id)),
id=str(id)
)
|
"""
Deserialize Galaxy resources (hdas, ldas, datasets, genomes, etc.) from
a dictionary of string data/ids (often from a query string).
"""
import json
import logging
import weakref
import galaxy.exceptions
import galaxy.util
from galaxy.managers import (
hdas as hda_manager,
visualizations as visualization_manager
)
from galaxy.util import bunch
log = logging.getLogger(__name__)
class ResourceParser:
"""
Given a parameter dictionary (often a converted query string) and a
configuration dictionary (curr. only VisualizationsRegistry uses this),
convert the entries in the parameter dictionary into resources (Galaxy
models, primitive types, lists of either, etc.) and return
in a new dictionary.
The keys used to store the new values can optionally be re-mapped to
new keys (e.g. dataset_id="NNN" -> hda=<HistoryDatasetAssociation>).
"""
primitive_parsers = {
'str' : lambda param: galaxy.util.sanitize_html.sanitize_html(param),
'bool' : lambda param: galaxy.util.string_as_bool(param),
'int' : int,
'float' : float,
# 'date' : lambda param: ,
'json' : (lambda param: json.loads(
galaxy.util.sanitize_html.sanitize_html(param))),
}
def __init__(self, app, *args, **kwargs):
self.app = weakref.ref(app)
self.managers = self._init_managers(app)
def _init_managers(self, app):
return bunch.Bunch(
visualization=visualization_manager.VisualizationManager(app),
hda=hda_manager.HDAManager(app)
)
def parse_parameter_dictionary(self, trans, param_config_dict, query_params, param_modifiers=None):
"""
Parse all expected params from the query dictionary `query_params`.
If param is required and not present, raises a `KeyError`.
"""
# log.debug( 'parse_parameter_dictionary, query_params:\n%s', query_params )
# parse the modifiers first since they modify the params coming next
# TODO: this is all really for hda_ldda - which we could replace with model polymorphism
params_that_modify_other_params = self.parse_parameter_modifiers(
trans, param_modifiers, query_params)
resources = {}
for param_name, param_config in param_config_dict.items():
# optionally rename the variable returned, defaulting to the original name
var_name_in_template = param_config.get('var_name_in_template', param_name)
# if the param is present, get its value, any param modifiers for that param, and parse it into a resource
# use try catch here and not caller to fall back on the default value or re-raise if required
resource = None
query_val = query_params.get(param_name, None)
if query_val is not None:
try:
target_param_modifiers = params_that_modify_other_params.get(param_name, None)
resource = self.parse_parameter(trans, param_config,
query_val, param_modifiers=target_param_modifiers)
except Exception as exception:
if trans.debug:
raise
else:
log.warning('Exception parsing visualization param from query: %s, %s, (%s) %s',
param_name, query_val, str(type(exception)), str(exception))
resource = None
# here - we've either had no value in the query_params or there was a failure to parse
# so: error if required, otherwise get a default (which itself defaults to None)
if resource is None:
if param_config['required']:
raise KeyError('required param %s not found in URL' % (param_name))
resource = self.parse_parameter_default(trans, param_config)
resources[var_name_in_template] = resource
return resources
def parse_config(self, trans, param_config_dict, query_params):
"""
Return `query_params` dict parsing only JSON serializable params.
Complex params such as models, etc. are left as the original query value.
Keys in `query_params` not found in the `param_config_dict` will not be
returned.
"""
# log.debug( 'parse_config, query_params:\n%s', query_params )
config = {}
for param_name, param_config in param_config_dict.items():
config_val = query_params.get(param_name, None)
if config_val is not None and param_config['type'] in self.primitive_parsers:
try:
config_val = self.parse_parameter(trans, param_config, config_val)
except Exception as exception:
log.warning('Exception parsing visualization param from query: ' +
'{}, {}, ({}) {}'.format(param_name, config_val, str(type(exception)), str(exception)))
config_val = None
# here - we've either had no value in the query_params or there was a failure to parse
# so: if there's a default and it's not None, add it to the config
if config_val is None:
if param_config.get('default', None) is None:
continue
config_val = self.parse_parameter_default(trans, param_config)
config[param_name] = config_val
return config
# TODO: I would LOVE to rip modifiers out completely
def parse_parameter_modifiers(self, trans, param_modifiers, query_params):
"""
Parse and return parameters that are meant to modify other parameters,
be grouped with them, or are needed to successfully parse other parameters.
"""
# only one level of modification - down that road lies madness
# parse the modifiers out of query_params first since they modify the other params coming next
parsed_modifiers = {}
if not param_modifiers:
return parsed_modifiers
# precondition: expects a two level dictionary
# { target_param_name -> { param_modifier_name -> { param_modifier_data }}}
for target_param_name, modifier_dict in param_modifiers.items():
parsed_modifiers[target_param_name] = target_modifiers = {}
for modifier_name, modifier_config in modifier_dict.items():
query_val = query_params.get(modifier_name, None)
if query_val is not None:
modifier = self.parse_parameter(trans, modifier_config, query_val)
target_modifiers[modifier_name] = modifier
else:
# TODO: required attr?
target_modifiers[modifier_name] = self.parse_parameter_default(trans, modifier_config)
return parsed_modifiers
def parse_parameter_default(self, trans, param_config):
"""
Parse any default values for the given param, defaulting the default
to `None`.
"""
# currently, *default* default is None, so this is quaranteed to be part of the dictionary
default = param_config['default']
# if default is None, do not attempt to parse it
if default is None:
return default
# otherwise, parse (currently param_config['default'] is a string just like query param and needs to be parsed)
# this saves us the trouble of parsing the default when the config file is read
# (and adding this code to the xml parser)
return self.parse_parameter(trans, param_config, default)
def parse_parameter(self, trans, expected_param_data, query_param,
recurse=True, param_modifiers=None):
"""
Use data in `expected_param_data` to parse `query_param` from a string into
a resource usable directly by a template.
"""
param_type = expected_param_data.get('type')
# constrain_to = expected_param_data.get( 'constrain_to' )
csv = expected_param_data.get('csv')
parsed_param = None
# handle recursion for csv values
if csv and recurse:
parsed_param = []
query_param_list = galaxy.util.listify(query_param)
for query_param in query_param_list:
parsed_param.append(self._parse_param(trans, expected_param_data, query_param, recurse=False))
return parsed_param
if param_type in self.primitive_parsers:
# TODO: what about param modifiers on primitives?
parsed_param = self.primitive_parsers[param_type](query_param)
# TODO: constrain_to: this gets complicated - remove?
# db models
elif param_type == 'visualization':
# ?: is this even used anymore/anywhere?
decoded_visualization_id = self._decode_id(query_param)
parsed_param = self.managers.visualization.get_accessible(decoded_visualization_id, trans.user)
elif param_type == 'dataset':
decoded_dataset_id = self._decode_id(query_param)
parsed_param = self.managers.hda.get_accessible(decoded_dataset_id, trans.user)
elif param_type == 'hda_or_ldda':
encoded_dataset_id = query_param
# needs info from another param...
hda_ldda = param_modifiers.get('hda_ldda')
if hda_ldda == 'hda':
decoded_dataset_id = self._decode_id(encoded_dataset_id)
parsed_param = self.managers.hda.get_accessible(decoded_dataset_id, trans.user)
else:
parsed_param = self.managers.ldda.get(trans, encoded_dataset_id)
# TODO: ideally this would check v. a list of valid dbkeys
elif param_type == 'dbkey':
dbkey = query_param
parsed_param = galaxy.util.sanitize_html.sanitize_html(dbkey)
return parsed_param
def _decode_id(self, id):
try:
return self.app().security.decode_id(str(id))
except (ValueError, TypeError):
raise galaxy.exceptions.MalformedId(
"Malformed id ( %s ) specified, unable to decode" % (str(id)),
id=str(id)
)
|
en
| 0.724391
|
Deserialize Galaxy resources (hdas, ldas, datasets, genomes, etc.) from a dictionary of string data/ids (often from a query string). Given a parameter dictionary (often a converted query string) and a configuration dictionary (curr. only VisualizationsRegistry uses this), convert the entries in the parameter dictionary into resources (Galaxy models, primitive types, lists of either, etc.) and return in a new dictionary. The keys used to store the new values can optionally be re-mapped to new keys (e.g. dataset_id="NNN" -> hda=<HistoryDatasetAssociation>). # 'date' : lambda param: , Parse all expected params from the query dictionary `query_params`. If param is required and not present, raises a `KeyError`. # log.debug( 'parse_parameter_dictionary, query_params:\n%s', query_params ) # parse the modifiers first since they modify the params coming next # TODO: this is all really for hda_ldda - which we could replace with model polymorphism # optionally rename the variable returned, defaulting to the original name # if the param is present, get its value, any param modifiers for that param, and parse it into a resource # use try catch here and not caller to fall back on the default value or re-raise if required # here - we've either had no value in the query_params or there was a failure to parse # so: error if required, otherwise get a default (which itself defaults to None) Return `query_params` dict parsing only JSON serializable params. Complex params such as models, etc. are left as the original query value. Keys in `query_params` not found in the `param_config_dict` will not be returned. 
# log.debug( 'parse_config, query_params:\n%s', query_params ) # here - we've either had no value in the query_params or there was a failure to parse # so: if there's a default and it's not None, add it to the config # TODO: I would LOVE to rip modifiers out completely Parse and return parameters that are meant to modify other parameters, be grouped with them, or are needed to successfully parse other parameters. # only one level of modification - down that road lies madness # parse the modifiers out of query_params first since they modify the other params coming next # precondition: expects a two level dictionary # { target_param_name -> { param_modifier_name -> { param_modifier_data }}} # TODO: required attr? Parse any default values for the given param, defaulting the default to `None`. # currently, *default* default is None, so this is quaranteed to be part of the dictionary # if default is None, do not attempt to parse it # otherwise, parse (currently param_config['default'] is a string just like query param and needs to be parsed) # this saves us the trouble of parsing the default when the config file is read # (and adding this code to the xml parser) Use data in `expected_param_data` to parse `query_param` from a string into a resource usable directly by a template. # constrain_to = expected_param_data.get( 'constrain_to' ) # handle recursion for csv values # TODO: what about param modifiers on primitives? # TODO: constrain_to: this gets complicated - remove? # db models # ?: is this even used anymore/anywhere? # needs info from another param... # TODO: ideally this would check v. a list of valid dbkeys
| 2.849273
| 3
|
convert/convert_nest_flax.py
|
wenh18/OnDeviceNAS
| 1
|
6626966
|
<reponame>wenh18/OnDeviceNAS<filename>convert/convert_nest_flax.py
"""
Convert weights from https://github.com/google-research/nested-transformer
NOTE: You'll need https://github.com/google/CommonLoopUtils, not included in requirements.txt
"""
import sys
import numpy as np
import torch
from clu import checkpoint
arch_depths = {
'nest_base': [2, 2, 20],
'nest_small': [2, 2, 20],
'nest_tiny': [2, 2, 8],
}
def convert_nest(checkpoint_path, arch):
"""
Expects path to checkpoint which is a dir containing 4 files like in each of these folders
- https://console.cloud.google.com/storage/browser/gresearch/nest-checkpoints
`arch` is needed to
Returns a state dict that can be used with `torch.nn.Module.load_state_dict`
Hint: Follow mytimm.models.nest.Nest.__init__ and
https://github.com/google-research/nested-transformer/blob/main/models/nest_net.py
"""
assert arch in ['nest_base', 'nest_small', 'nest_tiny'], "Your `arch` is not supported"
flax_dict = checkpoint.load_state_dict(checkpoint_path)['optimizer']['target']
state_dict = {}
# Patch embedding
state_dict['patch_embed.proj.weight'] = torch.tensor(
flax_dict['PatchEmbedding_0']['Conv_0']['kernel']).permute(3, 2, 0, 1)
state_dict['patch_embed.proj.bias'] = torch.tensor(flax_dict['PatchEmbedding_0']['Conv_0']['bias'])
# Positional embeddings
posemb_keys = [k for k in flax_dict.keys() if k.startswith('PositionEmbedding')]
for i, k in enumerate(posemb_keys):
state_dict[f'levels.{i}.pos_embed'] = torch.tensor(flax_dict[k]['pos_embedding'])
# Transformer encoders
depths = arch_depths[arch]
for level in range(len(depths)):
for layer in range(depths[level]):
global_layer_ix = sum(depths[:level]) + layer
# Norms
for i in range(2):
state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.weight'] = torch.tensor(
flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['scale'])
state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.bias'] = torch.tensor(
flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['bias'])
# Attention qkv
w_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['kernel']
w_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['kernel']
# Pay attention to dims here (maybe get pen and paper)
w_kv = np.concatenate(np.split(w_kv, 2, -1), 1)
w_qkv = np.concatenate([w_q, w_kv], 1)
state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.weight'] = torch.tensor(w_qkv).flatten(1).permute(1,0)
b_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['bias']
b_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['bias']
# Pay attention to dims here (maybe get pen and paper)
b_kv = np.concatenate(np.split(b_kv, 2, -1), 0)
b_qkv = np.concatenate([b_q, b_kv], 0)
state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.bias'] = torch.tensor(b_qkv).reshape(-1)
# Attention proj
w_proj = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['proj_kernel']
w_proj = torch.tensor(w_proj).permute(2, 1, 0).flatten(1)
state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.weight'] = w_proj
state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.bias'] = torch.tensor(
flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['bias'])
# MLP
for i in range(2):
state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.weight'] = torch.tensor(
flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['kernel']).permute(1, 0)
state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.bias'] = torch.tensor(
flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['bias'])
# Block aggregations (ConvPool)
for level in range(1, len(depths)):
# Convs
state_dict[f'levels.{level}.pool.conv.weight'] = torch.tensor(
flax_dict[f'ConvPool_{level-1}']['Conv_0']['kernel']).permute(3, 2, 0, 1)
state_dict[f'levels.{level}.pool.conv.bias'] = torch.tensor(
flax_dict[f'ConvPool_{level-1}']['Conv_0']['bias'])
# Norms
state_dict[f'levels.{level}.pool.norm.weight'] = torch.tensor(
flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['scale'])
state_dict[f'levels.{level}.pool.norm.bias'] = torch.tensor(
flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['bias'])
# Final norm
state_dict[f'norm.weight'] = torch.tensor(flax_dict['LayerNorm_0']['scale'])
state_dict[f'norm.bias'] = torch.tensor(flax_dict['LayerNorm_0']['bias'])
# Classifier
state_dict['head.weight'] = torch.tensor(flax_dict['Dense_0']['kernel']).permute(1, 0)
state_dict['head.bias'] = torch.tensor(flax_dict['Dense_0']['bias'])
return state_dict
if __name__ == '__main__':
variant = sys.argv[1] # base, small, or tiny
state_dict = convert_nest(f'./nest-{variant[0]}_imagenet', f'nest_{variant}')
torch.save(state_dict, f'./jx_nest_{variant}.pth')
|
"""
Convert weights from https://github.com/google-research/nested-transformer
NOTE: You'll need https://github.com/google/CommonLoopUtils, not included in requirements.txt
"""
import sys
import numpy as np
import torch
from clu import checkpoint
arch_depths = {
'nest_base': [2, 2, 20],
'nest_small': [2, 2, 20],
'nest_tiny': [2, 2, 8],
}
def convert_nest(checkpoint_path, arch):
"""
Expects path to checkpoint which is a dir containing 4 files like in each of these folders
- https://console.cloud.google.com/storage/browser/gresearch/nest-checkpoints
`arch` is needed to
Returns a state dict that can be used with `torch.nn.Module.load_state_dict`
Hint: Follow mytimm.models.nest.Nest.__init__ and
https://github.com/google-research/nested-transformer/blob/main/models/nest_net.py
"""
assert arch in ['nest_base', 'nest_small', 'nest_tiny'], "Your `arch` is not supported"
flax_dict = checkpoint.load_state_dict(checkpoint_path)['optimizer']['target']
state_dict = {}
# Patch embedding
state_dict['patch_embed.proj.weight'] = torch.tensor(
flax_dict['PatchEmbedding_0']['Conv_0']['kernel']).permute(3, 2, 0, 1)
state_dict['patch_embed.proj.bias'] = torch.tensor(flax_dict['PatchEmbedding_0']['Conv_0']['bias'])
# Positional embeddings
posemb_keys = [k for k in flax_dict.keys() if k.startswith('PositionEmbedding')]
for i, k in enumerate(posemb_keys):
state_dict[f'levels.{i}.pos_embed'] = torch.tensor(flax_dict[k]['pos_embedding'])
# Transformer encoders
depths = arch_depths[arch]
for level in range(len(depths)):
for layer in range(depths[level]):
global_layer_ix = sum(depths[:level]) + layer
# Norms
for i in range(2):
state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.weight'] = torch.tensor(
flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['scale'])
state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.bias'] = torch.tensor(
flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['bias'])
# Attention qkv
w_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['kernel']
w_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['kernel']
# Pay attention to dims here (maybe get pen and paper)
w_kv = np.concatenate(np.split(w_kv, 2, -1), 1)
w_qkv = np.concatenate([w_q, w_kv], 1)
state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.weight'] = torch.tensor(w_qkv).flatten(1).permute(1,0)
b_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['bias']
b_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['bias']
# Pay attention to dims here (maybe get pen and paper)
b_kv = np.concatenate(np.split(b_kv, 2, -1), 0)
b_qkv = np.concatenate([b_q, b_kv], 0)
state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.bias'] = torch.tensor(b_qkv).reshape(-1)
# Attention proj
w_proj = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['proj_kernel']
w_proj = torch.tensor(w_proj).permute(2, 1, 0).flatten(1)
state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.weight'] = w_proj
state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.bias'] = torch.tensor(
flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['bias'])
# MLP
for i in range(2):
state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.weight'] = torch.tensor(
flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['kernel']).permute(1, 0)
state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.bias'] = torch.tensor(
flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['bias'])
# Block aggregations (ConvPool)
for level in range(1, len(depths)):
# Convs
state_dict[f'levels.{level}.pool.conv.weight'] = torch.tensor(
flax_dict[f'ConvPool_{level-1}']['Conv_0']['kernel']).permute(3, 2, 0, 1)
state_dict[f'levels.{level}.pool.conv.bias'] = torch.tensor(
flax_dict[f'ConvPool_{level-1}']['Conv_0']['bias'])
# Norms
state_dict[f'levels.{level}.pool.norm.weight'] = torch.tensor(
flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['scale'])
state_dict[f'levels.{level}.pool.norm.bias'] = torch.tensor(
flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['bias'])
# Final norm
state_dict[f'norm.weight'] = torch.tensor(flax_dict['LayerNorm_0']['scale'])
state_dict[f'norm.bias'] = torch.tensor(flax_dict['LayerNorm_0']['bias'])
# Classifier
state_dict['head.weight'] = torch.tensor(flax_dict['Dense_0']['kernel']).permute(1, 0)
state_dict['head.bias'] = torch.tensor(flax_dict['Dense_0']['bias'])
return state_dict
if __name__ == '__main__':
variant = sys.argv[1] # base, small, or tiny
state_dict = convert_nest(f'./nest-{variant[0]}_imagenet', f'nest_{variant}')
torch.save(state_dict, f'./jx_nest_{variant}.pth')
|
en
| 0.799573
|
Convert weights from https://github.com/google-research/nested-transformer NOTE: You'll need https://github.com/google/CommonLoopUtils, not included in requirements.txt Expects path to checkpoint which is a dir containing 4 files like in each of these folders - https://console.cloud.google.com/storage/browser/gresearch/nest-checkpoints `arch` is needed to Returns a state dict that can be used with `torch.nn.Module.load_state_dict` Hint: Follow mytimm.models.nest.Nest.__init__ and https://github.com/google-research/nested-transformer/blob/main/models/nest_net.py # Patch embedding # Positional embeddings # Transformer encoders # Norms # Attention qkv # Pay attention to dims here (maybe get pen and paper) # Pay attention to dims here (maybe get pen and paper) # Attention proj # MLP # Block aggregations (ConvPool) # Convs # Norms # Final norm # Classifier # base, small, or tiny
| 2.310481
| 2
|
TorchMLP/mlp.py
|
euirim/fundraising-propensity
| 0
|
6626967
|
import torch
from torch.utils.data import IterableDataset, DataLoader
import csv
import torch.nn.functional as F
LOG_FILE = 'logs.txt'
FILENAME = 'full_dataset_bert_title_doc2vec_story.csv'
NUM_CATEGORIES = 19
IGNORE_COLS = ['url', 'story', 'title', 'first_cover_image', 'story_images']
Y_COLS = ['raised']
NUM_TEST = 30000
LIMIT = -1
BATCH_SIZE = 200
DEVICE = 'cuda'
LEARNING_RATE=0.01
HIDDEN_DIMS = [300, 300, 200, 100, 50, 25, 10]
class FundraisingDataset(IterableDataset):
    """Streaming CSV dataset.

    `skip` drops that many data rows up front (used to reserve a test split);
    `limit` caps how many rows a single pass yields (-1 means unlimited).
    """

    def __init__(self, file_name, skip=0, limit=-1):
        self.skipped = skip
        self.limit = limit
        self.n = 0  # rows yielded in the current pass
        self.csv_file = open(file_name, 'r')
        self.csv_reader = csv.reader(self.csv_file, delimiter=',')
        self.row0 = next(self.csv_reader)  # header row (column names)
        self.category_index = self.row0.index('category')
        self.num_cols = len(self.row0)
        self._skip_init()

    def _skip_init(self):
        # Consume the reserved leading rows (the header was already read).
        for _ in range(self.skipped):
            next(self.csv_reader)

    def __iter__(self):
        # NOTE(review): this resets the yield counter but not the file
        # position; a second iteration continues from where the previous one
        # stopped — confirm intended (rewind() restarts from the top).
        self.n = 0
        return self

    def __next__(self):
        if self.n != self.limit:
            self.n += 1
            return next(self.csv_reader)  # raises StopIteration at EOF
        else:
            raise StopIteration

    def rewind(self):
        # Restart from the beginning: re-skip the header and reserved rows.
        self.csv_file.seek(0)
        next(self.csv_reader)
        self._skip_init()
# Training split starts after the rows reserved for the test set.
train_dataset = FundraisingDataset(FILENAME, skip=NUM_TEST)
# Feature width: all columns minus ignored/label columns, minus the raw
# category column, plus its one-hot expansion.
EXPECTED_X_COLS = len(train_dataset.row0) - len(IGNORE_COLS) - 1 + NUM_CATEGORIES - len(Y_COLS)
EXPECTED_Y_COLS = len(Y_COLS)
device = torch.device(DEVICE)
# Unbuffered binary append log — all writes below must pass bytes (.encode()).
log_file = open(LOG_FILE, 'ab+', buffering=0)
def generate_data(dataset, sample, batch_size):
    """Turn one DataLoader batch into (x, y) float tensors on `device`.

    `sample` is a list with one entry per CSV column; each entry holds the
    batch of values for that column. Returns None for malformed batches
    (wrong column count, unparsable values, short columns) so the caller
    can skip them; raises RuntimeError on an internal width mismatch.
    """
    # Column count does not depend on batch_index — check once, up front
    # (was re-checked on every batch iteration).
    if len(sample) != dataset.num_cols:
        return None
    torch_x, torch_y = [], []
    for batch_index in range(batch_size):
        curr_x, curr_y = [], []
        for index, data in enumerate(sample):
            col_name = dataset.row0[index]
            if col_name in IGNORE_COLS:
                continue
            try:
                if col_name in Y_COLS:
                    curr_y.append(float(data[batch_index]))
                elif index == dataset.category_index:
                    # One-hot encode the integer category id.
                    category_onehot = [0. for _ in range(NUM_CATEGORIES)]
                    category_onehot[int(data[batch_index])] = 1.
                    curr_x.extend(category_onehot)
                else:
                    curr_x.append(float(data[batch_index]))
            except (ValueError, TypeError, IndexError):
                # Unparsable value or short column: drop the whole batch.
                # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
                return None
        if len(curr_x) != EXPECTED_X_COLS:
            raise RuntimeError('Expected {} data cols, found {} cols'.format(EXPECTED_X_COLS, len(curr_x)))
        if len(curr_y) != EXPECTED_Y_COLS:
            raise RuntimeError('Expected {} result cols, found {} cols'.format(EXPECTED_Y_COLS, len(curr_y)))
        torch_x.append(curr_x)
        torch_y.append(curr_y)
    x = torch.FloatTensor(torch_x).to(device)
    y = torch.FloatTensor(torch_y).to(device)
    return x, y
log_file.write('Generating {} test samples.\n'.format(NUM_TEST).encode())
# Materialize the held-out test set once (batch size 1) so every periodic
# evaluation below reuses the same tensors.
test_dataset = FundraisingDataset(FILENAME, limit=NUM_TEST)
test_data = []
for test_sample in DataLoader(test_dataset, batch_size=1):
    data = generate_data(test_dataset, test_sample, 1)
    if data is None:
        # Unlike training, a malformed test row is a hard error.
        raise RuntimeError('Expected valid test data.')
    x, y = data
    test_data.append((x, y))
log_file.write('Generated test data.\n'.encode())
class MLP(torch.nn.Module):
    """Fully-connected regression network: input -> hidden stack -> output.

    `hidden_dims` lists the hidden-layer widths; None or empty falls back
    to a single hidden layer of width 100. ReLU after every layer except
    the output.
    """

    def __init__(self, hidden_dims=None):
        super(MLP, self).__init__()
        dims = list(hidden_dims) if hidden_dims else [100]
        self.input_layer = torch.nn.Linear(EXPECTED_X_COLS, dims[0])
        self.hidden_layers = torch.nn.ModuleList(
            torch.nn.Linear(n_in, n_out) for n_in, n_out in zip(dims, dims[1:])
        )
        self.output_layer = torch.nn.Linear(dims[-1], EXPECTED_Y_COLS)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        out = self.relu(self.input_layer(x))
        for hidden in self.hidden_layers:
            out = self.relu(hidden(out))
        return self.output_layer(out)
# Build model, optimizer and loss, then run one pass over the training split,
# evaluating on the full held-out set every 10 iterations.
model = MLP(hidden_dims=HIDDEN_DIMS).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
loss_func = torch.nn.MSELoss()
log_file.write('Built model\n'.encode())
it = 0
for train_sample in DataLoader(train_dataset, batch_size=BATCH_SIZE):
    model.train()
    data = generate_data(train_dataset, train_sample, BATCH_SIZE)
    if data is None:
        continue  # skip malformed batches
    x, y = data
    pred = model(x)
    train_loss = loss_func(pred, y)
    optimizer.zero_grad()
    train_loss.backward()
    optimizer.step()
    train_loss = train_loss.item()
    if it % 10 == 0:
        # Periodic evaluation; losses are summed (not averaged) over the set.
        model.eval()
        test_loss = 0.0
        abs_loss = 0.0
        with torch.no_grad():  # no autograd graph needed for evaluation
            for x, y in test_data:
                pred_test = model(x)
                test_loss += loss_func(pred_test, y).item()
                abs_loss += F.l1_loss(pred_test, y).item()
        log_file.write('Iteration: {}, MSE train loss: {}, MSE test loss: {}, Absolute test loss: {}\n'.format(
            it, train_loss, test_loss, abs_loss).encode())
    it += 1
# BUG FIX: the log file is opened in binary mode, so str writes raise
# TypeError — encode like every other write above.
log_file.write('Finished.\n'.encode())
|
import torch
from torch.utils.data import IterableDataset, DataLoader
import csv
import torch.nn.functional as F
LOG_FILE = 'logs.txt'
FILENAME = 'full_dataset_bert_title_doc2vec_story.csv'
NUM_CATEGORIES = 19
IGNORE_COLS = ['url', 'story', 'title', 'first_cover_image', 'story_images']
Y_COLS = ['raised']
NUM_TEST = 30000
LIMIT = -1
BATCH_SIZE = 200
DEVICE = 'cuda'
LEARNING_RATE=0.01
HIDDEN_DIMS = [300, 300, 200, 100, 50, 25, 10]
class FundraisingDataset(IterableDataset):
def __init__(self, file_name, skip=0, limit=-1):
self.skipped = skip
self.limit = limit
self.n = 0
self.csv_file = open(file_name, 'r')
self.csv_reader = csv.reader(self.csv_file, delimiter=',')
self.row0 = next(self.csv_reader)
self.category_index = self.row0.index('category')
self.num_cols = len(self.row0)
self._skip_init()
def _skip_init(self):
for _ in range(self.skipped):
next(self.csv_reader)
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n != self.limit:
self.n += 1
return next(self.csv_reader)
else:
raise StopIteration
def rewind(self):
self.csv_file.seek(0)
next(self.csv_reader)
self._skip_init()
train_dataset = FundraisingDataset(FILENAME, skip=NUM_TEST)
EXPECTED_X_COLS = len(train_dataset.row0) - len(IGNORE_COLS) - 1 + NUM_CATEGORIES - len(Y_COLS)
EXPECTED_Y_COLS = len(Y_COLS)
device = torch.device(DEVICE)
log_file = open(LOG_FILE, 'ab+', buffering=0)
def generate_data(dataset, sample, batch_size):
torch_x, torch_y = [], []
for batch_index in range(batch_size):
if len(sample) != dataset.num_cols:
return None
curr_x, curr_y = [], []
for index, data in enumerate(sample):
col_name = dataset.row0[index]
if col_name in IGNORE_COLS:
continue
try:
if col_name in Y_COLS:
curr_y.append(float(data[batch_index]))
elif index == dataset.category_index:
category_onehot = [0. for _ in range(NUM_CATEGORIES)]
category_onehot[int(data[batch_index])] = 1.
curr_x.extend(category_onehot)
else:
curr_x.append(float(data[batch_index]))
except:
return None
if len(curr_x) != EXPECTED_X_COLS:
raise RuntimeError('Expected {} data cols, found {} cols'.format(EXPECTED_X_COLS, len(curr_x)))
if len(curr_y) != EXPECTED_Y_COLS:
raise RuntimeError('Expected {} result cols, found {} cols'.format(EXPECTED_Y_COLS, len(curr_y)))
torch_x.append(curr_x)
torch_y.append(curr_y)
x = torch.FloatTensor(torch_x).to(device)
y = torch.FloatTensor(torch_y).to(device)
return x, y
log_file.write('Generating {} test samples.\n'.format(NUM_TEST).encode())
test_dataset = FundraisingDataset(FILENAME, limit=NUM_TEST)
test_data = []
for test_sample in DataLoader(test_dataset, batch_size=1):
data = generate_data(test_dataset, test_sample, 1)
if data is None:
raise RuntimeError('Expected valid test data.')
x, y = data
test_data.append((x, y))
log_file.write('Generated test data.\n'.encode())
class MLP(torch.nn.Module):
def __init__(self, hidden_dims=None):
super(MLP, self).__init__()
if hidden_dims is None or len(hidden_dims) == 0:
hidden_dims = [100]
self.input_layer = torch.nn.Linear(EXPECTED_X_COLS, hidden_dims[0])
self.hidden_layers = torch.nn.ModuleList()
for curr in range(1, len(hidden_dims)):
self.hidden_layers.append(torch.nn.Linear(hidden_dims[curr - 1], hidden_dims[curr]))
self.output_layer = torch.nn.Linear(hidden_dims[-1], EXPECTED_Y_COLS)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.input_layer(x)
x = self.relu(x)
for layer in self.hidden_layers:
x = layer(x)
x = self.relu(x)
return self.output_layer(x)
model = MLP(hidden_dims=HIDDEN_DIMS).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
loss_func = torch.nn.MSELoss()
log_file.write('Built model\n'.encode())
it = 0
for train_sample in DataLoader(train_dataset, batch_size=BATCH_SIZE):
model.train()
data = generate_data(train_dataset, train_sample, BATCH_SIZE)
if data is None:
continue
x, y = data
pred = model(x)
train_loss = loss_func(pred, y)
optimizer.zero_grad()
train_loss.backward()
optimizer.step()
train_loss = train_loss.item()
if it % 10 == 0:
model.eval()
test_loss = 0.0
abs_loss = 0.0
for x, y in test_data:
pred_test = model(x)
test_loss += loss_func(pred_test, y).item()
abs_loss += F.l1_loss(pred_test, y).item()
log_file.write('Iteration: {}, MSE train loss: {}, MSE test loss: {}, Absolute test loss: {}\n'.format(
it, train_loss, test_loss, abs_loss).encode())
it += 1
log_file.write('Finished.\n')
|
none
| 1
| 2.849447
| 3
|
|
tests/unit/test_availability_check.py
|
tbicr/sites-availability-checker
| 0
|
6626968
|
from datetime import datetime
import httpx
import pytest
from service.utils import fetch, regexp_check
@pytest.mark.asyncio
async def test_fetch__ok(httpx_mock):
    """A 200 response yields url/timestamp/status/duration plus the body bytes."""
    httpx_mock.add_response(status_code=200, data=b"ok")
    async with httpx.AsyncClient() as client:
        check_result, content = await fetch(client, "http://test.com")
    assert check_result.url == "http://test.com"
    assert check_result.created_at > datetime(2020, 12, 12)
    assert check_result.status_code == 200
    assert check_result.duration > 0
    assert content == b"ok"
@pytest.mark.asyncio
async def test_fetch__raise_timeout(httpx_mock):
    """A transport timeout is swallowed by fetch: status_code and content come
    back None, while url, timestamp and elapsed duration are still recorded."""
    def raise_timeout(request, ext):
        raise httpx.ReadTimeout("timeout", request=request)
    httpx_mock.add_callback(raise_timeout)
    async with httpx.AsyncClient() as client:
        check_result, content = await fetch(client, "http://test.com")
    assert check_result.url == "http://test.com"
    assert check_result.created_at > datetime(2020, 12, 12)
    assert check_result.status_code is None
    assert check_result.duration > 0
    assert content is None
def test_regexp__pattern_found():
    # Pattern present in the content -> True.
    assert regexp_check("a", b"aaa") is True
def test_regexp__pattern_not_found():
    # Pattern absent from the content -> False.
    assert regexp_check("b", b"aaa") is False
def test_regexp__invalid_pattern():
    # A malformed regex is treated as "no match" rather than raising.
    assert regexp_check("(b", b"aaa") is False
def test_regexp__no_pattern():
    # No pattern configured -> the check is skipped (None).
    assert regexp_check(None, b"aaa") is None
def test_regexp__no_content():
    # No content (e.g. failed fetch) -> the check result is unknown (None).
    assert regexp_check("a", None) is None
|
from datetime import datetime
import httpx
import pytest
from service.utils import fetch, regexp_check
@pytest.mark.asyncio
async def test_fetch__ok(httpx_mock):
httpx_mock.add_response(status_code=200, data=b"ok")
async with httpx.AsyncClient() as client:
check_result, content = await fetch(client, "http://test.com")
assert check_result.url == "http://test.com"
assert check_result.created_at > datetime(2020, 12, 12)
assert check_result.status_code == 200
assert check_result.duration > 0
assert content == b"ok"
@pytest.mark.asyncio
async def test_fetch__raise_timeout(httpx_mock):
def raise_timeout(request, ext):
raise httpx.ReadTimeout("timeout", request=request)
httpx_mock.add_callback(raise_timeout)
async with httpx.AsyncClient() as client:
check_result, content = await fetch(client, "http://test.com")
assert check_result.url == "http://test.com"
assert check_result.created_at > datetime(2020, 12, 12)
assert check_result.status_code is None
assert check_result.duration > 0
assert content is None
def test_regexp__pattern_found():
assert regexp_check("a", b"aaa") is True
def test_regexp__pattern_not_found():
assert regexp_check("b", b"aaa") is False
def test_regexp__invalid_pattern():
assert regexp_check("(b", b"aaa") is False
def test_regexp__no_pattern():
assert regexp_check(None, b"aaa") is None
def test_regexp__no_content():
assert regexp_check("a", None) is None
|
none
| 1
| 2.425895
| 2
|
|
gnsstools/galileo/e5aq.py
|
wumouyan/GNSS-SDR-Python
| 68
|
6626969
|
<filename>gnsstools/galileo/e5aq.py
# Galileo E5a-Q code construction
#
# Copyright 2014 <NAME>
import numpy as np
chip_rate = 10230000
code_length = 10230
# secondary code table from Table 19 (page 18) of Galileo ICD SIS (2014)
# index is PRN
secondary_code = {
1: '83F6F69D8F6E15411FB8C9B1C', 2: '66558BD3CE0C7792E83350525',
3: '59A025A9C1AF0651B779A8381', 4: 'D3A32640782F7B18E4DF754B7',
5: 'B91FCAD7760C218FA59348A93', 6: 'BAC77E933A779140F094FBF98',
7: '537785DE280927C6B58BA6776', 8: 'EFCAB4B65F38531ECA22257E2',
9: '79F8CAE838475EA5584BEFC9B', 10: 'CA5170FEA3A810EC606B66494',
11: '1FC32410652A2C49BD845E567', 12: 'FE0A9A7AFDAC44E42CB95D261',
13: 'B03062DC2B71995D5AD8B7DBE', 14: 'F6C398993F598E2DF4235D3D5',
15: '1BB2FB8B5BF24395C2EF3C5A1', 16: '2F920687D238CC7046EF6AFC9',
17: '34163886FC4ED7F2A92EFDBB8', 18: '66A872CE47833FB2DFD5625AD',
19: '99D5A70162C920A4BB9DE1CA8', 20: '81D71BD6E069A7ACCBEDC66CA',
21: 'A654524074A9E6780DB9D3EC6', 22: 'C3396A101BEDAF623CFC5BB37',
23: 'C3D4AB211DF36F2111F2141CD', 24: '3DFF25EAE761739265AF145C1',
25: '994909E0757D70CDE389102B5', 26: 'B938535522D119F40C25FDAEC',
27: 'C71AB549C0491537026B390B7', 28: '0CDB8C9E7B53F55F5B0A0597B',
29: '61C5FA252F1AF81144766494F', 30: '626027778FD3C6BB4BAA7A59D',
31: 'E745412FF53DEBD03F1C9A633', 32: '3592AC083F3175FA724639098',
33: '52284D941C3DCAF2721DDB1FD', 34: '73B3D8F0AD55DF4FE814ED890',
35: '94BF16C83BD7462F6498E0282', 36: 'A8C3DE1AC668089B0B45B3579',
37: 'E23FFC2DD2C14388AD8D6BEC8', 38: 'F2AC871CDF89DDC06B5960D2B',
39: '06191EC1F622A77A526868BA1', 40: '22D6E2A768E5F35FFC8E01796',
41: '25310A06675EB271F2A09EA1D', 42: '9F7993C621D4BEC81A0535703',
43: 'D62999EACF1C99083C0B4A417', 44: 'F665A7EA441BAA4EA0D01078C',
45: '46F3D3043F24CDEABD6F79543', 46: 'E2E3E8254616BD96CEFCA651A',
47: 'E548231A82F9A01A19DB5E1B2', 48: '265C7F90A16F49EDE2AA706C8',
49: '364A3A9EB0F0481DA0199D7EA', 50: '9810A7A898961263A0F749F56'
}
# parse the hex string, return a 100-bit secondary-code array
def secondary_seq(s):
    # Each hex digit carries four code bits, most-significant bit first;
    # a code bit b is mapped to the signal value 1 - 2b (0 -> +1, 1 -> -1).
    out = np.zeros(100)
    for i in range(100):
        nibble = int(s[i // 4], 16)
        b = (nibble >> (3 - i % 4)) & 1
        out[i] = 1.0 - 2.0 * b
    return out
# transform the secondary-code table entries to sequences
# (replaces each hex string in-place with a length-100 array of +/-1 values)
for i in range(1,51):
    secondary_code[i] = secondary_seq(secondary_code[i])
# initial-state table from Table 16 (page 15) of Galileo ICD SIS (2014)
# index is PRN
e5aq_init = {
1: 0o25652, 2: 0o05142, 3: 0o24723, 4: 0o31751,
5: 0o27366, 6: 0o24660, 7: 0o33655, 8: 0o27450,
9: 0o07626, 10: 0o01705, 11: 0o12717, 12: 0o32122,
13: 0o16075, 14: 0o16644, 15: 0o37556, 16: 0o02477,
17: 0o02265, 18: 0o06430, 19: 0o25046, 20: 0o12735,
21: 0o04262, 22: 0o11230, 23: 0o00037, 24: 0o06137,
25: 0o04312, 26: 0o20606, 27: 0o11162, 28: 0o22252,
29: 0o30533, 30: 0o24614, 31: 0o07767, 32: 0o32705,
33: 0o05052, 34: 0o27553, 35: 0o03711, 36: 0o02041,
37: 0o34775, 38: 0o05274, 39: 0o37356, 40: 0o16205,
41: 0o36270, 42: 0o06600, 43: 0o26773, 44: 0o17375,
45: 0o35267, 46: 0o36255, 47: 0o12044, 48: 0o26442,
49: 0o21621, 50: 0o25411
}
def e5aq_reg1_shift(x):
    # One LFSR step for base register 1: the feedback bit (XOR of stages
    # 14, 8, 6 and 1) enters at the front; the last stage is dropped.
    feedback = x[13] ^ x[7] ^ x[5] ^ x[0]
    return [feedback] + x[:13]
def e5aq_reg2_shift(x):
    # One LFSR step for base register 2: feedback is the XOR of stages
    # 14, 12, 8, 7, 5 and 4; the last stage is dropped.
    feedback = x[13] ^ x[11] ^ x[7] ^ x[6] ^ x[4] ^ x[3]
    return [feedback] + x[:13]
def make_e5aq_reg1():
    # One full period of base register 1, seeded with all ones; each output
    # chip is the last stage (x[13]) sampled before the shift.
    x = [1,1,1,1,1,1,1,1,1,1,1,1,1,1]
    y = np.zeros(code_length)
    for i in range(code_length):
        y[i] = x[13]
        x = e5aq_reg1_shift(x)
    return y
def make_e5aq_reg2(start):
    # One full period of base register 2, seeded with the PRN-specific
    # 14-bit initial state `start`; output taken from the last stage.
    x = start
    y = np.zeros(code_length)
    for i in range(code_length):
        y[i] = x[13]
        x = e5aq_reg2_shift(x)
    return y
r1 = make_e5aq_reg1()
def seq(a):
    # Unpack a 14-bit initial-state integer into a bit list, LSB first.
    return [(a >> i) & 1 for i in range(14)]
codes = {}
def make_e5aq(prn):
    # Primary code: XOR of the fixed register-1 sequence with register 2
    # seeded from the PRN-specific initial state (ICD Table 16).
    start = seq(e5aq_init[prn])
    r2 = make_e5aq_reg2(start)
    return np.logical_xor(r1,r2)
def e5aq_code(prn):
    # Memoized lookup: build the primary code on first request for this PRN.
    try:
        return codes[prn]
    except KeyError:
        return codes.setdefault(prn, make_e5aq(prn))
def code(prn,chips,frac,incr,n):
    # Sample n chips of the PRN's primary code starting at chip offset
    # (chips + frac), advancing incr chips per output sample; code bits
    # {0,1} are mapped to signal values {+1.0,-1.0}.
    c = e5aq_code(prn)
    idx = (chips%code_length) + frac + incr*np.arange(n)
    idx = np.floor(idx).astype('int')
    idx = np.mod(idx,code_length)
    x = c[idx]
    return 1.0 - 2.0*x
try:
    from numba import jit
except:
    # numba is optional: fall back to a no-op decorator so correlate() still
    # runs (as plain Python) when numba is not installed.
    def jit(**kwargs):
        return lambda x: x
@jit(nopython=True)
def correlate(x,prn,chips,frac,incr,c):
    # Correlate samples x against code table c (one period, bits 0/1 mapped
    # to +/-1 on the fly), starting at chip phase chips+frac and advancing
    # incr chips per sample. Returns the complex correlation sum.
    n = len(x)
    p = 0.0j
    cp = (chips+frac)%code_length
    for i in range(n):
        p += x[i]*(1.0-2.0*c[int(cp)])
        cp = (cp+incr)%code_length
    return p
# test vectors in Galileo ICD
if __name__=='__main__':
    # Print the first 24 chips of PRNs 1-4 for comparison with the ICD tables.
    for prn in [1,2,3,4]:
        x = e5aq_code(prn)
        print(x[0:24].astype('int'))
|
<filename>gnsstools/galileo/e5aq.py
# Galileo E5a-Q code construction
#
# Copyright 2014 <NAME>
import numpy as np
chip_rate = 10230000
code_length = 10230
# secondary code table from Table 19 (page 18) of Galileo ICD SIS (2014)
# index is PRN
secondary_code = {
1: '83F6F69D8F6E15411FB8C9B1C', 2: '66558BD3CE0C7792E83350525',
3: '59A025A9C1AF0651B779A8381', 4: 'D3A32640782F7B18E4DF754B7',
5: 'B91FCAD7760C218FA59348A93', 6: 'BAC77E933A779140F094FBF98',
7: '537785DE280927C6B58BA6776', 8: 'EFCAB4B65F38531ECA22257E2',
9: '79F8CAE838475EA5584BEFC9B', 10: 'CA5170FEA3A810EC606B66494',
11: '1FC32410652A2C49BD845E567', 12: 'FE0A9A7AFDAC44E42CB95D261',
13: 'B03062DC2B71995D5AD8B7DBE', 14: 'F6C398993F598E2DF4235D3D5',
15: '1BB2FB8B5BF24395C2EF3C5A1', 16: '2F920687D238CC7046EF6AFC9',
17: '34163886FC4ED7F2A92EFDBB8', 18: '66A872CE47833FB2DFD5625AD',
19: '99D5A70162C920A4BB9DE1CA8', 20: '81D71BD6E069A7ACCBEDC66CA',
21: 'A654524074A9E6780DB9D3EC6', 22: 'C3396A101BEDAF623CFC5BB37',
23: 'C3D4AB211DF36F2111F2141CD', 24: '3DFF25EAE761739265AF145C1',
25: '994909E0757D70CDE389102B5', 26: 'B938535522D119F40C25FDAEC',
27: 'C71AB549C0491537026B390B7', 28: '0CDB8C9E7B53F55F5B0A0597B',
29: '61C5FA252F1AF81144766494F', 30: '626027778FD3C6BB4BAA7A59D',
31: 'E745412FF53DEBD03F1C9A633', 32: '3592AC083F3175FA724639098',
33: '52284D941C3DCAF2721DDB1FD', 34: '73B3D8F0AD55DF4FE814ED890',
35: '94BF16C83BD7462F6498E0282', 36: 'A8C3DE1AC668089B0B45B3579',
37: 'E23FFC2DD2C14388AD8D6BEC8', 38: 'F2AC871CDF89DDC06B5960D2B',
39: '06191EC1F622A77A526868BA1', 40: '22D6E2A768E5F35FFC8E01796',
41: '25310A06675EB271F2A09EA1D', 42: '9F7993C621D4BEC81A0535703',
43: 'D62999EACF1C99083C0B4A417', 44: 'F665A7EA441BAA4EA0D01078C',
45: '46F3D3043F24CDEABD6F79543', 46: 'E2E3E8254616BD96CEFCA651A',
47: 'E548231A82F9A01A19DB5E1B2', 48: '265C7F90A16F49EDE2AA706C8',
49: '364A3A9EB0F0481DA0199D7EA', 50: '9810A7A898961263A0F749F56'
}
# parse the hex string, return a 100-bit secondary-code array
def secondary_seq(s):
x = np.zeros(100)
for i in range(100):
nib = i//4
bit = 3-(i%4)
x[i] = (int(s[nib],16)>>bit)&1
x[i] = 1.0 - 2.0*x[i]
return x
# transform the secondary-code table entries to sequences
for i in range(1,51):
secondary_code[i] = secondary_seq(secondary_code[i])
# initial-state table from Table 16 (page 15) of Galileo ICD SIS (2014)
# index is PRN
e5aq_init = {
1: 0o25652, 2: 0o05142, 3: 0o24723, 4: 0o31751,
5: 0o27366, 6: 0o24660, 7: 0o33655, 8: 0o27450,
9: 0o07626, 10: 0o01705, 11: 0o12717, 12: 0o32122,
13: 0o16075, 14: 0o16644, 15: 0o37556, 16: 0o02477,
17: 0o02265, 18: 0o06430, 19: 0o25046, 20: 0o12735,
21: 0o04262, 22: 0o11230, 23: 0o00037, 24: 0o06137,
25: 0o04312, 26: 0o20606, 27: 0o11162, 28: 0o22252,
29: 0o30533, 30: 0o24614, 31: 0o07767, 32: 0o32705,
33: 0o05052, 34: 0o27553, 35: 0o03711, 36: 0o02041,
37: 0o34775, 38: 0o05274, 39: 0o37356, 40: 0o16205,
41: 0o36270, 42: 0o06600, 43: 0o26773, 44: 0o17375,
45: 0o35267, 46: 0o36255, 47: 0o12044, 48: 0o26442,
49: 0o21621, 50: 0o25411
}
def e5aq_reg1_shift(x):
return [x[13]^x[7]^x[5]^x[0]] + x[0:13]
def e5aq_reg2_shift(x):
return [x[13]^x[11]^x[7]^x[6]^x[4]^x[3]] + x[0:13]
def make_e5aq_reg1():
x = [1,1,1,1,1,1,1,1,1,1,1,1,1,1]
y = np.zeros(code_length)
for i in range(code_length):
y[i] = x[13]
x = e5aq_reg1_shift(x)
return y
def make_e5aq_reg2(start):
x = start
y = np.zeros(code_length)
for i in range(code_length):
y[i] = x[13]
x = e5aq_reg2_shift(x)
return y
r1 = make_e5aq_reg1()
def seq(a):
s = []
for i in range(14):
s = s + [(a>>i)&1]
return s
codes = {}
def make_e5aq(prn):
start = seq(e5aq_init[prn])
r2 = make_e5aq_reg2(start)
return np.logical_xor(r1,r2)
def e5aq_code(prn):
if prn not in codes:
codes[prn] = make_e5aq(prn)
return codes[prn]
def code(prn,chips,frac,incr,n):
c = e5aq_code(prn)
idx = (chips%code_length) + frac + incr*np.arange(n)
idx = np.floor(idx).astype('int')
idx = np.mod(idx,code_length)
x = c[idx]
return 1.0 - 2.0*x
try:
from numba import jit
except:
def jit(**kwargs):
return lambda x: x
@jit(nopython=True)
def correlate(x,prn,chips,frac,incr,c):
n = len(x)
p = 0.0j
cp = (chips+frac)%code_length
for i in range(n):
p += x[i]*(1.0-2.0*c[int(cp)])
cp = (cp+incr)%code_length
return p
# test vectors in Galileo ICD
if __name__=='__main__':
for prn in [1,2,3,4]:
x = e5aq_code(prn)
print(x[0:24].astype('int'))
|
en
| 0.611792
|
# Galileo E5a-Q code construction # # Copyright 2014 <NAME> # secondary code table from Table 19 (page 18) of Galileo ICD SIS (2014) # index is PRN # parse the hex string, return a 100-bit secondary-code array # transform the secondary-code table entries to sequences # initial-state table from Table 16 (page 15) of Galileo ICD SIS (2014) # index is PRN # test vectors in Galileo ICD
| 1.924432
| 2
|
solutions/100_solution_05.py
|
UFResearchComputing/py4ai
| 0
|
6626970
|
sum = 0
data = [1,2,2,5]
cumulative = []
for number in data:
sum += number
cumulative.append(sum)
print(cumulative)
|
sum = 0
data = [1,2,2,5]
cumulative = []
for number in data:
sum += number
cumulative.append(sum)
print(cumulative)
|
none
| 1
| 3.61978
| 4
|
|
auxiliary/aux_m/test_problems_info.py
|
OpenSourceEconomics/ose-scientific-computing-course-wirecard
| 0
|
6626971
|
<filename>auxiliary/aux_m/test_problems_info.py<gh_stars>0
from numpy import *
import pandas as pd
import random
import nlopt
import numpy as np
import matplotlib.pyplot as plt
import numbers
import math
import random
import autograd.numpy as ag
from autograd import grad
from mpl_toolkits.mplot3d import Axes3D
from numpy.lib.function_base import vectorize
from autograd import value_and_grad
np.set_printoptions(precision=20)
pd.set_option("display.precision", 14)
#### this file contains classes which store general information about a test problem as for example optimum, domain, upper bound
#### lower bound, function value of optimum, name of test problem
class griewank_info:
    """General information for the Griewank test problem.

    dim: problem dimensionality; a: scaling parameter of the quadratic term.
    Exposes domain, lower_bound, upper_bound, name, the known solver
    (global minimum) and the function value at that solver.
    """

    def __init__(self, dim, a):
        self.domain = ([-100] * dim, [100] * dim)
        self.lower_bound = [-100] * dim
        self.upper_bound = [100] * dim
        self.name = 'Griewank Function'
        self.solver = np.array([0] * dim)

        def function_value(x, a):
            # shifted Griewank: (1/a) * x.x - prod_i cos(x_i / sqrt(i)) + 2
            v = np.array(x)
            quad = (1 / a) * np.dot(v, v)
            osc = np.prod(np.cos(v / np.sqrt(np.arange(1, v.size + 1, 1))))
            return quad - osc + 2

        self.solver_function_value = function_value(self.solver, a)
class rastrigin_info:  ##### Stores the general information for a Rastrigin function
    def __init__(self,dim,A):  ### arguments are the number of dimensions of the problem and the parameter A
        domain=([-5.12]*dim,[5.12]*dim)
        self.domain=domain  ### domain of the function
        self.lower_bound=([-5.12]*dim)  ### the lower bound of the function
        self.upper_bound=([5.12]*dim)  ### the upper bound of the function
        name= '<NAME>'
        self.name=name
        problem_solver=np.array([0]*dim)
        self.solver=problem_solver  ### the known solution to the problem
        def function_value(x,A):
            # shifted Rastrigin: A*n + sum(x^2 - A*cos(2*pi*x)) + 1
            input = np.array(x)
            n = input.size
            out = A * n + np.sum(input * input - A * np.cos(2 * np.pi * input)) + 1
            return out
        self.solver_function_value=function_value(problem_solver,A)  ### function value at the known solution
class levi_info:  ##### Stores the general information for a Levi function
    def __init__(self,dim):  ### argument is the number of dimensions of the problem
        domain=([-10]*dim,[10]*dim)
        self.domain=domain  ### domain of the function
        self.lower_bound=([-10]*dim)  ### the lower bound of the function
        self.upper_bound=([10]*dim)  ### the upper bound of the function
        name= 'Levi'
        self.name=name
        problem_solver=np.array([1]*dim)
        self.solver=problem_solver  ### the known solution to the problem
        def function_value(x):
            x = np.asarray_chkfinite(x)
            n = len(x)  # NOTE(review): unused — kept as-is
            z = 1 + (x - 1) / 4
            return (np.sin( np.pi * z[0] )**2 + np.sum( (z[:-1] - 1)**2 * (1 + 10 * np.sin( np.pi * z[:-1] + 1 )**2 ))
            + (z[-1] - 1)**2 * (1 + np.sin( 2 * np.pi * z[-1] )**2 ))
        self.solver_function_value=function_value(problem_solver)  ### function value at the known solution
class rosenbrock_info:
    """General information for the Rosenbrock test problem.

    dim: problem dimensionality. Exposes domain, bounds, name, the known
    solver (all ones) and the function value at that solver.
    """

    def __init__(self, dim):
        self.domain = ([-100] * dim, [100] * dim)
        self.lower_bound = [-100] * dim
        self.upper_bound = [100] * dim
        self.name = 'Rosenbrock'
        self.solver = np.array([1] * dim)

        def function_value(x):
            v = np.array(x)
            if v.size <= 1:
                return 0  # degenerate 0/1-dimensional case
            # shifted Rosenbrock: classic sum + 1
            return np.sum(100 * (v[1:] - v[:-1] ** 2) ** 2 + (1 - v[:-1]) ** 2) + 1

        self.solver_function_value = function_value(self.solver)
class bukin_6_info:  ##### Stores the general information for the Bukin No. 6 function
    def __init__(self,dim):
        dim=2  ### problem is only defined in 2 dimensions; the dim argument is ignored
        # NOTE(review): domain here lists per-coordinate (low, high) pairs,
        # unlike the (lowers, uppers) convention used by the classes above —
        # confirm consumers handle both layouts.
        domain=([-15,-5],[-3,3])
        self.domain=domain  ### domain of the function
        self.lower_bound=([-15,-3])  ### the lower bound of the function
        self.upper_bound=([-5,3])  ### the upper bound of the function
        name= 'Bukin_6'
        self.name=name
        problem_solver=np.array([-10,1])
        self.solver=problem_solver  ### the known solution to the problem
        def function_value(x):
            return [100 * np.sqrt(np.abs(x[1] - 0.01 * x[0] ** 2)) + 0.01 * np.abs(x[0] + 10)]
        self.solver_function_value=function_value(problem_solver)  ### function value at the known solution
class camel_3_info:  ##### Stores the general information for the three-hump camel function
    def __init__(self,dim):
        dim=2  ### problem is only defined in 2 dimensions; the dim argument is ignored
        domain=([-5]*dim,[5]*dim)
        self.domain=domain  ### domain of the function
        self.lower_bound=([-5]*dim)  ### the lower bound of the function
        self.upper_bound=([5]*dim)  ### the upper bound of the function
        name= 'camel_3'
        self.name=name
        problem_solver=np.array([0,0])
        self.solver=problem_solver  ### the known solution to the problem
        def function_value(x):
            return [2 * x[0] ** 2 - 1.05 * x[0] ** 4 + x[0] ** 6 / 6 + np.prod(x) + x[1] ** 2]
        self.solver_function_value=function_value(problem_solver)  ### function value at the known solution
class easom_info:  ##### Stores the general information for the Easom function
    def __init__(self,dim):
        dim=2  ### problem is only defined in 2 dimensions; the dim argument is ignored
        domain=([-100]*dim,[100]*dim)
        self.domain=domain  ### domain of the function
        self.lower_bound=([-100]*dim)  ### the lower bound of the function
        self.upper_bound=([100]*dim)  ### the upper bound of the function
        name= 'Easom'
        self.name=name
        problem_solver=np.array([np.pi,np.pi])
        self.solver=problem_solver  ### the known solution to the problem
        def function_value(x):
            return [(-np.cos(x[0]) * np.cos(x[1]) * np.exp(-((x[0] - np.pi) ** 2 + (x[1] - np.pi) ** 2)))]
        self.solver_function_value=function_value(problem_solver)  ### function value at the known solution
class mc_cormick_info:  ##### Stores the general information for the McCormick function
    def __init__(self,dim):
        dim=2  ### problem is only defined in 2 dimensions; the dim argument is ignored
        # NOTE(review): per-coordinate (low, high) pairs again — see bukin_6_info.
        domain=([-1.5,4],[-3,4])
        self.domain=domain  ### domain of the function
        self.lower_bound=([-1.5,-3])  ### the lower bound of the function
        self.upper_bound=([4,4])  ### the upper bound of the function
        name= 'Mc_Cormick'
        self.name=name
        problem_solver=np.array([-0.54719,-1.54719])
        self.solver=problem_solver  ### the known solution to the problem
        def function_value(x):
            return [np.sin(np.sum(x)) + (x[0] - x[1]) ** 2 - 1.5 * x[0] + 2.5 * x[1] + 1]
        self.solver_function_value=function_value(problem_solver)  ### function value at the known solution
class markowitz_info:  ##### Stores the general information for the Markowitz portfolio problem
    def __init__(self,dim,B):
        dim=3  ### problem is fixed to 3 assets; the dim argument is ignored
        # NOTE(review): per-coordinate (low, high) pairs, each asset in [0, B].
        domain=([0,B],[0,B],[0,B])
        self.domain=domain  ### domain of the function
        self.lower_bound=([0,0,0])  ### the lower bound of the function
        self.upper_bound=([B,B,B])  ### the upper bound of the function
        name= 'Markowitz'
        self.name=name
        problem_solver=np.array([0.4411,0.3656,0.1933])
        self.solver=problem_solver
        # Hard-coded reference objective value (no function_value defined here).
        self.solver_function_value=np.array([0.0052820694790865])
class ackley_info:  ##### Stores the general information for an Ackley function
    def __init__(self,dim):  ### argument is the number of dimensions of the problem
        domain=([-32.768]*dim,[32.768]*dim)
        self.domain=domain  ### domain of the function
        self.lower_bound=([-32.768]*dim)  ### the lower bound of the function
        self.upper_bound=([32.768]*dim)  ### the upper bound of the function
        name= '<NAME>'
        self.name=name
        problem_solver=np.array([0]*dim)
        self.solver=problem_solver  ### the known solution to the problem
        def function_value(x):
            # Ackley function with standard parameters a=20, b=0.2, c=2*pi.
            a=20
            b=0.2
            c=2*np.pi
            x = np.asarray_chkfinite(x)
            n = len(x)
            s1 = np.sum( x**2 )
            # Use np.cos explicitly; the original relied on the bare `cos`
            # pulled in by `from numpy import *`, inconsistent with the rest
            # of this file which always qualifies with np.
            s2 = np.sum( np.cos( c * x ))
            return -a*np.exp( -b*np.sqrt( s1 / n )) - np.exp( s2 / n ) + a + np.exp(1)
        self.solver_function_value=function_value(problem_solver)  ### function value at the known solution
class schwefel_info:  ##### Stores the general information for a Schwefel function
    def __init__(self,dim):  ### argument is the number of dimensions of the problem
        domain=([-500]*dim,[500]*dim)
        self.domain=domain  ### domain of the function
        self.lower_bound=([-500]*dim)  ### the lower bound of the function
        self.upper_bound=([500]*dim)  ### the upper bound of the function
        name= 'Schwefel Function'
        self.name=name
        problem_solver=np.array([420.968746]*dim)
        self.solver=problem_solver  ### the known solution to the problem
        def function_value(x):
            # Schwefel: 418.9829*n - sum(x * sin(sqrt(|x|)))
            x = np.asarray_chkfinite(x)
            n = len(x)
            return 418.9829*n - np.sum( x * np.sin( np.sqrt( np.abs( x ))))
        self.solver_function_value=function_value(problem_solver)  ### function value at the known solution
class zakharov_info:  ##### Stores the general information for a Zakharov function
    def __init__(self,dim):  ### argument is the number of dimensions of the problem
        domain=([-50]*dim,[50]*dim)
        self.domain=domain  ### domain of the function
        self.lower_bound=([-50]*dim)  ### the lower bound of the function
        self.upper_bound=([50]*dim)  ### the upper bound of the function
        name= '<NAME>'
        self.name=name
        problem_solver=np.array([0]*dim)
        self.solver=problem_solver  ### the known solution to the problem
        def function_value(x):
            # Zakharov: sum(x^2) + (sum(j*x_j)/2)^2 + (sum(j*x_j)/2)^4
            x = np.asarray_chkfinite(x)
            n = len(x)
            j = np.arange( 1., n+1 )
            s2 = np.sum( j * x ) / 2
            return np.sum( x**2 ) + s2**2 + s2**4
        self.solver_function_value=function_value(problem_solver)  ### function value at the known solution
|
<filename>auxiliary/aux_m/test_problems_info.py<gh_stars>0
from numpy import *
import pandas as pd
import random
import nlopt
import numpy as np
import matplotlib.pyplot as plt
import numbers
import math
import random
import autograd.numpy as ag
from autograd import grad
from mpl_toolkits.mplot3d import Axes3D
from numpy.lib.function_base import vectorize
from autograd import value_and_grad
np.set_printoptions(precision=20)
pd.set_option("display.precision", 14)
#### this file contains classes which store general information about a test problem as for example optimum, domain, upper bound
#### lower bound, function value of optimum, name of test problem
class griewank_info:
    """Static metadata for the Griewank benchmark function.

    Stores the search domain, bounds, display name, the known global
    minimizer and its objective value for a ``dim``-dimensional instance
    with scale parameter ``a``.
    """

    def __init__(self, dim, a):
        # Hyper-cubic search region [-100, 100]^dim.
        self.domain = ([-100] * dim, [100] * dim)
        self.lower_bound = [-100] * dim
        self.upper_bound = [100] * dim
        self.name = 'Griewank Function'
        # Global minimum at the origin.
        optimum = np.array([0] * dim)
        self.solver = optimum

        def evaluate(point, scale):
            vec = np.array(point)
            quadratic = (1 / scale) * np.dot(vec, vec)
            oscillation = np.prod(np.cos(vec / np.sqrt(np.arange(1, vec.size + 1, 1))))
            # Shifted by +2 so the minimum value is 1 rather than 0.
            return quadratic - oscillation + 2

        self.solver_function_value = evaluate(optimum, a)
class rastrigin_info:
    """Static metadata for the Rastrigin benchmark function.

    Stores the search domain, bounds, display name, the known global
    minimizer and its objective value for a ``dim``-dimensional instance
    with amplitude parameter ``A``.
    """

    def __init__(self, dim, A):
        # Hyper-cubic search region [-5.12, 5.12]^dim.
        self.domain = ([-5.12] * dim, [5.12] * dim)
        self.lower_bound = [-5.12] * dim
        self.upper_bound = [5.12] * dim
        self.name = '<NAME>'
        # Global minimum at the origin.
        optimum = np.array([0] * dim)
        self.solver = optimum

        def evaluate(point, amplitude):
            vec = np.array(point)
            # Shifted by +1 so the minimum value is 1 rather than 0.
            return amplitude * vec.size + np.sum(vec * vec - amplitude * np.cos(2 * np.pi * vec)) + 1

        self.solver_function_value = evaluate(optimum, A)
class levi_info:
    """Static metadata for the Levi benchmark function.

    Stores the search domain, bounds, display name, the known global
    minimizer and its objective value for a ``dim``-dimensional instance.
    """

    def __init__(self, dim):
        # Hyper-cubic search region [-10, 10]^dim.
        self.domain = ([-10] * dim, [10] * dim)
        self.lower_bound = [-10] * dim
        self.upper_bound = [10] * dim
        self.name = 'Levi'
        # Global minimum at the all-ones point.
        optimum = np.array([1] * dim)
        self.solver = optimum

        def evaluate(point):
            vec = np.asarray_chkfinite(point)
            z = 1 + (vec - 1) / 4
            head = np.sin(np.pi * z[0]) ** 2
            middle = np.sum((z[:-1] - 1) ** 2 * (1 + 10 * np.sin(np.pi * z[:-1] + 1) ** 2))
            tail = (z[-1] - 1) ** 2 * (1 + np.sin(2 * np.pi * z[-1]) ** 2)
            return head + middle + tail

        # Objective value at the known minimizer (numerically ~0).
        self.solver_function_value = evaluate(optimum)
class rosenbrock_info:
    """Static metadata for the Rosenbrock benchmark function.

    Stores the search domain, bounds, display name, the known global
    minimizer and its objective value for a ``dim``-dimensional instance.
    """

    def __init__(self, dim):
        # Hyper-cubic search region [-100, 100]^dim.
        self.domain = ([-100] * dim, [100] * dim)
        self.lower_bound = [-100] * dim
        self.upper_bound = [100] * dim
        self.name = 'Rosenbrock'
        # Global minimum at the all-ones point.
        optimum = np.array([1] * dim)
        self.solver = optimum

        def evaluate(point):
            vec = np.array(point)
            if vec.size <= 1:
                # Degenerate 0-/1-dimensional case.
                return 0
            # Shifted by +1 so the minimum value is 1 rather than 0.
            return np.sum(100 * (vec[1:] - vec[:-1] ** 2) ** 2 + (1 - vec[:-1]) ** 2) + 1

        self.solver_function_value = evaluate(optimum)
class bukin_6_info:
    """Static metadata for the Bukin N.6 benchmark function (2-D only)."""

    def __init__(self, dim):
        # Bukin N.6 is defined only in two dimensions; ``dim`` is ignored
        # (the original signature is preserved for uniformity).
        self.domain = ([-15, -5], [-3, 3])
        self.lower_bound = [-15, -3]
        self.upper_bound = [-5, 3]
        self.name = 'Bukin_6'
        # Known global minimizer.
        optimum = np.array([-10, 1])
        self.solver = optimum

        def evaluate(point):
            # Returned as a one-element list, matching the original API.
            return [100 * np.sqrt(np.abs(point[1] - 0.01 * point[0] ** 2)) + 0.01 * np.abs(point[0] + 10)]

        self.solver_function_value = evaluate(optimum)
class camel_3_info:
    """Static metadata for the three-hump camel benchmark function (2-D only)."""

    def __init__(self, dim):
        # The three-hump camel is defined only in two dimensions; ``dim``
        # is ignored (the original signature is preserved for uniformity).
        self.domain = ([-5, -5], [5, 5])
        self.lower_bound = [-5, -5]
        self.upper_bound = [5, 5]
        self.name = 'camel_3'
        # Global minimum at the origin.
        optimum = np.array([0, 0])
        self.solver = optimum

        def evaluate(point):
            # Returned as a one-element list, matching the original API.
            return [2 * point[0] ** 2 - 1.05 * point[0] ** 4 + point[0] ** 6 / 6 + np.prod(point) + point[1] ** 2]

        self.solver_function_value = evaluate(optimum)
class easom_info:
    """Static metadata for the Easom benchmark function (2-D only)."""

    def __init__(self, dim):
        # The Easom function is defined only in two dimensions; ``dim``
        # is ignored (the original signature is preserved for uniformity).
        self.domain = ([-100, -100], [100, 100])
        self.lower_bound = [-100, -100]
        self.upper_bound = [100, 100]
        self.name = 'Easom'
        # Global minimum at (pi, pi) with value -1.
        optimum = np.array([np.pi, np.pi])
        self.solver = optimum

        def evaluate(point):
            # Returned as a one-element list, matching the original API.
            return [(-np.cos(point[0]) * np.cos(point[1]) * np.exp(-((point[0] - np.pi) ** 2 + (point[1] - np.pi) ** 2)))]

        self.solver_function_value = evaluate(optimum)
class mc_cormick_info:
    """Static metadata for the McCormick benchmark function (2-D only)."""

    def __init__(self, dim):
        # McCormick is defined only in two dimensions; ``dim`` is ignored
        # (the original signature is preserved for uniformity).
        self.domain = ([-1.5, 4], [-3, 4])
        self.lower_bound = [-1.5, -3]
        self.upper_bound = [4, 4]
        self.name = 'Mc_Cormick'
        # Known global minimizer (rounded to 5 decimal places).
        optimum = np.array([-0.54719, -1.54719])
        self.solver = optimum

        def evaluate(point):
            # Returned as a one-element list, matching the original API.
            return [np.sin(np.sum(point)) + (point[0] - point[1]) ** 2 - 1.5 * point[0] + 2.5 * point[1] + 1]

        self.solver_function_value = evaluate(optimum)
class markowitz_info:
    """Static metadata for a 3-asset Markowitz portfolio test problem."""

    def __init__(self, dim, B):
        # The problem is fixed at three assets; ``dim`` is ignored
        # (the original signature is preserved for uniformity).
        # Each weight is bounded by the budget-like parameter ``B``.
        self.domain = ([0, B], [0, B], [0, B])
        self.lower_bound = [0, 0, 0]
        self.upper_bound = [B, B, B]
        self.name = 'Markowitz'
        # Known optimal portfolio weights and the objective value there.
        self.solver = np.array([0.4411, 0.3656, 0.1933])
        self.solver_function_value = np.array([0.0052820694790865])
class ackley_info:
    """Static metadata for the Ackley benchmark function.

    Stores the search domain, bounds, display name, the known global
    minimizer and its objective value for a ``dim``-dimensional instance.
    """

    def __init__(self, dim):
        # Hyper-cubic search region [-32.768, 32.768]^dim.
        self.domain = ([-32.768] * dim, [32.768] * dim)
        self.lower_bound = [-32.768] * dim
        self.upper_bound = [32.768] * dim
        self.name = '<NAME>'
        # Global minimum at the origin.
        optimum = np.array([0] * dim)
        self.solver = optimum

        def evaluate(point):
            # Standard Ackley parameters.
            amp, decay, freq = 20, 0.2, 2 * np.pi
            vec = np.asarray_chkfinite(point)
            count = len(vec)
            sq_sum = np.sum(vec ** 2)
            cos_sum = np.sum(np.cos(freq * vec))
            return -amp * np.exp(-decay * np.sqrt(sq_sum / count)) - np.exp(cos_sum / count) + amp + np.exp(1)

        # Objective value at the known minimizer (numerically ~0).
        self.solver_function_value = evaluate(optimum)
class schwefel_info:
    """Static metadata for the Schwefel benchmark function.

    Stores the search domain, bounds, display name, the known global
    minimizer and its objective value for a ``dim``-dimensional instance.
    """

    def __init__(self, dim):
        # Hyper-rectangular search region [-500, 500]^dim.
        self.domain = ([-500] * dim, [500] * dim)
        self.lower_bound = [-500] * dim
        self.upper_bound = [500] * dim
        self.name = 'Schwefel Function'
        # Known global minimizer of the Schwefel function.
        optimum = np.array([420.968746] * dim)
        self.solver = optimum

        def evaluate(point):
            vec = np.asarray_chkfinite(point)
            return 418.9829 * len(vec) - np.sum(vec * np.sin(np.sqrt(np.abs(vec))))

        # Objective value at the known minimizer (numerically close to 0).
        self.solver_function_value = evaluate(optimum)
class zakharov_info:
    """Static metadata for the Zakharov benchmark function.

    Stores the search domain, bounds, display name, the known global
    minimizer and its objective value for a ``dim``-dimensional instance.
    """

    def __init__(self, dim):
        # Hyper-rectangular search region [-50, 50]^dim.
        self.domain = ([-50] * dim, [50] * dim)
        self.lower_bound = [-50] * dim
        self.upper_bound = [50] * dim
        self.name = '<NAME>'
        # Global minimum at the origin.
        optimum = np.array([0] * dim)
        self.solver = optimum

        def evaluate(point):
            vec = np.asarray_chkfinite(point)
            weights = np.arange(1., len(vec) + 1)
            half_weighted = np.sum(weights * vec) / 2
            return np.sum(vec ** 2) + half_weighted ** 2 + half_weighted ** 4

        # Objective value at the known minimizer.
        self.solver_function_value = evaluate(optimum)
|
en
| 0.5524
|
#### this file contains classes which store general information about a test problem as for example optimum, domain, upper bound #### lower bound, function value of optimum, name of test problem ##### This class stores the general information for a griewank function ### arguments are the number of dimensions of the problem and the parameter a ### returns the domain of the function ### returns thw lower bound of the function ### returns the upper bound of the function ### returns the known solution to the problem ### returns the function value of the known solution to the problem ##### This class stores the general information for a rastrigin function ### arguments are the number of dimensions of the problem and the parameter a ### returns the domain of the function ### returns thw lower bound of the function ### returns the upper bound of the function ### returns the known solution to the problem ### returns the function value of the known solution to the problem ##### This class stores the general information for a levi function ### arguments are the number of dimensions of the problem and the parameter a ### returns the domain of the function ### returns thw lower bound of the function ### returns the upper bound of the function ### returns the known solution to the problem ### returns the function value of the known solution to the problem ##### This class stores the general information for a rosenbrock function ### arguments are the number of dimensions of the problem and the parameter a ### returns the domain of the function ### returns thw lower bound of the function ### returns the upper bound of the function ### returns the known solution to the problem ### returns the function value of the known solution to the problem ##### This class stores the general information for a bukin_no_6 function ### arguments are the number of dimensions of the problem and the parameter a ### returns the domain of the function ### returns thw lower bound of the function ### 
returns the upper bound of the function ### returns the known solution to the problem ### returns the function value of the known solution to the problem ##### This class stores the general information for a three hump camel function ### arguments are the number of dimensions of the problem and the parameter a ### returns the domain of the function ### returns thw lower bound of the function ### returns the upper bound of the function ### returns the known solution to the problem ### returns the function value of the known solution to the problem ##### This class stores the general information for a easom function ### arguments are the number of dimensions of the problem and the parameter a ### returns the domain of the function ### returns thw lower bound of the function ### returns the upper bound of the function ### returns the known solution to the problem ### returns the function value of the known solution to the problem ##### This class stores the general information for a bukin_no_6 function ### arguments are the number of dimensions of the problem and the parameter a ### returns the domain of the function ### returns thw lower bound of the function ### returns the upper bound of the function ### returns the known solution to the problem ### returns the function value of the known solution to the problem ##### This class stores the general information for markowitz function ### arguments are the number of dimensions of the problem and the parameter a ### returns the domain of the function ### returns thw lower bound of the function ### returns the upper bound of the function ##### This class stores the general information for a ackley function ### arguments are the number of dimensions of the problem and the parameter a ### returns the domain of the function ### returns thw lower bound of the function ### returns the upper bound of the function ### returns the known solution to the problem ### returns the function value of the known solution to the problem 
##### This class stores the general information for a schwefel function ### arguments are the number of dimensions of the problem and the parameter a ### returns the domain of the function ### returns thw lower bound of the function ### returns the upper bound of the function ### returns the known solution to the problem ### returns the function value of the known solution to the problem ##### This class stores the general information for a zakharov function ### arguments are the number of dimensions of the problem and the parameter a ### returns the domain of the function ### returns thw lower bound of the function ### returns the upper bound of the function ### returns the known solution to the problem ### returns the function value of the known solution to the problem
| 2.863977
| 3
|
irekua_database/models/terms/synonyms.py
|
CONABIO-audio/irekua-database
| 0
|
6626972
|
<gh_stars>0
from django.db.models import JSONField
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from irekua_database.utils import empty_JSON
from irekua_database.models import base
class Synonym(base.IrekuaModelBase):
    # Directed synonymy relation between two Term rows. clean() enforces
    # that both terms share the same term type and that the optional
    # metadata payload validates against that term type's synonym schema.
    source = models.ForeignKey(
        'Term',
        related_name='synonym_source',
        on_delete=models.CASCADE,
        db_column='source_id',
        verbose_name=_('source'),
        help_text=_('Reference to the source of synonym'),
        blank=False)
    target = models.ForeignKey(
        'Term',
        related_name='synonym_target',
        on_delete=models.CASCADE,
        db_column='target_id',
        verbose_name=_('target'),
        help_text=_('Reference to the target of the synonym'),
        blank=False)
    metadata = JSONField(
        blank=True,
        db_column='metadata',
        default=empty_JSON,
        verbose_name=_('metadata'),
        help_text=_('Metadata associated to the synonym'),
        null=True)
    class Meta:
        verbose_name = _('Synonym')
        verbose_name_plural = _('Synonyms')
        ordering = ['source']
    def __str__(self):
        # Render as "source = target" for admin listings and logs.
        msg = '%(source)s = %(target)s'
        params = dict(
            source=str(self.source),
            target=str(self.target))
        return msg % params
    def clean(self):
        # Reject synonyms that link terms of different term types.
        if self.source.term_type != self.target.term_type:
            msg = _('Source and target terms are not of the same type')
            raise ValidationError({'target': msg})
        # Delegate metadata validation to the term type's schema; re-raise
        # under the 'metadata' key so forms attach the error to that field.
        try:
            self.source.term_type.validate_synonym_metadata(self.metadata)
        except ValidationError as error:
            raise ValidationError({'metadata': error})
        super(Synonym, self).clean()
|
from django.db.models import JSONField
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from irekua_database.utils import empty_JSON
from irekua_database.models import base
class Synonym(base.IrekuaModelBase):
    # Directed synonymy relation between two Term rows. clean() enforces
    # that both terms share the same term type and that the optional
    # metadata payload validates against that term type's synonym schema.
    source = models.ForeignKey(
        'Term',
        related_name='synonym_source',
        on_delete=models.CASCADE,
        db_column='source_id',
        verbose_name=_('source'),
        help_text=_('Reference to the source of synonym'),
        blank=False)
    target = models.ForeignKey(
        'Term',
        related_name='synonym_target',
        on_delete=models.CASCADE,
        db_column='target_id',
        verbose_name=_('target'),
        help_text=_('Reference to the target of the synonym'),
        blank=False)
    metadata = JSONField(
        blank=True,
        db_column='metadata',
        default=empty_JSON,
        verbose_name=_('metadata'),
        help_text=_('Metadata associated to the synonym'),
        null=True)
    class Meta:
        verbose_name = _('Synonym')
        verbose_name_plural = _('Synonyms')
        ordering = ['source']
    def __str__(self):
        # Render as "source = target" for admin listings and logs.
        msg = '%(source)s = %(target)s'
        params = dict(
            source=str(self.source),
            target=str(self.target))
        return msg % params
    def clean(self):
        # Reject synonyms that link terms of different term types.
        if self.source.term_type != self.target.term_type:
            msg = _('Source and target terms are not of the same type')
            raise ValidationError({'target': msg})
        # Delegate metadata validation to the term type's schema; re-raise
        # under the 'metadata' key so forms attach the error to that field.
        try:
            self.source.term_type.validate_synonym_metadata(self.metadata)
        except ValidationError as error:
            raise ValidationError({'metadata': error})
        super(Synonym, self).clean()
|
none
| 1
| 2.03576
| 2
|
|
SprityBird/spritybird/python3.5/lib/python3.5/site-packages/tables/tests/test_create.py
|
MobileAnalytics/iPython-Framework
| 4
|
6626973
|
<reponame>MobileAnalytics/iPython-Framework<gh_stars>1-10
# -*- coding: utf-8 -*-
"""This test unit checks object creation funtions, like open_file,
create_table, create_array or create_group.
It also checks:
- name identifiers in tree objects
- title character limit for objects (255)
- limit in number in table fields (255)
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import hashlib
import tempfile
import warnings
import numpy
import tables
from tables import (
Group, Leaf, Table, Array, Filters,
StringAtom, Int16Atom, Int64Atom, Float32Atom, Float64Atom,
Col, StringCol, IntCol, Int16Col, FloatCol, Float32Col,
)
from tables.parameters import MAX_COLUMNS
from tables.hdf5extension import HAVE_DIRECT_DRIVER, HAVE_WINDOWS_DRIVER
from tables.utils import quantize
from tables.tests import common
from tables.tests.common import unittest, hdf5_version, blosc_version
from tables.tests.common import PyTablesTestCase as TestCase
from six.moves import range
class Record(tables.IsDescription):
    # Row description used by the creation tests: one column per basic type.
    var1 = StringCol(itemsize=4)  # 4-character String
    var2 = IntCol()  # integer
    var3 = Int16Col()  # short integer
    var4 = FloatCol()  # double (double-precision)
    var5 = Float32Col()  # float (single-precision)
class CreateTestCase(common.TempFileMixin, TestCase):
    # Exercises the node-creation API (create_table/create_array/
    # create_group): overwrite protection, natural-name warnings, title
    # handling and column-count limits.
    title = "This is the table title"
    expectedrows = 100
    maxshort = 2 ** 15
    maxint = 2147483648  # (2 ** 31)
    compress = 0
    def setUp(self):
        super(CreateTestCase, self).setUp()
        # Create an instance of HDF5 Table
        self.root = self.h5file.root
        # Create a table object
        self.table = self.h5file.create_table(self.root, 'atable',
                                              Record, "Table title")
        # Create an array object
        self.array = self.h5file.create_array(self.root, 'anarray',
                                              [1], "Array title")
        # Create a group object
        self.group = self.h5file.create_group(self.root, 'agroup',
                                              "Group title")
    def test00_isClass(self):
        """Testing table creation."""
        self.assertTrue(isinstance(self.table, Table))
        self.assertTrue(isinstance(self.array, Array))
        self.assertTrue(isinstance(self.array, Leaf))
        self.assertTrue(isinstance(self.group, Group))
    def test01_overwriteNode(self):
        """Checking protection against node overwriting."""
        try:
            # Re-creating an existing node must raise tables.NodeError.
            self.array = self.h5file.create_array(self.root, 'anarray',
                                                  [1], "Array title")
        except tables.NodeError:
            if common.verbose:
                (type, value, traceback) = sys.exc_info()
                print("\nGreat!, the next NameError was catched!")
                print(value)
        else:
            self.fail("expected a tables.NodeError")
    def test02_syntaxname(self):
        """Checking syntax in object tree names."""
        # Leading-space names are legal but warn (not natural-name safe).
        with self.assertWarns(tables.NaturalNameWarning):
            self.array = self.h5file.create_array(self.root, ' array',
                                                  [1], "Array title")
        # another name error
        with self.assertWarns(tables.NaturalNameWarning):
            self.array = self.h5file.create_array(self.root, '$array',
                                                  [1], "Array title")
        # Finally, test a reserved word
        with self.assertWarns(tables.NaturalNameWarning):
            self.array = self.h5file.create_array(self.root, 'for',
                                                  [1], "Array title")
    def test03a_titleAttr(self):
        """Checking the self.title attr in nodes."""
        # Close the opened file to destroy the object tree
        self._reopen()
        # Now, test that self.title exists and is correct in all the nodes
        self.assertEqual(self.h5file.root.agroup._v_title, "Group title")
        self.assertEqual(self.h5file.root.atable.title, "Table title")
        self.assertEqual(self.h5file.root.anarray.title, "Array title")
    def test03b_titleLength(self):
        """Checking large title character length limit (1023)"""
        titlelength = 1023
        # Try to put a very long title on a group object
        group = self.h5file.create_group(self.root, 'group', "t" * titlelength)
        self.assertEqual(group._v_title, "t" * titlelength)
        self.assertEqual(group._f_getattr('TITLE'), "t" * titlelength)
        # Now, try with a table object
        table = self.h5file.create_table(self.root, 'table',
                                         Record, "t" * titlelength)
        self.assertEqual(table.title, "t" * titlelength)
        self.assertEqual(table.get_attr("TITLE"), "t" * titlelength)
        # Finally, try with an Array object
        arr = self.h5file.create_array(self.root, 'arr',
                                       [1], "t" * titlelength)
        self.assertEqual(arr.title, "t" * titlelength)
        self.assertEqual(arr.get_attr("TITLE"), "t" * titlelength)
    def test04_maxFields(self):
        """Checking a large number of fields in tables"""
        # The number of fields for a table
        varnumber = MAX_COLUMNS
        varnames = []
        for i in range(varnumber):
            varnames.append('int%d' % i)
        # Build a dictionary with the types as values and varnames as keys
        recordDict = {}
        i = 0
        for varname in varnames:
            recordDict[varname] = Col.from_type("int32", dflt=1, pos=i)
            i += 1
        # Append this entry to indicate the alignment!
        recordDict['_v_align'] = "="
        table = self.h5file.create_table(self.root, 'table',
                                         recordDict, "MetaRecord instance")
        row = table.row
        listrows = []
        # Write 10 records
        for j in range(10):
            rowlist = []
            for i in range(len(table.colnames)):
                row[varnames[i]] = i * j
                rowlist.append(i * j)
            row.append()
            listrows.append(tuple(rowlist))
        # write data on disk
        table.flush()
        # Read all the data as a list
        listout = table.read().tolist()
        # Compare the input rowlist and output row list. They should
        # be equal.
        if common.verbose:
            print("Original row list:", listrows[-1])
            print("Retrieved row list:", listout[-1])
        self.assertEqual(listrows, listout)
    # The next limitation has been released. A warning is still there, though
    def test05_maxFieldsExceeded(self):
        """Checking an excess of the maximum number of fields in tables"""
        # The number of fields for a table
        varnumber = MAX_COLUMNS + 1
        varnames = []
        for i in range(varnumber):
            varnames.append('int%d' % i)
        # Build a dictionary with the types as values and varnames as keys
        recordDict = {}
        i = 0
        for varname in varnames:
            recordDict[varname] = Col.from_type("int32", dflt=1)
            i += 1
        # Now, create a table with this record object
        # This way of creating node objects has been deprecated
        # table = Table(recordDict, "MetaRecord instance")
        # Attach the table to object tree
        # Turn the performance warning into an error so it can be caught.
        warnings.filterwarnings("error", category=tables.PerformanceWarning)
        # Here, a tables.PerformanceWarning should be raised!
        try:
            self.h5file.create_table(self.root, 'table',
                                     recordDict, "MetaRecord instance")
        except tables.PerformanceWarning:
            if common.verbose:
                (type, value, traceback) = sys.exc_info()
                print("\nGreat!, the next PerformanceWarning was catched!")
                print(value)
        else:
            self.fail("expected an tables.PerformanceWarning")
        # Reset the warning
        warnings.filterwarnings("default", category=tables.PerformanceWarning)
    # The next limitation has been released
    def _test06_maxColumnNameLengthExceeded(self):
        """Checking an excess (256) of the maximum length in column names"""
        # Build a dictionary with the types as values and varnames as keys
        recordDict = {}
        recordDict["a" * 255] = IntCol(dflt=1)
        recordDict["b" * 256] = IntCol(dflt=1)  # Should trigger a ValueError
        # Now, create a table with this record object
        # This way of creating node objects has been deprecated
        table = Table(recordDict, "MetaRecord instance")
        self.assertTrue(table is not None)
        # Attach the table to object tree
        # Here, ValueError should be raised!
        with self.assertRaises(ValueError):
            self.h5file.create_table(self.root, 'table',
                                     recordDict, "MetaRecord instance")
    def test06_noMaxColumnNameLength(self):
        """Checking unlimited length in column names"""
        # Build a dictionary with the types as values and varnames as keys
        recordDict = {}
        recordDict["a" * 255] = IntCol(dflt=1, pos=0)
        recordDict["b" * 1024] = IntCol(dflt=1, pos=1)  # Should work well
        # Attach the table to object tree
        # Here, IndexError should be raised!
        table = self.h5file.create_table(self.root, 'table',
                                         recordDict, "MetaRecord instance")
        self.assertEqual(table.colnames[0], "a" * 255)
        self.assertEqual(table.colnames[1], "b" * 1024)
class Record2(tables.IsDescription):
    # Smaller row description used by the filter-inheritance tests.
    var1 = StringCol(itemsize=4)  # 4-character String
    var2 = IntCol()  # integer
    var3 = Int16Col()  # short integer
class FiltersTreeTestCase(common.TempFileMixin, TestCase):
title = "A title"
nrows = 10
def setUp(self):
super(FiltersTreeTestCase, self).setUp()
self.populateFile()
def populateFile(self):
group = self.h5file.root
# Create a tree with three levels of depth
for j in range(5):
# Create a table
table = self.h5file.create_table(group, 'table1', Record2,
title=self.title,
filters=None)
# Get the record object associated with the new table
d = table.row
# Fill the table
for i in range(self.nrows):
d['var1'] = '%04d' % (self.nrows - i)
d['var2'] = i
d['var3'] = i * 2
d.append() # This injects the Record values
# Flush the buffer for this table
table.flush()
# Create a couple of arrays in each group
var1List = [x['var1'] for x in table.iterrows()]
var3List = [x['var3'] for x in table.iterrows()]
self.h5file.create_array(group, 'array1', var1List, "col 1")
self.h5file.create_array(group, 'array2', var3List, "col 3")
# Create a couple of EArrays as well
ea1 = self.h5file.create_earray(group, 'earray1',
StringAtom(itemsize=4), (0,),
"col 1")
ea2 = self.h5file.create_earray(group, 'earray2',
Int16Atom(), (0,), "col 3")
# And fill them with some values
ea1.append(var1List)
ea2.append(var3List)
# Finally a couple of VLArrays too
vla1 = self.h5file.create_vlarray(group, 'vlarray1',
StringAtom(itemsize=4), "col 1")
vla2 = self.h5file.create_vlarray(group, 'vlarray2',
Int16Atom(), "col 3")
# And fill them with some values
vla1.append(var1List)
vla2.append(var3List)
# Create a new group (descendant of group)
if j == 1: # The second level
group2 = self.h5file.create_group(group, 'group' + str(j),
filters=self.gfilters)
elif j == 2: # third level
group2 = self.h5file.create_group(group, 'group' + str(j))
else: # The rest of levels
group2 = self.h5file.create_group(group, 'group' + str(j),
filters=self.filters)
# Iterate over this new group (group2)
group = group2
def test00_checkFilters(self):
"""Checking inheritance of filters on trees (open file version)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test00_checkFilters..." %
self.__class__.__name__)
# First level check
if common.verbose:
print("Test filter:", repr(self.filters))
print("Filters in file:", repr(self.h5file.filters))
if self.filters is None:
filters = Filters()
else:
filters = self.filters
self.assertEqual(repr(filters), repr(self.h5file.filters))
# The next nodes have to have the same filter properties as
# self.filters
nodelist = [
'/table1', '/group0/earray1', '/group0/vlarray1', '/group0',
]
for node in nodelist:
obj = self.h5file.get_node(node)
if isinstance(obj, Group):
self.assertEqual(repr(filters), repr(obj._v_filters))
else:
self.assertEqual(repr(filters), repr(obj.filters))
# Second and third level check
group1 = self.h5file.root.group0.group1
if self.gfilters is None:
if self.filters is None:
gfilters = Filters()
else:
gfilters = self.filters
else:
gfilters = self.gfilters
if common.verbose:
print("Test gfilter:", repr(gfilters))
print("Filters in file:", repr(group1._v_filters))
self.assertEqual(repr(gfilters), repr(group1._v_filters))
# The next nodes have to have the same filter properties as
# gfilters
nodelist = [
'/group0/group1', '/group0/group1/earray1',
'/group0/group1/vlarray1',
'/group0/group1/table1', '/group0/group1/group2/table1',
]
for node in nodelist:
obj = self.h5file.get_node(node)
if isinstance(obj, Group):
self.assertEqual(repr(gfilters), repr(obj._v_filters))
else:
self.assertEqual(repr(gfilters), repr(obj.filters))
# Fourth and fifth level check
if self.filters is None:
# If None, the filters are inherited!
if self.gfilters is None:
filters = Filters()
else:
filters = self.gfilters
else:
filters = self.filters
group3 = self.h5file.root.group0.group1.group2.group3
if common.verbose:
print("Test filter:", repr(filters))
print("Filters in file:", repr(group3._v_filters))
self.assertEqual(repr(filters), repr(group3._v_filters))
# The next nodes have to have the same filter properties as
# self.filter
nodelist = [
'/group0/group1/group2/group3',
'/group0/group1/group2/group3/earray1',
'/group0/group1/group2/group3/vlarray1',
'/group0/group1/group2/group3/table1',
'/group0/group1/group2/group3/group4',
]
for node in nodelist:
obj = self.h5file.get_node(node)
if isinstance(obj, Group):
self.assertEqual(repr(filters), repr(obj._v_filters))
else:
self.assertEqual(repr(filters), repr(obj.filters))
# Checking the special case for Arrays in which the compression
# should always be the empty Filter()
# The next nodes have to have the same filter properties as
# Filter()
nodelist = [
'/array1',
'/group0/array1',
'/group0/group1/array1',
'/group0/group1/group2/array1',
'/group0/group1/group2/group3/array1',
]
for node in nodelist:
obj = self.h5file.get_node(node)
self.assertEqual(repr(Filters()), repr(obj.filters))
def test01_checkFilters(self):
"""Checking inheritance of filters on trees (close file version)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01_checkFilters..." %
self.__class__.__name__)
# Close the file
self._reopen()
# First level check
if self.filters is None:
filters = Filters()
else:
filters = self.filters
if common.verbose:
print("Test filter:", repr(filters))
print("Filters in file:", repr(self.h5file.filters))
self.assertEqual(repr(filters), repr(self.h5file.filters))
# The next nodes have to have the same filter properties as
# self.filters
nodelist = [
'/table1', '/group0/earray1', '/group0/vlarray1', '/group0',
]
for node in nodelist:
object_ = self.h5file.get_node(node)
if isinstance(object_, Group):
self.assertEqual(repr(filters), repr(object_._v_filters))
else:
self.assertEqual(repr(filters), repr(object_.filters))
# Second and third level check
group1 = self.h5file.root.group0.group1
if self.gfilters is None:
if self.filters is None:
gfilters = Filters()
else:
gfilters = self.filters
else:
gfilters = self.gfilters
if common.verbose:
print("Test filter:", repr(gfilters))
print("Filters in file:", repr(group1._v_filters))
self.assertEqual(repr(gfilters), repr(group1._v_filters))
# The next nodes have to have the same filter properties as
# gfilters
nodelist = [
'/group0/group1', '/group0/group1/earray1',
'/group0/group1/vlarray1',
'/group0/group1/table1', '/group0/group1/group2/table1',
]
for node in nodelist:
object_ = self.h5file.get_node(node)
if isinstance(object_, Group):
self.assertEqual(repr(gfilters), repr(object_._v_filters))
else:
self.assertEqual(repr(gfilters), repr(object_.filters))
# Fourth and fifth level check
if self.filters is None:
if self.gfilters is None:
filters = Filters()
else:
filters = self.gfilters
else:
filters = self.filters
group3 = self.h5file.root.group0.group1.group2.group3
if common.verbose:
print("Test filter:", repr(filters))
print("Filters in file:", repr(group3._v_filters))
repr(filters) == repr(group3._v_filters)
# The next nodes have to have the same filter properties as
# self.filters
nodelist = [
'/group0/group1/group2/group3',
'/group0/group1/group2/group3/earray1',
'/group0/group1/group2/group3/vlarray1',
'/group0/group1/group2/group3/table1',
'/group0/group1/group2/group3/group4',
]
for node in nodelist:
obj = self.h5file.get_node(node)
if isinstance(obj, Group):
self.assertEqual(repr(filters), repr(obj._v_filters))
else:
self.assertEqual(repr(filters), repr(obj.filters))
# Checking the special case for Arrays in which the compression
# should always be the empty Filter()
# The next nodes have to have the same filter properties as
# Filter()
nodelist = [
'/array1',
'/group0/array1',
'/group0/group1/array1',
'/group0/group1/group2/array1',
'/group0/group1/group2/group3/array1',
]
for node in nodelist:
obj = self.h5file.get_node(node)
self.assertEqual(repr(Filters()), repr(obj.filters))
class FiltersCase1(FiltersTreeTestCase):
    """Default filters at the file level, complevel=1 at the group level."""

    gfilters = Filters(complevel=1)
    filters = Filters()
    open_kwargs = {"filters": filters}
@unittest.skipIf(not common.bzip2_avail,
                 'BZIP2 compression library not available')
class FiltersCase2(FiltersTreeTestCase):
    """BZIP2 file-level filters; complevel=1 defaults at the group level."""

    gfilters = Filters(complevel=1)
    filters = Filters(complevel=1, complib="bzip2")
    open_kwargs = {"filters": filters}
@unittest.skipIf(not common.lzo_avail, 'LZO compression library not available')
class FiltersCase3(FiltersTreeTestCase):
    """Shuffled zlib at the file level; LZO without shuffle at the group level."""

    gfilters = Filters(complevel=1, shuffle=False, complib="lzo")
    filters = Filters(shuffle=True, complib="zlib")
    open_kwargs = {"filters": filters}
class FiltersCase4(FiltersTreeTestCase):
    """Shuffle-only file filters; complevel=1 without shuffle at the group level."""

    gfilters = Filters(complevel=1, shuffle=False)
    filters = Filters(shuffle=True)
    open_kwargs = {"filters": filters}
class FiltersCase5(FiltersTreeTestCase):
    """Fletcher32 checksums at the file level; plain complevel=1 group filters."""

    gfilters = Filters(complevel=1, shuffle=False)
    filters = Filters(fletcher32=True)
    open_kwargs = {"filters": filters}
class FiltersCase6(FiltersTreeTestCase):
    """No explicit file-level filters; complevel=1 without shuffle at the group."""

    gfilters = Filters(complevel=1, shuffle=False)
    filters = None
    open_kwargs = {"filters": filters}
class FiltersCase7(FiltersTreeTestCase):
    """complevel=1 file-level filters; no explicit group-level filters."""

    gfilters = None
    filters = Filters(complevel=1)
    open_kwargs = {"filters": filters}
class FiltersCase8(FiltersTreeTestCase):
    """No explicit filters at either the file or the group level."""

    gfilters = None
    filters = None
    open_kwargs = {"filters": filters}
@unittest.skipIf(not common.bzip2_avail,
                 'BZIP2 compression library not available')
class FiltersCase9(FiltersTreeTestCase):
    """Shuffled zlib file filters; shuffled BZIP2 complevel=5 at the group."""

    gfilters = Filters(complevel=5, shuffle=True, complib="bzip2")
    filters = Filters(shuffle=True, complib="zlib")
    open_kwargs = {"filters": filters}
@unittest.skipIf(not common.blosc_avail,
                 'BLOSC compression library not available')
class FiltersCase10(FiltersTreeTestCase):
    """Blosc at both levels: plain complevel=1 file, shuffled complevel=5 group."""

    gfilters = Filters(complevel=5, shuffle=True, complib="blosc")
    filters = Filters(shuffle=False, complevel=1, complib="blosc")
    open_kwargs = {"filters": filters}
@unittest.skipIf(not common.blosc_avail,
                 'BLOSC compression library not available')
class FiltersCaseBloscBloscLZ(FiltersTreeTestCase):
    """Blosc/blosclz: plain complevel=1 file, shuffled complevel=5 group."""

    gfilters = Filters(complevel=5, shuffle=True, complib="blosc:blosclz")
    filters = Filters(shuffle=False, complevel=1, complib="blosc:blosclz")
    open_kwargs = {"filters": filters}
@unittest.skipIf(not common.blosc_avail,
                 'BLOSC compression library not available')
@unittest.skipIf('lz4' not in tables.blosc_compressor_list(), 'lz4 required')
class FiltersCaseBloscLZ4(FiltersTreeTestCase):
    """Blosc/lz4: plain complevel=1 file, shuffled complevel=5 group."""

    gfilters = Filters(complevel=5, shuffle=True, complib="blosc:lz4")
    filters = Filters(shuffle=False, complevel=1, complib="blosc:lz4")
    open_kwargs = {"filters": filters}
@unittest.skipIf(not common.blosc_avail,
                 'BLOSC compression library not available')
@unittest.skipIf('lz4' not in tables.blosc_compressor_list(), 'lz4 required')
class FiltersCaseBloscLZ4HC(FiltersTreeTestCase):
    """Blosc/lz4hc: plain complevel=1 file, shuffled complevel=5 group."""

    gfilters = Filters(complevel=5, shuffle=True, complib="blosc:lz4hc")
    filters = Filters(shuffle=False, complevel=1, complib="blosc:lz4hc")
    open_kwargs = {"filters": filters}
@unittest.skipIf(not common.blosc_avail,
                 'BLOSC compression library not available')
@unittest.skipIf('snappy' not in tables.blosc_compressor_list(),
                 'snappy required')
class FiltersCaseBloscSnappy(FiltersTreeTestCase):
    """Blosc/snappy: plain complevel=1 file, shuffled complevel=5 group."""

    gfilters = Filters(complevel=5, shuffle=True, complib="blosc:snappy")
    filters = Filters(shuffle=False, complevel=1, complib="blosc:snappy")
    open_kwargs = {"filters": filters}
@unittest.skipIf(not common.blosc_avail,
                 'BLOSC compression library not available')
@unittest.skipIf('zlib' not in tables.blosc_compressor_list(), 'zlib required')
class FiltersCaseBloscZlib(FiltersTreeTestCase):
    """Blosc/zlib: plain complevel=1 file, shuffled complevel=5 group."""

    gfilters = Filters(complevel=5, shuffle=True, complib="blosc:zlib")
    filters = Filters(shuffle=False, complevel=1, complib="blosc:zlib")
    open_kwargs = {"filters": filters}
@unittest.skipIf(not common.blosc_avail,
                 'BLOSC compression library not available')
@unittest.skipIf(blosc_version < common.min_blosc_bitshuffle_version,
                 'BLOSC >= %s required' % common.min_blosc_bitshuffle_version)
class FiltersCaseBloscBitShuffle(FiltersTreeTestCase):
    """Blosc/blosclz with bit-shuffle at the group level."""

    gfilters = Filters(complevel=5, shuffle=False, bitshuffle=True,
                       complib="blosc:blosclz")
    filters = Filters(shuffle=False, complevel=1, complib="blosc:blosclz")
    open_kwargs = {"filters": filters}
# NOTE(review): module-level debug print — runs unconditionally on import,
# presumably leftover from development; consider guarding with
# ``common.verbose`` (confirm intent before changing).
print("version:", tables.which_lib_version("blosc")[1])
class CopyGroupTestCase(common.TempFileMixin, TestCase):
    """Base tests for copying groups between files.

    ``setUp`` builds a populated source file plus an empty destination
    file; subclasses select ``srcnode``/``dstnode`` paths, whether to
    reopen the destination (``close``) and the ``filters`` override.
    """

    title = "A title"
    nrows = 10

    def setUp(self):
        """Create the source tree and an empty destination file."""
        super(CopyGroupTestCase, self).setUp()
        # Create a temporary file
        self.h5fname2 = tempfile.mktemp(".h5")
        # Create the destination
        self.h5file2 = tables.open_file(self.h5fname2, "w")
        self.populateFile()

    def populateFile(self):
        """Build a 5-level tree of groups, tables, arrays and EArrays,
        each carrying user attributes, to exercise copying."""
        group = self.h5file.root
        # Add some user attrs:
        group._v_attrs.attr1 = "an string for root group"
        group._v_attrs.attr2 = 124
        # Create a tree
        for j in range(5):
            for i in range(2):
                # Create a new group (brother of group)
                group2 = self.h5file.create_group(group, 'bgroup' + str(i),
                                                  filters=None)
                # Create a table
                table = self.h5file.create_table(group2, 'table1', Record2,
                                                 title=self.title,
                                                 filters=None)
                # Get the record object associated with the new table
                d = table.row
                # Fill the table
                for i in range(self.nrows):
                    d['var1'] = '%04d' % (self.nrows - i)
                    d['var2'] = i
                    d['var3'] = i * 2
                    d.append()  # This injects the Record values
                # Flush the buffer for this table
                table.flush()
                # Add some user attrs:
                table.attrs.attr1 = "an string"
                table.attrs.attr2 = 234
                # Create a couple of arrays in each group
                var1List = [x['var1'] for x in table.iterrows()]
                var3List = [x['var3'] for x in table.iterrows()]
                self.h5file.create_array(group2, 'array1', var1List, "col 1")
                self.h5file.create_array(group2, 'array2', var3List, "col 3")
                # Create a couple of EArrays as well
                ea1 = self.h5file.create_earray(group2, 'earray1',
                                                StringAtom(itemsize=4), (0,),
                                                "col 1")
                ea2 = self.h5file.create_earray(group2, 'earray2',
                                                Int16Atom(), (0,), "col 3")
                # Add some user attrs:
                ea1.attrs.attr1 = "an string for earray"
                ea2.attrs.attr2 = 123
                # And fill them with some values
                ea1.append(var1List)
                ea2.append(var3List)
            # Create a new group (descendant of group)
            group3 = self.h5file.create_group(group, 'group' + str(j),
                                              filters=None)
            # Iterate over this new group (group3)
            group = group3
            # Add some user attrs:
            group._v_attrs.attr1 = "an string for group"
            group._v_attrs.attr2 = 124

    def tearDown(self):
        """Close and remove the destination file."""
        # Close the file
        if self.h5file2.isopen:
            self.h5file2.close()
        os.remove(self.h5fname2)
        super(CopyGroupTestCase, self).tearDown()

    def test00_nonRecursive(self):
        """Checking non-recursive copy of a Group"""
        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test00_nonRecursive..." %
                  self.__class__.__name__)
        # Copy a group non-recursively
        srcgroup = self.h5file.root.group0.group1
        #srcgroup._f_copy_children(self.h5file2.root, recursive=False,
        #                          filters=self.filters)
        self.h5file.copy_children(srcgroup, self.h5file2.root,
                                  recursive=False, filters=self.filters)
        if self.close:
            # Close the destination file
            self.h5file2.close()
            # And open it again
            self.h5file2 = tables.open_file(self.h5fname2, "r")
        # Check that the copy has been done correctly
        dstgroup = self.h5file2.root
        nodelist1 = list(srcgroup._v_children.keys())
        nodelist2 = list(dstgroup._v_children.keys())
        # Sort the lists
        nodelist1.sort()
        nodelist2.sort()
        if common.verbose:
            print("The origin node list -->", nodelist1)
            print("The copied node list -->", nodelist2)
        self.assertEqual(srcgroup._v_nchildren, dstgroup._v_nchildren)
        self.assertEqual(nodelist1, nodelist2)

    def test01_nonRecursiveAttrs(self):
        """Checking non-recursive copy of a Group (attributes copied)"""
        if common.verbose:
            print('\n', '-=' * 30)
            print(("Running %s.test01_nonRecursiveAttrs..." %
                   self.__class__.__name__))
        # Copy a group non-recursively with attrs
        srcgroup = self.h5file.root.group0.group1
        srcgroup._f_copy_children(self.h5file2.root,
                                  recursive=False,
                                  filters=self.filters,
                                  copyuserattrs=1)
        if self.close:
            # Close the destination file
            self.h5file2.close()
            # And open it again
            self.h5file2 = tables.open_file(self.h5fname2, "r")
        # Check that the copy has been done correctly
        dstgroup = self.h5file2.root
        for srcnode in srcgroup:
            dstnode = getattr(dstgroup, srcnode._v_name)
            if isinstance(srcnode, Group):
                srcattrs = srcnode._v_attrs
                srcattrskeys = srcattrs._f_list("all")
                dstattrs = dstnode._v_attrs
                dstattrskeys = dstattrs._f_list("all")
            else:
                srcattrs = srcnode.attrs
                srcattrskeys = srcattrs._f_list("all")
                dstattrs = dstnode.attrs
                dstattrskeys = dstattrs._f_list("all")
            # Filters may differ, do not take into account
            if self.filters is not None:
                dstattrskeys.remove('FILTERS')
            # These lists should already be ordered
            if common.verbose:
                print("srcattrskeys for node %s: %s" % (srcnode._v_name,
                                                        srcattrskeys))
                print("dstattrskeys for node %s: %s" % (dstnode._v_name,
                                                        dstattrskeys))
            self.assertEqual(srcattrskeys, dstattrskeys)
            if common.verbose:
                print("The attrs names has been copied correctly")
            # Now, for the contents of attributes
            for srcattrname in srcattrskeys:
                srcattrvalue = str(getattr(srcattrs, srcattrname))
                dstattrvalue = str(getattr(dstattrs, srcattrname))
                self.assertEqual(srcattrvalue, dstattrvalue)
            if self.filters is not None:
                self.assertEqual(dstattrs.FILTERS, self.filters)
        if common.verbose:
            print("The attrs contents has been copied correctly")

    def test02_Recursive(self):
        """Checking recursive copy of a Group"""
        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test02_Recursive..." % self.__class__.__name__)
        # Create the destination node
        group = self.h5file2.root
        for groupname in self.dstnode.split("/"):
            if groupname:
                group = self.h5file2.create_group(group, groupname)
        dstgroup = self.h5file2.get_node(self.dstnode)
        # Copy a group non-recursively
        srcgroup = self.h5file.get_node(self.srcnode)
        self.h5file.copy_children(srcgroup, dstgroup,
                                  recursive=True,
                                  filters=self.filters)
        lenSrcGroup = len(srcgroup._v_pathname)
        if lenSrcGroup == 1:
            lenSrcGroup = 0  # Case where srcgroup == "/"
        if self.close:
            # Close the destination file
            self.h5file2.close()
            # And open it again
            self.h5file2 = tables.open_file(self.h5fname2, "r")
            dstgroup = self.h5file2.get_node(self.dstnode)
        # Check that the copy has been done correctly
        lenDstGroup = len(dstgroup._v_pathname)
        if lenDstGroup == 1:
            lenDstGroup = 0  # Case where dstgroup == "/"
        first = 1
        nodelist1 = []
        for node in srcgroup._f_walknodes():
            if first:
                # skip the first group
                first = 0
                continue
            nodelist1.append(node._v_pathname[lenSrcGroup:])
        first = 1
        nodelist2 = []
        for node in dstgroup._f_walknodes():
            if first:
                # skip the first group
                first = 0
                continue
            nodelist2.append(node._v_pathname[lenDstGroup:])
        if common.verbose:
            print("The origin node list -->", nodelist1)
            print("The copied node list -->", nodelist2)
        self.assertEqual(nodelist1, nodelist2)

    def test03_RecursiveFilters(self):
        """Checking recursive copy of a Group (checking Filters)"""
        if common.verbose:
            print('\n', '-=' * 30)
            print(("Running %s.test03_RecursiveFilters..." %
                   self.__class__.__name__))
        # Create the destination node
        group = self.h5file2.root
        for groupname in self.dstnode.split("/"):
            if groupname:
                group = self.h5file2.create_group(group, groupname)
        dstgroup = self.h5file2.get_node(self.dstnode)
        # Copy a group non-recursively
        srcgroup = self.h5file.get_node(self.srcnode)
        srcgroup._f_copy_children(dstgroup,
                                  recursive=True,
                                  filters=self.filters)
        lenSrcGroup = len(srcgroup._v_pathname)
        if lenSrcGroup == 1:
            lenSrcGroup = 0  # Case where srcgroup == "/"
        if self.close:
            # Close the destination file
            self.h5file2.close()
            # And open it again
            self.h5file2 = tables.open_file(self.h5fname2, "r")
            dstgroup = self.h5file2.get_node(self.dstnode)
        # Check that the copy has been done correctly
        lenDstGroup = len(dstgroup._v_pathname)
        if lenDstGroup == 1:
            lenDstGroup = 0  # Case where dstgroup == "/"
        first = 1
        nodelist1 = {}
        for node in srcgroup._f_walknodes():
            if first:
                # skip the first group
                first = 0
                continue
            nodelist1[node._v_name] = node._v_pathname[lenSrcGroup:]
        first = 1
        for node in dstgroup._f_walknodes():
            if first:
                # skip the first group
                first = 0
                continue
            if isinstance(node, Group):
                # NOTE(review): this comparison result is discarded, so
                # nothing is asserted here; it also compares a filters repr
                # against the stored *pathname* repr from nodelist1.  Left
                # unchanged — the intended check needs confirming before
                # turning it into an assertEqual.
                repr(node._v_filters) == repr(nodelist1[node._v_name])
            else:
                # NOTE(review): same dead comparison as above.
                repr(node.filters) == repr(nodelist1[node._v_name])
class CopyGroupCase1(CopyGroupTestCase):
    """Copy ``/group0/group1`` into ``/`` with the file kept open, no filters."""

    srcnode = '/group0/group1'
    dstnode = '/'
    close = 0
    filters = None
class CopyGroupCase2(CopyGroupTestCase):
    """Copy ``/group0/group1`` into ``/`` with a close/reopen, no filters."""

    srcnode = '/group0/group1'
    dstnode = '/'
    close = 1
    filters = None
class CopyGroupCase3(CopyGroupTestCase):
    """Copy ``/group0`` into ``/group2/group3`` with the file kept open."""

    srcnode = '/group0'
    dstnode = '/group2/group3'
    close = 0
    filters = None
class CopyGroupCase4(CopyGroupTestCase):
    """Copy ``/group0`` into ``/group2/group3``, reopening, complevel=1."""

    srcnode = '/group0'
    dstnode = '/group2/group3'
    close = 1
    filters = Filters(complevel=1)
class CopyGroupCase5(CopyGroupTestCase):
    """Copy ``/`` into ``/group2/group3`` with default Filters, file kept open."""

    srcnode = '/'
    dstnode = '/group2/group3'
    close = 0
    filters = Filters()
class CopyGroupCase6(CopyGroupTestCase):
    """Copy ``/group0`` into ``/group2/group3``, reopening, fletcher32 on."""

    srcnode = '/group0'
    dstnode = '/group2/group3'
    close = 1
    filters = Filters(fletcher32=True)
class CopyGroupCase7(CopyGroupTestCase):
    """Copy ``/`` onto ``/`` with complevel=1 and shuffle disabled."""

    srcnode = '/'
    dstnode = '/'
    close = 0
    filters = Filters(complevel=1, shuffle=False)
@unittest.skipIf(not common.lzo_avail, 'LZO compression library not available')
class CopyGroupCase8(CopyGroupTestCase):
    """Copy ``/`` onto ``/`` with LZO complevel=1, reopening the destination."""

    srcnode = '/'
    dstnode = '/'
    close = 1
    filters = Filters(complevel=1, complib="lzo")
class CopyFileTestCase(common.TempFileMixin, TestCase):
    """Base tests for ``File.copy_file`` (titles, filters, attribute copying).

    Subclasses choose ``close`` (reopen the source after copying),
    ``title`` and the ``filters`` override.
    """

    title = "A title"
    nrows = 10

    def setUp(self):
        """Create a populated source file and pick a destination filename."""
        super(CopyFileTestCase, self).setUp()
        # Create a temporary file
        self.h5fname2 = tempfile.mktemp(".h5")
        # Create the source file
        self.populateFile()

    def populateFile(self):
        """Build a 5-level tree of groups, tables, arrays and EArrays,
        each carrying user attributes, to exercise copying."""
        group = self.h5file.root
        # Add some user attrs:
        group._v_attrs.attr1 = "an string for root group"
        group._v_attrs.attr2 = 124
        # Create a tree
        for j in range(5):
            for i in range(2):
                # Create a new group (brother of group)
                group2 = self.h5file.create_group(group, 'bgroup' + str(i),
                                                  filters=None)
                # Create a table
                table = self.h5file.create_table(group2, 'table1', Record2,
                                                 title=self.title,
                                                 filters=None)
                # Get the record object associated with the new table
                d = table.row
                # Fill the table
                for i in range(self.nrows):
                    d['var1'] = '%04d' % (self.nrows - i)
                    d['var2'] = i
                    d['var3'] = i * 2
                    d.append()  # This injects the Record values
                # Flush the buffer for this table
                table.flush()
                # Add some user attrs:
                table.attrs.attr1 = "an string"
                table.attrs.attr2 = 234
                # Create a couple of arrays in each group
                var1List = [x['var1'] for x in table.iterrows()]
                var3List = [x['var3'] for x in table.iterrows()]
                self.h5file.create_array(group2, 'array1', var1List, "col 1")
                self.h5file.create_array(group2, 'array2', var3List, "col 3")
                # Create a couple of EArrays as well
                ea1 = self.h5file.create_earray(group2, 'earray1',
                                                StringAtom(itemsize=4), (0,),
                                                "col 1")
                ea2 = self.h5file.create_earray(group2, 'earray2',
                                                Int16Atom(), (0,),
                                                "col 3")
                # Add some user attrs:
                ea1.attrs.attr1 = "an string for earray"
                ea2.attrs.attr2 = 123
                # And fill them with some values
                ea1.append(var1List)
                ea2.append(var3List)
            # Create a new group (descendant of group)
            group3 = self.h5file.create_group(group, 'group' + str(j),
                                              filters=None)
            # Iterate over this new group (group3)
            group = group3
            # Add some user attrs:
            group._v_attrs.attr1 = "an string for group"
            group._v_attrs.attr2 = 124

    def tearDown(self):
        """Close and remove the destination file if it was created."""
        # Close the file
        if hasattr(self, 'h5file2') and self.h5file2.isopen:
            self.h5file2.close()
        if hasattr(self, 'h5fname2') and os.path.exists(self.h5fname2):
            os.remove(self.h5fname2)
        super(CopyFileTestCase, self).tearDown()

    def test00_overwrite(self):
        """Checking copy of a File (overwriting file)"""
        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test00_overwrite..." % self.__class__.__name__)
        # Create a temporary file
        file2h = open(self.h5fname2, "w")
        file2h.close()
        # Copy the file to the destination
        self.h5file.copy_file(self.h5fname2, title=self.title,
                              overwrite=1,
                              copyuserattrs=0,
                              filters=None)
        # Close the original file, if needed
        if self.close:
            self._reopen()
        # ...and open the destination file
        self.h5file2 = tables.open_file(self.h5fname2, "r")
        # Check that the copy has been done correctly
        srcgroup = self.h5file.root
        dstgroup = self.h5file2.root
        nodelist1 = list(srcgroup._v_children.keys())
        nodelist2 = list(dstgroup._v_children.keys())
        # Sort the lists
        nodelist1.sort()
        nodelist2.sort()
        if common.verbose:
            print("The origin node list -->", nodelist1)
            print("The copied node list -->", nodelist2)
        self.assertEqual(srcgroup._v_nchildren, dstgroup._v_nchildren)
        self.assertEqual(nodelist1, nodelist2)
        self.assertEqual(self.h5file2.title, self.title)

    def test00a_srcdstequal(self):
        """Checking copy of a File (srcfile == dstfile)"""
        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test00a_srcdstequal..." %
                  self.__class__.__name__)
        # Copying a file onto itself must raise an IOError.
        self.assertRaises(IOError, self.h5file.copy_file, self.h5file.filename)

    def test00b_firstclass(self):
        """Checking copy of a File (first-class function)"""
        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test00b_firstclass..." % self.__class__.__name__)
        # Close the temporary file
        self.h5file.close()
        # Copy the file to the destination
        tables.copy_file(self.h5fname, self.h5fname2, title=self.title,
                         copyuserattrs=0, filters=None, overwrite=1)
        # ...and open the source and destination file
        self.h5file = tables.open_file(self.h5fname, "r")
        self.h5file2 = tables.open_file(self.h5fname2, "r")
        # Check that the copy has been done correctly
        srcgroup = self.h5file.root
        dstgroup = self.h5file2.root
        nodelist1 = list(srcgroup._v_children.keys())
        nodelist2 = list(dstgroup._v_children.keys())
        # Sort the lists
        nodelist1.sort()
        nodelist2.sort()
        if common.verbose:
            print("The origin node list -->", nodelist1)
            print("The copied node list -->", nodelist2)
        self.assertEqual(srcgroup._v_nchildren, dstgroup._v_nchildren)
        self.assertEqual(nodelist1, nodelist2)
        self.assertEqual(self.h5file2.title, self.title)

    def test01_copy(self):
        """Checking copy of a File (attributes not copied)"""
        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test01_copy..." % self.__class__.__name__)
        # Copy the file to the destination
        self.h5file.copy_file(self.h5fname2, title=self.title,
                              copyuserattrs=0,
                              filters=self.filters)
        # Close the original file, if needed
        if self.close:
            self._reopen()
        # ...and open the destination file
        self.h5file2 = tables.open_file(self.h5fname2, "r")
        # Check that the copy has been done correctly
        srcgroup = self.h5file.root
        dstgroup = self.h5file2.root
        nodelist1 = list(srcgroup._v_children.keys())
        nodelist2 = list(dstgroup._v_children.keys())
        # Sort the lists
        nodelist1.sort()
        nodelist2.sort()
        if common.verbose:
            print("The origin node list -->", nodelist1)
            print("The copied node list -->", nodelist2)
        self.assertEqual(srcgroup._v_nchildren, dstgroup._v_nchildren)
        self.assertEqual(nodelist1, nodelist2)
        # print("_v_attrnames-->", self.h5file2.root._v_attrs._v_attrnames)
        # print("--> <%s,%s>" % (self.h5file2.title, self.title))
        self.assertEqual(self.h5file2.title, self.title)
        # Check that user attributes has not been copied: only the system
        # attributes of the source should appear in the destination.
        for srcnode in srcgroup:
            dstnode = getattr(dstgroup, srcnode._v_name)
            srcattrs = srcnode._v_attrs
            srcattrskeys = srcattrs._f_list("sys")
            dstattrs = dstnode._v_attrs
            dstattrskeys = dstattrs._f_list("all")
            # Filters may differ, do not take into account
            if self.filters is not None:
                dstattrskeys.remove('FILTERS')
            # These lists should already be ordered
            if common.verbose:
                print("srcattrskeys for node %s: %s" % (srcnode._v_name,
                                                        srcattrskeys))
                print("dstattrskeys for node %s: %s" % (dstnode._v_name,
                                                        dstattrskeys))
            self.assertEqual(srcattrskeys, dstattrskeys)
            if common.verbose:
                print("The attrs names has been copied correctly")
            # Now, for the contents of attributes
            for srcattrname in srcattrskeys:
                srcattrvalue = str(getattr(srcattrs, srcattrname))
                dstattrvalue = str(getattr(dstattrs, srcattrname))
                self.assertEqual(srcattrvalue, dstattrvalue)
            if self.filters is not None:
                self.assertEqual(dstattrs.FILTERS, self.filters)
        if common.verbose:
            print("The attrs contents has been copied correctly")

    def test02_Attrs(self):
        """Checking copy of a File (attributes copied)"""
        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test02_Attrs..." % self.__class__.__name__)
        # Copy the file to the destination
        self.h5file.copy_file(self.h5fname2, title=self.title,
                              copyuserattrs=1,
                              filters=self.filters)
        # Close the original file, if needed
        if self.close:
            self._reopen()
        # ...and open the destination file
        self.h5file2 = tables.open_file(self.h5fname2, "r")
        # Check that the copy has been done correctly
        srcgroup = self.h5file.root
        dstgroup = self.h5file2.root
        for srcnode in srcgroup:
            dstnode = getattr(dstgroup, srcnode._v_name)
            srcattrs = srcnode._v_attrs
            srcattrskeys = srcattrs._f_list("all")
            dstattrs = dstnode._v_attrs
            dstattrskeys = dstattrs._f_list("all")
            # These lists should already be ordered
            if common.verbose:
                print("srcattrskeys for node %s: %s" % (srcnode._v_name,
                                                        srcattrskeys))
                print("dstattrskeys for node %s: %s" % (dstnode._v_name,
                                                        dstattrskeys))
            # Filters may differ, do not take into account
            if self.filters is not None:
                dstattrskeys.remove('FILTERS')
            self.assertEqual(srcattrskeys, dstattrskeys)
            if common.verbose:
                print("The attrs names has been copied correctly")
            # Now, for the contents of attributes
            for srcattrname in srcattrskeys:
                srcattrvalue = str(getattr(srcattrs, srcattrname))
                dstattrvalue = str(getattr(dstattrs, srcattrname))
                self.assertEqual(srcattrvalue, dstattrvalue)
            if self.filters is not None:
                self.assertEqual(dstattrs.FILTERS, self.filters)
        if common.verbose:
            print("The attrs contents has been copied correctly")
class CopyFileCase1(CopyFileTestCase):
    """File copy with no filter override, source kept open."""

    title = "A new title"
    filters = None
    close = 0
class CopyFileCase2(CopyFileTestCase):
    """File copy with no filter override, source closed and reopened."""

    title = "A new title"
    filters = None
    close = 1
class CopyFileCase3(CopyFileTestCase):
    """File copy overriding filters with complevel=1, source kept open."""

    title = "A new title"
    filters = Filters(complevel=1)
    close = 0
class CopyFileCase4(CopyFileTestCase):
    """File copy overriding filters with complevel=1, source reopened."""

    title = "A new title"
    filters = Filters(complevel=1)
    close = 1
class CopyFileCase5(CopyFileTestCase):
    """File copy overriding filters with fletcher32, source kept open."""

    title = "A new title"
    filters = Filters(fletcher32=True)
    close = 0
class CopyFileCase6(CopyFileTestCase):
    """File copy overriding filters with fletcher32, source reopened."""

    title = "A new title"
    filters = Filters(fletcher32=True)
    close = 1
@unittest.skipIf(not common.lzo_avail, 'LZO compression library not available')
class CopyFileCase7(CopyFileTestCase):
    """File copy overriding filters with LZO, source kept open.

    Fixed: this case uses ``complib="lzo"`` but, unlike ``FiltersCase3``
    and ``CopyGroupCase8``, it was not skipped when the LZO library is
    unavailable, making the suite fail on installations without LZO.
    """

    close = 0
    title = "A new title"
    filters = Filters(complevel=1, complib="lzo")
@unittest.skipIf(not common.lzo_avail, 'LZO compression library not available')
class CopyFileCase8(CopyFileTestCase):
    """File copy overriding filters with LZO, source closed and reopened.

    Fixed: this case uses ``complib="lzo"`` but, unlike ``FiltersCase3``
    and ``CopyGroupCase8``, it was not skipped when the LZO library is
    unavailable, making the suite fail on installations without LZO.
    """

    close = 1
    title = "A new title"
    filters = Filters(complevel=1, complib="lzo")
class CopyFileCase10(common.TempFileMixin, TestCase):
    """Check that ``copy_file`` refuses to clobber an existing file."""

    def test01_notoverwrite(self):
        """Checking copy of a File (checking not overwriting)"""
        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test01_notoverwrite..." %
                  self.__class__.__name__)

        # Create a second, empty HDF5 file to act as the pre-existing
        # destination, then close it right away.
        self.h5fname2 = tempfile.mktemp(".h5")
        self.h5file2 = tables.open_file(self.h5fname2, "w")
        self.h5file2.close()  # close the second one
        try:
            # Copying onto an existing file without overwrite must fail.
            with self.assertRaises(IOError):
                self.h5file.copy_file(self.h5fname2, overwrite=False)
        finally:
            # Delete files
            os.remove(self.h5fname2)
class GroupFiltersTestCase(common.TempFileMixin, TestCase):
    """Tests for inheritance/propagation of filters through a group tree.

    Nodes named ``*_no`` are expected to end up with the default
    ``Filters()``; nodes named ``*_yes`` with ``self.filters``.
    """

    filters = tables.Filters(complevel=4)  # something non-default

    def setUp(self):
        """Build a small tree with implicit and explicit filter settings."""
        super(GroupFiltersTestCase, self).setUp()

        atom, shape = tables.IntAtom(), (1, 1)
        create_group = self.h5file.create_group
        create_carray = self.h5file.create_carray

        create_group('/', 'implicit_no')
        create_group('/implicit_no', 'implicit_no')
        create_carray('/implicit_no/implicit_no', 'implicit_no',
                      atom=atom, shape=shape)
        create_carray('/implicit_no/implicit_no', 'explicit_no',
                      atom=atom, shape=shape, filters=tables.Filters())
        create_carray('/implicit_no/implicit_no', 'explicit_yes',
                      atom=atom, shape=shape, filters=self.filters)
        create_group('/', 'explicit_yes', filters=self.filters)
        create_group('/explicit_yes', 'implicit_yes')
        create_carray('/explicit_yes/implicit_yes', 'implicit_yes',
                      atom=atom, shape=shape)
        create_carray('/explicit_yes/implicit_yes', 'explicit_yes',
                      atom=atom, shape=shape, filters=self.filters)
        create_carray('/explicit_yes/implicit_yes', 'explicit_no',
                      atom=atom, shape=shape, filters=tables.Filters())

    def _check_filters(self, h5file, filters=None):
        """Check every node's filters against *filters* or its name suffix."""
        for node in h5file:
            # Get node filters.
            if hasattr(node, 'filters'):
                node_filters = node.filters
            else:
                node_filters = node._v_filters
            # Compare to given filters.
            if filters is not None:
                self.assertEqual(node_filters, filters)
                # NOTE(review): this returns after checking only the first
                # node; presumably every node should be checked (``continue``)
                # — confirm intent before strengthening.
                return
            # Guess filters to compare to by node name.
            if node._v_name.endswith('_no'):
                self.assertEqual(
                    node_filters, tables.Filters(),
                    "node ``%s`` should have no filters" % node._v_pathname)
            elif node._v_name.endswith('_yes'):
                self.assertEqual(
                    node_filters, self.filters,
                    "node ``%s`` should have filters" % node._v_pathname)

    def test00_propagate(self):
        """Filters propagating to children."""
        self._check_filters(self.h5file)

    def _test_copyFile(self, filters=None):
        """Copy the file (optionally overriding *filters*) and verify the
        copy's filters.

        Fixed: ``copyf`` is now bound before entering the inner ``try`` —
        previously, if ``tables.open_file`` raised, the ``finally`` clause
        referenced an unbound ``copyf`` and masked the real error with a
        ``NameError``.
        """
        copyfname = tempfile.mktemp(suffix='.h5')
        try:
            self.h5file.copy_file(copyfname, filters=filters)
            copyf = tables.open_file(copyfname)
            try:
                self._check_filters(copyf, filters=filters)
            finally:
                copyf.close()
        finally:
            os.remove(copyfname)

    def test01_copyFile(self):
        """Keeping filters when copying a file."""
        self._test_copyFile()

    def test02_copyFile_override(self):
        """Overriding filters when copying a file."""
        self._test_copyFile(self.filters)

    def _test_change(self, pathname, change_filters, new_filters):
        """Apply *change_filters* to the group at *pathname* and verify its
        filters equal *new_filters*, before and after reopening."""
        group = self.h5file.get_node(pathname)
        # Check expected current filters.
        old_filters = tables.Filters()
        if pathname.endswith('_yes'):
            old_filters = self.filters
        self.assertEqual(group._v_filters, old_filters)
        # Change filters.
        change_filters(group)
        self.assertEqual(group._v_filters, new_filters)
        # Get and check changed filters.
        if self._reopen():
            group = self.h5file.get_node(pathname)
        self.assertEqual(group._v_filters, new_filters)

    def test03_change(self):
        """Changing the filters of a group."""
        def set_filters(group):
            group._v_filters = self.filters
        self._test_change('/', set_filters, self.filters)

    def test04_delete(self):
        """Deleting the filters of a group (reverts to the defaults)."""
        def del_filters(group):
            del group._v_filters
        self._test_change('/explicit_yes', del_filters, tables.Filters())
@unittest.skipIf(not common.blosc_avail, 'BLOSC not available')
class SetBloscMaxThreadsTestCase(common.TempFileMixin, TestCase):
    """Tests for ``tables.set_blosc_max_threads``.

    ``set_blosc_max_threads`` returns the *previous* global setting, which
    is what both tests assert on.
    """

    # Blosc-compressed filters used for the arrays created below.
    filters = tables.Filters(complevel=4, complib="blosc")

    def test00(self):
        """Checking set_blosc_max_threads()"""
        # Right after opening a file, the previous setting should match
        # the file's MAX_BLOSC_THREADS parameter.
        nthreads_old = tables.set_blosc_max_threads(4)
        if common.verbose:
            print("Previous max threads:", nthreads_old)
            print("Should be:", self.h5file.params['MAX_BLOSC_THREADS'])
        self.assertEqual(nthreads_old, self.h5file.params['MAX_BLOSC_THREADS'])
        self.h5file.create_carray('/', 'some_array',
                                  atom=tables.Int32Atom(), shape=(3, 3),
                                  filters = self.filters)
        # Changing the setting again must report the value set above (4).
        nthreads_old = tables.set_blosc_max_threads(1)
        if common.verbose:
            print("Previous max threads:", nthreads_old)
            print("Should be:", 4)
        self.assertEqual(nthreads_old, 4)

    def test01(self):
        """Checking set_blosc_max_threads() (re-open)"""
        # NOTE(review): after _reopen() the previous setting is expected to
        # match MAX_BLOSC_THREADS again — presumably opening a file resets
        # the global Blosc thread count; confirm against the PyTables docs.
        nthreads_old = tables.set_blosc_max_threads(4)
        self.h5file.create_carray('/', 'some_array',
                                  atom=tables.Int32Atom(), shape=(3, 3),
                                  filters = self.filters)
        self._reopen()
        nthreads_old = tables.set_blosc_max_threads(4)
        if common.verbose:
            print("Previous max threads:", nthreads_old)
            print("Should be:", self.h5file.params['MAX_BLOSC_THREADS'])
        self.assertEqual(nthreads_old, self.h5file.params['MAX_BLOSC_THREADS'])
class FilterTestCase(TestCase):
def test_filter_pack_type(self):
self.assertEqual(type(Filters()._pack()), numpy.int64)
@staticmethod
def _hexl(n):
if sys.version_info[0] > 2:
return hex(int(n))
else:
return hex(int(n)).rstrip('L')
def test_filter_pack_01(self):
filter_ = Filters()
self.assertEqual(self._hexl(filter_._pack()), '0x0')
def test_filter_pack_02(self):
filter_ = Filters(1, shuffle=False)
self.assertEqual(self._hexl(filter_._pack()), '0x101')
def test_filter_pack_03(self):
filter_ = Filters(9, 'zlib', shuffle=True, fletcher32=True)
self.assertEqual(self._hexl(filter_._pack()), '0x30109')
def test_filter_pack_04(self):
filter_ = Filters(1, shuffle=False, least_significant_digit=5)
self.assertEqual(self._hexl(filter_._pack()), '0x5040101')
def test_filter_unpack_01(self):
filter_ = Filters._unpack(numpy.int64(0x0))
self.assertFalse(filter_.shuffle)
self.assertFalse(filter_.fletcher32)
self.assertEqual(filter_.least_significant_digit, None)
self.assertEqual(filter_.complevel, 0)
self.assertEqual(filter_.complib, None)
def test_filter_unpack_02(self):
filter_ = Filters._unpack(numpy.int64(0x101))
self.assertFalse(filter_.shuffle)
self.assertFalse(filter_.fletcher32)
self.assertEqual(filter_.least_significant_digit, None)
self.assertEqual(filter_.complevel, 1)
self.assertEqual(filter_.complib, 'zlib')
def test_filter_unpack_03(self):
filter_ = Filters._unpack(numpy.int64(0x30109))
self.assertTrue(filter_.shuffle)
self.assertTrue(filter_.fletcher32)
self.assertEqual(filter_.least_significant_digit, None)
self.assertEqual(filter_.complevel, 9)
self.assertEqual(filter_.complib, 'zlib')
def test_filter_unpack_04(self):
filter_ = Filters._unpack(numpy.int64(0x5040101))
self.assertFalse(filter_.shuffle)
self.assertFalse(filter_.fletcher32)
self.assertEqual(filter_.least_significant_digit, 5)
self.assertEqual(filter_.complevel, 1)
self.assertEqual(filter_.complib, 'zlib')
class DefaultDriverTestCase(common.TempFileMixin, TestCase):
    """Create/read/append round-trips for a configurable HDF5 file driver.

    Subclasses override ``DRIVER``/``DRIVER_PARAMS`` to exercise a
    specific low-level driver.
    """

    DRIVER = None
    DRIVER_PARAMS = {}
    open_kwargs = dict(driver=DRIVER, **DRIVER_PARAMS)

    def setUp(self):
        super(DefaultDriverTestCase, self).setUp()
        # Populate the freshly created file with one attribute, one array
        # and one table at the root group.
        root = self.h5file.root
        self.h5file.set_node_attr(root, "testattr", 41)
        self.h5file.create_array(root, "array", [1, 2], title="array")
        self.h5file.create_table(root, "table", {"var1": tables.IntCol()},
                                 title="table")

    def assertIsFile(self):
        self.assertTrue(os.path.isfile(self.h5fname))

    def _check_contents(self, root, extended=False):
        # Verify the objects written in setUp(); with extended=True also
        # verify the second batch written by _write_more(), interleaved in
        # the same order the original assertions used.
        self.assertEqual(self.h5file.get_node_attr(root, "testattr"), 41)
        if extended:
            self.assertEqual(self.h5file.get_node_attr(root, "testattr2"), 42)
        self.assertTrue(isinstance(root.array, tables.Array))
        self.assertEqual(root.array._v_title, "array")
        if extended:
            self.assertTrue(isinstance(root.array2, tables.Array))
            self.assertEqual(root.array2._v_title, "array2")
        self.assertTrue(isinstance(root.table, tables.Table))
        self.assertEqual(root.table._v_title, "table")
        self.assertTrue("var1" in root.table.colnames)
        self.assertEqual(root.table.cols.var1.dtype, tables.IntCol().dtype)
        if extended:
            self.assertTrue(isinstance(root.table2, tables.Table))
            self.assertEqual(root.table2._v_title, "table2")
            self.assertTrue("var2" in root.table2.colnames)
            self.assertEqual(root.table2.cols.var2.dtype,
                             tables.FloatCol().dtype)

    def _write_more(self, root):
        # Add a second attribute/array/table on top of setUp()'s content.
        self.h5file.set_node_attr(root, "testattr2", 42)
        self.h5file.create_array(root, "array2", [1, 2], title="array2")
        self.h5file.create_table(root, "table2", {"var2": tables.FloatCol()},
                                 title="table2")

    def test_newFile(self):
        self.assertTrue(isinstance(self.h5file, tables.File))
        self.assertIsFile()

    def test_readFile(self):
        self.h5file.close()
        self.h5file = None
        self.assertIsFile()
        # Re-open read-only with the driver under test and verify contents.
        self.h5file = tables.open_file(self.h5fname, mode="r",
                                       driver=self.DRIVER,
                                       **self.DRIVER_PARAMS)
        self._check_contents(self.h5file.root)

    def test_openFileA(self):
        self.h5file.close()
        self.h5file = None
        self.assertIsFile()
        # Re-open in append mode, verify, extend, then re-verify everything
        # after a reopen.
        self.h5file = tables.open_file(self.h5fname, mode="a",
                                       driver=self.DRIVER,
                                       **self.DRIVER_PARAMS)
        self._check_contents(self.h5file.root)
        self._write_more(self.h5file.root)
        self._reopen(mode="a", driver=self.DRIVER, **self.DRIVER_PARAMS)
        self._check_contents(self.h5file.root, extended=True)

    def test_openFileRW(self):
        self.h5file.close()
        self.h5file = None
        self.assertIsFile()
        # Same as test_openFileA but with read-write ("r+") mode.
        self.h5file = tables.open_file(self.h5fname, mode="r+",
                                       driver=self.DRIVER,
                                       **self.DRIVER_PARAMS)
        self._check_contents(self.h5file.root)
        self._write_more(self.h5file.root)
        self._reopen(mode="r+", driver=self.DRIVER, **self.DRIVER_PARAMS)
        self._check_contents(self.h5file.root, extended=True)
# Fix: the skip reason contained a typo ("1.8,9" -> "1.8.9").
@unittest.skipIf(hdf5_version < "1.8.9", "requires HDF5 >= 1.8.9")
class Sec2DriverTestCase(DefaultDriverTestCase):
    """Repeat the default driver round-trips with the H5FD_SEC2 driver."""

    DRIVER = "H5FD_SEC2"
    open_kwargs = dict(driver=DRIVER, **DefaultDriverTestCase.DRIVER_PARAMS)

    def test_get_file_image(self):
        # The in-memory image must be non-empty and start with the HDF5
        # magic signature "\x89HDF" (bytes 137, 72, 68, 70).
        image = self.h5file.get_file_image()
        self.assertTrue(len(image) > 0)
        head = image[:4]
        if sys.version_info[0] < 3:
            head = [ord(ch) for ch in head]
        else:
            head = list(head)
        self.assertEqual(head, [137, 72, 68, 70])
# Fix: the skip reason contained a typo ("1.8,9" -> "1.8.9").
@unittest.skipIf(hdf5_version < "1.8.9", "requires HDF5 >= 1.8.9")
class StdioDriverTestCase(DefaultDriverTestCase):
    """Repeat the default driver round-trips with the H5FD_STDIO driver."""

    DRIVER = "H5FD_STDIO"
    open_kwargs = dict(driver=DRIVER, **DefaultDriverTestCase.DRIVER_PARAMS)

    def test_get_file_image(self):
        # The in-memory image must be non-empty and start with the HDF5
        # magic signature "\x89HDF" (bytes 137, 72, 68, 70).
        image = self.h5file.get_file_image()
        self.assertTrue(len(image) > 0)
        head = image[:4]
        if sys.version_info[0] < 3:
            head = [ord(ch) for ch in head]
        else:
            head = list(head)
        self.assertEqual(head, [137, 72, 68, 70])
# Fix: the skip reason contained a typo ("1.8,9" -> "1.8.9").
@unittest.skipIf(hdf5_version < "1.8.9", "requires HDF5 >= 1.8.9")
class CoreDriverTestCase(DefaultDriverTestCase):
    """Repeat the default driver round-trips with the H5FD_CORE driver."""

    DRIVER = "H5FD_CORE"
    open_kwargs = dict(driver=DRIVER, **DefaultDriverTestCase.DRIVER_PARAMS)

    def test_get_file_image(self):
        # The in-memory image must be non-empty and start with the HDF5
        # magic signature "\x89HDF" (bytes 137, 72, 68, 70).
        image = self.h5file.get_file_image()
        self.assertTrue(len(image) > 0)
        head = image[:4]
        if sys.version_info[0] < 3:
            head = [ord(ch) for ch in head]
        else:
            head = list(head)
        self.assertEqual(head, [137, 72, 68, 70])
class CoreDriverNoBackingStoreTestCase(TestCase):
    """H5FD_CORE with the backing store disabled.

    With ``driver_core_backing_store=False`` all changes stay in memory;
    these tests verify that nothing is ever created or modified on disk.
    """

    DRIVER = "H5FD_CORE"

    def setUp(self):
        super(CoreDriverNoBackingStoreTestCase, self).setUp()
        self.h5fname = tempfile.mktemp(suffix=".h5")
        self.h5file = None

    def tearDown(self):
        if self.h5file:
            self.h5file.close()
        elif self.h5fname in tables.file._open_files:
            # A handle may be registered without being stored on the test
            # instance; close every handler still bound to the name.
            open_files = tables.file._open_files
            for h5file in open_files.get_handlers_by_name(self.h5fname):
                h5file.close()
        self.h5file = None
        if os.path.isfile(self.h5fname):
            os.remove(self.h5fname)
        super(CoreDriverNoBackingStoreTestCase, self).tearDown()

    def _populate(self, h5file):
        # Write one attribute, one array and one table at the root group.
        root = h5file.root
        h5file.set_node_attr(root, "testattr", 41)
        h5file.create_array(root, "array", [1, 2], title="array")
        h5file.create_table(root, "table", {"var1": tables.IntCol()},
                            title="table")

    def _check_contents(self, h5file):
        # Verify everything written by _populate()/_create_file().
        root = h5file.root
        self.assertEqual(h5file.get_node_attr(root, "testattr"), 41)
        self.assertTrue(isinstance(root.array, tables.Array))
        self.assertEqual(root.array._v_title, "array")
        self.assertTrue(isinstance(root.table, tables.Table))
        self.assertEqual(root.table._v_title, "table")
        self.assertTrue("var1" in root.table.colnames)
        self.assertEqual(root.table.cols.var1.dtype, tables.IntCol().dtype)

    def test_newFile(self):
        """Ensure that nothing is written to file."""
        self.assertFalse(os.path.isfile(self.h5fname))
        self.h5file = tables.open_file(self.h5fname, mode="w",
                                       driver=self.DRIVER,
                                       driver_core_backing_store=False)
        self._populate(self.h5file)
        self.h5file.close()  # flush
        self.assertFalse(os.path.isfile(self.h5fname))

    def test_readNewFileW(self):
        self.assertFalse(os.path.isfile(self.h5fname))
        self.h5file = tables.open_file(self.h5fname, mode="w",
                                       driver=self.DRIVER,
                                       driver_core_backing_store=False)
        self._populate(self.h5file)
        self._check_contents(self.h5file)
        self.h5file.close()  # flush
        self.assertFalse(os.path.isfile(self.h5fname))

    def test_readNewFileA(self):
        self.assertFalse(os.path.isfile(self.h5fname))
        self.h5file = tables.open_file(self.h5fname, mode="a",
                                       driver=self.DRIVER,
                                       driver_core_backing_store=False)
        self._populate(self.h5file)
        self._check_contents(self.h5file)
        self.h5file.close()  # flush
        self.assertFalse(os.path.isfile(self.h5fname))

    def test_openNewFileRW(self):
        # "r+" on a non-existing file must fail even without backing store.
        self.assertFalse(os.path.isfile(self.h5fname))
        self.assertRaises(tables.HDF5ExtError,
                          tables.open_file, self.h5fname, mode="r+",
                          driver=self.DRIVER, driver_core_backing_store=False)

    def test_openNewFileR(self):
        # "r" on a non-existing file must fail even without backing store.
        self.assertFalse(os.path.isfile(self.h5fname))
        self.assertRaises(tables.HDF5ExtError,
                          tables.open_file, self.h5fname, mode="r",
                          driver=self.DRIVER, driver_core_backing_store=False)

    def _create_file(self, filename):
        # Create a real on-disk file (default driver) to be read back later.
        h5file = tables.open_file(filename, mode="w")
        self._populate(h5file)
        h5file.close()

    def test_readFile(self):
        self._create_file(self.h5fname)
        self.assertTrue(os.path.isfile(self.h5fname))
        # Read an existing on-disk file through the in-core driver.
        self.h5file = tables.open_file(self.h5fname, mode="r",
                                       driver=self.DRIVER,
                                       driver_core_backing_store=False)
        self._check_contents(self.h5file)

    def _get_digest(self, filename):
        # MD5 of the on-disk bytes; used to prove the file is unchanged.
        md5 = hashlib.md5()
        with open(filename, 'rb') as fd:
            for data in fd:
                md5.update(data)
        return md5.hexdigest()

    def _check_no_disk_change(self, mode):
        # Open the on-disk file in *mode*, verify and modify the in-core
        # image, then assert the on-disk bytes never changed.
        self._create_file(self.h5fname)
        self.assertTrue(os.path.isfile(self.h5fname))
        hexdigest = self._get_digest(self.h5fname)
        self.h5file = tables.open_file(self.h5fname, mode=mode,
                                       driver=self.DRIVER,
                                       driver_core_backing_store=False)
        self._check_contents(self.h5file)
        root = self.h5file.root
        self.h5file.set_node_attr(root, "testattr2", 42)
        self.h5file.create_array(root, "array2", [1, 2], title="array2")
        self.h5file.create_table(root, "table2", {"var2": tables.FloatCol()},
                                 title="table2")
        self.h5file.close()
        self.assertEqual(hexdigest, self._get_digest(self.h5fname))

    def test_openFileA(self):
        self._check_no_disk_change("a")

    def test_openFileRW(self):
        self._check_no_disk_change("r+")

    @unittest.skipIf(hdf5_version < "1.8.9", 'HDF5 >= "1.8.9" required')
    def test_get_file_image(self):
        self.h5file = tables.open_file(self.h5fname, mode="w",
                                       driver=self.DRIVER,
                                       driver_core_backing_store=False)
        self._populate(self.h5file)
        # The image must be non-empty and begin with the HDF5 magic
        # signature "\x89HDF" (bytes 137, 72, 68, 70).
        image = self.h5file.get_file_image()
        self.assertTrue(len(image) > 0)
        if sys.version_info[0] < 3:
            self.assertEqual([ord(i) for i in image[:4]], [137, 72, 68, 70])
        else:
            self.assertEqual([i for i in image[:4]], [137, 72, 68, 70])
class SplitDriverTestCase(DefaultDriverTestCase):
    """Default driver round-trips with H5FD_SPLIT (separate meta/raw files)."""

    DRIVER = "H5FD_SPLIT"
    DRIVER_PARAMS = {
        "driver_split_meta_ext": "-xm.h5",
        "driver_split_raw_ext": "-xr.h5",
    }
    open_kwargs = dict(driver=DRIVER, **DRIVER_PARAMS)

    def _getTempFileName(self):
        return tempfile.mktemp(prefix=self._getName())

    def setUp(self):
        super(SplitDriverTestCase, self).setUp()
        # The split driver materializes two files: one for metadata and
        # one for raw data, each with its own configured extension.
        ext_keys = ("driver_split_meta_ext", "driver_split_raw_ext")
        self.h5fnames = [self.h5fname + self.DRIVER_PARAMS[key]
                         for key in ext_keys]

    def tearDown(self):
        self.h5file.close()
        for fname in self.h5fnames:
            if os.path.isfile(fname):
                os.remove(fname)
        # Intentionally bypass the parent tearDown: the plain self.h5fname
        # never exists with the split driver, only the two variants above.
        TestCase.tearDown(self)

    def assertIsFile(self):
        # Both the metadata and the raw-data files must exist.
        for fname in self.h5fnames:
            self.assertTrue(os.path.isfile(fname))
class NotSpportedDriverTestCase(TestCase):
    """Base checks for HDF5 drivers that PyTables does not support.

    NOTE: the typo in the class name ("Spported") is preserved because
    subclasses and the test suite reference it by this exact name.
    """

    DRIVER = None
    DRIVER_PARAMS = {}
    EXCEPTION = ValueError

    def setUp(self):
        super(NotSpportedDriverTestCase, self).setUp()
        self.h5fname = tempfile.mktemp(suffix=".h5")

    def tearDown(self):
        registry = tables.file._open_files
        if self.h5fname in registry:
            # Close any handle that a failed open may have left registered.
            for handler in registry.get_handlers_by_name(self.h5fname):
                handler.close()
        if os.path.exists(self.h5fname):
            os.remove(self.h5fname)
        super(NotSpportedDriverTestCase, self).tearDown()

    def test_newFile(self):
        # Opening with an unsupported driver must raise and must not
        # leave a file behind on disk.
        self.assertRaises(self.EXCEPTION, tables.open_file, self.h5fname,
                          mode="w", driver=self.DRIVER, **self.DRIVER_PARAMS)
        self.assertFalse(os.path.isfile(self.h5fname))
# Choose the base for the log-driver tests depending on whether this
# build of PyTables supports the H5FD_LOG driver.
BaseLogDriverTestCase = (
    DefaultDriverTestCase
    if "H5FD_LOG" in tables.hdf5extension._supported_drivers
    else NotSpportedDriverTestCase
)
class LogDriverTestCase(BaseLogDriverTestCase):
    """Round-trips with H5FD_LOG when available (else the unsupported check)."""

    DRIVER = "H5FD_LOG"
    open_kwargs = dict(driver=DRIVER, **BaseLogDriverTestCase.DRIVER_PARAMS)

    def setUp(self):
        # Each test instance logs to its own temporary file, so
        # DRIVER_PARAMS must be bound per instance rather than on the class.
        self.DRIVER_PARAMS = {
            "driver_log_file": tempfile.mktemp(suffix=".log")
        }
        super(LogDriverTestCase, self).setUp()

    def tearDown(self):
        logfile = self.DRIVER_PARAMS["driver_log_file"]
        if os.path.exists(logfile):
            os.remove(logfile)
        super(LogDriverTestCase, self).tearDown()
# H5FD_DIRECT is only usable when the underlying HDF5 library was built
# with direct I/O support; otherwise opening with it raises RuntimeError,
# which is what the fallback class checks.
if HAVE_DIRECT_DRIVER:
    class DirectDriverTestCase(DefaultDriverTestCase):
        DRIVER = "H5FD_DIRECT"
        open_kwargs = dict(
            driver=DRIVER, **DefaultDriverTestCase.DRIVER_PARAMS
        )
else:
    class DirectDriverTestCase(NotSpportedDriverTestCase):
        DRIVER = "H5FD_DIRECT"
        EXCEPTION = RuntimeError
# H5FD_WINDOWS only exists on Windows builds of HDF5; elsewhere opening
# with it raises RuntimeError, which is what the fallback class checks.
if HAVE_WINDOWS_DRIVER:
    class WindowsDriverTestCase(DefaultDriverTestCase):
        DRIVER = "H5FD_WINDOWS"
        open_kwargs = dict(
            driver=DRIVER, **DefaultDriverTestCase.DRIVER_PARAMS
        )
else:
    class WindowsDriverTestCase(NotSpportedDriverTestCase):
        DRIVER = "H5FD_WINDOWS"
        EXCEPTION = RuntimeError
# The following HDF5 drivers are deliberately unsupported by PyTables;
# opening a file with any of them must raise ValueError (exercised by the
# inherited NotSpportedDriverTestCase.test_newFile).
class FamilyDriverTestCase(NotSpportedDriverTestCase):
    DRIVER = "H5FD_FAMILY"
class MultiDriverTestCase(NotSpportedDriverTestCase):
    DRIVER = "H5FD_MULTI"
class MpioDriverTestCase(NotSpportedDriverTestCase):
    DRIVER = "H5FD_MPIO"
class MpiPosixDriverTestCase(NotSpportedDriverTestCase):
    DRIVER = "H5FD_MPIPOSIX"
class StreamDriverTestCase(NotSpportedDriverTestCase):
    DRIVER = "H5FD_STREAM"
@unittest.skipIf(hdf5_version < "1.8.9", 'HDF5 >= "1.8.9" required')
class InMemoryCoreDriverTestCase(TestCase):
    """Exercise H5FD_CORE working purely on in-memory file images."""

    DRIVER = "H5FD_CORE"

    def setUp(self):
        super(InMemoryCoreDriverTestCase, self).setUp()
        self.h5fname = tempfile.mktemp(".h5")
        self.h5file = None

    def tearDown(self):
        if self.h5file:
            self.h5file.close()
        self.h5file = None
        if os.path.isfile(self.h5fname):
            os.remove(self.h5fname)
        super(InMemoryCoreDriverTestCase, self).tearDown()

    def _create_image(self, filename="in-memory", title="Title", mode='w'):
        # Build a small purely in-memory HDF5 file and return its raw image.
        h5file = tables.open_file(filename, mode=mode, title=title,
                                  driver=self.DRIVER,
                                  driver_core_backing_store=0)
        try:
            h5file.create_array(h5file.root, 'array', [1, 2], title="Array")
            h5file.create_table(h5file.root, 'table', {
                'var1': IntCol()}, "Table")
            h5file.root._v_attrs.testattr = 41
            image = h5file.get_file_image()
        finally:
            h5file.close()
        return image

    def _assert_hdf5_signature(self, image):
        # Every HDF5 image starts with "\x89HDF" (bytes 137, 72, 68, 70).
        self.assertTrue(len(image) > 0)
        if sys.version_info[0] < 3:
            self.assertEqual([ord(i) for i in image[:4]], [137, 72, 68, 70])
        else:
            self.assertEqual([i for i in image[:4]], [137, 72, 68, 70])

    def _assert_base_contents(self):
        # All the objects written by _create_image() must be present.
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "TITLE"))
        self.assertEqual(self.h5file.get_node_attr("/", "TITLE"), "Title")
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "testattr"))
        self.assertEqual(self.h5file.get_node_attr("/", "testattr"), 41)
        self.assertTrue(hasattr(self.h5file.root, "array"))
        self.assertEqual(self.h5file.get_node_attr("/array", "TITLE"), "Array")
        self.assertTrue(hasattr(self.h5file.root, "table"))
        self.assertEqual(self.h5file.get_node_attr("/table", "TITLE"), "Table")
        self.assertEqual(self.h5file.root.array.read(), [1, 2])

    def _assert_extra_contents(self, data):
        # The second batch of objects written while updating an image.
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "testattr2"))
        self.assertEqual(self.h5file.get_node_attr("/", "testattr2"), 42)
        self.assertTrue(hasattr(self.h5file.root, "array2"))
        self.assertEqual(self.h5file.get_node_attr(
            "/array2", "TITLE"), "Array2")
        self.assertEqual(self.h5file.root.array2.read(), data)

    def test_newFileW(self):
        image = self._create_image(self.h5fname, mode='w')
        self._assert_hdf5_signature(image)
        self.assertFalse(os.path.exists(self.h5fname))

    def test_newFileA(self):
        image = self._create_image(self.h5fname, mode='a')
        self._assert_hdf5_signature(image)
        self.assertFalse(os.path.exists(self.h5fname))

    def test_openFileR(self):
        image = self._create_image(self.h5fname)
        self.assertFalse(os.path.exists(self.h5fname))
        # Re-open the image read-only and check its contents.
        self.h5file = tables.open_file(self.h5fname, mode="r",
                                       driver=self.DRIVER,
                                       driver_core_image=image,
                                       driver_core_backing_store=0)
        self._assert_base_contents()

    def test_openFileRW(self):
        image = self._create_image(self.h5fname)
        self.assertFalse(os.path.exists(self.h5fname))
        self.h5file = tables.open_file(self.h5fname, mode="r+",
                                       driver=self.DRIVER,
                                       driver_core_image=image,
                                       driver_core_backing_store=0)
        self._assert_base_contents()
        # The image is writable, but nothing must ever reach the disk.
        self.h5file.create_array(self.h5file.root, 'array2',
                                 list(range(10000)), title="Array2")
        self.h5file.root._v_attrs.testattr2 = 42
        self.h5file.close()
        self.assertFalse(os.path.exists(self.h5fname))

    def test_openFileRW_update(self):
        filename = tempfile.mktemp(".h5")
        image1 = self._create_image(filename)
        self.assertFalse(os.path.exists(self.h5fname))
        self.h5file = tables.open_file(self.h5fname, mode="r+",
                                       driver=self.DRIVER,
                                       driver_core_image=image1,
                                       driver_core_backing_store=0)
        self._assert_base_contents()
        # Grow the image beyond DRIVER_CORE_INCREMENT and snapshot it.
        data = list(range(2 * tables.parameters.DRIVER_CORE_INCREMENT))
        self.h5file.create_array(self.h5file.root, 'array2', data,
                                 title="Array2")
        self.h5file.root._v_attrs.testattr2 = 42
        image2 = self.h5file.get_file_image()
        self.h5file.close()
        self.assertFalse(os.path.exists(self.h5fname))
        self.assertNotEqual(len(image1), len(image2))
        self.assertNotEqual(image1, image2)
        # Re-open the updated image and verify old and new contents.
        self.h5file = tables.open_file(self.h5fname, mode="r",
                                       driver=self.DRIVER,
                                       driver_core_image=image2,
                                       driver_core_backing_store=0)
        self._assert_base_contents()
        self._assert_extra_contents(data)
        self.h5file.close()
        self.assertFalse(os.path.exists(self.h5fname))

    def test_openFileA(self):
        image = self._create_image(self.h5fname)
        self.assertFalse(os.path.exists(self.h5fname))
        self.h5file = tables.open_file(self.h5fname, mode="a",
                                       driver=self.DRIVER,
                                       driver_core_image=image,
                                       driver_core_backing_store=0)
        self._assert_base_contents()
        self.h5file.close()
        self.assertFalse(os.path.exists(self.h5fname))

    def test_openFileA_update(self):
        h5fname = tempfile.mktemp(".h5")
        image1 = self._create_image(h5fname)
        self.assertFalse(os.path.exists(self.h5fname))
        self.h5file = tables.open_file(self.h5fname, mode="a",
                                       driver=self.DRIVER,
                                       driver_core_image=image1,
                                       driver_core_backing_store=0)
        self._assert_base_contents()
        # Grow the image beyond DRIVER_CORE_INCREMENT and snapshot it.
        data = list(range(2 * tables.parameters.DRIVER_CORE_INCREMENT))
        self.h5file.create_array(self.h5file.root, 'array2', data,
                                 title="Array2")
        self.h5file.root._v_attrs.testattr2 = 42
        image2 = self.h5file.get_file_image()
        self.h5file.close()
        self.assertFalse(os.path.exists(self.h5fname))
        self.assertNotEqual(len(image1), len(image2))
        self.assertNotEqual(image1, image2)
        # Re-open the updated image and verify old and new contents.
        self.h5file = tables.open_file(self.h5fname, mode="r",
                                       driver=self.DRIVER,
                                       driver_core_image=image2,
                                       driver_core_backing_store=0)
        self._assert_base_contents()
        self._assert_extra_contents(data)
        self.h5file.close()
        self.assertFalse(os.path.exists(self.h5fname))

    def test_str(self):
        self.h5file = tables.open_file(self.h5fname, mode="w", title="Title",
                                       driver=self.DRIVER,
                                       driver_core_backing_store=0)
        self.h5file.create_array(self.h5file.root, 'array', [1, 2],
                                 title="Array")
        self.h5file.create_table(self.h5file.root, 'table', {'var1': IntCol()},
                                 "Table")
        self.h5file.root._v_attrs.testattr = 41
        # __str__ must work even though no physical file exists on disk
        # (the os.stat call used for date retrieval fails in that case).
        self.assertTrue(str(self.h5file) is not None)
        self.h5file.close()
        self.assertFalse(os.path.exists(self.h5fname))
class QuantizeTestCase(common.TempFileMixin, TestCase):
    """Check quantize() and the least_significant_digit filter option."""

    mode = "w"
    title = "This is the table title"
    expectedrows = 10
    appendrows = 5

    def setUp(self):
        super(QuantizeTestCase, self).setUp()
        self.data = numpy.linspace(-5., 5., 41)
        self.randomdata = numpy.random.random_sample(1000000)
        # Fix: numpy.random.random_integers() has been deprecated since
        # NumPy 1.11 and removed in 1.25.  randint() with the upper bound
        # bumped by one (it is exclusive) is the exact equivalent.
        self.randomints = numpy.random.randint(
            -1000000, 1000000 + 1, 1000000).astype('int64')
        self.populateFile()
        self.h5file.close()
        # Expected results of quantizing self.data to 0 and -1 digits.
        self.quantizeddata_0 = numpy.asarray(
            [-5.] * 2 + [-4.] * 5 + [-3.] * 3 + [-2.] * 5 + [-1.] * 3 +
            [0.] * 5 + [1.] * 3 + [2.] * 5 + [3.] * 3 + [4.] * 5 + [5.] * 2)
        self.quantizeddata_m1 = numpy.asarray(
            [-8.] * 4 + [0.] * 33 + [8.] * 4)

    def _make_carray(self, name, atom, shape, digits, data):
        # Helper: create a blosc-compressed CArray quantized to *digits*
        # significant decimal digits and fill it with *data*.
        filters = Filters(complevel=1, complib="blosc",
                          least_significant_digit=digits)
        carray = self.h5file.create_carray(self.h5file.root, name, atom,
                                           shape, filters=filters)
        carray[:] = data

    def populateFile(self):
        self._make_carray("integers", Int64Atom(), (1000000,), 1,
                          self.randomints)
        self._make_carray("floats", Float32Atom(), (1000000,), 1,
                          self.randomdata)
        self._make_carray("data1", Float64Atom(), (41,), 1, self.data)
        self._make_carray("data0", Float64Atom(), (41,), 0, self.data)
        self._make_carray("data2", Float64Atom(), (41,), 2, self.data)
        self._make_carray("datam1", Float64Atom(), (41,), -1, self.data)

    def test00_quantizeData(self):
        """Checking the quantize() function."""
        quantized_0 = quantize(self.data, 0)
        quantized_1 = quantize(self.data, 1)
        quantized_2 = quantize(self.data, 2)
        quantized_m1 = quantize(self.data, -1)
        numpy.testing.assert_array_equal(quantized_0, self.quantizeddata_0)
        # self.data has a spacing of 0.25, so one or more decimal digits
        # represent it exactly.
        numpy.testing.assert_array_equal(quantized_1, self.data)
        numpy.testing.assert_array_equal(quantized_2, self.data)
        numpy.testing.assert_array_equal(quantized_m1, self.quantizeddata_m1)

    def test01_quantizeDataMaxError(self):
        """Checking the maximum error introduced by the quantize() function."""
        quantized_0 = quantize(self.randomdata, 0)
        quantized_1 = quantize(self.randomdata, 1)
        quantized_2 = quantize(self.randomdata, 2)
        quantized_m1 = quantize(self.randomdata, -1)
        # assertTrue() is used instead of assertLess() to keep backward
        # compatibility with Python < 2.7.
        self.assertTrue(numpy.abs(quantized_0 - self.randomdata).max() < 0.5)
        self.assertTrue(numpy.abs(quantized_1 - self.randomdata).max() < 0.05)
        self.assertTrue(numpy.abs(quantized_2 - self.randomdata).max() < 0.005)
        self.assertTrue(numpy.abs(quantized_m1 - self.randomdata).max() < 1.)

    def test02_array(self):
        """Checking quantized data as written to disk."""
        self.h5file = tables.open_file(self.h5fname, "r")
        numpy.testing.assert_array_equal(self.h5file.root.data1[:], self.data)
        numpy.testing.assert_array_equal(self.h5file.root.data2[:], self.data)
        numpy.testing.assert_array_equal(self.h5file.root.data0[:],
                                         self.quantizeddata_0)
        numpy.testing.assert_array_equal(self.h5file.root.datam1[:],
                                         self.quantizeddata_m1)
        # Integers must survive quantization unchanged, dtype included.
        numpy.testing.assert_array_equal(self.h5file.root.integers[:],
                                         self.randomints)
        self.assertEqual(self.h5file.root.integers[:].dtype,
                         self.randomints.dtype)
        # Floats quantized to 1 digit may differ by at most half a unit
        # in the last kept decimal place.
        self.assertTrue(
            numpy.abs(self.h5file.root.floats[:] - self.randomdata).max() <
            0.05
        )
def suite():
    """Return the unittest suite for this module.

    Light cases always run; the heavy ones are added only when
    ``common.heavy`` is enabled.
    """
    import doctest

    # Cases listed in the exact order they must be executed.
    light_cases = (
        FiltersCase1, FiltersCase2, FiltersCase10,
        FiltersCaseBloscBloscLZ, FiltersCaseBloscLZ4, FiltersCaseBloscLZ4HC,
        FiltersCaseBloscSnappy, FiltersCaseBloscZlib,
        FiltersCaseBloscBitShuffle,
        CopyGroupCase1, CopyGroupCase2,
        CopyFileCase1, CopyFileCase2,
        GroupFiltersTestCase, SetBloscMaxThreadsTestCase, FilterTestCase,
    )
    driver_cases = (
        DefaultDriverTestCase, Sec2DriverTestCase, StdioDriverTestCase,
        CoreDriverTestCase, CoreDriverNoBackingStoreTestCase,
        SplitDriverTestCase, LogDriverTestCase, DirectDriverTestCase,
        WindowsDriverTestCase, FamilyDriverTestCase, MultiDriverTestCase,
        MpioDriverTestCase, MpiPosixDriverTestCase, StreamDriverTestCase,
        InMemoryCoreDriverTestCase, QuantizeTestCase,
    )
    heavy_cases = (
        CreateTestCase,
        FiltersCase3, FiltersCase4, FiltersCase5, FiltersCase6,
        FiltersCase7, FiltersCase8, FiltersCase9,
        CopyFileCase3, CopyFileCase4, CopyFileCase5, CopyFileCase6,
        CopyFileCase7, CopyFileCase8, CopyFileCase10,
        CopyGroupCase3, CopyGroupCase4, CopyGroupCase5, CopyGroupCase6,
        CopyGroupCase7, CopyGroupCase8,
    )

    the_suite = unittest.TestSuite()
    niter = 1
    # common.heavy = 1  # Uncomment this only for testing purposes!

    for _ in range(niter):
        for case in light_cases:
            the_suite.addTest(unittest.makeSuite(case))
        # The filters module also carries executable doctests.
        the_suite.addTest(doctest.DocTestSuite(tables.filters))
        for case in driver_cases:
            the_suite.addTest(unittest.makeSuite(case))
        if common.heavy:
            for case in heavy_cases:
                the_suite.addTest(unittest.makeSuite(case))

    return the_suite
# Standalone entry point: parse the PyTables test command-line options,
# print the library/version banner, then run the suite() defined above.
if __name__ == '__main__':
    common.parse_argv(sys.argv)
    common.print_versions()
    unittest.main(defaultTest='suite')
|
# -*- coding: utf-8 -*-
"""This test unit checks object creation funtions, like open_file,
create_table, create_array or create_group.
It also checks:
- name identifiers in tree objects
- title character limit for objects (255)
- limit in number in table fields (255)
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import hashlib
import tempfile
import warnings
import numpy
import tables
from tables import (
Group, Leaf, Table, Array, Filters,
StringAtom, Int16Atom, Int64Atom, Float32Atom, Float64Atom,
Col, StringCol, IntCol, Int16Col, FloatCol, Float32Col,
)
from tables.parameters import MAX_COLUMNS
from tables.hdf5extension import HAVE_DIRECT_DRIVER, HAVE_WINDOWS_DRIVER
from tables.utils import quantize
from tables.tests import common
from tables.tests.common import unittest, hdf5_version, blosc_version
from tables.tests.common import PyTablesTestCase as TestCase
from six.moves import range
class Record(tables.IsDescription):
    """Five-column row description used by the creation tests."""

    var1 = StringCol(itemsize=4)  # 4-character String
    var2 = IntCol()               # integer
    var3 = Int16Col()             # short integer
    var4 = FloatCol()             # double (double-precision)
    var5 = Float32Col()           # float (single-precision)
class CreateTestCase(common.TempFileMixin, TestCase):
    """Checks node creation (Table/Array/Group), name syntax rules,
    title lengths and the table column-count limit."""

    # Defaults shared by all tests in this case.
    title = "This is the table title"
    expectedrows = 100
    maxshort = 2 ** 15
    maxint = 2147483648  # (2 ** 31)
    compress = 0

    def setUp(self):
        """Create a fresh file holding one table, one array and one group."""
        super(CreateTestCase, self).setUp()

        # Create an instance of HDF5 Table
        self.root = self.h5file.root

        # Create a table object
        self.table = self.h5file.create_table(self.root, 'atable',
                                              Record, "Table title")
        # Create an array object
        self.array = self.h5file.create_array(self.root, 'anarray',
                                              [1], "Array title")
        # Create a group object
        self.group = self.h5file.create_group(self.root, 'agroup',
                                              "Group title")

    def test00_isClass(self):
        """Testing table creation."""
        self.assertTrue(isinstance(self.table, Table))
        self.assertTrue(isinstance(self.array, Array))
        # An Array is also a Leaf
        self.assertTrue(isinstance(self.array, Leaf))
        self.assertTrue(isinstance(self.group, Group))

    def test01_overwriteNode(self):
        """Checking protection against node overwriting."""
        try:
            # 'anarray' already exists, so this must raise NodeError
            self.array = self.h5file.create_array(self.root, 'anarray',
                                                  [1], "Array title")
        except tables.NodeError:
            if common.verbose:
                # NOTE(review): shadows the ``type`` builtin, but only
                # inside this branch.
                (type, value, traceback) = sys.exc_info()
                print("\nGreat!, the next NameError was catched!")
                print(value)
        else:
            self.fail("expected a tables.NodeError")

    def test02_syntaxname(self):
        """Checking syntax in object tree names."""
        # A leading blank is not natural-naming friendly
        with self.assertWarns(tables.NaturalNameWarning):
            self.array = self.h5file.create_array(self.root, ' array',
                                                  [1], "Array title")

        # another name error
        with self.assertWarns(tables.NaturalNameWarning):
            self.array = self.h5file.create_array(self.root, '$array',
                                                  [1], "Array title")

        # Finally, test a reserved word
        with self.assertWarns(tables.NaturalNameWarning):
            self.array = self.h5file.create_array(self.root, 'for',
                                                  [1], "Array title")

    def test03a_titleAttr(self):
        """Checking the self.title attr in nodes."""
        # Close the opened file to destroy the object tree
        self._reopen()

        # Now, test that self.title exists and is correct in all the nodes
        self.assertEqual(self.h5file.root.agroup._v_title, "Group title")
        self.assertEqual(self.h5file.root.atable.title, "Table title")
        self.assertEqual(self.h5file.root.anarray.title, "Array title")

    def test03b_titleLength(self):
        """Checking large title character length limit (1023)"""
        titlelength = 1023
        # Try to put a very long title on a group object
        group = self.h5file.create_group(self.root, 'group', "t" * titlelength)
        self.assertEqual(group._v_title, "t" * titlelength)
        self.assertEqual(group._f_getattr('TITLE'), "t" * titlelength)

        # Now, try with a table object
        table = self.h5file.create_table(self.root, 'table',
                                         Record, "t" * titlelength)
        self.assertEqual(table.title, "t" * titlelength)
        self.assertEqual(table.get_attr("TITLE"), "t" * titlelength)

        # Finally, try with an Array object
        arr = self.h5file.create_array(self.root, 'arr',
                                       [1], "t" * titlelength)
        self.assertEqual(arr.title, "t" * titlelength)
        self.assertEqual(arr.get_attr("TITLE"), "t" * titlelength)

    def test04_maxFields(self):
        """Checking a large number of fields in tables"""
        # The number of fields for a table
        varnumber = MAX_COLUMNS

        varnames = []
        for i in range(varnumber):
            varnames.append('int%d' % i)

        # Build a dictionary with the types as values and varnames as keys
        recordDict = {}
        i = 0
        for varname in varnames:
            recordDict[varname] = Col.from_type("int32", dflt=1, pos=i)
            i += 1
        # Append this entry to indicate the alignment!
        recordDict['_v_align'] = "="
        table = self.h5file.create_table(self.root, 'table',
                                         recordDict, "MetaRecord instance")
        row = table.row
        listrows = []
        # Write 10 records
        for j in range(10):
            rowlist = []
            for i in range(len(table.colnames)):
                row[varnames[i]] = i * j
                rowlist.append(i * j)

            row.append()
            listrows.append(tuple(rowlist))

        # write data on disk
        table.flush()

        # Read all the data as a list
        listout = table.read().tolist()

        # Compare the input rowlist and output row list. They should
        # be equal.
        if common.verbose:
            print("Original row list:", listrows[-1])
            print("Retrieved row list:", listout[-1])
        self.assertEqual(listrows, listout)

    # The next limitation has been released. A warning is still there, though
    def test05_maxFieldsExceeded(self):
        """Checking an excess of the maximum number of fields in tables"""
        # The number of fields for a table
        varnumber = MAX_COLUMNS + 1

        varnames = []
        for i in range(varnumber):
            varnames.append('int%d' % i)

        # Build a dictionary with the types as values and varnames as keys
        recordDict = {}
        i = 0
        for varname in varnames:
            recordDict[varname] = Col.from_type("int32", dflt=1)
            i += 1

        # Now, create a table with this record object
        # This way of creating node objects has been deprecated
        # table = Table(recordDict, "MetaRecord instance")

        # Attach the table to object tree
        # Turn the performance warning into an error so it can be caught.
        warnings.filterwarnings("error", category=tables.PerformanceWarning)
        # Here, a tables.PerformanceWarning should be raised!
        try:
            self.h5file.create_table(self.root, 'table',
                                     recordDict, "MetaRecord instance")
        except tables.PerformanceWarning:
            if common.verbose:
                (type, value, traceback) = sys.exc_info()
                print("\nGreat!, the next PerformanceWarning was catched!")
                print(value)
        else:
            self.fail("expected an tables.PerformanceWarning")
        # Reset the warning
        warnings.filterwarnings("default", category=tables.PerformanceWarning)

    # The next limitation has been released
    def _test06_maxColumnNameLengthExceeded(self):
        """Checking an excess (256) of the maximum length in column names"""
        # Build a dictionary with the types as values and varnames as keys
        recordDict = {}
        recordDict["a" * 255] = IntCol(dflt=1)
        recordDict["b" * 256] = IntCol(dflt=1)  # Should trigger a ValueError

        # Now, create a table with this record object
        # This way of creating node objects has been deprecated
        table = Table(recordDict, "MetaRecord instance")
        self.assertTrue(table is not None)

        # Attach the table to object tree
        # Here, ValueError should be raised!
        with self.assertRaises(ValueError):
            self.h5file.create_table(self.root, 'table',
                                     recordDict, "MetaRecord instance")

    def test06_noMaxColumnNameLength(self):
        """Checking unlimited length in column names"""
        # Build a dictionary with the types as values and varnames as keys
        recordDict = {}
        recordDict["a" * 255] = IntCol(dflt=1, pos=0)
        recordDict["b" * 1024] = IntCol(dflt=1, pos=1)  # Should work well

        # Attach the table to object tree
        # Here, IndexError should be raised!
        table = self.h5file.create_table(self.root, 'table',
                                         recordDict, "MetaRecord instance")
        self.assertEqual(table.colnames[0], "a" * 255)
        self.assertEqual(table.colnames[1], "b" * 1024)
class Record2(tables.IsDescription):
    """Three-column row description used by the filters/copy tests."""

    var1 = StringCol(itemsize=4)  # 4-character String
    var2 = IntCol()               # integer
    var3 = Int16Col()             # short integer
class FiltersTreeTestCase(common.TempFileMixin, TestCase):
    """Check that `Filters` settings are inherited through a group tree.

    A five-level deep tree is built where the second level overrides the
    file filters with ``self.gfilters`` and the third level sets no
    explicit filters (so it inherits).  Subclasses parameterize
    ``self.filters`` and ``self.gfilters``.
    """

    title = "A title"
    nrows = 10

    def setUp(self):
        super(FiltersTreeTestCase, self).setUp()
        self.populateFile()

    def populateFile(self):
        """Build the 5-level tree of tables/arrays used by the checks."""
        group = self.h5file.root
        for j in range(5):
            # Create a table
            table = self.h5file.create_table(group, 'table1', Record2,
                                             title=self.title,
                                             filters=None)
            # Get the record object associated with the new table
            d = table.row
            # Fill the table
            for i in range(self.nrows):
                d['var1'] = '%04d' % (self.nrows - i)
                d['var2'] = i
                d['var3'] = i * 2
                d.append()  # This injects the Record values
            # Flush the buffer for this table
            table.flush()

            # Create a couple of arrays in each group
            var1List = [x['var1'] for x in table.iterrows()]
            var3List = [x['var3'] for x in table.iterrows()]

            self.h5file.create_array(group, 'array1', var1List, "col 1")
            self.h5file.create_array(group, 'array2', var3List, "col 3")

            # Create a couple of EArrays as well
            ea1 = self.h5file.create_earray(group, 'earray1',
                                            StringAtom(itemsize=4), (0,),
                                            "col 1")
            ea2 = self.h5file.create_earray(group, 'earray2',
                                            Int16Atom(), (0,), "col 3")
            # And fill them with some values
            ea1.append(var1List)
            ea2.append(var3List)

            # Finally a couple of VLArrays too
            vla1 = self.h5file.create_vlarray(group, 'vlarray1',
                                              StringAtom(itemsize=4), "col 1")
            vla2 = self.h5file.create_vlarray(group, 'vlarray2',
                                              Int16Atom(), "col 3")
            # And fill them with some values
            vla1.append(var1List)
            vla2.append(var3List)

            # Create a new group (descendant of group)
            if j == 1:  # The second level gets the group filters
                group2 = self.h5file.create_group(group, 'group' + str(j),
                                                  filters=self.gfilters)
            elif j == 2:  # The third level inherits from its parent
                group2 = self.h5file.create_group(group, 'group' + str(j))
            else:  # The rest of levels get the file filters
                group2 = self.h5file.create_group(group, 'group' + str(j),
                                                  filters=self.filters)
            # Iterate over this new group (group2)
            group = group2

    def test00_checkFilters(self):
        """Checking inheritance of filters on trees (open file version)"""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test00_checkFilters..." %
                  self.__class__.__name__)

        # First level check
        if common.verbose:
            print("Test filter:", repr(self.filters))
            print("Filters in file:", repr(self.h5file.filters))

        # None means "default filters" at the file level
        if self.filters is None:
            filters = Filters()
        else:
            filters = self.filters
        self.assertEqual(repr(filters), repr(self.h5file.filters))

        # The next nodes have to have the same filter properties as
        # self.filters
        nodelist = [
            '/table1', '/group0/earray1', '/group0/vlarray1', '/group0',
        ]
        for node in nodelist:
            obj = self.h5file.get_node(node)
            if isinstance(obj, Group):
                self.assertEqual(repr(filters), repr(obj._v_filters))
            else:
                self.assertEqual(repr(filters), repr(obj.filters))

        # Second and third level check
        group1 = self.h5file.root.group0.group1
        if self.gfilters is None:
            if self.filters is None:
                gfilters = Filters()
            else:
                gfilters = self.filters
        else:
            gfilters = self.gfilters
        if common.verbose:
            print("Test gfilter:", repr(gfilters))
            print("Filters in file:", repr(group1._v_filters))
        self.assertEqual(repr(gfilters), repr(group1._v_filters))

        # The next nodes have to have the same filter properties as
        # gfilters
        nodelist = [
            '/group0/group1', '/group0/group1/earray1',
            '/group0/group1/vlarray1',
            '/group0/group1/table1', '/group0/group1/group2/table1',
        ]
        for node in nodelist:
            obj = self.h5file.get_node(node)
            if isinstance(obj, Group):
                self.assertEqual(repr(gfilters), repr(obj._v_filters))
            else:
                self.assertEqual(repr(gfilters), repr(obj.filters))

        # Fourth and fifth level check
        if self.filters is None:
            # If None, the filters are inherited!
            if self.gfilters is None:
                filters = Filters()
            else:
                filters = self.gfilters
        else:
            filters = self.filters
        group3 = self.h5file.root.group0.group1.group2.group3
        if common.verbose:
            print("Test filter:", repr(filters))
            print("Filters in file:", repr(group3._v_filters))
        self.assertEqual(repr(filters), repr(group3._v_filters))

        # The next nodes have to have the same filter properties as
        # self.filter
        nodelist = [
            '/group0/group1/group2/group3',
            '/group0/group1/group2/group3/earray1',
            '/group0/group1/group2/group3/vlarray1',
            '/group0/group1/group2/group3/table1',
            '/group0/group1/group2/group3/group4',
        ]
        for node in nodelist:
            obj = self.h5file.get_node(node)
            if isinstance(obj, Group):
                self.assertEqual(repr(filters), repr(obj._v_filters))
            else:
                self.assertEqual(repr(filters), repr(obj.filters))

        # Checking the special case for Arrays in which the compression
        # should always be the empty Filter()
        nodelist = [
            '/array1',
            '/group0/array1',
            '/group0/group1/array1',
            '/group0/group1/group2/array1',
            '/group0/group1/group2/group3/array1',
        ]
        for node in nodelist:
            obj = self.h5file.get_node(node)
            self.assertEqual(repr(Filters()), repr(obj.filters))

    def test01_checkFilters(self):
        """Checking inheritance of filters on trees (close file version)"""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test01_checkFilters..." %
                  self.__class__.__name__)

        # Close the file and reopen it so that all filter metadata is
        # read back from disk instead of the in-memory object tree.
        self._reopen()

        # First level check
        if self.filters is None:
            filters = Filters()
        else:
            filters = self.filters
        if common.verbose:
            print("Test filter:", repr(filters))
            print("Filters in file:", repr(self.h5file.filters))
        self.assertEqual(repr(filters), repr(self.h5file.filters))

        # The next nodes have to have the same filter properties as
        # self.filters
        nodelist = [
            '/table1', '/group0/earray1', '/group0/vlarray1', '/group0',
        ]
        for node in nodelist:
            obj = self.h5file.get_node(node)
            if isinstance(obj, Group):
                self.assertEqual(repr(filters), repr(obj._v_filters))
            else:
                self.assertEqual(repr(filters), repr(obj.filters))

        # Second and third level check
        group1 = self.h5file.root.group0.group1
        if self.gfilters is None:
            if self.filters is None:
                gfilters = Filters()
            else:
                gfilters = self.filters
        else:
            gfilters = self.gfilters
        if common.verbose:
            print("Test filter:", repr(gfilters))
            print("Filters in file:", repr(group1._v_filters))
        self.assertEqual(repr(gfilters), repr(group1._v_filters))

        # The next nodes have to have the same filter properties as
        # gfilters
        nodelist = [
            '/group0/group1', '/group0/group1/earray1',
            '/group0/group1/vlarray1',
            '/group0/group1/table1', '/group0/group1/group2/table1',
        ]
        for node in nodelist:
            obj = self.h5file.get_node(node)
            if isinstance(obj, Group):
                self.assertEqual(repr(gfilters), repr(obj._v_filters))
            else:
                self.assertEqual(repr(gfilters), repr(obj.filters))

        # Fourth and fifth level check
        if self.filters is None:
            if self.gfilters is None:
                filters = Filters()
            else:
                filters = self.gfilters
        else:
            filters = self.filters
        group3 = self.h5file.root.group0.group1.group2.group3
        if common.verbose:
            print("Test filter:", repr(filters))
            print("Filters in file:", repr(group3._v_filters))
        # BUG FIX: the original computed ``repr(filters) ==
        # repr(group3._v_filters)`` and discarded the result, making this
        # check a no-op; assert it instead, mirroring test00.
        self.assertEqual(repr(filters), repr(group3._v_filters))

        # The next nodes have to have the same filter properties as
        # self.filters
        nodelist = [
            '/group0/group1/group2/group3',
            '/group0/group1/group2/group3/earray1',
            '/group0/group1/group2/group3/vlarray1',
            '/group0/group1/group2/group3/table1',
            '/group0/group1/group2/group3/group4',
        ]
        for node in nodelist:
            obj = self.h5file.get_node(node)
            if isinstance(obj, Group):
                self.assertEqual(repr(filters), repr(obj._v_filters))
            else:
                self.assertEqual(repr(filters), repr(obj.filters))

        # Checking the special case for Arrays in which the compression
        # should always be the empty Filter()
        nodelist = [
            '/array1',
            '/group0/array1',
            '/group0/group1/array1',
            '/group0/group1/group2/array1',
            '/group0/group1/group2/group3/array1',
        ]
        for node in nodelist:
            obj = self.h5file.get_node(node)
            self.assertEqual(repr(Filters()), repr(obj.filters))
class FiltersCase1(FiltersTreeTestCase):
    """Default (empty) file filters; level-1 zlib for the group filters."""

    filters = Filters()
    gfilters = Filters(complevel=1)
    open_kwargs = dict(filters=filters)
@unittest.skipIf(not common.bzip2_avail,
                 'BZIP2 compression library not available')
class FiltersCase2(FiltersTreeTestCase):
    """BZIP2 file filters; level-1 zlib for the group filters."""

    filters = Filters(complevel=1, complib="bzip2")
    gfilters = Filters(complevel=1)
    open_kwargs = dict(filters=filters)
@unittest.skipIf(not common.lzo_avail, 'LZO compression library not available')
class FiltersCase3(FiltersTreeTestCase):
    """Shuffled zlib file filters; LZO group filters without shuffle."""

    filters = Filters(shuffle=True, complib="zlib")
    gfilters = Filters(complevel=1, shuffle=False, complib="lzo")
    open_kwargs = dict(filters=filters)
class FiltersCase4(FiltersTreeTestCase):
    """Shuffle only at the file level; level-1 without shuffle for groups."""

    filters = Filters(shuffle=True)
    gfilters = Filters(complevel=1, shuffle=False)
    open_kwargs = dict(filters=filters)
class FiltersCase5(FiltersTreeTestCase):
    """Fletcher32 checksum at the file level."""

    filters = Filters(fletcher32=True)
    gfilters = Filters(complevel=1, shuffle=False)
    open_kwargs = dict(filters=filters)
class FiltersCase6(FiltersTreeTestCase):
    """No explicit file filters; only the group filters are set."""

    filters = None
    gfilters = Filters(complevel=1, shuffle=False)
    open_kwargs = dict(filters=filters)
class FiltersCase7(FiltersTreeTestCase):
    """Level-1 file filters; no explicit group filters (inherited)."""

    filters = Filters(complevel=1)
    gfilters = None
    open_kwargs = dict(filters=filters)
class FiltersCase8(FiltersTreeTestCase):
    """Neither file nor group filters set; everything defaults/inherits."""

    filters = None
    gfilters = None
    open_kwargs = dict(filters=filters)
@unittest.skipIf(not common.bzip2_avail,
                 'BZIP2 compression library not available')
class FiltersCase9(FiltersTreeTestCase):
    """Shuffled zlib file filters; level-5 shuffled BZIP2 group filters."""

    filters = Filters(shuffle=True, complib="zlib")
    gfilters = Filters(complevel=5, shuffle=True, complib="bzip2")
    open_kwargs = dict(filters=filters)
@unittest.skipIf(not common.blosc_avail,
                 'BLOSC compression library not available')
class FiltersCase10(FiltersTreeTestCase):
    """Blosc (default compressor) at both levels with differing settings."""

    filters = Filters(shuffle=False, complevel=1, complib="blosc")
    gfilters = Filters(complevel=5, shuffle=True, complib="blosc")
    open_kwargs = dict(filters=filters)
@unittest.skipIf(not common.blosc_avail,
                 'BLOSC compression library not available')
class FiltersCaseBloscBloscLZ(FiltersTreeTestCase):
    """Blosc with the BloscLZ codec at both levels."""

    filters = Filters(shuffle=False, complevel=1, complib="blosc:blosclz")
    gfilters = Filters(complevel=5, shuffle=True, complib="blosc:blosclz")
    open_kwargs = dict(filters=filters)
@unittest.skipIf(not common.blosc_avail,
                 'BLOSC compression library not available')
@unittest.skipIf('lz4' not in tables.blosc_compressor_list(), 'lz4 required')
class FiltersCaseBloscLZ4(FiltersTreeTestCase):
    """Blosc with the LZ4 codec at both levels."""

    filters = Filters(shuffle=False, complevel=1, complib="blosc:lz4")
    gfilters = Filters(complevel=5, shuffle=True, complib="blosc:lz4")
    open_kwargs = dict(filters=filters)
@unittest.skipIf(not common.blosc_avail,
                 'BLOSC compression library not available')
@unittest.skipIf('lz4' not in tables.blosc_compressor_list(), 'lz4 required')
class FiltersCaseBloscLZ4HC(FiltersTreeTestCase):
    """Blosc with the LZ4HC codec at both levels."""

    filters = Filters(shuffle=False, complevel=1, complib="blosc:lz4hc")
    gfilters = Filters(complevel=5, shuffle=True, complib="blosc:lz4hc")
    open_kwargs = dict(filters=filters)
@unittest.skipIf(not common.blosc_avail,
                 'BLOSC compression library not available')
@unittest.skipIf('snappy' not in tables.blosc_compressor_list(),
                 'snappy required')
class FiltersCaseBloscSnappy(FiltersTreeTestCase):
    """Blosc with the Snappy codec at both levels."""

    filters = Filters(shuffle=False, complevel=1, complib="blosc:snappy")
    gfilters = Filters(complevel=5, shuffle=True, complib="blosc:snappy")
    open_kwargs = dict(filters=filters)
@unittest.skipIf(not common.blosc_avail,
                 'BLOSC compression library not available')
@unittest.skipIf('zlib' not in tables.blosc_compressor_list(), 'zlib required')
class FiltersCaseBloscZlib(FiltersTreeTestCase):
    """Blosc with the zlib codec at both levels."""

    filters = Filters(shuffle=False, complevel=1, complib="blosc:zlib")
    gfilters = Filters(complevel=5, shuffle=True, complib="blosc:zlib")
    open_kwargs = dict(filters=filters)
@unittest.skipIf(not common.blosc_avail,
                 'BLOSC compression library not available')
@unittest.skipIf(blosc_version < common.min_blosc_bitshuffle_version,
                 'BLOSC >= %s required' % common.min_blosc_bitshuffle_version)
class FiltersCaseBloscBitShuffle(FiltersTreeTestCase):
    """Blosc BloscLZ with bit-shuffle enabled on the group filters."""

    filters = Filters(shuffle=False, complevel=1, complib="blosc:blosclz")
    gfilters = Filters(complevel=5, shuffle=False, bitshuffle=True, complib="blosc:blosclz")
    open_kwargs = dict(filters=filters)
print("version:", tables.which_lib_version("blosc")[1])
class CopyGroupTestCase(common.TempFileMixin, TestCase):
    """Checks copying groups (recursively or not) into a second file,
    including user attributes and filter settings.

    Subclasses parameterize ``close`` (reopen destination before
    checking), ``filters``, ``srcnode`` and ``dstnode``.
    """

    title = "A title"
    nrows = 10

    def setUp(self):
        super(CopyGroupTestCase, self).setUp()

        # Create a temporary file
        # NOTE(review): tempfile.mktemp is race-prone; mkstemp would be
        # safer, but matches the rest of this test suite.
        self.h5fname2 = tempfile.mktemp(".h5")

        # Create the destination
        self.h5file2 = tables.open_file(self.h5fname2, "w")
        self.populateFile()

    def populateFile(self):
        """Build a deep source tree with tables, arrays and user attrs."""
        group = self.h5file.root
        # Add some user attrs:
        group._v_attrs.attr1 = "an string for root group"
        group._v_attrs.attr2 = 124
        # Create a tree
        for j in range(5):
            # NOTE(review): the inner loop variable ``i`` is reused by the
            # row-filling loop below; harmless here, but easy to trip on.
            for i in range(2):
                # Create a new group (brother of group)
                group2 = self.h5file.create_group(group, 'bgroup' + str(i),
                                                  filters=None)

                # Create a table
                table = self.h5file.create_table(group2, 'table1', Record2,
                                                 title=self.title,
                                                 filters=None)
                # Get the record object associated with the new table
                d = table.row
                # Fill the table
                for i in range(self.nrows):
                    d['var1'] = '%04d' % (self.nrows - i)
                    d['var2'] = i
                    d['var3'] = i * 2
                    d.append()  # This injects the Record values
                # Flush the buffer for this table
                table.flush()

                # Add some user attrs:
                table.attrs.attr1 = "an string"
                table.attrs.attr2 = 234

                # Create a couple of arrays in each group
                var1List = [x['var1'] for x in table.iterrows()]
                var3List = [x['var3'] for x in table.iterrows()]

                self.h5file.create_array(group2, 'array1', var1List, "col 1")
                self.h5file.create_array(group2, 'array2', var3List, "col 3")

                # Create a couple of EArrays as well
                ea1 = self.h5file.create_earray(group2, 'earray1',
                                                StringAtom(itemsize=4), (0,),
                                                "col 1")
                ea2 = self.h5file.create_earray(group2, 'earray2',
                                                Int16Atom(), (0,), "col 3")
                # Add some user attrs:
                ea1.attrs.attr1 = "an string for earray"
                ea2.attrs.attr2 = 123
                # And fill them with some values
                ea1.append(var1List)
                ea2.append(var3List)

            # Create a new group (descendant of group)
            group3 = self.h5file.create_group(group, 'group' + str(j),
                                              filters=None)
            # Iterate over this new group (group3)
            group = group3
            # Add some user attrs:
            group._v_attrs.attr1 = "an string for group"
            group._v_attrs.attr2 = 124

    def tearDown(self):
        # Close the file
        if self.h5file2.isopen:
            self.h5file2.close()
        os.remove(self.h5fname2)

        super(CopyGroupTestCase, self).tearDown()

    def test00_nonRecursive(self):
        """Checking non-recursive copy of a Group"""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test00_nonRecursive..." %
                  self.__class__.__name__)

        # Copy a group non-recursively
        srcgroup = self.h5file.root.group0.group1
        #srcgroup._f_copy_children(self.h5file2.root, recursive=False,
        #                          filters=self.filters)
        self.h5file.copy_children(srcgroup, self.h5file2.root,
                                  recursive=False, filters=self.filters)
        if self.close:
            # Close the destination file
            self.h5file2.close()
            # And open it again
            self.h5file2 = tables.open_file(self.h5fname2, "r")

        # Check that the copy has been done correctly
        dstgroup = self.h5file2.root
        nodelist1 = list(srcgroup._v_children.keys())
        nodelist2 = list(dstgroup._v_children.keys())
        # Sort the lists
        nodelist1.sort()
        nodelist2.sort()
        if common.verbose:
            print("The origin node list -->", nodelist1)
            print("The copied node list -->", nodelist2)
        self.assertEqual(srcgroup._v_nchildren, dstgroup._v_nchildren)
        self.assertEqual(nodelist1, nodelist2)

    def test01_nonRecursiveAttrs(self):
        """Checking non-recursive copy of a Group (attributes copied)"""

        if common.verbose:
            print('\n', '-=' * 30)
            print(("Running %s.test01_nonRecursiveAttrs..." %
                   self.__class__.__name__))

        # Copy a group non-recursively with attrs
        srcgroup = self.h5file.root.group0.group1
        srcgroup._f_copy_children(self.h5file2.root,
                                  recursive=False,
                                  filters=self.filters,
                                  copyuserattrs=1)
        if self.close:
            # Close the destination file
            self.h5file2.close()
            # And open it again
            self.h5file2 = tables.open_file(self.h5fname2, "r")

        # Check that the copy has been done correctly
        dstgroup = self.h5file2.root
        for srcnode in srcgroup:
            dstnode = getattr(dstgroup, srcnode._v_name)
            # Groups and leaves expose their attribute sets differently.
            if isinstance(srcnode, Group):
                srcattrs = srcnode._v_attrs
                srcattrskeys = srcattrs._f_list("all")
                dstattrs = dstnode._v_attrs
                dstattrskeys = dstattrs._f_list("all")
            else:
                srcattrs = srcnode.attrs
                srcattrskeys = srcattrs._f_list("all")
                dstattrs = dstnode.attrs
                dstattrskeys = dstattrs._f_list("all")
            # Filters may differ, do not take into account
            if self.filters is not None:
                dstattrskeys.remove('FILTERS')
            # These lists should already be ordered
            if common.verbose:
                print("srcattrskeys for node %s: %s" % (srcnode._v_name,
                                                        srcattrskeys))
                print("dstattrskeys for node %s: %s" % (dstnode._v_name,
                                                        dstattrskeys))
            self.assertEqual(srcattrskeys, dstattrskeys)
            if common.verbose:
                print("The attrs names has been copied correctly")

            # Now, for the contents of attributes
            for srcattrname in srcattrskeys:
                srcattrvalue = str(getattr(srcattrs, srcattrname))
                dstattrvalue = str(getattr(dstattrs, srcattrname))
                self.assertEqual(srcattrvalue, dstattrvalue)
            if self.filters is not None:
                self.assertEqual(dstattrs.FILTERS, self.filters)

            if common.verbose:
                print("The attrs contents has been copied correctly")

    def test02_Recursive(self):
        """Checking recursive copy of a Group"""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test02_Recursive..." % self.__class__.__name__)

        # Create the destination node
        group = self.h5file2.root
        for groupname in self.dstnode.split("/"):
            if groupname:
                group = self.h5file2.create_group(group, groupname)
        dstgroup = self.h5file2.get_node(self.dstnode)

        # Copy a group non-recursively
        srcgroup = self.h5file.get_node(self.srcnode)
        self.h5file.copy_children(srcgroup, dstgroup,
                                  recursive=True,
                                  filters=self.filters)
        lenSrcGroup = len(srcgroup._v_pathname)
        if lenSrcGroup == 1:
            lenSrcGroup = 0  # Case where srcgroup == "/"
        if self.close:
            # Close the destination file
            self.h5file2.close()
            # And open it again
            self.h5file2 = tables.open_file(self.h5fname2, "r")
            dstgroup = self.h5file2.get_node(self.dstnode)

        # Check that the copy has been done correctly
        lenDstGroup = len(dstgroup._v_pathname)
        if lenDstGroup == 1:
            lenDstGroup = 0  # Case where dstgroup == "/"
        first = 1
        nodelist1 = []
        for node in srcgroup._f_walknodes():
            if first:
                # skip the first group
                first = 0
                continue
            nodelist1.append(node._v_pathname[lenSrcGroup:])

        first = 1
        nodelist2 = []
        for node in dstgroup._f_walknodes():
            if first:
                # skip the first group
                first = 0
                continue
            nodelist2.append(node._v_pathname[lenDstGroup:])

        if common.verbose:
            print("The origin node list -->", nodelist1)
            print("The copied node list -->", nodelist2)
        self.assertEqual(nodelist1, nodelist2)

    def test03_RecursiveFilters(self):
        """Checking recursive copy of a Group (cheking Filters)"""

        if common.verbose:
            print('\n', '-=' * 30)
            print(("Running %s.test03_RecursiveFilters..." %
                   self.__class__.__name__))

        # Create the destination node
        group = self.h5file2.root
        for groupname in self.dstnode.split("/"):
            if groupname:
                group = self.h5file2.create_group(group, groupname)
        dstgroup = self.h5file2.get_node(self.dstnode)

        # Copy a group non-recursively
        srcgroup = self.h5file.get_node(self.srcnode)
        srcgroup._f_copy_children(dstgroup,
                                  recursive=True,
                                  filters=self.filters)
        lenSrcGroup = len(srcgroup._v_pathname)
        if lenSrcGroup == 1:
            lenSrcGroup = 0  # Case where srcgroup == "/"
        if self.close:
            # Close the destination file
            self.h5file2.close()
            # And open it again
            self.h5file2 = tables.open_file(self.h5fname2, "r")
            dstgroup = self.h5file2.get_node(self.dstnode)

        # Check that the copy has been done correctly
        lenDstGroup = len(dstgroup._v_pathname)
        if lenDstGroup == 1:
            lenDstGroup = 0  # Case where dstgroup == "/"
        first = 1
        nodelist1 = {}
        for node in srcgroup._f_walknodes():
            if first:
                # skip the first group
                first = 0
                continue
            nodelist1[node._v_name] = node._v_pathname[lenSrcGroup:]

        first = 1
        for node in dstgroup._f_walknodes():
            if first:
                # skip the first group
                first = 0
                continue
            # NOTE(review): these comparisons are no-ops — the boolean
            # result is discarded — and they compare a Filters repr
            # against a *pathname* repr from nodelist1, so they could
            # never be True anyway.  Presumably nodelist1 was meant to
            # record each source node's filters; the intended assertion
            # needs to be reconstructed and verified before "fixing".
            if isinstance(node, Group):
                repr(node._v_filters) == repr(nodelist1[node._v_name])
            else:
                repr(node.filters) == repr(nodelist1[node._v_name])
class CopyGroupCase1(CopyGroupTestCase):
    """Copy /group0/group1 to root; no filters; keep destination open."""

    close = 0
    filters = None
    srcnode = '/group0/group1'
    dstnode = '/'
class CopyGroupCase2(CopyGroupTestCase):
    """Copy /group0/group1 to root; no filters; reopen destination."""

    close = 1
    filters = None
    srcnode = '/group0/group1'
    dstnode = '/'
class CopyGroupCase3(CopyGroupTestCase):
    """Copy /group0 into a nested destination; no filters."""

    close = 0
    filters = None
    srcnode = '/group0'
    dstnode = '/group2/group3'
class CopyGroupCase4(CopyGroupTestCase):
    """Copy /group0 into a nested destination with zlib level-1 filters."""

    close = 1
    filters = Filters(complevel=1)
    srcnode = '/group0'
    dstnode = '/group2/group3'
class CopyGroupCase5(CopyGroupTestCase):
    """Copy the whole root into a nested destination with empty filters."""

    close = 0
    filters = Filters()
    srcnode = '/'
    dstnode = '/group2/group3'
class CopyGroupCase6(CopyGroupTestCase):
    """Copy /group0 into a nested destination with Fletcher32 filters."""

    close = 1
    filters = Filters(fletcher32=True)
    srcnode = '/group0'
    dstnode = '/group2/group3'
class CopyGroupCase7(CopyGroupTestCase):
    """Copy root to root with level-1 filters and no shuffle."""

    close = 0
    filters = Filters(complevel=1, shuffle=False)
    srcnode = '/'
    dstnode = '/'
@unittest.skipIf(not common.lzo_avail, 'LZO compression library not available')
class CopyGroupCase8(CopyGroupTestCase):
    """Copy root to root with LZO level-1 filters; reopen destination."""

    close = 1
    filters = Filters(complevel=1, complib="lzo")
    srcnode = '/'
    dstnode = '/'
class CopyFileTestCase(common.TempFileMixin, TestCase):
    """Tests for whole-file copies via ``File.copy_file``.

    Subclasses (the ``CopyFileCase*`` classes below) configure:

    * ``close``   -- if true, the *source* file is reopened before the
      copy is verified.
    * ``filters`` -- Filters instance applied to the destination copy,
      or None to keep the source settings.
    """

    title = "A title"   # title given to the copied file
    nrows = 10          # rows written into each test table

    def setUp(self):
        # Build the source file and reserve a name for the destination.
        super(CopyFileTestCase, self).setUp()
        # Create a temporary file
        self.h5fname2 = tempfile.mktemp(".h5")
        # Create the source file
        self.populateFile()

    def populateFile(self):
        # Fill the source file with a 5-level nested tree; each level has
        # two sibling groups holding a table, two Arrays and two EArrays,
        # with user attributes set at every level.
        group = self.h5file.root
        # Add some user attrs:
        group._v_attrs.attr1 = "an string for root group"
        group._v_attrs.attr2 = 124
        # Create a tree
        for j in range(5):
            for i in range(2):
                # Create a new group (brother of group)
                group2 = self.h5file.create_group(group, 'bgroup' + str(i),
                                                  filters=None)
                # Create a table
                table = self.h5file.create_table(group2, 'table1', Record2,
                                                 title=self.title,
                                                 filters=None)
                # Get the record object associated with the new table
                d = table.row
                # Fill the table
                for i in range(self.nrows):
                    d['var1'] = '%04d' % (self.nrows - i)
                    d['var2'] = i
                    d['var3'] = i * 2
                    d.append()      # This injects the Record values
                # Flush the buffer for this table
                table.flush()
                # Add some user attrs:
                table.attrs.attr1 = "an string"
                table.attrs.attr2 = 234
                # Create a couple of arrays in each group
                var1List = [x['var1'] for x in table.iterrows()]
                var3List = [x['var3'] for x in table.iterrows()]
                self.h5file.create_array(group2, 'array1', var1List, "col 1")
                self.h5file.create_array(group2, 'array2', var3List, "col 3")
                # Create a couple of EArrays as well
                ea1 = self.h5file.create_earray(group2, 'earray1',
                                                StringAtom(itemsize=4), (0,),
                                                "col 1")
                ea2 = self.h5file.create_earray(group2, 'earray2',
                                                Int16Atom(), (0,),
                                                "col 3")
                # Add some user attrs:
                ea1.attrs.attr1 = "an string for earray"
                ea2.attrs.attr2 = 123
                # And fill them with some values
                ea1.append(var1List)
                ea2.append(var3List)
            # Create a new group (descendant of group)
            group3 = self.h5file.create_group(group, 'group' + str(j),
                                              filters=None)
            # Iterate over this new group (group3)
            group = group3
            # Add some user attrs:
            group._v_attrs.attr1 = "an string for group"
            group._v_attrs.attr2 = 124

    def tearDown(self):
        # Close and remove the destination copy; the mixin cleans up the
        # source file.
        # Close the file
        if hasattr(self, 'h5file2') and self.h5file2.isopen:
            self.h5file2.close()
        if hasattr(self, 'h5fname2') and os.path.exists(self.h5fname2):
            os.remove(self.h5fname2)
        super(CopyFileTestCase, self).tearDown()

    def test00_overwrite(self):
        """Checking copy of a File (overwriting file)"""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test00_overwrite..." % self.__class__.__name__)

        # Create a temporary file
        file2h = open(self.h5fname2, "w")
        file2h.close()

        # Copy the file to the destination
        self.h5file.copy_file(self.h5fname2, title=self.title,
                              overwrite=1,
                              copyuserattrs=0,
                              filters=None)

        # Close the original file, if needed
        if self.close:
            self._reopen()

        # ...and open the destination file
        self.h5file2 = tables.open_file(self.h5fname2, "r")

        # Check that the copy has been done correctly
        srcgroup = self.h5file.root
        dstgroup = self.h5file2.root
        nodelist1 = list(srcgroup._v_children.keys())
        nodelist2 = list(dstgroup._v_children.keys())
        # Sort the lists
        nodelist1.sort()
        nodelist2.sort()
        if common.verbose:
            print("The origin node list -->", nodelist1)
            print("The copied node list -->", nodelist2)
        self.assertEqual(srcgroup._v_nchildren, dstgroup._v_nchildren)
        self.assertEqual(nodelist1, nodelist2)
        self.assertEqual(self.h5file2.title, self.title)

    def test00a_srcdstequal(self):
        """Checking copy of a File (srcfile == dstfile)"""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test00a_srcdstequal..." %
                  self.__class__.__name__)

        # Copying a file onto itself must be rejected.
        # Copy the file to the destination
        self.assertRaises(IOError, self.h5file.copy_file, self.h5file.filename)

    def test00b_firstclass(self):
        """Checking copy of a File (first-class function)"""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test00b_firstclass..." % self.__class__.__name__)

        # Close the temporary file
        self.h5file.close()

        # Copy the file to the destination, using the module-level
        # tables.copy_file() instead of the File method.
        tables.copy_file(self.h5fname, self.h5fname2, title=self.title,
                         copyuserattrs=0, filters=None, overwrite=1)

        # ...and open the source and destination file
        self.h5file = tables.open_file(self.h5fname, "r")
        self.h5file2 = tables.open_file(self.h5fname2, "r")

        # Check that the copy has been done correctly
        srcgroup = self.h5file.root
        dstgroup = self.h5file2.root
        nodelist1 = list(srcgroup._v_children.keys())
        nodelist2 = list(dstgroup._v_children.keys())
        # Sort the lists
        nodelist1.sort()
        nodelist2.sort()
        if common.verbose:
            print("The origin node list -->", nodelist1)
            print("The copied node list -->", nodelist2)
        self.assertEqual(srcgroup._v_nchildren, dstgroup._v_nchildren)
        self.assertEqual(nodelist1, nodelist2)
        self.assertEqual(self.h5file2.title, self.title)

    def test01_copy(self):
        """Checking copy of a File (attributes not copied)"""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test01_copy..." % self.__class__.__name__)

        # Copy the file to the destination
        self.h5file.copy_file(self.h5fname2, title=self.title,
                              copyuserattrs=0,
                              filters=self.filters)

        # Close the original file, if needed
        if self.close:
            self._reopen()

        # ...and open the destination file
        self.h5file2 = tables.open_file(self.h5fname2, "r")

        # Check that the copy has been done correctly
        srcgroup = self.h5file.root
        dstgroup = self.h5file2.root
        nodelist1 = list(srcgroup._v_children.keys())
        nodelist2 = list(dstgroup._v_children.keys())
        # Sort the lists
        nodelist1.sort()
        nodelist2.sort()
        if common.verbose:
            print("The origin node list -->", nodelist1)
            print("The copied node list -->", nodelist2)
        self.assertEqual(srcgroup._v_nchildren, dstgroup._v_nchildren)
        self.assertEqual(nodelist1, nodelist2)
        # print("_v_attrnames-->", self.h5file2.root._v_attrs._v_attrnames)
        # print("--> <%s,%s>" % (self.h5file2.title, self.title))
        self.assertEqual(self.h5file2.title, self.title)

        # Check that user attributes has not been copied: with
        # copyuserattrs=0 only the system attributes should appear
        # in the destination.
        for srcnode in srcgroup:
            dstnode = getattr(dstgroup, srcnode._v_name)
            srcattrs = srcnode._v_attrs
            srcattrskeys = srcattrs._f_list("sys")
            dstattrs = dstnode._v_attrs
            dstattrskeys = dstattrs._f_list("all")
            # Filters may differ, do not take into account
            if self.filters is not None:
                dstattrskeys.remove('FILTERS')
            # These lists should already be ordered
            if common.verbose:
                print("srcattrskeys for node %s: %s" % (srcnode._v_name,
                                                        srcattrskeys))
                print("dstattrskeys for node %s: %s" % (dstnode._v_name,
                                                        dstattrskeys))
            self.assertEqual(srcattrskeys, dstattrskeys)
            if common.verbose:
                print("The attrs names has been copied correctly")

            # Now, for the contents of attributes
            for srcattrname in srcattrskeys:
                srcattrvalue = str(getattr(srcattrs, srcattrname))
                dstattrvalue = str(getattr(dstattrs, srcattrname))
                self.assertEqual(srcattrvalue, dstattrvalue)
            if self.filters is not None:
                self.assertEqual(dstattrs.FILTERS, self.filters)

            if common.verbose:
                print("The attrs contents has been copied correctly")

    def test02_Attrs(self):
        """Checking copy of a File (attributes copied)"""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test02_Attrs..." % self.__class__.__name__)

        # Copy the file to the destination
        self.h5file.copy_file(self.h5fname2, title=self.title,
                              copyuserattrs=1,
                              filters=self.filters)

        # Close the original file, if needed
        if self.close:
            self._reopen()

        # ...and open the destination file
        self.h5file2 = tables.open_file(self.h5fname2, "r")

        # Check that the copy has been done correctly: with
        # copyuserattrs=1 every attribute (system and user) must match.
        srcgroup = self.h5file.root
        dstgroup = self.h5file2.root
        for srcnode in srcgroup:
            dstnode = getattr(dstgroup, srcnode._v_name)
            srcattrs = srcnode._v_attrs
            srcattrskeys = srcattrs._f_list("all")
            dstattrs = dstnode._v_attrs
            dstattrskeys = dstattrs._f_list("all")
            # These lists should already be ordered
            if common.verbose:
                print("srcattrskeys for node %s: %s" % (srcnode._v_name,
                                                        srcattrskeys))
                print("dstattrskeys for node %s: %s" % (dstnode._v_name,
                                                        dstattrskeys))
            # Filters may differ, do not take into account
            if self.filters is not None:
                dstattrskeys.remove('FILTERS')
            self.assertEqual(srcattrskeys, dstattrskeys)
            if common.verbose:
                print("The attrs names has been copied correctly")

            # Now, for the contents of attributes
            for srcattrname in srcattrskeys:
                srcattrvalue = str(getattr(srcattrs, srcattrname))
                dstattrvalue = str(getattr(dstattrs, srcattrname))
                self.assertEqual(srcattrvalue, dstattrvalue)
            if self.filters is not None:
                self.assertEqual(dstattrs.FILTERS, self.filters)

            if common.verbose:
                print("The attrs contents has been copied correctly")
class CopyFileCase1(CopyFileTestCase):
    # Source left open during verification; filters inherited from source.
    close = 0
    title = "A new title"
    filters = None
class CopyFileCase2(CopyFileTestCase):
    # Source reopened before verification; filters inherited from source.
    close = 1
    title = "A new title"
    filters = None
class CopyFileCase3(CopyFileTestCase):
    # Destination recompressed with zlib level 1; source left open.
    close = 0
    title = "A new title"
    filters = Filters(complevel=1)
class CopyFileCase4(CopyFileTestCase):
    # Destination recompressed with zlib level 1; source reopened.
    close = 1
    title = "A new title"
    filters = Filters(complevel=1)
class CopyFileCase5(CopyFileTestCase):
    # Destination gets the fletcher32 checksum filter; source left open.
    close = 0
    title = "A new title"
    filters = Filters(fletcher32=True)
class CopyFileCase6(CopyFileTestCase):
    # Destination gets the fletcher32 checksum filter; source reopened.
    close = 1
    title = "A new title"
    filters = Filters(fletcher32=True)
# Guard against a missing LZO library, mirroring CopyGroupCase8 above;
# without this the test would fail at Filters creation/copy time when
# LZO support is absent instead of being skipped.
@unittest.skipIf(not common.lzo_avail, 'LZO compression library not available')
class CopyFileCase7(CopyFileTestCase):
    # Destination recompressed with LZO; source left open.
    close = 0
    title = "A new title"
    filters = Filters(complevel=1, complib="lzo")
# Guard against a missing LZO library, mirroring CopyGroupCase8 above;
# without this the test would fail at Filters creation/copy time when
# LZO support is absent instead of being skipped.
@unittest.skipIf(not common.lzo_avail, 'LZO compression library not available')
class CopyFileCase8(CopyFileTestCase):
    # Destination recompressed with LZO; source reopened.
    close = 1
    title = "A new title"
    filters = Filters(complevel=1, complib="lzo")
class CopyFileCase10(common.TempFileMixin, TestCase):
    """Check that ``copy_file`` refuses to clobber an existing file."""

    def test01_notoverwrite(self):
        """Checking copy of a File (checking not overwriting)"""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test01_notoverwrite..." %
                  self.__class__.__name__)

        # Create two empty files:
        self.h5fname2 = tempfile.mktemp(".h5")
        self.h5file2 = tables.open_file(self.h5fname2, "w")
        self.h5file2.close()  # close the second one
        try:
            # Copy the first into the second: must raise because the
            # destination exists and overwrite was not requested.
            self.assertRaises(
                IOError, self.h5file.copy_file, self.h5fname2, overwrite=False)
        finally:
            # Delete files
            os.remove(self.h5fname2)
class GroupFiltersTestCase(common.TempFileMixin, TestCase):
    """Check inheritance/propagation of Filters from groups to children.

    Node names encode the expectation: ``*_no`` nodes should end up with
    default filters, ``*_yes`` nodes with ``self.filters``; ``implicit``
    means inherited from the parent group, ``explicit`` means passed
    directly at creation time.
    """

    filters = tables.Filters(complevel=4)  # something non-default

    def setUp(self):
        # Build two parallel subtrees: one with no filters anywhere
        # (except one explicit leaf) and one rooted at a filtered group.
        super(GroupFiltersTestCase, self).setUp()

        atom, shape = tables.IntAtom(), (1, 1)
        create_group = self.h5file.create_group
        create_carray = self.h5file.create_carray

        create_group('/', 'implicit_no')
        create_group('/implicit_no', 'implicit_no')
        create_carray('/implicit_no/implicit_no', 'implicit_no',
                      atom=atom, shape=shape)
        create_carray('/implicit_no/implicit_no', 'explicit_no',
                      atom=atom, shape=shape, filters=tables.Filters())
        create_carray('/implicit_no/implicit_no', 'explicit_yes',
                      atom=atom, shape=shape, filters=self.filters)

        create_group('/', 'explicit_yes', filters=self.filters)
        create_group('/explicit_yes', 'implicit_yes')
        create_carray('/explicit_yes/implicit_yes', 'implicit_yes',
                      atom=atom, shape=shape)
        create_carray('/explicit_yes/implicit_yes', 'explicit_yes',
                      atom=atom, shape=shape, filters=self.filters)
        create_carray('/explicit_yes/implicit_yes', 'explicit_no',
                      atom=atom, shape=shape, filters=tables.Filters())

    def _check_filters(self, h5file, filters=None):
        # Walk every node; if *filters* is given, all nodes must carry
        # exactly those filters.  Otherwise, derive the expectation from
        # the ``_no``/``_yes`` suffix of the node name.
        for node in h5file:
            # Get node filters.
            if hasattr(node, 'filters'):
                node_filters = node.filters
            else:
                node_filters = node._v_filters
            # Compare to given filters.
            if filters is not None:
                self.assertEqual(node_filters, filters)
                return
            # Guess filters to compare to by node name.
            if node._v_name.endswith('_no'):
                self.assertEqual(
                    node_filters, tables.Filters(),
                    "node ``%s`` should have no filters" % node._v_pathname)
            elif node._v_name.endswith('_yes'):
                self.assertEqual(
                    node_filters, self.filters,
                    "node ``%s`` should have filters" % node._v_pathname)

    def test00_propagate(self):
        """Filters propagating to children."""
        self._check_filters(self.h5file)

    def _test_copyFile(self, filters=None):
        # Copy the whole file and re-check filters on the copy,
        # optionally overriding them during the copy.
        copyfname = tempfile.mktemp(suffix='.h5')
        try:
            self.h5file.copy_file(copyfname, filters=filters)
            try:
                copyf = tables.open_file(copyfname)
                self._check_filters(copyf, filters=filters)
            finally:
                copyf.close()
        finally:
            os.remove(copyfname)

    def test01_copyFile(self):
        """Keeping filters when copying a file."""
        self._test_copyFile()

    def test02_copyFile_override(self):
        """Overriding filters when copying a file."""
        self._test_copyFile(self.filters)

    def _test_change(self, pathname, change_filters, new_filters):
        # Apply *change_filters* to the group at *pathname* and verify
        # the result, both before and (optionally) after a reopen.
        group = self.h5file.get_node(pathname)

        # Check expected current filters.
        old_filters = tables.Filters()
        if pathname.endswith('_yes'):
            old_filters = self.filters
        self.assertEqual(group._v_filters, old_filters)

        # Change filters.
        change_filters(group)
        self.assertEqual(group._v_filters, new_filters)

        # Get and check changed filters.
        if self._reopen():
            group = self.h5file.get_node(pathname)
        self.assertEqual(group._v_filters, new_filters)

    def test03_change(self):
        """Changing the filters of a group."""

        def set_filters(group):
            group._v_filters = self.filters
        self._test_change('/', set_filters, self.filters)

    def test04_delete(self):
        """Deleting the filters of a group."""

        def del_filters(group):
            del group._v_filters
        # Deleting reverts the group to the default Filters().
        self._test_change('/explicit_yes', del_filters, tables.Filters())
# Requires the Blosc compression library.
@unittest.skipIf(not common.blosc_avail, 'BLOSC not available')
class SetBloscMaxThreadsTestCase(common.TempFileMixin, TestCase):
    """Check that ``tables.set_blosc_max_threads`` returns the previous
    setting, both on a fresh file and across a reopen."""

    filters = tables.Filters(complevel=4, complib="blosc")

    def test00(self):
        """Checking set_blosc_max_threads()"""

        # The first call must return the file's configured default.
        nthreads_old = tables.set_blosc_max_threads(4)
        if common.verbose:
            print("Previous max threads:", nthreads_old)
            print("Should be:", self.h5file.params['MAX_BLOSC_THREADS'])
        self.assertEqual(nthreads_old, self.h5file.params['MAX_BLOSC_THREADS'])

        self.h5file.create_carray('/', 'some_array',
                                  atom=tables.Int32Atom(), shape=(3, 3),
                                  filters = self.filters)

        # The second call must return the value set just above (4).
        nthreads_old = tables.set_blosc_max_threads(1)
        if common.verbose:
            print("Previous max threads:", nthreads_old)
            print("Should be:", 4)
        self.assertEqual(nthreads_old, 4)

    def test01(self):
        """Checking set_blosc_max_threads() (re-open)"""

        nthreads_old = tables.set_blosc_max_threads(4)
        self.h5file.create_carray('/', 'some_array',
                                  atom=tables.Int32Atom(), shape=(3, 3),
                                  filters = self.filters)
        # After a reopen, the setting reverts to the file's default.
        self._reopen()

        nthreads_old = tables.set_blosc_max_threads(4)
        if common.verbose:
            print("Previous max threads:", nthreads_old)
            print("Should be:", self.h5file.params['MAX_BLOSC_THREADS'])
        self.assertEqual(nthreads_old, self.h5file.params['MAX_BLOSC_THREADS'])
class FilterTestCase(TestCase):
def test_filter_pack_type(self):
self.assertEqual(type(Filters()._pack()), numpy.int64)
@staticmethod
def _hexl(n):
if sys.version_info[0] > 2:
return hex(int(n))
else:
return hex(int(n)).rstrip('L')
def test_filter_pack_01(self):
filter_ = Filters()
self.assertEqual(self._hexl(filter_._pack()), '0x0')
def test_filter_pack_02(self):
filter_ = Filters(1, shuffle=False)
self.assertEqual(self._hexl(filter_._pack()), '0x101')
def test_filter_pack_03(self):
filter_ = Filters(9, 'zlib', shuffle=True, fletcher32=True)
self.assertEqual(self._hexl(filter_._pack()), '0x30109')
def test_filter_pack_04(self):
filter_ = Filters(1, shuffle=False, least_significant_digit=5)
self.assertEqual(self._hexl(filter_._pack()), '0x5040101')
def test_filter_unpack_01(self):
filter_ = Filters._unpack(numpy.int64(0x0))
self.assertFalse(filter_.shuffle)
self.assertFalse(filter_.fletcher32)
self.assertEqual(filter_.least_significant_digit, None)
self.assertEqual(filter_.complevel, 0)
self.assertEqual(filter_.complib, None)
def test_filter_unpack_02(self):
filter_ = Filters._unpack(numpy.int64(0x101))
self.assertFalse(filter_.shuffle)
self.assertFalse(filter_.fletcher32)
self.assertEqual(filter_.least_significant_digit, None)
self.assertEqual(filter_.complevel, 1)
self.assertEqual(filter_.complib, 'zlib')
def test_filter_unpack_03(self):
filter_ = Filters._unpack(numpy.int64(0x30109))
self.assertTrue(filter_.shuffle)
self.assertTrue(filter_.fletcher32)
self.assertEqual(filter_.least_significant_digit, None)
self.assertEqual(filter_.complevel, 9)
self.assertEqual(filter_.complib, 'zlib')
def test_filter_unpack_04(self):
filter_ = Filters._unpack(numpy.int64(0x5040101))
self.assertFalse(filter_.shuffle)
self.assertFalse(filter_.fletcher32)
self.assertEqual(filter_.least_significant_digit, 5)
self.assertEqual(filter_.complevel, 1)
self.assertEqual(filter_.complib, 'zlib')
class DefaultDriverTestCase(common.TempFileMixin, TestCase):
    """Base checks for a low-level HDF5 file driver.

    Subclasses set ``DRIVER``/``DRIVER_PARAMS``; the tests create a small
    file (attribute + array + table) and then reopen it in "r", "a" and
    "r+" modes with that driver, checking the contents each time.
    """

    DRIVER = None       # HDF5 driver name (None -> library default)
    DRIVER_PARAMS = {}  # extra driver_* keyword arguments
    open_kwargs = dict(driver=DRIVER, **DRIVER_PARAMS)

    def setUp(self):
        super(DefaultDriverTestCase, self).setUp()

        # Create an HDF5 file and contents
        root = self.h5file.root
        self.h5file.set_node_attr(root, "testattr", 41)
        self.h5file.create_array(root, "array", [1, 2], title="array")
        self.h5file.create_table(root, "table", {"var1": tables.IntCol()},
                                 title="table")

    def assertIsFile(self):
        # The default expectation is a single backing file on disk;
        # multi-file drivers override this (see SplitDriverTestCase).
        self.assertTrue(os.path.isfile(self.h5fname))

    def test_newFile(self):
        self.assertTrue(isinstance(self.h5file, tables.File))
        self.assertIsFile()

    def test_readFile(self):
        # Reopen read-only with the driver under test and verify the
        # contents written in setUp.
        self.h5file.close()
        self.h5file = None
        self.assertIsFile()

        # Open an existing HDF5 file
        self.h5file = tables.open_file(self.h5fname, mode="r",
                                       driver=self.DRIVER,
                                       **self.DRIVER_PARAMS)

        # check contents
        root = self.h5file.root
        self.assertEqual(self.h5file.get_node_attr(root, "testattr"), 41)
        self.assertTrue(isinstance(root.array, tables.Array))
        self.assertEqual(root.array._v_title, "array")
        self.assertTrue(isinstance(root.table, tables.Table))
        self.assertEqual(root.table._v_title, "table")
        self.assertTrue("var1" in root.table.colnames)
        self.assertEqual(root.table.cols.var1.dtype, tables.IntCol().dtype)

    def test_openFileA(self):
        # Reopen in append mode, verify existing contents, add new nodes,
        # then reopen again and verify both old and new contents.
        self.h5file.close()
        self.h5file = None
        self.assertIsFile()

        # Open an existing HDF5 file in append mode
        self.h5file = tables.open_file(self.h5fname, mode="a",
                                       driver=self.DRIVER,
                                       **self.DRIVER_PARAMS)

        # check contents
        root = self.h5file.root
        self.assertEqual(self.h5file.get_node_attr(root, "testattr"), 41)
        self.assertTrue(isinstance(root.array, tables.Array))
        self.assertEqual(root.array._v_title, "array")
        self.assertTrue(isinstance(root.table, tables.Table))
        self.assertEqual(root.table._v_title, "table")
        self.assertTrue("var1" in root.table.colnames)
        self.assertEqual(root.table.cols.var1.dtype, tables.IntCol().dtype)

        # write new data
        root = self.h5file.root
        self.h5file.set_node_attr(root, "testattr2", 42)
        self.h5file.create_array(root, "array2", [1, 2], title="array2")
        self.h5file.create_table(root, "table2", {"var2": tables.FloatCol()},
                                 title="table2")

        # check contents
        self._reopen(mode="a", driver=self.DRIVER, **self.DRIVER_PARAMS)

        root = self.h5file.root
        self.assertEqual(self.h5file.get_node_attr(root, "testattr"), 41)
        self.assertEqual(self.h5file.get_node_attr(root, "testattr2"), 42)
        self.assertTrue(isinstance(root.array, tables.Array))
        self.assertEqual(root.array._v_title, "array")
        self.assertTrue(isinstance(root.array2, tables.Array))
        self.assertEqual(root.array2._v_title, "array2")
        self.assertTrue(isinstance(root.table, tables.Table))
        self.assertEqual(root.table._v_title, "table")
        self.assertTrue("var1" in root.table.colnames)
        self.assertEqual(root.table.cols.var1.dtype, tables.IntCol().dtype)
        self.assertTrue(isinstance(root.table2, tables.Table))
        self.assertEqual(root.table2._v_title, "table2")
        self.assertTrue("var2" in root.table2.colnames)
        self.assertEqual(root.table2.cols.var2.dtype, tables.FloatCol().dtype)

    def test_openFileRW(self):
        # Same as test_openFileA but using the "r+" read-write mode.
        self.h5file.close()
        self.h5file = None
        self.assertIsFile()

        # Open an existing HDF5 file in append mode
        self.h5file = tables.open_file(self.h5fname, mode="r+",
                                       driver=self.DRIVER,
                                       **self.DRIVER_PARAMS)

        # check contents
        root = self.h5file.root
        self.assertEqual(self.h5file.get_node_attr(root, "testattr"), 41)
        self.assertTrue(isinstance(root.array, tables.Array))
        self.assertEqual(root.array._v_title, "array")
        self.assertTrue(isinstance(root.table, tables.Table))
        self.assertEqual(root.table._v_title, "table")
        self.assertTrue("var1" in root.table.colnames)
        self.assertEqual(root.table.cols.var1.dtype, tables.IntCol().dtype)

        # write new data
        self.h5file.set_node_attr(root, "testattr2", 42)
        self.h5file.create_array(root, "array2", [1, 2], title="array2")
        self.h5file.create_table(root, "table2", {"var2": tables.FloatCol()},
                                 title="table2")

        # check contents
        self._reopen(mode="r+", driver=self.DRIVER, **self.DRIVER_PARAMS)

        root = self.h5file.root
        self.assertEqual(self.h5file.get_node_attr(root, "testattr"), 41)
        self.assertEqual(self.h5file.get_node_attr(root, "testattr2"), 42)
        self.assertTrue(isinstance(root.array, tables.Array))
        self.assertEqual(root.array._v_title, "array")
        self.assertTrue(isinstance(root.array2, tables.Array))
        self.assertEqual(root.array2._v_title, "array2")
        self.assertTrue(isinstance(root.table, tables.Table))
        self.assertEqual(root.table._v_title, "table")
        self.assertTrue("var1" in root.table.colnames)
        self.assertEqual(root.table.cols.var1.dtype, tables.IntCol().dtype)
        self.assertTrue(isinstance(root.table2, tables.Table))
        self.assertEqual(root.table2._v_title, "table2")
        self.assertTrue("var2" in root.table2.colnames)
        self.assertEqual(root.table2.cols.var2.dtype, tables.FloatCol().dtype)
@unittest.skipIf(hdf5_version < "1.8.9", "requires HDF5 >= 1.8,9")
class Sec2DriverTestCase(DefaultDriverTestCase):
DRIVER = "H5FD_SEC2"
open_kwargs = dict(driver=DRIVER, **DefaultDriverTestCase.DRIVER_PARAMS)
def test_get_file_image(self):
image = self.h5file.get_file_image()
self.assertTrue(len(image) > 0)
if sys.version_info[0] < 3:
self.assertEqual([ord(i) for i in image[
:4]], [137, 72, 68, 70])
else:
self.assertEqual([i for i in image[:4]], [137, 72, 68, 70])
@unittest.skipIf(hdf5_version < "1.8.9", "requires HDF5 >= 1.8,9")
class StdioDriverTestCase(DefaultDriverTestCase):
DRIVER = "H5FD_STDIO"
open_kwargs = dict(driver=DRIVER, **DefaultDriverTestCase.DRIVER_PARAMS)
def test_get_file_image(self):
image = self.h5file.get_file_image()
self.assertTrue(len(image) > 0)
if sys.version_info[0] < 3:
self.assertEqual([ord(i) for i in image[
:4]], [137, 72, 68, 70])
else:
self.assertEqual([i for i in image[:4]], [137, 72, 68, 70])
@unittest.skipIf(hdf5_version < "1.8.9", "requires HDF5 >= 1.8,9")
class CoreDriverTestCase(DefaultDriverTestCase):
DRIVER = "H5FD_CORE"
open_kwargs = dict(driver=DRIVER, **DefaultDriverTestCase.DRIVER_PARAMS)
def test_get_file_image(self):
image = self.h5file.get_file_image()
self.assertTrue(len(image) > 0)
if sys.version_info[0] < 3:
self.assertEqual([ord(i) for i in image[
:4]], [137, 72, 68, 70])
else:
self.assertEqual([i for i in image[:4]], [137, 72, 68, 70])
class CoreDriverNoBackingStoreTestCase(TestCase):
    """H5FD_CORE driver with ``driver_core_backing_store=False``.

    In this mode all writes stay in memory: creating/modifying a file
    must never touch the disk, and opening an existing file loads a
    read-only in-memory copy that leaves the on-disk bytes unchanged.
    """

    DRIVER = "H5FD_CORE"

    def setUp(self):
        super(CoreDriverNoBackingStoreTestCase, self).setUp()
        self.h5fname = tempfile.mktemp(suffix=".h5")
        self.h5file = None

    def tearDown(self):
        # Close this test's handle, or any other registered handles to
        # the same file name, then remove the file if it was created.
        if self.h5file:
            self.h5file.close()
        elif self.h5fname in tables.file._open_files:
            open_files = tables.file._open_files
            for h5file in open_files.get_handlers_by_name(self.h5fname):
                h5file.close()
        self.h5file = None
        if os.path.isfile(self.h5fname):
            os.remove(self.h5fname)
        super(CoreDriverNoBackingStoreTestCase, self).tearDown()

    def test_newFile(self):
        """Ensure that nothing is written to file."""

        self.assertFalse(os.path.isfile(self.h5fname))

        self.h5file = tables.open_file(self.h5fname, mode="w",
                                       driver=self.DRIVER,
                                       driver_core_backing_store=False)

        # Create an HDF5 file and contents
        root = self.h5file.root
        self.h5file.set_node_attr(root, "testattr", 41)
        self.h5file.create_array(root, "array", [1, 2], title="array")
        self.h5file.create_table(root, "table", {"var1": tables.IntCol()},
                                 title="table")
        self.h5file.close()  # flush

        # Even after close, no file must appear on disk.
        self.assertFalse(os.path.isfile(self.h5fname))

    def test_readNewFileW(self):
        # Contents written in "w" mode must be readable back from memory
        # within the same session, with nothing written to disk.
        self.assertFalse(os.path.isfile(self.h5fname))

        # Create an HDF5 file and contents
        self.h5file = tables.open_file(self.h5fname, mode="w",
                                       driver=self.DRIVER,
                                       driver_core_backing_store=False)
        root = self.h5file.root
        self.h5file.set_node_attr(root, "testattr", 41)
        self.h5file.create_array(root, "array", [1, 2], title="array")
        self.h5file.create_table(root, "table", {"var1": tables.IntCol()},
                                 title="table")

        self.assertEqual(self.h5file.get_node_attr(root, "testattr"), 41)
        self.assertTrue(isinstance(root.array, tables.Array))
        self.assertEqual(root.array._v_title, "array")
        self.assertTrue(isinstance(root.table, tables.Table))
        self.assertEqual(root.table._v_title, "table")
        self.assertTrue("var1" in root.table.colnames)
        self.assertEqual(root.table.cols.var1.dtype, tables.IntCol().dtype)

        self.h5file.close()  # flush

        self.assertFalse(os.path.isfile(self.h5fname))

    def test_readNewFileA(self):
        # Same as test_readNewFileW but with "a" mode.
        self.assertFalse(os.path.isfile(self.h5fname))

        # Create an HDF5 file and contents
        self.h5file = tables.open_file(self.h5fname, mode="a",
                                       driver=self.DRIVER,
                                       driver_core_backing_store=False)
        root = self.h5file.root
        self.h5file.set_node_attr(root, "testattr", 41)
        self.h5file.create_array(root, "array", [1, 2], title="array")
        self.h5file.create_table(root, "table", {"var1": tables.IntCol()},
                                 title="table")

        self.assertEqual(self.h5file.get_node_attr(root, "testattr"), 41)
        self.assertTrue(isinstance(root.array, tables.Array))
        self.assertEqual(root.array._v_title, "array")
        self.assertTrue(isinstance(root.table, tables.Table))
        self.assertEqual(root.table._v_title, "table")
        self.assertTrue("var1" in root.table.colnames)
        self.assertEqual(root.table.cols.var1.dtype, tables.IntCol().dtype)

        self.h5file.close()  # flush

        self.assertFalse(os.path.isfile(self.h5fname))

    def test_openNewFileRW(self):
        # "r+" on a non-existent file must fail even without backing store.
        self.assertFalse(os.path.isfile(self.h5fname))
        self.assertRaises(tables.HDF5ExtError,
                          tables.open_file, self.h5fname, mode="r+",
                          driver=self.DRIVER, driver_core_backing_store=False)

    def test_openNewFileR(self):
        # "r" on a non-existent file must fail even without backing store.
        self.assertFalse(os.path.isfile(self.h5fname))
        self.assertRaises(tables.HDF5ExtError,
                          tables.open_file, self.h5fname, mode="r",
                          driver=self.DRIVER, driver_core_backing_store=False)

    def _create_file(self, filename):
        # Write a small reference file to disk with the default driver.
        h5file = tables.open_file(filename, mode="w")

        root = h5file.root
        h5file.set_node_attr(root, "testattr", 41)
        h5file.create_array(root, "array", [1, 2], title="array")
        h5file.create_table(root, "table", {"var1": tables.IntCol()},
                            title="table")

        h5file.close()

    def test_readFile(self):
        # An existing on-disk file can be loaded read-only into memory.
        self._create_file(self.h5fname)
        self.assertTrue(os.path.isfile(self.h5fname))

        # Open an existing HDF5 file
        self.h5file = tables.open_file(self.h5fname, mode="r",
                                       driver=self.DRIVER,
                                       driver_core_backing_store=False)

        root = self.h5file.root
        self.assertEqual(self.h5file.get_node_attr(root, "testattr"), 41)
        self.assertTrue(isinstance(root.array, tables.Array))
        self.assertEqual(root.array._v_title, "array")
        self.assertTrue(isinstance(root.table, tables.Table))
        self.assertEqual(root.table._v_title, "table")
        self.assertTrue("var1" in root.table.colnames)
        self.assertEqual(root.table.cols.var1.dtype, tables.IntCol().dtype)

    def _get_digest(self, filename):
        # MD5 of the on-disk bytes, used to prove the file is untouched.
        # NOTE(review): fd is not closed if iteration raises; a
        # with-block would be safer here.
        md5 = hashlib.md5()

        fd = open(filename, 'rb')
        for data in fd:
            md5.update(data)

        fd.close()

        hexdigest = md5.hexdigest()

        return hexdigest

    def test_openFileA(self):
        # In-memory modifications in "a" mode must not alter the file on
        # disk (verified via an MD5 digest before/after).
        self._create_file(self.h5fname)
        self.assertTrue(os.path.isfile(self.h5fname))

        # compute the file hash
        hexdigest = self._get_digest(self.h5fname)

        # Open an existing HDF5 file in append mode
        self.h5file = tables.open_file(self.h5fname, mode="a",
                                       driver=self.DRIVER,
                                       driver_core_backing_store=False)

        # check contents
        root = self.h5file.root
        self.assertEqual(self.h5file.get_node_attr(root, "testattr"), 41)
        self.assertTrue(isinstance(root.array, tables.Array))
        self.assertEqual(root.array._v_title, "array")
        self.assertTrue(isinstance(root.table, tables.Table))
        self.assertEqual(root.table._v_title, "table")
        self.assertTrue("var1" in root.table.colnames)
        self.assertEqual(root.table.cols.var1.dtype, tables.IntCol().dtype)

        # write new data
        root = self.h5file.root
        self.h5file.set_node_attr(root, "testattr2", 42)
        self.h5file.create_array(root, "array2", [1, 2], title="array2")
        self.h5file.create_table(root, "table2", {"var2": tables.FloatCol()},
                                 title="table2")
        self.h5file.close()

        # ensure that there is no change on the file on disk
        self.assertEqual(hexdigest, self._get_digest(self.h5fname))

    def test_openFileRW(self):
        # Same as test_openFileA but using "r+" mode.
        self._create_file(self.h5fname)
        self.assertTrue(os.path.isfile(self.h5fname))

        # compute the file hash
        hexdigest = self._get_digest(self.h5fname)

        # Open an existing HDF5 file in append mode
        self.h5file = tables.open_file(self.h5fname, mode="r+",
                                       driver=self.DRIVER,
                                       driver_core_backing_store=False)

        # check contents
        root = self.h5file.root
        self.assertEqual(self.h5file.get_node_attr(root, "testattr"), 41)
        self.assertTrue(isinstance(root.array, tables.Array))
        self.assertEqual(root.array._v_title, "array")
        self.assertTrue(isinstance(root.table, tables.Table))
        self.assertEqual(root.table._v_title, "table")
        self.assertTrue("var1" in root.table.colnames)
        self.assertEqual(root.table.cols.var1.dtype, tables.IntCol().dtype)

        # write new data
        root = self.h5file.root
        self.h5file.set_node_attr(root, "testattr2", 42)
        self.h5file.create_array(root, "array2", [1, 2], title="array2")
        self.h5file.create_table(root, "table2", {"var2": tables.FloatCol()},
                                 title="table2")
        self.h5file.close()

        # ensure that there is no change on the file on disk
        self.assertEqual(hexdigest, self._get_digest(self.h5fname))

    @unittest.skipIf(hdf5_version < "1.8.9", 'HDF5 >= "1.8.9" required')
    def test_get_file_image(self):
        # An in-memory file still produces a valid HDF5 image (must start
        # with the "\x89HDF" signature).
        self.h5file = tables.open_file(self.h5fname, mode="w",
                                       driver=self.DRIVER,
                                       driver_core_backing_store=False)

        root = self.h5file.root
        self.h5file.set_node_attr(root, "testattr", 41)
        self.h5file.create_array(root, "array", [1, 2], title="array")
        self.h5file.create_table(root, "table", {"var1": tables.IntCol()},
                                 title="table")

        image = self.h5file.get_file_image()

        self.assertTrue(len(image) > 0)
        if sys.version_info[0] < 3:
            self.assertEqual([ord(i) for i in image[
                :4]], [137, 72, 68, 70])
        else:
            self.assertEqual([i for i in image[:4]], [137, 72, 68, 70])
class SplitDriverTestCase(DefaultDriverTestCase):
    """Driver checks for H5FD_SPLIT, which stores metadata and raw data
    in two separate files with the configured extensions."""

    DRIVER = "H5FD_SPLIT"
    DRIVER_PARAMS = {
        "driver_split_meta_ext": "-xm.h5",
        "driver_split_raw_ext": "-xr.h5",
    }
    open_kwargs = dict(driver=DRIVER, **DRIVER_PARAMS)

    def _getTempFileName(self):
        return tempfile.mktemp(prefix=self._getName())

    def setUp(self):
        super(SplitDriverTestCase, self).setUp()
        # Both actual on-disk files: base name plus meta/raw extensions.
        self.h5fnames = [self.h5fname + self.DRIVER_PARAMS[k] for k in
                         ("driver_split_meta_ext", "driver_split_raw_ext")]

    def tearDown(self):
        self.h5file.close()
        for fname in self.h5fnames:
            if os.path.isfile(fname):
                os.remove(fname)
        # Skip the direct parent's tearDown (presumably TempFileMixin
        # cleanup of the plain base name, which the split driver never
        # creates -- TODO confirm); go straight to TestCase.tearDown.
        #super(SplitDriverTestCase, self).tearDown()
        TestCase.tearDown(self)

    def assertIsFile(self):
        # Override: both split files must exist instead of a single one.
        for fname in self.h5fnames:
            self.assertTrue(os.path.isfile(fname))
class NotSpportedDriverTestCase(TestCase):
    """Base class for drivers PyTables does not support: opening a file
    with the driver must raise ``EXCEPTION`` and create no file.

    (Name misspelling -- "Spported" -- is kept for backward
    compatibility; subclasses below reference it.)
    """

    DRIVER = None        # driver name supplied by subclasses
    DRIVER_PARAMS = {}   # extra driver_* keyword arguments
    EXCEPTION = ValueError  # expected error type on open

    def setUp(self):
        super(NotSpportedDriverTestCase, self).setUp()
        self.h5fname = tempfile.mktemp(suffix=".h5")

    def tearDown(self):
        # Close any handle registered for the file name and remove the
        # file if one was (unexpectedly) created.
        open_files = tables.file._open_files
        if self.h5fname in open_files:
            for h5file in open_files.get_handlers_by_name(self.h5fname):
                h5file.close()
        if os.path.exists(self.h5fname):
            os.remove(self.h5fname)
        super(NotSpportedDriverTestCase, self).tearDown()

    def test_newFile(self):
        self.assertRaises(self.EXCEPTION, tables.open_file, self.h5fname,
                          mode="w", driver=self.DRIVER, **self.DRIVER_PARAMS)
        self.assertFalse(os.path.isfile(self.h5fname))
if "H5FD_LOG" in tables.hdf5extension._supported_drivers:
BaseLogDriverTestCase = DefaultDriverTestCase
else:
BaseLogDriverTestCase = NotSpportedDriverTestCase
class LogDriverTestCase(BaseLogDriverTestCase):
    """Driver checks for H5FD_LOG, which additionally writes an I/O log
    file that must be cleaned up afterwards."""

    DRIVER = "H5FD_LOG"
    open_kwargs = dict(driver=DRIVER, **BaseLogDriverTestCase.DRIVER_PARAMS)

    def setUp(self):
        # local binding: shadow the class-level DRIVER_PARAMS with a
        # per-instance dict holding a fresh log file name.
        self.DRIVER_PARAMS = {
            "driver_log_file": tempfile.mktemp(suffix=".log")
        }
        super(LogDriverTestCase, self).setUp()

    def tearDown(self):
        # Remove the driver's log file along with the regular cleanup.
        if os.path.exists(self.DRIVER_PARAMS["driver_log_file"]):
            os.remove(self.DRIVER_PARAMS["driver_log_file"])
        super(LogDriverTestCase, self).tearDown()
# H5FD_DIRECT is only usable when HDF5 was built with direct I/O
# support; otherwise requesting it must raise RuntimeError.
if HAVE_DIRECT_DRIVER:
    class DirectDriverTestCase(DefaultDriverTestCase):
        DRIVER = "H5FD_DIRECT"
        open_kwargs = dict(
            driver=DRIVER, **DefaultDriverTestCase.DRIVER_PARAMS
        )
else:
    class DirectDriverTestCase(NotSpportedDriverTestCase):
        DRIVER = "H5FD_DIRECT"
        EXCEPTION = RuntimeError
# H5FD_WINDOWS only exists on Windows builds of HDF5; elsewhere
# requesting it must raise RuntimeError.
if HAVE_WINDOWS_DRIVER:
    class WindowsDriverTestCase(DefaultDriverTestCase):
        DRIVER = "H5FD_WINDOWS"
        open_kwargs = dict(
            driver=DRIVER, **DefaultDriverTestCase.DRIVER_PARAMS
        )
else:
    class WindowsDriverTestCase(NotSpportedDriverTestCase):
        DRIVER = "H5FD_WINDOWS"
        EXCEPTION = RuntimeError
class FamilyDriverTestCase(NotSpportedDriverTestCase):
    # The family driver is not supported by PyTables; opening must fail.
    DRIVER = "H5FD_FAMILY"
class MultiDriverTestCase(NotSpportedDriverTestCase):
DRIVER = "H5FD_MULTI"
class MpioDriverTestCase(NotSpportedDriverTestCase):
DRIVER = "H5FD_MPIO"
class MpiPosixDriverTestCase(NotSpportedDriverTestCase):
DRIVER = "H5FD_MPIPOSIX"
class StreamDriverTestCase(NotSpportedDriverTestCase):
DRIVER = "H5FD_STREAM"
@unittest.skipIf(hdf5_version < "1.8.9", 'HDF5 >= "1.8.9" required')
class InMemoryCoreDriverTestCase(TestCase):
    """Tests for the H5FD_CORE driver with the backing store disabled.

    Files live purely in memory: they are serialized with
    ``get_file_image()`` and re-opened from that image via
    ``driver_core_image``.  Because ``driver_core_backing_store=0``,
    no file must ever appear on disk under ``self.h5fname``.
    """

    DRIVER = "H5FD_CORE"

    def setUp(self):
        super(InMemoryCoreDriverTestCase, self).setUp()
        self.h5fname = tempfile.mktemp(".h5")
        self.h5file = None

    def tearDown(self):
        if self.h5file:
            self.h5file.close()
        self.h5file = None
        if os.path.isfile(self.h5fname):
            os.remove(self.h5fname)
        super(InMemoryCoreDriverTestCase, self).tearDown()

    def _create_image(self, filename="in-memory", title="Title", mode='w'):
        """Build a small in-memory file and return its serialized image.

        The file gets an array, a table and a user attribute so the
        round-trip tests below have known contents to check.
        """
        h5file = tables.open_file(filename, mode=mode, title=title,
                                  driver=self.DRIVER,
                                  driver_core_backing_store=0)
        try:
            h5file.create_array(h5file.root, 'array', [1, 2], title="Array")
            h5file.create_table(h5file.root, 'table', {
                'var1': IntCol()}, "Table")
            h5file.root._v_attrs.testattr = 41
            image = h5file.get_file_image()
        finally:
            h5file.close()
        return image

    def test_newFileW(self):
        """A 'w'-mode in-memory file yields an HDF5 image and no disk file."""
        image = self._create_image(self.h5fname, mode='w')
        self.assertTrue(len(image) > 0)
        # The first four bytes of any HDF5 file are the format signature
        # "\x89HDF" (137, 72, 68, 70).
        if sys.version_info[0] < 3:
            self.assertEqual([ord(i) for i in image[:4]], [137, 72, 68, 70])
        else:
            self.assertEqual([i for i in image[:4]], [137, 72, 68, 70])
        self.assertFalse(os.path.exists(self.h5fname))

    def test_newFileA(self):
        """Same as test_newFileW, but creating the file in append mode."""
        image = self._create_image(self.h5fname, mode='a')
        self.assertTrue(len(image) > 0)
        if sys.version_info[0] < 3:
            self.assertEqual([ord(i) for i in image[:4]], [137, 72, 68, 70])
        else:
            self.assertEqual([i for i in image[:4]], [137, 72, 68, 70])
        self.assertFalse(os.path.exists(self.h5fname))

    def test_openFileR(self):
        """An image can be re-opened read-only with all contents intact."""
        image = self._create_image(self.h5fname)
        self.assertFalse(os.path.exists(self.h5fname))

        # Open an existing file
        self.h5file = tables.open_file(self.h5fname, mode="r",
                                       driver=self.DRIVER,
                                       driver_core_image=image,
                                       driver_core_backing_store=0)

        # Get the CLASS attribute of the arr object
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "TITLE"))
        self.assertEqual(self.h5file.get_node_attr("/", "TITLE"), "Title")
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "testattr"))
        self.assertEqual(self.h5file.get_node_attr("/", "testattr"), 41)
        self.assertTrue(hasattr(self.h5file.root, "array"))
        self.assertEqual(self.h5file.get_node_attr("/array", "TITLE"), "Array")
        self.assertTrue(hasattr(self.h5file.root, "table"))
        self.assertEqual(self.h5file.get_node_attr("/table", "TITLE"), "Table")
        self.assertEqual(self.h5file.root.array.read(), [1, 2])

    def test_openFileRW(self):
        """An image can be re-opened read-write and modified in memory."""
        image = self._create_image(self.h5fname)
        self.assertFalse(os.path.exists(self.h5fname))

        # Open an existing file
        self.h5file = tables.open_file(self.h5fname, mode="r+",
                                       driver=self.DRIVER,
                                       driver_core_image=image,
                                       driver_core_backing_store=0)

        # Get the CLASS attribute of the arr object
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "TITLE"))
        self.assertEqual(self.h5file.get_node_attr("/", "TITLE"), "Title")
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "testattr"))
        self.assertEqual(self.h5file.get_node_attr("/", "testattr"), 41)
        self.assertTrue(hasattr(self.h5file.root, "array"))
        self.assertEqual(self.h5file.get_node_attr("/array", "TITLE"), "Array")
        self.assertTrue(hasattr(self.h5file.root, "table"))
        self.assertEqual(self.h5file.get_node_attr("/table", "TITLE"), "Table")
        self.assertEqual(self.h5file.root.array.read(), [1, 2])

        # Writing must work, yet still leave nothing on disk.
        self.h5file.create_array(self.h5file.root, 'array2', list(range(10000)),
                                 title="Array2")
        self.h5file.root._v_attrs.testattr2 = 42

        self.h5file.close()
        self.assertFalse(os.path.exists(self.h5fname))

    def test_openFileRW_update(self):
        """Changes made in "r+" mode show up in a newly fetched image."""
        filename = tempfile.mktemp(".h5")
        image1 = self._create_image(filename)
        self.assertFalse(os.path.exists(self.h5fname))

        # Open an existing file
        self.h5file = tables.open_file(self.h5fname, mode="r+",
                                       driver=self.DRIVER,
                                       driver_core_image=image1,
                                       driver_core_backing_store=0)

        # Get the CLASS attribute of the arr object
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "TITLE"))
        self.assertEqual(self.h5file.get_node_attr("/", "TITLE"), "Title")
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "testattr"))
        self.assertEqual(self.h5file.get_node_attr("/", "testattr"), 41)
        self.assertTrue(hasattr(self.h5file.root, "array"))
        self.assertEqual(self.h5file.get_node_attr("/array", "TITLE"), "Array")
        self.assertTrue(hasattr(self.h5file.root, "table"))
        self.assertEqual(self.h5file.get_node_attr("/table", "TITLE"), "Table")
        self.assertEqual(self.h5file.root.array.read(), [1, 2])

        # Large enough to force the core driver to grow its buffer.
        data = list(range(2 * tables.parameters.DRIVER_CORE_INCREMENT))
        self.h5file.create_array(self.h5file.root, 'array2', data,
                                 title="Array2")
        self.h5file.root._v_attrs.testattr2 = 42

        image2 = self.h5file.get_file_image()
        self.h5file.close()
        self.assertFalse(os.path.exists(self.h5fname))

        # The updated image must differ from the original one.
        self.assertNotEqual(len(image1), len(image2))
        self.assertNotEqual(image1, image2)

        # Open an existing file
        self.h5file = tables.open_file(self.h5fname, mode="r",
                                       driver=self.DRIVER,
                                       driver_core_image=image2,
                                       driver_core_backing_store=0)

        # Get the CLASS attribute of the arr object
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "TITLE"))
        self.assertEqual(self.h5file.get_node_attr("/", "TITLE"), "Title")
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "testattr"))
        self.assertEqual(self.h5file.get_node_attr("/", "testattr"), 41)
        self.assertTrue(hasattr(self.h5file.root, "array"))
        self.assertEqual(self.h5file.get_node_attr("/array", "TITLE"), "Array")
        self.assertTrue(hasattr(self.h5file.root, "table"))
        self.assertEqual(self.h5file.get_node_attr("/table", "TITLE"), "Table")
        self.assertEqual(self.h5file.root.array.read(), [1, 2])
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "testattr2"))
        self.assertEqual(self.h5file.get_node_attr("/", "testattr2"), 42)
        self.assertTrue(hasattr(self.h5file.root, "array2"))
        self.assertEqual(self.h5file.get_node_attr(
            "/array2", "TITLE"), "Array2")
        self.assertEqual(self.h5file.root.array2.read(), data)
        self.h5file.close()

        self.assertFalse(os.path.exists(self.h5fname))

    def test_openFileA(self):
        """An image can be re-opened in append mode with contents intact."""
        image = self._create_image(self.h5fname)
        self.assertFalse(os.path.exists(self.h5fname))

        # Open an existing file
        self.h5file = tables.open_file(self.h5fname, mode="a",
                                       driver=self.DRIVER,
                                       driver_core_image=image,
                                       driver_core_backing_store=0)

        # Get the CLASS attribute of the arr object
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "TITLE"))
        self.assertEqual(self.h5file.get_node_attr("/", "TITLE"), "Title")
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "testattr"))
        self.assertEqual(self.h5file.get_node_attr("/", "testattr"), 41)
        self.assertTrue(hasattr(self.h5file.root, "array"))
        self.assertEqual(self.h5file.get_node_attr("/array", "TITLE"), "Array")
        self.assertTrue(hasattr(self.h5file.root, "table"))
        self.assertEqual(self.h5file.get_node_attr("/table", "TITLE"), "Table")
        self.assertEqual(self.h5file.root.array.read(), [1, 2])
        self.h5file.close()

        self.assertFalse(os.path.exists(self.h5fname))

    def test_openFileA_update(self):
        """Changes made in append mode show up in a newly fetched image."""
        h5fname = tempfile.mktemp(".h5")
        image1 = self._create_image(h5fname)
        self.assertFalse(os.path.exists(self.h5fname))

        # Open an existing file
        self.h5file = tables.open_file(self.h5fname, mode="a",
                                       driver=self.DRIVER,
                                       driver_core_image=image1,
                                       driver_core_backing_store=0)

        # Get the CLASS attribute of the arr object
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "TITLE"))
        self.assertEqual(self.h5file.get_node_attr("/", "TITLE"), "Title")
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "testattr"))
        self.assertEqual(self.h5file.get_node_attr("/", "testattr"), 41)
        self.assertTrue(hasattr(self.h5file.root, "array"))
        self.assertEqual(self.h5file.get_node_attr("/array", "TITLE"), "Array")
        self.assertTrue(hasattr(self.h5file.root, "table"))
        self.assertEqual(self.h5file.get_node_attr("/table", "TITLE"), "Table")
        self.assertEqual(self.h5file.root.array.read(), [1, 2])

        # Large enough to force the core driver to grow its buffer.
        data = list(range(2 * tables.parameters.DRIVER_CORE_INCREMENT))
        self.h5file.create_array(self.h5file.root, 'array2', data,
                                 title="Array2")
        self.h5file.root._v_attrs.testattr2 = 42

        image2 = self.h5file.get_file_image()
        self.h5file.close()
        self.assertFalse(os.path.exists(self.h5fname))

        # The updated image must differ from the original one.
        self.assertNotEqual(len(image1), len(image2))
        self.assertNotEqual(image1, image2)

        # Open an existing file
        self.h5file = tables.open_file(self.h5fname, mode="r",
                                       driver=self.DRIVER,
                                       driver_core_image=image2,
                                       driver_core_backing_store=0)

        # Get the CLASS attribute of the arr object
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "TITLE"))
        self.assertEqual(self.h5file.get_node_attr("/", "TITLE"), "Title")
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "testattr"))
        self.assertEqual(self.h5file.get_node_attr("/", "testattr"), 41)
        self.assertTrue(hasattr(self.h5file.root, "array"))
        self.assertEqual(self.h5file.get_node_attr("/array", "TITLE"), "Array")
        self.assertTrue(hasattr(self.h5file.root, "table"))
        self.assertEqual(self.h5file.get_node_attr("/table", "TITLE"), "Table")
        self.assertEqual(self.h5file.root.array.read(), [1, 2])
        self.assertTrue(hasattr(self.h5file.root._v_attrs, "testattr2"))
        self.assertEqual(self.h5file.get_node_attr("/", "testattr2"), 42)
        self.assertTrue(hasattr(self.h5file.root, "array2"))
        self.assertEqual(self.h5file.get_node_attr(
            "/array2", "TITLE"), "Array2")
        self.assertEqual(self.h5file.root.array2.read(), data)
        self.h5file.close()

        self.assertFalse(os.path.exists(self.h5fname))

    def test_str(self):
        """str() of an in-memory file must work despite the missing file."""
        self.h5file = tables.open_file(self.h5fname, mode="w", title="Title",
                                       driver=self.DRIVER,
                                       driver_core_backing_store=0)
        self.h5file.create_array(self.h5file.root, 'array', [1, 2],
                                 title="Array")
        self.h5file.create_table(self.h5file.root, 'table', {'var1': IntCol()},
                                 "Table")
        self.h5file.root._v_attrs.testattr = 41

        # ensure that the __str__ method works even if there is no physical
        # file on disk (in which case the os.stat operation for date retrieval
        # fails)
        self.assertTrue(str(self.h5file) is not None)

        self.h5file.close()
        self.assertFalse(os.path.exists(self.h5fname))
class QuantizeTestCase(common.TempFileMixin, TestCase):
    """Check the ``quantize`` filter function and its on-disk effect.

    Reference arrays are written with several ``least_significant_digit``
    settings and read back to verify the truncation (and that integer
    data passes through unchanged).
    """

    mode = "w"
    title = "This is the table title"
    expectedrows = 10
    appendrows = 5

    def setUp(self):
        super(QuantizeTestCase, self).setUp()
        self.data = numpy.linspace(-5., 5., 41)
        self.randomdata = numpy.random.random_sample(1000000)
        # numpy.random.random_integers() is deprecated (and removed in
        # NumPy >= 1.25).  randint() draws from a half-open interval, so
        # an upper bound of 1000001 keeps the original inclusive
        # [-1000000, 1000000] range.
        self.randomints = numpy.random.randint(
            -1000000, 1000001, 1000000).astype('int64')
        self.populateFile()
        self.h5file.close()
        # Expected result of quantizing self.data with 0 and with -1
        # significant digits, respectively.
        self.quantizeddata_0 = numpy.asarray(
            [-5.] * 2 + [-4.] * 5 + [-3.] * 3 + [-2.] * 5 + [-1.] * 3 +
            [0.] * 5 + [1.] * 3 + [2.] * 5 + [3.] * 3 + [4.] * 5 + [5.] * 2)
        self.quantizeddata_m1 = numpy.asarray(
            [-8.] * 4 + [0.] * 33 + [8.] * 4)

    def populateFile(self):
        """Write the reference arrays with various quantization levels."""
        root = self.h5file.root
        filters = Filters(complevel=1, complib="blosc",
                          least_significant_digit=1)
        ints = self.h5file.create_carray(root, "integers", Int64Atom(),
                                         (1000000,), filters=filters)
        ints[:] = self.randomints
        floats = self.h5file.create_carray(root, "floats", Float32Atom(),
                                           (1000000,), filters=filters)
        floats[:] = self.randomdata
        data1 = self.h5file.create_carray(root, "data1", Float64Atom(),
                                          (41,), filters=filters)
        data1[:] = self.data
        filters = Filters(complevel=1, complib="blosc",
                          least_significant_digit=0)
        data0 = self.h5file.create_carray(root, "data0", Float64Atom(),
                                          (41,), filters=filters)
        data0[:] = self.data
        filters = Filters(complevel=1, complib="blosc",
                          least_significant_digit=2)
        data2 = self.h5file.create_carray(root, "data2", Float64Atom(),
                                          (41,), filters=filters)
        data2[:] = self.data
        filters = Filters(complevel=1, complib="blosc",
                          least_significant_digit=-1)
        datam1 = self.h5file.create_carray(root, "datam1", Float64Atom(),
                                           (41,), filters=filters)
        datam1[:] = self.data

    def test00_quantizeData(self):
        """Checking the quantize() function."""
        quantized_0 = quantize(self.data, 0)
        quantized_1 = quantize(self.data, 1)
        quantized_2 = quantize(self.data, 2)
        quantized_m1 = quantize(self.data, -1)
        numpy.testing.assert_array_equal(quantized_0, self.quantizeddata_0)
        # One or two significant digits leave this 0.25-spaced data intact.
        numpy.testing.assert_array_equal(quantized_1, self.data)
        numpy.testing.assert_array_equal(quantized_2, self.data)
        numpy.testing.assert_array_equal(quantized_m1, self.quantizeddata_m1)

    def test01_quantizeDataMaxError(self):
        """Checking the maximum error introduced by the quantize() function."""
        quantized_0 = quantize(self.randomdata, 0)
        quantized_1 = quantize(self.randomdata, 1)
        quantized_2 = quantize(self.randomdata, 2)
        quantized_m1 = quantize(self.randomdata, -1)
        # assertLess is new in Python 2.7; assertTrue keeps compatibility
        # with older interpreters.
        self.assertTrue(numpy.abs(quantized_0 - self.randomdata).max() < 0.5)
        self.assertTrue(numpy.abs(quantized_1 - self.randomdata).max() < 0.05)
        self.assertTrue(numpy.abs(quantized_2 - self.randomdata).max() < 0.005)
        self.assertTrue(numpy.abs(quantized_m1 - self.randomdata).max() < 1.)

    def test02_array(self):
        """Checking quantized data as written to disk."""
        self.h5file = tables.open_file(self.h5fname, "r")
        numpy.testing.assert_array_equal(self.h5file.root.data1[:], self.data)
        numpy.testing.assert_array_equal(self.h5file.root.data2[:], self.data)
        numpy.testing.assert_array_equal(self.h5file.root.data0[:],
                                         self.quantizeddata_0)
        numpy.testing.assert_array_equal(self.h5file.root.datam1[:],
                                         self.quantizeddata_m1)
        # Integer data must pass through quantization unchanged.
        numpy.testing.assert_array_equal(self.h5file.root.integers[:],
                                         self.randomints)
        self.assertEqual(self.h5file.root.integers[:].dtype,
                         self.randomints.dtype)
        # assertLess is new in Python 2.7 (see test01 above).
        self.assertTrue(
            numpy.abs(self.h5file.root.floats[:] - self.randomdata).max() <
            0.05
        )
def suite():
    """Build the unittest suite for this test module.

    Heavy (slow) cases are only added when ``common.heavy`` is enabled.
    """
    import doctest

    theSuite = unittest.TestSuite()
    niter = 1
    # common.heavy = 1  # Uncomment this only for testing purposes!

    for i in range(niter):
        theSuite.addTest(unittest.makeSuite(FiltersCase1))
        theSuite.addTest(unittest.makeSuite(FiltersCase2))
        theSuite.addTest(unittest.makeSuite(FiltersCase10))
        theSuite.addTest(unittest.makeSuite(FiltersCaseBloscBloscLZ))
        theSuite.addTest(unittest.makeSuite(FiltersCaseBloscLZ4))
        theSuite.addTest(unittest.makeSuite(FiltersCaseBloscLZ4HC))
        theSuite.addTest(unittest.makeSuite(FiltersCaseBloscSnappy))
        theSuite.addTest(unittest.makeSuite(FiltersCaseBloscZlib))
        theSuite.addTest(unittest.makeSuite(FiltersCaseBloscBitShuffle))
        theSuite.addTest(unittest.makeSuite(CopyGroupCase1))
        theSuite.addTest(unittest.makeSuite(CopyGroupCase2))
        theSuite.addTest(unittest.makeSuite(CopyFileCase1))
        theSuite.addTest(unittest.makeSuite(CopyFileCase2))
        theSuite.addTest(unittest.makeSuite(GroupFiltersTestCase))
        theSuite.addTest(unittest.makeSuite(SetBloscMaxThreadsTestCase))
        theSuite.addTest(unittest.makeSuite(FilterTestCase))
        # Run the usage examples embedded in the filters docstrings too.
        theSuite.addTest(doctest.DocTestSuite(tables.filters))
        # File-driver test cases.
        theSuite.addTest(unittest.makeSuite(DefaultDriverTestCase))
        theSuite.addTest(unittest.makeSuite(Sec2DriverTestCase))
        theSuite.addTest(unittest.makeSuite(StdioDriverTestCase))
        theSuite.addTest(unittest.makeSuite(CoreDriverTestCase))
        theSuite.addTest(unittest.makeSuite(CoreDriverNoBackingStoreTestCase))
        theSuite.addTest(unittest.makeSuite(SplitDriverTestCase))
        theSuite.addTest(unittest.makeSuite(LogDriverTestCase))
        theSuite.addTest(unittest.makeSuite(DirectDriverTestCase))
        theSuite.addTest(unittest.makeSuite(WindowsDriverTestCase))
        theSuite.addTest(unittest.makeSuite(FamilyDriverTestCase))
        theSuite.addTest(unittest.makeSuite(MultiDriverTestCase))
        theSuite.addTest(unittest.makeSuite(MpioDriverTestCase))
        theSuite.addTest(unittest.makeSuite(MpiPosixDriverTestCase))
        theSuite.addTest(unittest.makeSuite(StreamDriverTestCase))
        theSuite.addTest(unittest.makeSuite(InMemoryCoreDriverTestCase))
        theSuite.addTest(unittest.makeSuite(QuantizeTestCase))
        if common.heavy:
            theSuite.addTest(unittest.makeSuite(CreateTestCase))
            theSuite.addTest(unittest.makeSuite(FiltersCase3))
            theSuite.addTest(unittest.makeSuite(FiltersCase4))
            theSuite.addTest(unittest.makeSuite(FiltersCase5))
            theSuite.addTest(unittest.makeSuite(FiltersCase6))
            theSuite.addTest(unittest.makeSuite(FiltersCase7))
            theSuite.addTest(unittest.makeSuite(FiltersCase8))
            theSuite.addTest(unittest.makeSuite(FiltersCase9))
            theSuite.addTest(unittest.makeSuite(CopyFileCase3))
            theSuite.addTest(unittest.makeSuite(CopyFileCase4))
            theSuite.addTest(unittest.makeSuite(CopyFileCase5))
            theSuite.addTest(unittest.makeSuite(CopyFileCase6))
            theSuite.addTest(unittest.makeSuite(CopyFileCase7))
            theSuite.addTest(unittest.makeSuite(CopyFileCase8))
            theSuite.addTest(unittest.makeSuite(CopyFileCase10))
            theSuite.addTest(unittest.makeSuite(CopyGroupCase3))
            theSuite.addTest(unittest.makeSuite(CopyGroupCase4))
            theSuite.addTest(unittest.makeSuite(CopyGroupCase5))
            theSuite.addTest(unittest.makeSuite(CopyGroupCase6))
            theSuite.addTest(unittest.makeSuite(CopyGroupCase7))
            theSuite.addTest(unittest.makeSuite(CopyGroupCase8))

    return theSuite
if __name__ == '__main__':
    # Allow running this test module standalone from the command line.
    common.parse_argv(sys.argv)
    common.print_versions()
    unittest.main(defaultTest='suite')
|
en
| 0.801765
|
# -*- coding: utf-8 -*- This test unit checks object creation funtions, like open_file, create_table, create_array or create_group. It also checks: - name identifiers in tree objects - title character limit for objects (255) - limit in number in table fields (255) # 4-character String # integer # short integer # double (double-precision) # float (single-precision) # (2 ** 31) # Create an instance of HDF5 Table # Create a table object # Create an array object # Create a group object Testing table creation. Checking protection against node overwriting. Checking syntax in object tree names. # another name error # Finally, test a reserved word Checking the self.title attr in nodes. # Close the opened file to destroy the object tree # Now, test that self.title exists and is correct in all the nodes Checking large title character length limit (1023) # Try to put a very long title on a group object # Now, try with a table object # Finally, try with an Array object Checking a large number of fields in tables # The number of fields for a table # Build a dictionary with the types as values and varnames as keys # Append this entry to indicate the alignment! # Write 10 records # write data on disk # Read all the data as a list # Compare the input rowlist and output row list. They should # be equal. # The next limitation has been released. A warning is still there, though Checking an excess of the maximum number of fields in tables # The number of fields for a table # Build a dictionary with the types as values and varnames as keys # Now, create a table with this record object # This way of creating node objects has been deprecated # table = Table(recordDict, "MetaRecord instance") # Attach the table to object tree # Here, a tables.PerformanceWarning should be raised! 
# Reset the warning # The next limitation has been released Checking an excess (256) of the maximum length in column names # Build a dictionary with the types as values and varnames as keys # Should trigger a ValueError # Now, create a table with this record object # This way of creating node objects has been deprecated # Attach the table to object tree # Here, ValueError should be raised! Checking unlimited length in column names # Build a dictionary with the types as values and varnames as keys # Should work well # Attach the table to object tree # Here, IndexError should be raised! # 4-character String # integer # short integer # Create a tree with three levels of depth # Create a table # Get the record object associated with the new table # Fill the table # This injects the Record values # Flush the buffer for this table # Create a couple of arrays in each group # Create a couple of EArrays as well # And fill them with some values # Finally a couple of VLArrays too # And fill them with some values # Create a new group (descendant of group) # The second level # third level # The rest of levels # Iterate over this new group (group2) Checking inheritance of filters on trees (open file version) # First level check # The next nodes have to have the same filter properties as # self.filters # Second and third level check # The next nodes have to have the same filter properties as # gfilters # Fourth and fifth level check # If None, the filters are inherited! 
# The next nodes have to have the same filter properties as # self.filter # Checking the special case for Arrays in which the compression # should always be the empty Filter() # The next nodes have to have the same filter properties as # Filter() Checking inheritance of filters on trees (close file version) # Close the file # First level check # The next nodes have to have the same filter properties as # self.filters # Second and third level check # The next nodes have to have the same filter properties as # gfilters # Fourth and fifth level check # The next nodes have to have the same filter properties as # self.filters # Checking the special case for Arrays in which the compression # should always be the empty Filter() # The next nodes have to have the same filter properties as # Filter() # Create a temporary file # Create the destination # Add some user attrs: # Create a tree # Create a new group (brother of group) # Create a table # Get the record object associated with the new table # Fill the table # This injects the Record values # Flush the buffer for this table # Add some user attrs: # Create a couple of arrays in each group # Create a couple of EArrays as well # Add some user attrs: # And fill them with some values # Create a new group (descendant of group) # Iterate over this new group (group3) # Add some user attrs: # Close the file Checking non-recursive copy of a Group # Copy a group non-recursively #srcgroup._f_copy_children(self.h5file2.root, recursive=False, # filters=self.filters) # Close the destination file # And open it again # Check that the copy has been done correctly # Sort the lists Checking non-recursive copy of a Group (attributes copied) # Copy a group non-recursively with attrs # Close the destination file # And open it again # Check that the copy has been done correctly # Filters may differ, do not take into account # These lists should already be ordered # Now, for the contents of attributes Checking recursive copy of a Group # 
Create the destination node # Copy a group non-recursively # Case where srcgroup == "/" # Close the destination file # And open it again # Check that the copy has been done correctly # Case where dstgroup == "/" # skip the first group # skip the first group Checking recursive copy of a Group (cheking Filters) # Create the destination node # Copy a group non-recursively # Case where srcgroup == "/" # Close the destination file # And open it again # Check that the copy has been done correctly # Case where dstgroup == "/" # skip the first group # skip the first group # Create a temporary file # Create the source file # Add some user attrs: # Create a tree # Create a new group (brother of group) # Create a table # Get the record object associated with the new table # Fill the table # This injects the Record values # Flush the buffer for this table # Add some user attrs: # Create a couple of arrays in each group # Create a couple of EArrays as well # Add some user attrs: # And fill them with some values # Create a new group (descendant of group) # Iterate over this new group (group3) # Add some user attrs: # Close the file Checking copy of a File (overwriting file) # Create a temporary file # Copy the file to the destination # Close the original file, if needed # ...and open the destination file # Check that the copy has been done correctly # Sort the lists Checking copy of a File (srcfile == dstfile) # Copy the file to the destination Checking copy of a File (first-class function) # Close the temporary file # Copy the file to the destination # ...and open the source and destination file # Check that the copy has been done correctly # Sort the lists Checking copy of a File (attributes not copied) # Copy the file to the destination # Close the original file, if needed # ...and open the destination file # Check that the copy has been done correctly # Sort the lists # print("_v_attrnames-->", self.h5file2.root._v_attrs._v_attrnames) # print("--> <%s,%s>" % 
(self.h5file2.title, self.title)) # Check that user attributes has not been copied # Filters may differ, do not take into account # These lists should already be ordered # Now, for the contents of attributes Checking copy of a File (attributes copied) # Copy the file to the destination # Close the original file, if needed # ...and open the destination file # Check that the copy has been done correctly # These lists should already be ordered # Filters may differ, do not take into account # Now, for the contents of attributes Checking copy of a File (checking not overwriting) # Create two empty files: # close the second one # Copy the first into the second # Delete files # something non-default # Get node filters. # Compare to given filters. # Guess filters to compare to by node name. Filters propagating to children. Keeping filters when copying a file. Overriding filters when copying a file. # Check expected current filters. # Change filters. # Get and check changed filters. Changing the filters of a group. Deleting the filters of a group. Checking set_blosc_max_threads() Checking set_blosc_max_threads() (re-open) # Create an HDF5 file and contents # Open an existing HDF5 file # check contents # Open an existing HDF5 file in append mode # check contents # write new data # check contents # Open an existing HDF5 file in append mode # check contents # write new data # check contents Ensure that nothing is written to file. 
# Create an HDF5 file and contents # flush # Create an HDF5 file and contents # flush # Create an HDF5 file and contents # flush # Open an existing HDF5 file # compute the file hash # Open an existing HDF5 file in append mode # check contents # write new data # ensure that there is no change on the file on disk # compute the file hash # Open an existing HDF5 file in append mode # check contents # write new data # ensure that there is no change on the file on disk #super(SplitDriverTestCase, self).tearDown() # local binding # Open an existing file # Get the CLASS attribute of the arr object # Open an existing file # Get the CLASS attribute of the arr object # Open an existing file # Get the CLASS attribute of the arr object # Open an existing file # Get the CLASS attribute of the arr object # Open an existing file # Get the CLASS attribute of the arr object # Open an existing file # Get the CLASS attribute of the arr object # Open an existing file # Get the CLASS attribute of the arr object # ensure that the __str__ method works even if there is no phisical # file on disk (in which case the os.stat operation for date retrieval # fails) Checking the quantize() function. Checking the maximum error introduced by the quantize() function. # assertLess is new in Python 2.7 #self.assertLess(numpy.abs(quantized_0 - self.randomdata).max(), 0.5) #self.assertLess(numpy.abs(quantized_1 - self.randomdata).max(), 0.05) #self.assertLess(numpy.abs(quantized_2 - self.randomdata).max(), 0.005) #self.assertLess(numpy.abs(quantized_m1 - self.randomdata).max(), 1.) Checking quantized data as written to disk. # assertLess is new in Python 2.7 #self.assertLess( # numpy.abs(self.h5file.root.floats[:] - self.randomdata).max(), # 0.05 #) # common.heavy = 1 # Uncomment this only for testing purposes!
| 2.483591
| 2
|
app/business/globus_client.py
|
justincc/RDSDS-Server
| 0
|
6626974
|
<gh_stars>0
from globus_sdk import ConfidentialAppAuthClient
import logging
from app.core.config import GLOBUS_CLIENT_ID, GLOBUS_CLIENT_SECRET
class GlobusClient:
    """Holder for the process-wide Globus confidential-app auth client."""

    # Populated by load_app_client(); None until that is called.
    client: ConfidentialAppAuthClient = None
globus = GlobusClient()
def load_app_client():
    """Instantiate the shared ConfidentialAppAuthClient from app config."""
    globus.client = ConfidentialAppAuthClient(
        GLOBUS_CLIENT_ID, GLOBUS_CLIENT_SECRET)
|
from globus_sdk import ConfidentialAppAuthClient
import logging
from app.core.config import GLOBUS_CLIENT_ID, GLOBUS_CLIENT_SECRET
class GlobusClient:
client: ConfidentialAppAuthClient = None
globus = GlobusClient()
def load_app_client():
globus.client = ConfidentialAppAuthClient(
GLOBUS_CLIENT_ID, GLOBUS_CLIENT_SECRET)
|
none
| 1
| 1.822167
| 2
|
|
topcoder/update_topcoder_readme.py
|
0x8b/HackerRank
| 3
|
6626975
|
#!/usr/bin/env python
"""Regenerate the TopCoder README with per-category solution tables."""
import json
from operator import itemgetter
from pathlib import Path

with open("data.json", "r") as f:
    data = json.load(f)

# Problem name -> description URL.
# NOTE(review): unused below (problem["desc"] is read directly); kept so
# any importer relying on this module-level name keeps working.
links = {
    problem["name"]: problem["desc"]
    for problems in data.values()
    for problem in problems
}

# Map solution-file extension to its display name.
languages = {
    "py": "Python",
    "rs": "Rust",
}

# The solutions directory never changes; build the Path once instead of
# once per problem.
solutions = Path("solutions")

with open("README.md", "w", encoding="utf8") as readme:
    readme.write("# TopCoder\n\n")
    for category in sorted(data.keys()):
        readme.write(f"### {category}\n\n")
        # Constant header rows need no f-string interpolation.
        readme.write("|Name|Solution|Description|\n")
        readme.write("|---|---|---|\n")
        for problem in sorted(data[category], key=itemgetter("name")):
            name = problem["name"]
            for path in solutions.glob(f"{name}.*"):
                line = [
                    name,
                    f"[{languages[path.suffix[1:]]}](/topcoder/solutions/{path.name})",
                    f'<a href="{problem["desc"]}" target="_blank">TopCoder \U0001F855</a>',
                ]
                readme.write(f'|{"|".join(map(str, line))}|\n')
|
#!/usr/bin/env python
import json
from operator import itemgetter
from pathlib import Path
with open("data.json", "r") as f:
data = json.load(f)
links = {
problem["name"]: problem["desc"]
for problems in data.values()
for problem in problems
}
languages = {
"py": "Python",
"rs": "Rust",
}
with open("README.md", "w", encoding="utf8") as readme:
readme.write("# TopCoder\n\n")
for category in sorted(data.keys()):
readme.write(f"### {category}\n\n")
readme.write(f"|Name|Solution|Description|\n")
readme.write(f"|---|---|---|\n")
for problem in sorted(data[category], key=itemgetter("name")):
name = problem["name"]
solutions = Path("solutions")
for path in solutions.glob(f"{name}.*"):
line = [
name,
f"[{languages[path.suffix[1:]]}](/topcoder/solutions/{path.name})",
f'<a href="{problem["desc"]}" target="_blank">TopCoder \U0001F855</a>',
]
readme.write(f'|{"|".join(map(str, line))}|\n')
|
es
| 0.111254
|
#!/usr/bin/env python ## {category}\n\n")
| 2.951359
| 3
|
e2emlstorlets/tools/swift_access.py
|
eranr/e2emlstorlets
| 0
|
6626976
|
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ConfigParser
from swiftclient import client
def parse_config(config_file):
    """Parse the swift access configuration file.

    :param config_file: path to an ini-style file with a [Default] section
    :returns: dict mapping option names to their string values
    :raises Exception: if any required access option is missing
    """
    required_keys = ['auth_version',
                     'user',
                     'password',
                     'project_name',
                     'domain_name',
                     'auth_uri',
                     'repo_dir']
    conf = dict()
    config = ConfigParser.ConfigParser()
    config.read(config_file)
    # Collect every option from the [Default] section.  config.get() only
    # raises for genuinely malformed input, so the previous
    # "try: ... except: raise" wrapper (a no-op that also swallowed the
    # intent) has been removed.
    for option in config.options('Default'):
        conf[option] = config.get('Default', option)
    if not all(k in conf for k in required_keys):
        raise Exception('Missing access information')
    return conf
def get_auth(conf):
    """
    Get token string to access to swift

    :param conf: a dict of config parameters
    :returns: (swift endpoint url, token string)
    """
    # Keystone expects the account as "<project>:<user>".
    account = conf['project_name'] + ':' + conf['user']
    options = {'user_domain_name': conf['domain_name'],
               'project_name': conf['project_name']}
    return client.get_auth(conf['auth_uri'], account, conf['password'],
                           os_options=options,
                           auth_version=conf['auth_version'])
def put_local_file(url, token, container, local_dir, local_file, headers=None):
    """Upload a local file to a Swift container.

    :param url: swift endpoint url
    :param token: token string to access swift
    :param container: name of the target container
    :param local_dir: directory path where the target file is placed
    :param local_file: name of the file to be put to swift
    :param headers: headers parameters to be included in request headers
    :raises Exception: if Swift does not answer with a 2xx status
    """
    resp = dict()
    # Open in binary mode: the object is uploaded as
    # application/octet-stream, and text mode ('r' in the original)
    # would corrupt binary content.
    with open(os.path.join(local_dir, local_file), 'rb') as f:
        client.put_object(url, token, container, local_file, f,
                          headers=headers,
                          content_type="application/octet-stream",
                          response_dict=resp)
    status = resp.get('status', 0)
    # Raise explicitly instead of assert: asserts are stripped under -O.
    if status // 100 != 2:
        raise Exception('PUT of %s to container %s failed with status %s'
                        % (local_file, container, status))
def deploy_storlet(conf, path_to_storlet, module_main):
    """Upload a Python storlet module to the 'storlet' container.

    :param conf: a dict of config parameters (see parse_config)
    :param path_to_storlet: local path of the storlet module file
    :param module_main: dotted name of the storlet's main class
    """
    url, token = get_auth(conf)
    storlet_headers = {
        'X-Object-Meta-Storlet-Language': 'Python',
        'X-Object-Meta-Storlet-Interface-Version': '1.0',
        'X-Object-Meta-Storlet-Object-Metadata': 'no',
        'X-Object-Meta-Storlet-Main': module_main,
    }
    directory, filename = os.path.split(path_to_storlet)
    put_local_file(url, token, 'storlet', directory, filename,
                   storlet_headers)
def _parse_data_url(data_url):
url_elements = data_url.split('/')
url_elements = [el for el in url_elements if el != '']
return url_elements[0], url_elements[1]
|
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ConfigParser
from swiftclient import client
def parse_config(config_file):
required_keys = ['auth_version',
'user',
'password',
'project_name',
'domain_name',
'auth_uri',
'repo_dir']
conf = dict()
config = ConfigParser.ConfigParser()
config.read(config_file)
options = config.options('Default')
for option in options:
try:
conf[option] = config.get('Default', option)
except:
raise
if all([k in conf for k in required_keys]) is False:
raise Exception('Missing access information')
return conf
def get_auth(conf):
"""
Get token string to access to swift
:param conf: a dict of config parameters
:returns: (swift endpoint url, token string)
"""
auth_url = conf['auth_uri']
project = conf['project_name']
os_options = {'user_domain_name': conf['domain_name'],
'project_name': conf['project_name']}
user = conf['user']
passwd = conf['password']
url, token = client.get_auth(auth_url, project + ':' + user, passwd,
os_options=os_options,
auth_version=conf['auth_version'])
return url, token
def put_local_file(url, token, container, local_dir, local_file, headers=None):
"""
Put local file to swift
:param url: swift endpoint url
:param token: token string to access to swift
:param local_dir: directory path where the target file is placed
:param loca_file: name of the file to be put to swift
:param headers: headers parameters to be included in request headers
"""
resp = dict()
with open(os.path.join(local_dir, local_file), 'r') as f:
client.put_object(url, token, container, local_file, f,
headers=headers,
content_type="application/octet-stream",
response_dict=resp)
status = resp.get('status', 0)
assert (status // 100 == 2)
def deploy_storlet(conf, path_to_storlet, module_main):
url, token = get_auth(conf)
headers = {'X-Object-Meta-Storlet-Language': 'Python',
'X-Object-Meta-Storlet-Interface-Version': '1.0',
'X-Object-Meta-Storlet-Object-Metadata': 'no',
'X-Object-Meta-Storlet-Main': module_main}
put_local_file(url, token, 'storlet', os.path.dirname(path_to_storlet),
os.path.basename(path_to_storlet), headers)
def _parse_data_url(data_url):
url_elements = data_url.split('/')
url_elements = [el for el in url_elements if el != '']
return url_elements[0], url_elements[1]
|
en
| 0.787364
|
# Copyright (c) 2010-2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. Get token string to access to swift :param conf: a dict of config parameters :returns: (swift endpoint url, token string) Put local file to swift :param url: swift endpoint url :param token: token string to access to swift :param local_dir: directory path where the target file is placed :param loca_file: name of the file to be put to swift :param headers: headers parameters to be included in request headers
| 2.178715
| 2
|
thingsGate/odoo/gate.py
|
lubusax/2005_thingsGate
| 0
|
6626977
|
<gh_stars>0
import requests, json, logging
from internet.internet import internetAccess,getInternet
class Gate:
    """Gateway device state, loaded from <dirPath>/data/thingsGate.json.

    Reads the registration flag from the JSON params file and drives
    first-time setup (internet connectivity, then Odoo).
    """
    def __init__(self,dirPath):
        # Root directory under which data/thingsGate.json lives.
        self.dirPath = dirPath
        self.thingsGateFilePath = self.dirPath+'/data/thingsGate.json'
        self.setParams()
        logging.debug("Gate Class Initialized")
    def setParams(self):
        """Load the 'thingsGate' section of the params file and derive
        self.gateRegistered from its 'registered' flag."""
        logging.debug("Params file is %s " % self.thingsGateFilePath)
        thingsGateFile = open(self.thingsGateFilePath)
        self.thingsGateDict = json.load(thingsGateFile)["thingsGate"]
        thingsGateFile.close()
        # Only the literal string "yes" counts as registered.
        self.gateRegistered = True if (
            self.thingsGateDict["registered"]=="yes") else False
        logging.debug(('Is the gate registered: {r}').format(r=self.gateRegistered))
        # self.db = self.thingsGateDict["db"][0]
        # self.user = self.thingsGateDict["user_name"][0]
        # self.pswd = self.thingsGateDict["user_password"][0]
        # self.host = self.thingsGateDict["odoo_host"][0]
        # self.port = self.thingsGateDict["odoo_port"][0]
        # self.adm = self.thingsGateDict["admin_id"][0]
        # self.tz = self.thingsGateDict["timezone"][0]
        # os.environ["TZ"] = tz_dic.tz_dic[self.tz]
        # time.tzset()
        # if "https" not in self.thingsGateDict:
        #     self.https_on = False
        # else:
        #     self.https_on = True
        # if self.https_on:
        #     if self.port:
        #         self.url_template = "https://%s:%s" % (self.host, self.port)
        #     else:
        #         self.url_template = "https://%s" % self.host
        # else:
        #     if self.port:
        #         self.url_template = "http://%s:%s" % (self.host, self.port)
        #     else:
        #         self.url_template = "http://%s" % self.host
        # self.uid = self._get_user_id()
    def internetSetup(self):
        """Block until internet access is available.

        getInternet() presumably brings a connection up each retry --
        TODO confirm against internet.internet.
        """
        while not internetAccess():
            getInternet()
        return True
    def odooSetup(self):
        # Placeholder: Odoo-side setup is not implemented yet.
        return True
    def gateSetup(self):
        """Run first-time setup: internet connectivity, then Odoo."""
        self.internetSetup()
        self.odooSetup()
        return True
def gateInit(dirPath):
    """Create the Gate for *dirPath* and run setup if it is unregistered."""
    logging.debug('gate init -has begun')
    gate = Gate(dirPath)
    if not gate.gateRegistered:
        gate.gateSetup()
    logging.debug('gate init- has ended')
|
import requests, json, logging
from internet.internet import internetAccess,getInternet
class Gate:
def __init__(self,dirPath):
self.dirPath = dirPath
self.thingsGateFilePath = self.dirPath+'/data/thingsGate.json'
self.setParams()
logging.debug("Gate Class Initialized")
def setParams(self):
logging.debug("Params file is %s " % self.thingsGateFilePath)
thingsGateFile = open(self.thingsGateFilePath)
self.thingsGateDict = json.load(thingsGateFile)["thingsGate"]
thingsGateFile.close()
self.gateRegistered = True if (
self.thingsGateDict["registered"]=="yes") else False
logging.debug(('Is the gate registered: {r}').format(r=self.gateRegistered))
# self.db = self.thingsGateDict["db"][0]
# self.user = self.thingsGateDict["user_name"][0]
# self.pswd = self.thingsGateDict["user_password"][0]
# self.host = self.thingsGateDict["odoo_host"][0]
# self.port = self.thingsGateDict["odoo_port"][0]
# self.adm = self.thingsGateDict["admin_id"][0]
# self.tz = self.thingsGateDict["timezone"][0]
# os.environ["TZ"] = tz_dic.tz_dic[self.tz]
# time.tzset()
# if "https" not in self.thingsGateDict:
# self.https_on = False
# else:
# self.https_on = True
# if self.https_on:
# if self.port:
# self.url_template = "https://%s:%s" % (self.host, self.port)
# else:
# self.url_template = "https://%s" % self.host
# else:
# if self.port:
# self.url_template = "http://%s:%s" % (self.host, self.port)
# else:
# self.url_template = "http://%s" % self.host
# self.uid = self._get_user_id()
def internetSetup(self):
while not internetAccess():
getInternet()
return True
def odooSetup(self):
return True
def gateSetup(self):
self.internetSetup()
self.odooSetup()
return True
def gateInit(dirPath):
logging.debug('gate init -has begun')
G = Gate(dirPath)
if not G.gateRegistered:
G.gateSetup()
logging.debug('gate init- has ended')
|
en
| 0.163062
|
# self.db = self.thingsGateDict["db"][0] # self.user = self.thingsGateDict["user_name"][0] # self.pswd = self.thingsGateDict["user_password"][0] # self.host = self.thingsGateDict["odoo_host"][0] # self.port = self.thingsGateDict["odoo_port"][0] # self.adm = self.thingsGateDict["admin_id"][0] # self.tz = self.thingsGateDict["timezone"][0] # os.environ["TZ"] = tz_dic.tz_dic[self.tz] # time.tzset() # if "https" not in self.thingsGateDict: # self.https_on = False # else: # self.https_on = True # if self.https_on: # if self.port: # self.url_template = "https://%s:%s" % (self.host, self.port) # else: # self.url_template = "https://%s" % self.host # else: # if self.port: # self.url_template = "http://%s:%s" % (self.host, self.port) # else: # self.url_template = "http://%s" % self.host # self.uid = self._get_user_id()
| 2.54563
| 3
|
proj/hps_accel/gateware/gen2/test_macc.py
|
keadwen/CFU-Playground
| 240
|
6626978
|
<gh_stars>100-1000
#!/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for macc.py"""
from collections import namedtuple
import random
from nmigen import unsigned, signed
from nmigen.sim import Delay
from nmigen_cfu import TestBase
from .macc import MaccBlock
class MaccBlockTest(TestBase):
    """Tests MaccBlock: a 4-lane multiply-accumulate pipeline.

    Operands arrive as 32-bit words packing four unsigned(8) 'a' values
    and four signed(8) 'b' values; lane products are summed into a
    signed(24) accumulator between the input_first and input_last
    markers.
    """
    def create_dut(self):
        # 4 lanes of unsigned(8) x signed(8), accumulated into signed(24).
        return MaccBlock(4, unsigned(8), signed(8), signed(24))
    def test_basic_functions(self):
        # One record per cycle: the inputs (a, b, first, last) plus the
        # accumulator value expected on the output three cycles later.
        D = namedtuple('D', ['a', 'b', 'first', 'last', 'expected'])
        DATA = [
            # Filler - before sim start
            D(0, 0, 0, 0, 0),
            D(0, 0, 0, 0, 0),
            D(0, 0, 0, 0, 0),
            # Data starts here
            D(0, 0, 0, 0, 0),
            # Multiply two sets of numbers (1 byte only)
            D(1, -2, 1, 0, 0),  # -2
            D(5, 3, 0, 1, 13),  # +15
            D(6, 26, 0, 0, 13),  # parameters should not affect output
            D(2, 17, 0, 0, 13),
            D(4, 21, 0, 0, 13),
            # Four sets of four numbers - result calculated by hand
            D(0x01020304, 0x05060708, 1, 0, 13),
            D(0x0a0b0c0d, 0xfffefdfc, 0, 0, 13),  # filter values negative
            D(0x18191a1b, 0x1113171d, 0, 0, 13),
            D(0x22232425, 0x1b1d2127, 0, 1, 6778),
            # ((6, 26, 0, 0), 13),  # parameters should not affect output
            # ((2, 17, 0, 0), 13),
            # ((4, 21, 0, 0), 6778),
            # Filler - wait for results to percolate through system
            D(0, 0, 0, 0, 0),
            D(0, 0, 0, 0, 0),
            D(0, 0, 0, 0, 0),
        ]
        def process():
            # data is the current cycle's input; data_prev checks the
            # one-cycle pass-through of a/b/first/last; data_prev3 checks
            # the accumulator, which lags the inputs by three cycles.
            for (data, data_prev, data_prev3) in zip(DATA[3:], DATA[2:], DATA):
                # Set inputs
                yield self.dut.input_a.eq(data.a)
                yield self.dut.input_b.eq(data.b)
                yield self.dut.input_first.eq(data.first)
                yield self.dut.input_last.eq(data.last)
                yield Delay(0.1)
                # check inputs correctly passed onward
                self.assertEqual((yield self.dut.output_a), data_prev.a & 0xffff_ffff)
                self.assertEqual((yield self.dut.output_b), data_prev.b & 0xffff_ffff)
                self.assertEqual((yield self.dut.output_first), data_prev.first)
                self.assertEqual((yield self.dut.output_last), data_prev.last)
                # Check output is as expected
                self.assertEqual((yield self.dut.output_accumulator), data_prev3.expected)
                self.assertEqual((yield self.dut.output_accumulator_new), data_prev3.last)
                yield
        self.run_sim(process, False)
    def check_calculation(self, a_list, b_list):
        """Checks a given calculation:

        Args:
          a_list: first operands. Size is a multiple of 4.
          b_list: second operands. List of integers the same size as a_list.
        """
        expected_result = sum(a * b for a, b in zip(a_list, b_list))
        def to_word(x, y, z, t):
            # Pack four byte-sized values, little-endian, into a 32-bit word.
            return ((x & 0xff) |
                    ((y & 0xff) << 8) |
                    ((z & 0xff) << 16) |
                    ((t & 0xff) << 24))
        def process():
            # Send in all inputs
            num_inputs = len(a_list) // 4
            for i in range(num_inputs):
                a = to_word(*a_list[i * 4: (i + 1) * 4])
                b = to_word(*b_list[i * 4: (i + 1) * 4])
                yield self.dut.input_a.eq(a)
                yield self.dut.input_b.eq(b)
                yield self.dut.input_first.eq(i == 0)
                yield self.dut.input_last.eq(i == (num_inputs - 1))
                yield
            # wait for output to be available
            yield self.dut.input_last.eq(0)
            yield
            yield
            self.assertFalse((yield self.dut.output_accumulator_new))
            yield
            # The accumulator becomes valid three cycles after the last input.
            self.assertTrue((yield self.dut.output_accumulator_new))
            self.assertEqual((yield self.dut.output_accumulator), expected_result)
            yield
            # output_accumulator_new is a single-cycle strobe.
            self.assertFalse((yield self.dut.output_accumulator_new))
        return process()
    def check_random_calculation(self, size, seed):
        """Checks a randomly generated calculation.

        Args:
          size - number of arguments must be divisble by 4
          seed - used to seed the generator.
        """
        random.seed(seed)
        # a values span the unsigned(8) range, b the signed(8) range.
        a_list = [random.randrange(0, 256) for _ in range(size)]
        b_list = [random.randrange(-128, 128) for _ in range(size)]
        return self.check_calculation(a_list, b_list)
    def test_larger_calculations(self):
        def process():
            yield from self.check_random_calculation(32, 1)
            yield from self.check_random_calculation(500, 2)
            yield from self.check_random_calculation(64, 3)
            yield from self.check_random_calculation(48, 3)
        self.run_sim(process, False)
    def test_layer_04_index_15200(self):
        # Real values from a problematic calculation in gen2.
        a_list = [
            0, 0, 0, 21, 0, 0, 61, 0, 0, 0, 2, 0, 0, 0, 0, 17, 0, 0, 0, 44, 0,
            0, 81, 0, 0, 0, 49, 0, 81, 0, 0, 39, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0,
            9, 0, 6, 10, 0, 29, 0, 0, 0, 25, 0, 0, 7, 0, 0, 0, 0, 0, 0, 3, 0, 9,
            0, 0, 0, 16, 0, 0, 56, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 14, 0, 38, 0,
            0, 77, 0, 0, 0, 32, 0, 3, 0, 0, 21, 0, 0, 0, 22, 0, 9, 42, 0, 0, 0,
            31, 0, 83, 0, 0, 48, 0, 0, 0, 16, 0, 0, 2, 0, 0, 0, 14, 0, 0, 0, 0,
            17, 0, 0, 0, 17, 0, 0, 54, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 20,
            0, 0, 58, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 42, 0, 0, 84, 0, 0,
            0, 59, 0, 69, 0, 0, 35, 0, 0, 0, 3, 0, 15, 13, 0, 0, 12, 3, 0, 35,
            33, 0, 30, 0, 0, 0, 17, 0, 0, 51, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0,
            0, 17, 0, 0, 53, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 14, 0, 32, 0, 0, 66,
            0, 0, 0, 19, 0, 0, 0, 0, 18, 0, 0, 0, 46, 0, 37, 56, 0, 0, 0, 49, 0,
            135, 0, 0, 59
        ]
        b_list = [
            -16, 3, 2, -23, -4, 11, 31, 5, 15, 11, 36, 27, 10, 22, 30, 28, -37,
            3, 6, -22, 18, -2, 25, 9, 7, 47, 17, 10, 54, 0, 70, 28, -12, -13, 6,
            -9, -8, -7, 42, 5, -19, 13, 4, -27, -4, -12, 41, 49, -15, -13, 2, 2,
            -7, 8, 24, 39, -13, -41, 8, 16, -28, -39, -16, 28, 55, 39, 3, -4,
            -52, -60, 41, -115, -27, 125, 3, 1, 6, 43, -20, 1, 49, 33, -3, 10,
            -27, -16, -1, -71, -18, -16, 0, 16, 0, 20, -17, -12, 1, 12, 7, 27,
            -22, 15, 29, -27, -5, 33, -1, 17, 50, 6, 5, 20, -29, -24, -26, 14,
            -52, 23, 13, 3, -38, -43, 13, -6, -1, 11, -53, 13, -42, -87, -2, 12,
            17, -17, -22, -71, -7, 30, -18, -20, -117, 3, -59, 2, -31, -22, -1,
            -10, -20, -45, -13, -93, -49, -65, -21, -52, -78, -48, -10, -19, 18,
            -9, 4, 11, 4, 45, -31, -72, -27, -127, -21, -32, -12, -45, 19, -10,
            -7, -39, -18, 17, 14, 25, -28, 7, -31, -47, 10, 3, -36, -9, -46, -6,
            25, -89, 20, -11, 25, -15, -39, 4, -17, 23, -37, -59, -18, -3, -60,
            -30, -21, -49, 10, -7, 33, -20, -29, -33, 11, -2, -14, -27, -74,
            -10, -14, -40, 70, -9, -6, 14, 10, 1, -18, -84, 4, 33, -26, -31, -20,
            -49, -7, -17, -6, 14, -11, -17, 31, 1, -25, -22, 5, 3, 26, -1, -3,
            -27, 3, -24
        ]
        def process():
            yield from self.check_calculation(a_list, b_list)
        self.run_sim(process, False)
|
#!/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for macc.py"""
from collections import namedtuple
import random
from nmigen import unsigned, signed
from nmigen.sim import Delay
from nmigen_cfu import TestBase
from .macc import MaccBlock
class MaccBlockTest(TestBase):
"""Tests MaccBlock"""
def create_dut(self):
return MaccBlock(4, unsigned(8), signed(8), signed(24))
def test_basic_functions(self):
D = namedtuple('D', ['a', 'b', 'first', 'last', 'expected'])
DATA = [
# Filler - before sim start
D(0, 0, 0, 0, 0),
D(0, 0, 0, 0, 0),
D(0, 0, 0, 0, 0),
# Data starts here
D(0, 0, 0, 0, 0),
# Multiply two sets of numbers (1 byte only)
D(1, -2, 1, 0, 0), # -2
D(5, 3, 0, 1, 13), # +15
D(6, 26, 0, 0, 13), # parameters should not affect output
D(2, 17, 0, 0, 13),
D(4, 21, 0, 0, 13),
# Four sets of four numbers - result calculated by hand
D(0x01020304, 0x05060708, 1, 0, 13),
D(0x0a0b0c0d, 0xfffefdfc, 0, 0, 13), # filter values negative
D(0x18191a1b, 0x1113171d, 0, 0, 13),
D(0x22232425, 0x1b1d2127, 0, 1, 6778),
# ((6, 26, 0, 0), 13), # parameters should not affect output
# ((2, 17, 0, 0), 13),
# ((4, 21, 0, 0), 6778),
# Filler - wait for results to percolate through system
D(0, 0, 0, 0, 0),
D(0, 0, 0, 0, 0),
D(0, 0, 0, 0, 0),
]
def process():
for (data, data_prev, data_prev3) in zip(DATA[3:], DATA[2:], DATA):
# Set inputs
yield self.dut.input_a.eq(data.a)
yield self.dut.input_b.eq(data.b)
yield self.dut.input_first.eq(data.first)
yield self.dut.input_last.eq(data.last)
yield Delay(0.1)
# check inputs correctly passed onward
self.assertEqual((yield self.dut.output_a), data_prev.a & 0xffff_ffff)
self.assertEqual((yield self.dut.output_b), data_prev.b & 0xffff_ffff)
self.assertEqual((yield self.dut.output_first), data_prev.first)
self.assertEqual((yield self.dut.output_last), data_prev.last)
# Check output is as expected
self.assertEqual((yield self.dut.output_accumulator), data_prev3.expected)
self.assertEqual((yield self.dut.output_accumulator_new), data_prev3.last)
yield
self.run_sim(process, False)
def check_calculation(self, a_list, b_list):
"""Checks a given calculation:
Args:
a_list: first operands. Size is a multiple of 4.
b_list: second operands. List of integers the same size as a_list.
"""
expected_result = sum(a * b for a, b in zip(a_list, b_list))
def to_word(x, y, z, t):
return ((x & 0xff) |
((y & 0xff) << 8) |
((z & 0xff) << 16) |
((t & 0xff) << 24))
def process():
# Send in all inputs
num_inputs = len(a_list) // 4
for i in range(num_inputs):
a = to_word(*a_list[i * 4: (i + 1) * 4])
b = to_word(*b_list[i * 4: (i + 1) * 4])
yield self.dut.input_a.eq(a)
yield self.dut.input_b.eq(b)
yield self.dut.input_first.eq(i == 0)
yield self.dut.input_last.eq(i == (num_inputs - 1))
yield
# wait for output to be available
yield self.dut.input_last.eq(0)
yield
yield
self.assertFalse((yield self.dut.output_accumulator_new))
yield
self.assertTrue((yield self.dut.output_accumulator_new))
self.assertEqual((yield self.dut.output_accumulator), expected_result)
yield
self.assertFalse((yield self.dut.output_accumulator_new))
return process()
def check_random_calculation(self, size, seed):
"""Checks a randomly generated calculation.
Args:
size - number of arguments must be divisble by 4
seed - used to seed the generator.
"""
random.seed(seed)
a_list = [random.randrange(0, 256) for _ in range(size)]
b_list = [random.randrange(-128, 128) for _ in range(size)]
return self.check_calculation(a_list, b_list)
def test_larger_calculations(self):
def process():
yield from self.check_random_calculation(32, 1)
yield from self.check_random_calculation(500, 2)
yield from self.check_random_calculation(64, 3)
yield from self.check_random_calculation(48, 3)
self.run_sim(process, False)
def test_layer_04_index_15200(self):
# Real values from a problematic calculation in gen2.
a_list = [
0, 0, 0, 21, 0, 0, 61, 0, 0, 0, 2, 0, 0, 0, 0, 17, 0, 0, 0, 44, 0,
0, 81, 0, 0, 0, 49, 0, 81, 0, 0, 39, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0,
9, 0, 6, 10, 0, 29, 0, 0, 0, 25, 0, 0, 7, 0, 0, 0, 0, 0, 0, 3, 0, 9,
0, 0, 0, 16, 0, 0, 56, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 14, 0, 38, 0,
0, 77, 0, 0, 0, 32, 0, 3, 0, 0, 21, 0, 0, 0, 22, 0, 9, 42, 0, 0, 0,
31, 0, 83, 0, 0, 48, 0, 0, 0, 16, 0, 0, 2, 0, 0, 0, 14, 0, 0, 0, 0,
17, 0, 0, 0, 17, 0, 0, 54, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 20,
0, 0, 58, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 42, 0, 0, 84, 0, 0,
0, 59, 0, 69, 0, 0, 35, 0, 0, 0, 3, 0, 15, 13, 0, 0, 12, 3, 0, 35,
33, 0, 30, 0, 0, 0, 17, 0, 0, 51, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0,
0, 17, 0, 0, 53, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 14, 0, 32, 0, 0, 66,
0, 0, 0, 19, 0, 0, 0, 0, 18, 0, 0, 0, 46, 0, 37, 56, 0, 0, 0, 49, 0,
135, 0, 0, 59
]
b_list = [
-16, 3, 2, -23, -4, 11, 31, 5, 15, 11, 36, 27, 10, 22, 30, 28, -37,
3, 6, -22, 18, -2, 25, 9, 7, 47, 17, 10, 54, 0, 70, 28, -12, -13, 6,
-9, -8, -7, 42, 5, -19, 13, 4, -27, -4, -12, 41, 49, -15, -13, 2, 2,
-7, 8, 24, 39, -13, -41, 8, 16, -28, -39, -16, 28, 55, 39, 3, -4,
-52, -60, 41, -115, -27, 125, 3, 1, 6, 43, -20, 1, 49, 33, -3, 10,
-27, -16, -1, -71, -18, -16, 0, 16, 0, 20, -17, -12, 1, 12, 7, 27,
-22, 15, 29, -27, -5, 33, -1, 17, 50, 6, 5, 20, -29, -24, -26, 14,
-52, 23, 13, 3, -38, -43, 13, -6, -1, 11, -53, 13, -42, -87, -2, 12,
17, -17, -22, -71, -7, 30, -18, -20, -117, 3, -59, 2, -31, -22, -1,
-10, -20, -45, -13, -93, -49, -65, -21, -52, -78, -48, -10, -19, 18,
-9, 4, 11, 4, 45, -31, -72, -27, -127, -21, -32, -12, -45, 19, -10,
-7, -39, -18, 17, 14, 25, -28, 7, -31, -47, 10, 3, -36, -9, -46, -6,
25, -89, 20, -11, 25, -15, -39, 4, -17, 23, -37, -59, -18, -3, -60,
-30, -21, -49, 10, -7, 33, -20, -29, -33, 11, -2, -14, -27, -74,
-10, -14, -40, 70, -9, -6, 14, 10, 1, -18, -84, 4, 33, -26, -31, -20,
-49, -7, -17, -6, 14, -11, -17, 31, 1, -25, -22, 5, 3, 26, -1, -3,
-27, 3, -24
]
def process():
yield from self.check_calculation(a_list, b_list)
self.run_sim(process, False)
|
en
| 0.780568
|
#!/bin/env python # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for macc.py Tests MaccBlock # Filler - before sim start # Data starts here # Multiply two sets of numbers (1 byte only) # -2 # +15 # parameters should not affect output # Four sets of four numbers - result calculated by hand # filter values negative # ((6, 26, 0, 0), 13), # parameters should not affect output # ((2, 17, 0, 0), 13), # ((4, 21, 0, 0), 6778), # Filler - wait for results to percolate through system # Set inputs # check inputs correctly passed onward # Check output is as expected Checks a given calculation: Args: a_list: first operands. Size is a multiple of 4. b_list: second operands. List of integers the same size as a_list. # Send in all inputs # wait for output to be available Checks a randomly generated calculation. Args: size - number of arguments must be divisble by 4 seed - used to seed the generator. # Real values from a problematic calculation in gen2.
| 2.440178
| 2
|
youtube_dl/extractor/springboardplatform.py
|
hackarada/youtube-dl
| 66,635
|
6626979
|
<reponame>hackarada/youtube-dl
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
xpath_attr,
xpath_text,
xpath_element,
unescapeHTML,
unified_timestamp,
)
class SpringboardPlatformIE(InfoExtractor):
    """Extractor for videos hosted on cms.springboardplatform.com.

    Metadata comes from the site's RSS3 XML feed; an HLS rendition is
    synthesized from the progressive download URL.
    """
    _VALID_URL = r'''(?x)
                    https?://
                        cms\.springboardplatform\.com/
                        (?:
                            (?:previews|embed_iframe)/(?P<index>\d+)/video/(?P<id>\d+)|
                            xml_feeds_advanced/index/(?P<index_2>\d+)/rss3/(?P<id_2>\d+)
                        )
                    '''
    _TESTS = [{
        'url': 'http://cms.springboardplatform.com/previews/159/video/981017/0/0/1',
        'md5': '5c3cb7b5c55740d482561099e920f192',
        'info_dict': {
            'id': '981017',
            'ext': 'mp4',
            'title': 'Redman "BUD like YOU" "Usher Good Kisser" REMIX',
            'description': 'Redman "BUD like YOU" "Usher Good Kisser" REMIX',
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1409132328,
            'upload_date': '20140827',
            'duration': 193,
        },
    }, {
        'url': 'http://cms.springboardplatform.com/embed_iframe/159/video/981017/rab007/rapbasement.com/1/1',
        'only_matching': True,
    }, {
        'url': 'http://cms.springboardplatform.com/embed_iframe/20/video/1731611/ki055/kidzworld.com/10',
        'only_matching': True,
    }, {
        'url': 'http://cms.springboardplatform.com/xml_feeds_advanced/index/159/rss3/981017/0/0/1/',
        'only_matching': True,
    }]
    @staticmethod
    def _extract_urls(webpage):
        """Return all Springboard embed-iframe URLs found in *webpage*."""
        return [
            mobj.group('url')
            for mobj in re.finditer(
                r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//cms\.springboardplatform\.com/embed_iframe/\d+/video/\d+.*?)\1',
                webpage)]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # Either URL form carries (index, id); pick whichever group matched.
        video_id = mobj.group('id') or mobj.group('id_2')
        index = mobj.group('index') or mobj.group('index_2')
        # The RSS3 feed holds the metadata for this single video.
        video = self._download_xml(
            'http://cms.springboardplatform.com/xml_feeds_advanced/index/%s/rss3/%s'
            % (index, video_id), video_id)
        item = xpath_element(video, './/item', 'item', fatal=True)
        content = xpath_element(
            item, './{http://search.yahoo.com/mrss/}content', 'content',
            fatal=True)
        title = unescapeHTML(xpath_text(item, './title', 'title', fatal=True))
        video_url = content.attrib['url']
        # The CDN serves a placeholder clip when the video was removed.
        if 'error_video.mp4' in video_url:
            raise ExtractorError(
                'Video %s no longer exists' % video_id, expected=True)
        duration = int_or_none(content.get('duration'))
        tbr = int_or_none(content.get('bitrate'))
        filesize = int_or_none(content.get('fileSize'))
        width = int_or_none(content.get('width'))
        height = int_or_none(content.get('height'))
        description = unescapeHTML(xpath_text(
            item, './description', 'description'))
        thumbnail = xpath_attr(
            item, './{http://search.yahoo.com/mrss/}thumbnail', 'url',
            'thumbnail')
        timestamp = unified_timestamp(xpath_text(
            item, './{http://cms.springboardplatform.com/namespaces.html}created',
            'timestamp'))
        formats = [{
            'url': video_url,
            'format_id': 'http',
            'tbr': tbr,
            'filesize': filesize,
            'width': width,
            'height': height,
        }]
        # Derive an HLS variant: the same asset is served from the
        # hls. host with an .m3u8 playlist suffix instead of cdn.
        m3u8_format = formats[0].copy()
        m3u8_format.update({
            'url': re.sub(r'(https?://)cdn\.', r'\1hls.', video_url) + '.m3u8',
            'ext': 'mp4',
            'format_id': 'hls',
            'protocol': 'm3u8_native',
        })
        formats.append(m3u8_format)
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
            'formats': formats,
        }
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
xpath_attr,
xpath_text,
xpath_element,
unescapeHTML,
unified_timestamp,
)
class SpringboardPlatformIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
cms\.springboardplatform\.com/
(?:
(?:previews|embed_iframe)/(?P<index>\d+)/video/(?P<id>\d+)|
xml_feeds_advanced/index/(?P<index_2>\d+)/rss3/(?P<id_2>\d+)
)
'''
_TESTS = [{
'url': 'http://cms.springboardplatform.com/previews/159/video/981017/0/0/1',
'md5': '5c3cb7b5c55740d482561099e920f192',
'info_dict': {
'id': '981017',
'ext': 'mp4',
'title': 'Redman "BUD like YOU" "Usher Good Kisser" REMIX',
'description': 'Redman "BUD like YOU" "Usher Good Kisser" REMIX',
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1409132328,
'upload_date': '20140827',
'duration': 193,
},
}, {
'url': 'http://cms.springboardplatform.com/embed_iframe/159/video/981017/rab007/rapbasement.com/1/1',
'only_matching': True,
}, {
'url': 'http://cms.springboardplatform.com/embed_iframe/20/video/1731611/ki055/kidzworld.com/10',
'only_matching': True,
}, {
'url': 'http://cms.springboardplatform.com/xml_feeds_advanced/index/159/rss3/981017/0/0/1/',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return [
mobj.group('url')
for mobj in re.finditer(
r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//cms\.springboardplatform\.com/embed_iframe/\d+/video/\d+.*?)\1',
webpage)]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id') or mobj.group('id_2')
index = mobj.group('index') or mobj.group('index_2')
video = self._download_xml(
'http://cms.springboardplatform.com/xml_feeds_advanced/index/%s/rss3/%s'
% (index, video_id), video_id)
item = xpath_element(video, './/item', 'item', fatal=True)
content = xpath_element(
item, './{http://search.yahoo.com/mrss/}content', 'content',
fatal=True)
title = unescapeHTML(xpath_text(item, './title', 'title', fatal=True))
video_url = content.attrib['url']
if 'error_video.mp4' in video_url:
raise ExtractorError(
'Video %s no longer exists' % video_id, expected=True)
duration = int_or_none(content.get('duration'))
tbr = int_or_none(content.get('bitrate'))
filesize = int_or_none(content.get('fileSize'))
width = int_or_none(content.get('width'))
height = int_or_none(content.get('height'))
description = unescapeHTML(xpath_text(
item, './description', 'description'))
thumbnail = xpath_attr(
item, './{http://search.yahoo.com/mrss/}thumbnail', 'url',
'thumbnail')
timestamp = unified_timestamp(xpath_text(
item, './{http://cms.springboardplatform.com/namespaces.html}created',
'timestamp'))
formats = [{
'url': video_url,
'format_id': 'http',
'tbr': tbr,
'filesize': filesize,
'width': width,
'height': height,
}]
m3u8_format = formats[0].copy()
m3u8_format.update({
'url': re.sub(r'(https?://)cdn\.', r'\1hls.', video_url) + '.m3u8',
'ext': 'mp4',
'format_id': 'hls',
'protocol': 'm3u8_native',
})
formats.append(m3u8_format)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'formats': formats,
}
|
en
| 0.39914
|
# coding: utf-8 (?x) https?:// cms\.springboardplatform\.com/ (?: (?:previews|embed_iframe)/(?P<index>\d+)/video/(?P<id>\d+)| xml_feeds_advanced/index/(?P<index_2>\d+)/rss3/(?P<id_2>\d+) )
| 2.017697
| 2
|
tests/macro/scripts/lapack/runtest_dgbtrf.py
|
dina-fouad/pyccel
| 206
|
6626980
|
# pylint: disable=missing-function-docstring, missing-module-docstring, undefined-variable, unused-import
# Test driver for the LAPACK dgbtrf (banded LU factorization) macro.
from numpy import zeros
from pyccel.stdlib.internal.lapack import dgbtrf
if __name__ == '__main__':
    # n unknowns, ml sub-diagonals, mu super-diagonals.
    n = 25
    ml = 1
    mu = 1
    # LAPACK band storage for LU with pivoting needs 2*ml + mu + 1 rows
    # (the extra ml rows hold fill-in created by row interchanges).
    lda = 2 * ml + mu + 1
    a = zeros((lda,n), dtype = 'double',order = 'F')
    # Superdiagonal, Diagonal, Subdiagonal
    # A tridiagonal [-1, 2, -1] stencil laid out in band-storage rows.
    m = ml + mu
    a[m-1,1:n] = -1.0
    a[ m,0:n] = 2.0
    a[m+1,0:n-1] = -1.0
    info = -1
    ipiv = zeros(n, 'int')
    #$ header macro (ab(:,:), ipiv(ab.shape[0]), info), dgbtrf_v1(ab, kl, ku, m=ab.shape[1], n=ab.shape[1], ldab=ab.shape[0]) := dgbtrf(m, n, kl, ku, ab, ldab, ipiv, info)
    # dgbtrf_v1 is expanded into the full dgbtrf call by the pyccel
    # macro directive above; it is not a plain Python function.
    a, ipiv, info = dgbtrf_v1(a, ml, ku=mu)
|
# pylint: disable=missing-function-docstring, missing-module-docstring, undefined-variable, unused-import
from numpy import zeros
from pyccel.stdlib.internal.lapack import dgbtrf
if __name__ == '__main__':
n = 25
ml = 1
mu = 1
lda = 2 * ml + mu + 1
a = zeros((lda,n), dtype = 'double',order = 'F')
# Superdiagonal, Diagonal, Subdiagonal
m = ml + mu
a[m-1,1:n] = -1.0
a[ m,0:n] = 2.0
a[m+1,0:n-1] = -1.0
info = -1
ipiv = zeros(n, 'int')
#$ header macro (ab(:,:), ipiv(ab.shape[0]), info), dgbtrf_v1(ab, kl, ku, m=ab.shape[1], n=ab.shape[1], ldab=ab.shape[0]) := dgbtrf(m, n, kl, ku, ab, ldab, ipiv, info)
a, ipiv, info = dgbtrf_v1(a, ml, ku=mu)
|
en
| 0.415443
|
# pylint: disable=missing-function-docstring, missing-module-docstring, undefined-variable, unused-import # Superdiagonal, Diagonal, Subdiagonal #$ header macro (ab(:,:), ipiv(ab.shape[0]), info), dgbtrf_v1(ab, kl, ku, m=ab.shape[1], n=ab.shape[1], ldab=ab.shape[0]) := dgbtrf(m, n, kl, ku, ab, ldab, ipiv, info)
| 2.120912
| 2
|
pioneer/core/traj_predict.py
|
TJUMMG/TGSR
| 0
|
6626981
|
<reponame>TJUMMG/TGSR
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import DataLoader, Dataset
import os
import warnings
from pioneer.core.STLSTM import STLSTM
class Predict_FC(nn.Module):
    """Fully-connected trajectory predictor.

    Two independent MLP heads map the past cx / cy coordinate sequences to
    the next (cx, cy) center position.

    NOTE(review): each head is Linear(6, ...) applied along the sequence
    axis, so this module assumes Seq_len == 6 — confirm with callers.
    """
    def __init__(self):
        super(Predict_FC, self).__init__()
        # Head predicting the next x-coordinate from the past cx values.
        self.pred_cx = torch.nn.Sequential(
            nn.Linear(6, 10),
            nn.ReLU(),
            nn.Linear(10, 10),
            nn.ReLU(),
            nn.Linear(10, 1)
        )
        # Head predicting the next y-coordinate from the past cy values.
        self.pred_cy = torch.nn.Sequential(
            nn.Linear(6, 10),
            nn.ReLU(),
            nn.Linear(10, 10),
            nn.ReLU(),
            nn.Linear(10, 1)
        )
    def forward(self, center_post):
        '''
        :param center_post: [BatchSize, Seq_len, 6] (cx,cy,vx,vy,gtw,gth)
        :return: [cx,cy]
        '''
        # Only the center coordinates are used; velocity and box size
        # channels of center_post are ignored by this variant.
        cx_concat = center_post[:, :, 0]
        cy_concat = center_post[:, :, 1]
        cx_pred = self.pred_cx(cx_concat)
        cy_pred = self.pred_cy(cy_concat)
        return cx_pred,cy_pred
class Predict_GRU(nn.Module):
    """GRU-based trajectory predictor.

    Two independent 2-layer GRUs encode per-axis feature sequences
    (coordinate, velocity, box width, box height); a linear layer maps the
    final hidden state of the last layer to the next center coordinate.
    """
    def __init__(self):
        super(Predict_GRU, self).__init__()
        self.pred_cx = torch.nn.GRU(4, 10, 2, batch_first=True)
        self.cx_linear = torch.nn.Linear(10, 1)
        self.pred_cy = torch.nn.GRU(4, 10, 2, batch_first=True)
        self.cy_linear = torch.nn.Linear(10, 1)
    def forward(self, center_post):
        '''
        :param center_post: [BatchSize, Seq_len, 6] (cx,cy,vx,vy,gtw,gth)
        :return: [cx,cy]
        '''
        cx_concat = center_post[:,:, 0]
        cy_concat = center_post[:,:, 1]
        vx_concat = center_post[:,:, 2]
        vy_concat = center_post[:,:, 3]
        w_concat = center_post[:, :, 4]
        h_concat = center_post[:, :, 5]
        # Per-axis input: [B, Seq_len, 4] = (coord, velocity, width, height).
        input_x = torch.stack((cx_concat, vx_concat, w_concat, h_concat), dim=-1)
        input_y = torch.stack((cy_concat, vy_concat, w_concat, h_concat), dim=-1)
        _, hn_x = self.pred_cx(torch.tensor(input_x))
        _, hn_y = self.pred_cy(torch.tensor(input_y))
        # hn[-1] is the final hidden state of the top GRU layer.
        cx_pred = self.cx_linear(hn_x[-1])
        cy_pred = self.cy_linear(hn_y[-1])
        return cx_pred,cy_pred
class Predict_LSTM(nn.Module):
    """LSTM-based trajectory predictor.

    Mirrors Predict_GRU but with 2-layer LSTMs: per-axis feature sequences
    (coordinate, velocity, box width, box height) are encoded and the final
    hidden state of the last layer is mapped to the next center coordinate.
    """
    def __init__(self):
        super(Predict_LSTM, self).__init__()
        self.pred_cx = torch.nn.LSTM(4, 10, 2, batch_first=True)
        self.cx_linear = torch.nn.Linear(10, 1)
        self.pred_cy = torch.nn.LSTM(4, 10, 2, batch_first=True)
        self.cy_linear = torch.nn.Linear(10, 1)
    def forward(self, center_post):
        '''
        :param center_post: [BatchSize, Seq_len, 6] (cx,cy,vx,vy,gtw,gth)
        :return: [cx,cy]
        '''
        cx_concat = center_post[:,:, 0]
        cy_concat = center_post[:,:, 1]
        vx_concat = center_post[:,:, 2]
        vy_concat = center_post[:,:, 3]
        w_concat = center_post[:, :, 4]
        h_concat = center_post[:, :, 5]
        # Per-axis input: [B, Seq_len, 4] = (coord, velocity, width, height).
        input_x = torch.stack((cx_concat, vx_concat, w_concat, h_concat), dim=-1)
        input_y = torch.stack((cy_concat, vy_concat, w_concat, h_concat), dim=-1)
        # LSTM returns (output, (h_n, c_n)); the cell state is unused.
        _, (hn_x,cn_x) = self.pred_cx(torch.tensor(input_x))
        _, (hn_y,cn_y) = self.pred_cy(torch.tensor(input_y))
        cx_pred = self.cx_linear(hn_x[-1])
        cy_pred = self.cy_linear(hn_y[-1])
        return cx_pred,cy_pred
class Predict_STLSTM(nn.Module):
    """Spatio-temporal LSTM (STLSTM) trajectory predictor.

    Same per-axis input layout as the GRU/LSTM variants, but encoded with
    the project's STLSTM cell. The batch size must be fixed at construction
    time because STLSTM takes it as a constructor argument.
    """
    def __init__(self, batchsize=12):
        super(Predict_STLSTM, self).__init__()
        self.pred_cx = STLSTM(4, 10, 2, batchsize)
        self.cx_linear = torch.nn.Linear(10, 1)
        self.pred_cy = STLSTM(4, 10, 2, batchsize)
        self.cy_linear = torch.nn.Linear(10, 1)
    def forward(self, center_post):
        '''
        :param center_post: [BatchSize, Seq_len, 6] (cx,cy,vx,vy,gtw,gth)
        :return: [cx,cy]
        '''
        cx_concat = center_post[:,:, 0]
        cy_concat = center_post[:,:, 1]
        vx_concat = center_post[:,:, 2]
        vy_concat = center_post[:,:, 3]
        w_concat = center_post[:, :, 4]
        h_concat = center_post[:, :, 5]
        # Per-axis input: [B, Seq_len, 4] = (coord, velocity, width, height).
        input_x = torch.stack((cx_concat, vx_concat, w_concat, h_concat), dim=-1)
        input_y = torch.stack((cy_concat, vy_concat, w_concat, h_concat), dim=-1)
        # STLSTM returns (h, c, m); the hidden state is used directly
        # (no [-1] layer indexing, unlike the GRU/LSTM variants).
        hn_x, cn_x, mn_x = self.pred_cx(torch.tensor(input_x))
        hn_y, cn_y, mn_y = self.pred_cy(torch.tensor(input_y))
        cx_pred = self.cx_linear(hn_x)
        cy_pred = self.cy_linear(hn_y)
        return cx_pred, cy_pred
class Data(Dataset):
    """Sliding-window dataset over per-frame tracking annotations.

    Each sample is (data, label): `data` is a [seq_len, 6] float32 array of
    (cx, cy, vx, vy, gtw, gth) per frame and `label` is the ground-truth
    center (cx, cy) of the frame that follows the window.
    """
    def __init__(self, data_path, seq_len):
        # Load the annotation file (one frame per line).
        with open(data_path, 'r') as file:
            data = file.readlines()
        self.data_name = []
        self.data_cx = []
        self.data_cy = []
        self.data_gtw = []
        self.data_gth = []
        for data_i in data:
            # name, cx, cy = data_i.split()
            # Each line: sequence name, ground-truth box (center/size),
            # tracker box (center/size); only the ground-truth box is kept.
            name, gtcx, gtcy, gtw, gth, trcx, trcy, trw, trh = data_i.split(',')
            self.data_name.append(name)
            self.data_cx.append(gtcx)
            self.data_cy.append(gtcy)
            self.data_gtw.append(gtw)
            self.data_gth.append(gth)
        self.data_lens = len(self.data_cx)
        # Length of the history window fed to the predictor.
        self.seq_len = seq_len
    def __getitem__(self, index):
        while True:
            # Clamp the index so the full window (+label frame) fits.
            if index > (self.data_lens - (self.seq_len + 2)):
                index = self.data_lens - (self.seq_len + 2)
            # Keep the whole window inside a single video sequence.
            if self.data_name[index] != self.data_name[index + self.seq_len + 1]:
                index = index - (self.seq_len + 1)
            # Check whether the motion inside the window is too large.
            cx = np.array([float(i) for i in self.data_cx[index:index + self.seq_len + 2]])
            cy = np.array([float(i) for i in self.data_cy[index:index + self.seq_len + 2]])
            gtw = np.array([float(i) for i in self.data_gtw[index:index + self.seq_len + 2]])
            gth = np.array([float(i) for i in self.data_gth[index:index + self.seq_len + 2]])
            cx_v = np.zeros([self.seq_len])
            cy_v = np.zeros([self.seq_len])
            # Frame-to-frame velocities (last entry stays 0 by construction).
            for i in range(len(cx_v) - 1):
                cx_v[i] = cx[i + 1] - cx[i]
            for i in range(len(cy_v) - 1):
                cy_v[i] = cy[i + 1] - cy[i]
            if np.mean(cx_v) < 60 and np.mean(cy_v) < 60:
                cx_ = cx[1:self.seq_len + 1]
                cy_ = cy[1:self.seq_len + 1]
                gtw = gtw[1:self.seq_len + 1]
                gth = gth[1:self.seq_len + 1]
                data = np.stack((cx_, cy_, cx_v, cy_v, gtw, gth)).T.astype(np.float32)
                label = np.array([cx[self.seq_len + 1], cy[self.seq_len + 1]]).astype(np.float32)
                break
            else: # motion too large: resample a random index and retry
                index = int(np.random.choice(self.data_lens - (self.seq_len + 2), 1))
        return data, label
    def __len__(self):
        return self.data_lens
if __name__ == '__main__':
    # Train or evaluate the STLSTM center-position predictor on UAV123
    # annotation data. FLAG below selects the mode.
    warnings.filterwarnings("ignore")
    # set the parameter
    # torch.set_num_threads(1)
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    training_data_path = '../data/cxcy_uav123.txt'
    batch_num = 12
    seq_len = 6
    epoch_num = 20
    model_save_path = './model_test'
    # pretrain_model_path = './model_lstm2_wh_uav123/checkpoint_19.pth' # None or path
    pretrain_model_path = None # None or path
    FLAG = 'Eval' # Train or Eval
    # define predict net and load pretrained model
    pred = Predict_STLSTM().cuda()
    if pretrain_model_path != None:
        pred.load_state_dict(torch.load(pretrain_model_path))
    # Dataloader and Loss function
    dataset = Data(data_path=training_data_path, seq_len=seq_len)
    dataloader = DataLoader(dataset=dataset, batch_size=batch_num)
    criterion = torch.nn.MSELoss(reduction='mean')
    if FLAG == 'Train':
        pred.train()
        for epoch in range(epoch_num):
            # running-loss accumulator for monitoring this epoch
            loss_data = torch.zeros([1])
            for idx, (batch_data, batch_label) in enumerate(dataloader):
                cx_pred, cy_pred = pred(batch_data.cuda())
                loss_x = criterion(cx_pred, batch_label[:, 0].cuda())
                loss_y = criterion(cy_pred, batch_label[:, 1].cuda())
                loss = loss_x + loss_y
                # hand-rolled learning-rate schedule that decays with epoch
                # (note: the optimizer is re-created every iteration)
                lrs = np.logspace(-7, -5, epoch_num)
                lr = float(lrs[epoch_num - epoch - 1])
                optim = torch.optim.SGD(params=pred.parameters(), lr=lr)
                optim.zero_grad()
                loss.backward()
                optim.step()
                loss_data += loss
            print(epoch, loss_data / (dataset.data_lens / batch_num))
            # save model
            if not os.path.exists(model_save_path):
                os.makedirs(model_save_path)
            torch.save(pred.state_dict(),
                       os.path.join(model_save_path, 'checkpoint_{}.pth'.format(epoch+1)))
    else:
        # evaluation: accumulate the MSE loss over the whole dataset
        pred.eval()
        loss_ = torch.zeros([1])
        for idx, (batch_data, batch_label) in enumerate(dataloader):
            cx_pred, cy_pred = pred(batch_data.cuda())
            loss_x = criterion(cx_pred, batch_label[:, 0].cuda())
            loss_y = criterion(cy_pred, batch_label[:, 1].cuda())
            loss = loss_x + loss_y
            loss_ += loss
        print(loss_ / (dataset.data_lens / batch_num))
|
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import DataLoader, Dataset
import os
import warnings
from pioneer.core.STLSTM import STLSTM
class Predict_FC(nn.Module):
def __init__(self):
super(Predict_FC, self).__init__()
self.pred_cx = torch.nn.Sequential(
nn.Linear(6, 10),
nn.ReLU(),
nn.Linear(10, 10),
nn.ReLU(),
nn.Linear(10, 1)
)
self.pred_cy = torch.nn.Sequential(
nn.Linear(6, 10),
nn.ReLU(),
nn.Linear(10, 10),
nn.ReLU(),
nn.Linear(10, 1)
)
def forward(self, center_post):
'''
:param center_post: [BatchSize, Seq_len, 6] (cx,cy,vx,vy,gtw,gth)
:return: [cx,cy]
'''
cx_concat = center_post[:, :, 0]
cy_concat = center_post[:, :, 1]
cx_pred = self.pred_cx(cx_concat)
cy_pred = self.pred_cy(cy_concat)
return cx_pred,cy_pred
class Predict_GRU(nn.Module):
def __init__(self):
super(Predict_GRU, self).__init__()
self.pred_cx = torch.nn.GRU(4, 10, 2, batch_first=True)
self.cx_linear = torch.nn.Linear(10, 1)
self.pred_cy = torch.nn.GRU(4, 10, 2, batch_first=True)
self.cy_linear = torch.nn.Linear(10, 1)
def forward(self, center_post):
'''
:param center_post: [BatchSize, Seq_len, 6] (cx,cy,vx,vy,gtw,gth)
:return: [cx,cy]
'''
cx_concat = center_post[:,:, 0]
cy_concat = center_post[:,:, 1]
vx_concat = center_post[:,:, 2]
vy_concat = center_post[:,:, 3]
w_concat = center_post[:, :, 4]
h_concat = center_post[:, :, 5]
input_x = torch.stack((cx_concat, vx_concat, w_concat, h_concat), dim=-1)
input_y = torch.stack((cy_concat, vy_concat, w_concat, h_concat), dim=-1)
_, hn_x = self.pred_cx(torch.tensor(input_x))
_, hn_y = self.pred_cy(torch.tensor(input_y))
cx_pred = self.cx_linear(hn_x[-1])
cy_pred = self.cy_linear(hn_y[-1])
return cx_pred,cy_pred
class Predict_LSTM(nn.Module):
def __init__(self):
super(Predict_LSTM, self).__init__()
self.pred_cx = torch.nn.LSTM(4, 10, 2, batch_first=True)
self.cx_linear = torch.nn.Linear(10, 1)
self.pred_cy = torch.nn.LSTM(4, 10, 2, batch_first=True)
self.cy_linear = torch.nn.Linear(10, 1)
def forward(self, center_post):
'''
:param center_post: [BatchSize, Seq_len, 6] (cx,cy,vx,vy,gtw,gth)
:return: [cx,cy]
'''
cx_concat = center_post[:,:, 0]
cy_concat = center_post[:,:, 1]
vx_concat = center_post[:,:, 2]
vy_concat = center_post[:,:, 3]
w_concat = center_post[:, :, 4]
h_concat = center_post[:, :, 5]
input_x = torch.stack((cx_concat, vx_concat, w_concat, h_concat), dim=-1)
input_y = torch.stack((cy_concat, vy_concat, w_concat, h_concat), dim=-1)
_, (hn_x,cn_x) = self.pred_cx(torch.tensor(input_x))
_, (hn_y,cn_y) = self.pred_cy(torch.tensor(input_y))
cx_pred = self.cx_linear(hn_x[-1])
cy_pred = self.cy_linear(hn_y[-1])
return cx_pred,cy_pred
class Predict_STLSTM(nn.Module):
def __init__(self, batchsize=12):
super(Predict_STLSTM, self).__init__()
self.pred_cx = STLSTM(4, 10, 2, batchsize)
self.cx_linear = torch.nn.Linear(10, 1)
self.pred_cy = STLSTM(4, 10, 2, batchsize)
self.cy_linear = torch.nn.Linear(10, 1)
def forward(self, center_post):
'''
:param center_post: [BatchSize, Seq_len, 6] (cx,cy,vx,vy,gtw,gth)
:return: [cx,cy]
'''
cx_concat = center_post[:,:, 0]
cy_concat = center_post[:,:, 1]
vx_concat = center_post[:,:, 2]
vy_concat = center_post[:,:, 3]
w_concat = center_post[:, :, 4]
h_concat = center_post[:, :, 5]
input_x = torch.stack((cx_concat, vx_concat, w_concat, h_concat), dim=-1)
input_y = torch.stack((cy_concat, vy_concat, w_concat, h_concat), dim=-1)
hn_x, cn_x, mn_x = self.pred_cx(torch.tensor(input_x))
hn_y, cn_y, mn_y = self.pred_cy(torch.tensor(input_y))
cx_pred = self.cx_linear(hn_x)
cy_pred = self.cy_linear(hn_y)
return cx_pred, cy_pred
class Data(Dataset):
def __init__(self, data_path, seq_len):
# 载入数据
with open(data_path, 'r') as file:
data = file.readlines()
self.data_name = []
self.data_cx = []
self.data_cy = []
self.data_gtw = []
self.data_gth = []
for data_i in data:
# name, cx, cy = data_i.split()
name, gtcx, gtcy, gtw, gth, trcx, trcy, trw, trh = data_i.split(',')
self.data_name.append(name)
self.data_cx.append(gtcx)
self.data_cy.append(gtcy)
self.data_gtw.append(gtw)
self.data_gth.append(gth)
self.data_lens = len(self.data_cx)
# 定义序列长度
self.seq_len = seq_len
def __getitem__(self, index):
while True:
# 对index做一些限制
if index > (self.data_lens - (self.seq_len + 2)):
index = self.data_lens - (self.seq_len + 2)
if self.data_name[index] != self.data_name[index + self.seq_len + 1]:
index = index - (self.seq_len + 1)
# 判断是否运动幅度过大
cx = np.array([float(i) for i in self.data_cx[index:index + self.seq_len + 2]])
cy = np.array([float(i) for i in self.data_cy[index:index + self.seq_len + 2]])
gtw = np.array([float(i) for i in self.data_gtw[index:index + self.seq_len + 2]])
gth = np.array([float(i) for i in self.data_gth[index:index + self.seq_len + 2]])
cx_v = np.zeros([self.seq_len])
cy_v = np.zeros([self.seq_len])
for i in range(len(cx_v) - 1):
cx_v[i] = cx[i + 1] - cx[i]
for i in range(len(cy_v) - 1):
cy_v[i] = cy[i + 1] - cy[i]
if np.mean(cx_v) < 60 and np.mean(cy_v) < 60:
cx_ = cx[1:self.seq_len + 1]
cy_ = cy[1:self.seq_len + 1]
gtw = gtw[1:self.seq_len + 1]
gth = gth[1:self.seq_len + 1]
data = np.stack((cx_, cy_, cx_v, cy_v, gtw, gth)).T.astype(np.float32)
label = np.array([cx[self.seq_len + 1], cy[self.seq_len + 1]]).astype(np.float32)
break
else: # 重新选
index = int(np.random.choice(self.data_lens - (self.seq_len + 2), 1))
return data, label
def __len__(self):
return self.data_lens
if __name__ == '__main__':
warnings.filterwarnings("ignore")
# set the parameter
# torch.set_num_threads(1)
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
training_data_path = '../data/cxcy_uav123.txt'
batch_num = 12
seq_len = 6
epoch_num = 20
model_save_path = './model_test'
# pretrain_model_path = './model_lstm2_wh_uav123/checkpoint_19.pth' # None or path
pretrain_model_path = None # None or path
FLAG = 'Eval' # Train or Eval
# define predict net and load pretrained model
pred = Predict_STLSTM().cuda()
if pretrain_model_path != None:
pred.load_state_dict(torch.load(pretrain_model_path))
# Dataloader and Loss function
dataset = Data(data_path=training_data_path, seq_len=seq_len)
dataloader = DataLoader(dataset=dataset, batch_size=batch_num)
criterion = torch.nn.MSELoss(reduction='mean')
if FLAG == 'Train':
pred.train()
for epoch in range(epoch_num):
# 用于查看loss
loss_data = torch.zeros([1])
for idx, (batch_data, batch_label) in enumerate(dataloader):
cx_pred, cy_pred = pred(batch_data.cuda())
loss_x = criterion(cx_pred, batch_label[:, 0].cuda())
loss_y = criterion(cy_pred, batch_label[:, 1].cuda())
loss = loss_x + loss_y
# 手动定义一个随epoch衰减的lr
lrs = np.logspace(-7, -5, epoch_num)
lr = float(lrs[epoch_num - epoch - 1])
optim = torch.optim.SGD(params=pred.parameters(), lr=lr)
optim.zero_grad()
loss.backward()
optim.step()
loss_data += loss
print(epoch, loss_data / (dataset.data_lens / batch_num))
# save model
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
torch.save(pred.state_dict(),
os.path.join(model_save_path, 'checkpoint_{}.pth'.format(epoch+1)))
else:
# 测试
pred.eval()
loss_ = torch.zeros([1])
for idx, (batch_data, batch_label) in enumerate(dataloader):
cx_pred, cy_pred = pred(batch_data.cuda())
loss_x = criterion(cx_pred, batch_label[:, 0].cuda())
loss_y = criterion(cy_pred, batch_label[:, 1].cuda())
loss = loss_x + loss_y
loss_ += loss
print(loss_ / (dataset.data_lens / batch_num))
|
en
| 0.353251
|
:param center_post: [BatchSize, Seq_len, 6] (cx,cy,vx,vy,gtw,gth) :return: [cx,cy] :param center_post: [BatchSize, Seq_len, 6] (cx,cy,vx,vy,gtw,gth) :return: [cx,cy] :param center_post: [BatchSize, Seq_len, 6] (cx,cy,vx,vy,gtw,gth) :return: [cx,cy] :param center_post: [BatchSize, Seq_len, 6] (cx,cy,vx,vy,gtw,gth) :return: [cx,cy] # 载入数据 # name, cx, cy = data_i.split() # 定义序列长度 # 对index做一些限制 # 判断是否运动幅度过大 # 重新选 # set the parameter # torch.set_num_threads(1) # pretrain_model_path = './model_lstm2_wh_uav123/checkpoint_19.pth' # None or path # None or path # Train or Eval # define predict net and load pretrained model # Dataloader and Loss function # 用于查看loss # 手动定义一个随epoch衰减的lr # save model # 测试
| 2.476996
| 2
|
server.py
|
MasahiroYoshida/network-app-layer
| 0
|
6626982
|
<gh_stars>0
#!/usr/bin/python
import socket, optparse
import threading
from os import listdir, fork
from message import Message
import time
q = ['first item']
def handle_controller_connection(controller_socket):
    # Serve a single controller request: reply with the list of files
    # available in the local ./database directory, then close the socket.
    request = controller_socket.recv(1024)
    print('Controller: {}\n'.format(request))
    contents = [file for file in listdir('./database')]
    message = Message(payload=contents)
    controller_socket.send(message.export())
    controller_socket.close()
def handle_controller(StoC_socket):
    # Accept controller connections forever, handing each one off to its
    # own thread so multiple controllers can be served concurrently.
    while True:
        controller_sock, address = StoC_socket.accept()
        print 'Accepted connection from {}:{}'.format(address[0], address[1])
        client_handler = threading.Thread(
            target=handle_controller_connection,
            args=(controller_sock,)
        )
        client_handler.start()
def handle_renderer_connection(renderer_socket):
    # Handle one renderer command message.
    # Command codes (see Message): 2 = PLAY, 3 = STOP, 4 = RESUME.
    request = renderer_socket.recv(1024)
    print('Renderer: {}\n'.format(request))
    message_rec = Message()
    message_rec.decode(request)
    filename = message_rec.filename
    file_path = './database/' + str(filename)
    message_send = Message()
    if message_rec.command == 2: #PLAY
        filename = message_rec.filename
        file_path = './database/' + str(filename)
        message_send = Message()
        try:
            with open(file_path, 'rb') as f:
                contents = f.read(1024)
                while(contents):
                    print('reading another 1024')
                    # The module-level queue q doubles as a control channel:
                    # a 'stop' item posted by a STOP command aborts playback.
                    item = q.pop()
                    if item == 'stop':
                        break
                    message_send.payload = contents
                    renderer_socket.send(message_send.export())
                    contents = f.read(1024)
                    q.append('dummy')
                    # Throttle streaming to one 1024-byte chunk per second.
                    time.sleep(1)
                f.close()
        except:
            # NOTE(review): bare except hides real I/O errors; kept to
            # preserve the existing "file missing" best-effort behaviour.
            message_send.payload('File does not exist')
            renderer_socket.send(message_send.export())
    if message_rec.command == 3: #STOP
        q.append('stop')
        print('stop playing')
    if message_rec.command == 4: #RESUME
        q.append('resume')
        print('resume playing')
    renderer_socket.close()
def handle_renderer(RtoS_socket):
    # Accept renderer connections forever, handing each one off to its
    # own thread so commands can be processed while streaming continues.
    while True:
        renderer_sock, address = RtoS_socket.accept()
        print 'Accepted connection from {}:{}'.format(address[0], address[1])
        client_handler = threading.Thread(
            target=handle_renderer_connection,
            args=(renderer_sock,)
        )
        client_handler.start()
def main():
    # Parse optional server/renderer IPs (defaults match the test topology).
    parser = optparse.OptionParser()
    parser.add_option('--is', dest='ips', default='10.0.0.1')
    parser.add_option('--ir', dest='ipr', default='10.0.0.3')
    (options, args) = parser.parse_args()
    bind_ip_ser = options.ips
    port_CtoS = 50000
    port_RtoS_command = 50002
    # Listening socket for controller -> server requests.
    StoC_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    StoC_socket.bind(("", port_CtoS))
    StoC_socket.listen(5) # max backlog of connections
    # Listening socket for renderer -> server commands.
    RtoS_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    RtoS_socket.bind(("", port_RtoS_command))
    RtoS_socket.listen(5) # max backlog of connections
    # Fork: the child process serves renderers, the parent serves controllers.
    pid = fork()
    if(pid == 0):
        handle_renderer(RtoS_socket)
    elif(pid > 0):
        handle_controller(StoC_socket)
if __name__ == '__main__':
    main()
|
#!/usr/bin/python
import socket, optparse
import threading
from os import listdir, fork
from message import Message
import time
q = ['first item']
def handle_controller_connection(controller_socket):
request = controller_socket.recv(1024)
print('Controller: {}\n'.format(request))
contents = [file for file in listdir('./database')]
message = Message(payload=contents)
controller_socket.send(message.export())
controller_socket.close()
def handle_controller(StoC_socket):
while True:
controller_sock, address = StoC_socket.accept()
print 'Accepted connection from {}:{}'.format(address[0], address[1])
client_handler = threading.Thread(
target=handle_controller_connection,
args=(controller_sock,)
)
client_handler.start()
def handle_renderer_connection(renderer_socket):
request = renderer_socket.recv(1024)
print('Renderer: {}\n'.format(request))
message_rec = Message()
message_rec.decode(request)
filename = message_rec.filename
file_path = './database/' + str(filename)
message_send = Message()
if message_rec.command == 2: #PLAY
filename = message_rec.filename
file_path = './database/' + str(filename)
message_send = Message()
try:
with open(file_path, 'rb') as f:
contents = f.read(1024)
while(contents):
print('reading another 1024')
item = q.pop()
if item == 'stop':
break
message_send.payload = contents
renderer_socket.send(message_send.export())
contents = f.read(1024)
q.append('dummy')
time.sleep(1)
f.close()
except:
message_send.payload('File does not exist')
renderer_socket.send(message_send.export())
if message_rec.command == 3: #STOP
q.append('stop')
print('stop playing')
if message_rec.command == 4: #RESUME
q.append('resume')
print('resume playing')
renderer_socket.close()
def handle_renderer(RtoS_socket):
while True:
renderer_sock, address = RtoS_socket.accept()
print 'Accepted connection from {}:{}'.format(address[0], address[1])
client_handler = threading.Thread(
target=handle_renderer_connection,
args=(renderer_sock,)
)
client_handler.start()
def main():
parser = optparse.OptionParser()
parser.add_option('--is', dest='ips', default='10.0.0.1')
parser.add_option('--ir', dest='ipr', default='10.0.0.3')
(options, args) = parser.parse_args()
bind_ip_ser = options.ips
port_CtoS = 50000
port_RtoS_command = 50002
StoC_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
StoC_socket.bind(("", port_CtoS))
StoC_socket.listen(5) # max backlog of connections
RtoS_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
RtoS_socket.bind(("", port_RtoS_command))
RtoS_socket.listen(5) # max backlog of connections
pid = fork()
if(pid == 0):
handle_renderer(RtoS_socket)
elif(pid > 0):
handle_controller(StoC_socket)
if __name__ == '__main__':
main()
|
en
| 0.702655
|
#!/usr/bin/python #PLAY #STOP #RESUME # max backlog of connections # max backlog of connections
| 2.612851
| 3
|
pythonProject/03al52funncao_def2/funcao+def_3.py
|
D-Wolter/PycharmProjects
| 0
|
6626983
|
def divisao(n1 , n2):
    """Return n1 / n2, or None (implicitly) when n2 is not positive."""
    if n2 > 0:
        return n1 / n2


divide = divisao(8, 2)
# Bug fix: test explicitly for None. A legitimate quotient of 0.0 is falsy,
# so the old `if divide:` wrongly reported it as an invalid division.
if divide is not None:
    print(divide)
else:
    print('conta invalida')
|
def divisao(n1 , n2):
if n2 > 0:
return n1 / n2
divide = divisao(8, 2)
if divide:
print(divide)
else:
print('conta invalida')
|
none
| 1
| 3.984413
| 4
|
|
amrlib/models/parse_gsii/graph_builder.py
|
MeghaTiya/amrlib
| 103
|
6626984
|
import penman
import re
from collections import Counter, defaultdict
from types import SimpleNamespace
import numpy as np
from .amr_graph import _is_attr_form, need_an_instance
# Note that penman triples typically have a colon in front of the relationship but
# it appears to add these automatically when creating the graph.
class GraphBuilder(object):
    """Converts predicted concepts and relations into a penman-encoded AMR string."""
    def __init__(self, rel_vocab):
        self.enumerator = Counter()
        self.rel_vocab = rel_vocab
        self.concepts = [] # List of node names (concepts and attributes)
        self.relations = [] # list of (target_id, source_id, arc_prob, rel_prob:list(vocab))
        self.names = [] # names for concepts (1:1) ie.. n1, n2, p1, Ohio@attr4@ , ..
        self.arc_thresh = 0.50 # Threshold of arc probability to add an edge **1
        # **1: Experimentally 0.5 is about optimal, though increasing to 0.9 doesn't decrease the score
        # and decreasing to 0.1 only drops the smatch score by 0.014 smatch
    # Convert a list of concepts and relations into a penman graph
    # concept is a list of concepts
    # relation is a list of (target_id, source_id, arc_prob, rel_prob:list(vocab))
    def build(self, concepts, relations):
        """Return the penman-formatted AMR string for the given predictions."""
        self.concepts = concepts
        self.relations = relations
        self.used_arcs = defaultdict(set) # keep track of edge names already seen (key is source_id)
        triples = self.build_instance_triples() # add self.names
        triples += self.build_edge_attrib_triples()
        graph = penman.graph.Graph(triples)
        string = penman.encode(graph, indent=6)
        # Strip the uniqueness post tag (ie.. 2007@attr1@ -> 2007)
        string = re.sub(r'@attr\d+@', '', string)
        return string
    # Create instance triples from a list of concepts (nodes and attributes)
    # This must be the first call because self.names is set here
    def build_instance_triples(self):
        """Build (var, 'instance', concept) triples and populate self.names."""
        self.enumerator.clear()
        self.names = [] # unique variable or attribute with tag
        triples = []
        # Loop through the concepts
        for i, concept in enumerate(self.concepts):
            # strings patterns match concept forms and thus require and instance variable
            if need_an_instance(concept):
                # The penman library has an issue parsing concepts with parens or tildes
                # These characters shouldn't be present but parsing errors can lead to this.
                # I found 11 instances in 55,635 training data samples
                # The Smatch scoring AMR reader looks to have an additional issue when it sees a quote in a concept
                concept = concept.replace('(', '')
                concept = concept.replace(')', '')
                concept = concept.replace('~', '')
                concept = concept.replace('"', '')
                if concept != self.concepts[i]:
                    self.concepts[i] = concept
                # get the enumerated graph variable and add a triple for it
                name = self.get_enumerated_var(concept)
                triples.append((name, 'instance', concept))
            # Attributes
            else:
                # AMRGraph.py adds an underscore to the end of any string attributes
                # Trade the underscore for quotes so we can put it back in the penman format
                if concept.endswith('_'):
                    # The penman library has an issue parsing an attribute with a quote inside it
                    # and, practially this probably doesn't make sense anyway, so remove it.
                    name = '"' + concept[:-1].replace('"', '') + '"'
                # Numbers or other things which don't get double-quotes applied to them
                else:
                    name = concept
                # Add a temporary tag to attribute names to gaurentee uniqueness. These will be stripped later.
                name = name + '@attr%d@ ' % i
            self.names.append(name)
        return triples
    # Turn concept into a unique variable name (ie.. name -> n, n1, n2,...)
    def get_enumerated_var(self, concept):
        """Return a unique graph variable (first letter + counter) for concept."""
        first = concept[0] # first letter
        if not first.isalpha():
            first = 'x'
        idx = self.enumerator[first]
        self.enumerator[first] += 1
        # de-facto standard is to not append a 0 on the first instance but this
        # seems to cause an issue with my version of the smatch scorer's AMR reader,
        # so for now always append a number since a unique value is all that the spec requires.
        return '%s%d' % (first, idx)
    # Create edge and attribute triples from concepts/names and relations
    def build_edge_attrib_triples(self):
        """Select edges/attributes per target concept and return their triples."""
        # Put relations n a little more readable format and create a dictionary of them based on target_id
        rel_dict = defaultdict(list)
        for rel in self.relations:
            target_id, source_id, arc_prob, rel_probs = rel
            entry = SimpleNamespace(target_id=target_id, source_id=source_id, arc_prob=arc_prob, rel_probs=rel_probs)
            rel_dict[target_id].append( entry )
        # Loop through an index for every concepts except the first, to find the best relation
        # Note that this is iterating target id backwards, which is not the way the original code was.
        # This produces much better results when combined with enforcing the rule that ARGx can not be
        # repeated for any source node. When iterating forward, smatch drops ~0.15 points
        triples = []
        for target_id in range(1, len(self.concepts))[::-1]:
            # Look at all relations attached to this target concept.
            # Add triples for any non-attribute relation with a probability greater than 50%.
            # If none are above 50%, add the best one.
            # For attributes, add the best one (attribs only have 1 source connection)
            best = SimpleNamespace(target_id=None, source_id=None, arc_prob=0, rel_probs=[])
            for entry in rel_dict[target_id]:
                assert entry.target_id == target_id
                assert entry.source_id < entry.target_id
                # Attributes are never sources, they are always terminal nodes.
                if _is_attr_form(self.concepts[entry.source_id]):
                    continue
                # If greater than a 50% arc probability and it's not an attribute, add a triple for it
                if entry.arc_prob >= self.arc_thresh:
                    if not _is_attr_form(self.concepts[entry.target_id]):
                        triples.append( self.form_relation_triple(entry) )
                # Keep track of the max probabilities
                if entry.arc_prob > best.arc_prob:
                    best = entry
            # If the max probability is less than 50% or if the target is an attibute then the code above
            # didn't add any triples so add the best one.
            if best.arc_prob < self.arc_thresh or _is_attr_form(self.concepts[best.target_id]):
                assert best.target_id == target_id
                triples.append( self.form_relation_triple(best) )
        return triples
    # Form a a triple(source, relation, target) for an edge or attribute triple
    def form_relation_triple(self, entry):
        """Return a (source, relation, target) triple, un-reversing X-of edges."""
        edge_name = self.edge_name_from_rules(entry)
        # If the edge is a reverse type, form the triple backwards to show this
        if edge_name.endswith('_reverse_'):
            return (self.names[entry.target_id], edge_name[:-9], self.names[entry.source_id])
        else:
            return (self.names[entry.source_id], edge_name, self.names[entry.target_id])
    # Use some common-sense rules to select the most-probable edge name
    # Generally this is the argmax(rel_probs) but there are cases that are illegal (or at least un-heard of) for AMR
    # Since only the edge_names can be changed, loop through the most probably ones until we find one that passes
    def edge_name_from_rules(self, entry):
        """Return the best edge name for entry that satisfies the AMR sanity rules."""
        target = self.concepts[entry.target_id]
        is_attrib = _is_attr_form(target)
        # Rules that exactly dictate the edge name
        # Rule: imperative and expressive attributes always have mode as the edge
        if target in ('imperative', 'expressive') and is_attrib:
            return 'mode' # edge_name
        # Loop until all rules are satisfied
        edge_name_it = EdgeNameIterator(self.rel_vocab, entry.rel_probs)
        edge_name = edge_name_it.get_next()
        while edge_name_it.was_advanced:
            edge_name_it.was_advanced = False
            # Rule: don't repeat ARGx edges
            if edge_name.startswith('ARG') and edge_name in self.used_arcs[entry.source_id]:
                edge_name = edge_name_it.get_next()
            # Rule: edges for attributes should not be reversed (X-of type)
            elif edge_name.endswith('_reverse_') and is_attrib:
                edge_name = edge_name_it.get_next()
            # Rule: domain is never an attribute, the target is always a node
            elif edge_name == 'domain' and is_attrib:
                edge_name = edge_name_it.get_next()
            # Rule: polarity attributes always have '-' for a value
            elif edge_name == 'polarity' and target != '-':
                edge_name = edge_name_it.get_next()
            # Rule: All "name" edges lead into "name" nodes (but the reverse is not always true)
            elif edge_name == 'name' and target != 'name':
                edge_name = edge_name_it.get_next()
            # Rule: mode is always an attribute
            elif edge_name == 'mode' and not is_attrib:
                edge_name = edge_name_it.get_next()
        # Keep track of used arcs and don't repeat them for the node
        self.used_arcs[entry.source_id].add(edge_name)
        return edge_name
# Helper class to loop through relation probabilities and get the best / next_best edge name
class EdgeNameIterator(object):
    """Yields candidate edge names, best-first, from relation probabilities.

    `get_next` walks the vocabulary indices in decreasing-probability
    order; `was_advanced` lets callers detect that another candidate was
    consumed during a rule-checking pass.
    """
    def __init__(self, rel_vocab, rel_probs):
        self.rel_vocab = rel_vocab
        # Vocabulary indices ordered from most to least probable.
        self.indices = np.argsort(rel_probs)[::-1]
        self.ptr = 0
        self.was_advanced = False

    def get_next(self):
        """Return the next-best edge name; raises IndexError when exhausted."""
        chosen = self.indices[self.ptr]
        # An IndexError above means every candidate edge has been rejected.
        self.ptr = self.ptr + 1
        self.was_advanced = True
        return self.rel_vocab.idx2token(chosen)
|
import penman
import re
from collections import Counter, defaultdict
from types import SimpleNamespace
import numpy as np
from .amr_graph import _is_attr_form, need_an_instance
# Note that penman triples typically have a colon in front of the relationship but
# it appears to add these automatically when creating the graph.
class GraphBuilder(object):
    """Assemble a penman-serialized AMR graph from predicted concepts and relations.

    Note that penman triples typically have a colon in front of the relationship,
    but the library appears to add these automatically when creating the graph.
    """

    def __init__(self, rel_vocab):
        self.enumerator = Counter()   # per-letter counters for variable names (n0, n1, ...)
        self.rel_vocab = rel_vocab    # maps relation-vocabulary indices to edge names
        self.concepts = []            # list of node names (concepts and attributes)
        self.relations = []           # list of (target_id, source_id, arc_prob, rel_prob:list(vocab))
        self.names = []               # names for concepts (1:1) ie.. n1, n2, p1, Ohio@attr4@ , ..
        self.arc_thresh = 0.50        # threshold of arc probability to add an edge **1
        # **1: Experimentally 0.5 is about optimal, though increasing to 0.9 doesn't decrease the score
        # and decreasing to 0.1 only drops the smatch score by 0.014 smatch

    def build(self, concepts, relations):
        """Convert a list of concepts and relations into a penman graph string.

        concepts  : list of concept / attribute strings
        relations : list of (target_id, source_id, arc_prob, rel_prob:list(vocab))
        """
        self.concepts = concepts
        self.relations = relations
        self.used_arcs = defaultdict(set)   # edge names already seen (key is source_id)
        triples = self.build_instance_triples()   # also populates self.names
        triples += self.build_edge_attrib_triples()
        graph = penman.graph.Graph(triples)
        string = penman.encode(graph, indent=6)
        # Strip the uniqueness post tag (ie.. 2007@attr1@ -> 2007)
        string = re.sub(r'@attr\d+@', '', string)
        return string

    def build_instance_triples(self):
        """Create instance triples from the concepts (nodes and attributes).

        This must be the first call because self.names is set here.
        """
        self.enumerator.clear()
        self.names = []   # unique variable, or attribute value with a uniqueness tag
        triples = []
        # Loop through the concepts
        for i, concept in enumerate(self.concepts):
            # String patterns matching concept forms require an instance variable
            if need_an_instance(concept):
                # The penman library has an issue parsing concepts with parens or tildes.
                # These characters shouldn't be present but parsing errors can lead to this
                # (11 instances observed in 55,635 training samples).  The Smatch scorer's
                # AMR reader also has an issue with quotes inside a concept.
                concept = concept.replace('(', '')
                concept = concept.replace(')', '')
                concept = concept.replace('~', '')
                concept = concept.replace('"', '')
                if concept != self.concepts[i]:
                    self.concepts[i] = concept
                # Get the enumerated graph variable and add a triple for it
                name = self.get_enumerated_var(concept)
                triples.append((name, 'instance', concept))
            # Attributes
            else:
                # AMRGraph.py adds an underscore to the end of any string attribute;
                # trade the underscore for quotes so we can restore penman format.
                if concept.endswith('_'):
                    # Remove inner quotes — penman can't parse them and they are
                    # unlikely to be meaningful anyway.
                    name = '"' + concept[:-1].replace('"', '') + '"'
                # Numbers or other things which don't get double-quotes applied to them
                else:
                    name = concept
                # Add a temporary tag to attribute names to guarantee uniqueness;
                # stripped later by build()'s regex.
                # NOTE(review): the format string carries a trailing space after the
                # closing '@' which survives the strip — confirm this is intended.
                name = name + '@attr%d@ ' % i
            self.names.append(name)
        return triples

    def get_enumerated_var(self, concept):
        """Turn a concept into a unique variable name (ie.. name -> n0, n1, ...)."""
        first = concept[0]   # first letter seeds the variable name
        if not first.isalpha():
            first = 'x'
        idx = self.enumerator[first]
        self.enumerator[first] += 1
        # The de-facto standard is to omit the 0 on the first instance, but that
        # trips this smatch scorer's AMR reader, so always append a number —
        # uniqueness is all the spec requires.
        return '%s%d' % (first, idx)

    def build_edge_attrib_triples(self):
        """Create edge and attribute triples from concepts/names and relations."""
        # Put relations in a more readable form, keyed by target_id
        rel_dict = defaultdict(list)
        for rel in self.relations:
            target_id, source_id, arc_prob, rel_probs = rel
            entry = SimpleNamespace(target_id=target_id, source_id=source_id, arc_prob=arc_prob, rel_probs=rel_probs)
            rel_dict[target_id].append(entry)
        # Loop through an index for every concept except the first, to find the best relation.
        # Note that this iterates target_id backwards, unlike the original code; combined
        # with the no-repeated-ARGx rule this scores much better (forward drops ~0.15 smatch).
        triples = []
        for target_id in range(1, len(self.concepts))[::-1]:
            # Look at all relations attached to this target concept.
            # Add triples for any non-attribute relation with probability >= arc_thresh.
            # If none qualify, add the best one.  For attributes, add only the best
            # one (attributes have exactly one source connection).
            best = SimpleNamespace(target_id=None, source_id=None, arc_prob=0, rel_probs=[])
            for entry in rel_dict[target_id]:
                assert entry.target_id == target_id
                assert entry.source_id < entry.target_id
                # Attributes are never sources; they are always terminal nodes.
                if _is_attr_form(self.concepts[entry.source_id]):
                    continue
                # Above threshold and not an attribute target -> add a triple
                if entry.arc_prob >= self.arc_thresh:
                    if not _is_attr_form(self.concepts[entry.target_id]):
                        triples.append(self.form_relation_triple(entry))
                # Keep track of the maximum-probability entry
                if entry.arc_prob > best.arc_prob:
                    best = entry
            # If nothing cleared the threshold, or the target is an attribute, the
            # loop above added no triple — add the best one.
            if best.arc_prob < self.arc_thresh or _is_attr_form(self.concepts[best.target_id]):
                # NOTE(review): if no non-attribute source exists, best.target_id is
                # still None and this assert fires — confirm upstream guarantees one.
                assert best.target_id == target_id
                triples.append(self.form_relation_triple(best))
        return triples

    def form_relation_triple(self, entry):
        """Form a (source, relation, target) triple for an edge or attribute."""
        edge_name = self.edge_name_from_rules(entry)
        # If the edge is a reverse type, flip source/target and drop the suffix
        if edge_name.endswith('_reverse_'):
            return (self.names[entry.target_id], edge_name[:-9], self.names[entry.source_id])
        else:
            return (self.names[entry.source_id], edge_name, self.names[entry.target_id])

    def edge_name_from_rules(self, entry):
        """Select the most probable edge name that satisfies common-sense AMR rules.

        Generally this is argmax(rel_probs), but some choices are illegal (or at
        least unheard of) for AMR.  Since only the edge name can change, walk the
        candidates from most to least probable until one passes every rule.
        """
        target = self.concepts[entry.target_id]
        is_attrib = _is_attr_form(target)
        # Rule that exactly dictates the edge name:
        # imperative and expressive attributes always use 'mode'
        if target in ('imperative', 'expressive') and is_attrib:
            return 'mode'   # edge_name
        # Loop until all rules are satisfied
        edge_name_it = EdgeNameIterator(self.rel_vocab, entry.rel_probs)
        edge_name = edge_name_it.get_next()
        while edge_name_it.was_advanced:
            edge_name_it.was_advanced = False
            # Rule: don't repeat ARGx edges for the same source node
            if edge_name.startswith('ARG') and edge_name in self.used_arcs[entry.source_id]:
                edge_name = edge_name_it.get_next()
            # Rule: edges to attributes should not be reversed (X-of type)
            elif edge_name.endswith('_reverse_') and is_attrib:
                edge_name = edge_name_it.get_next()
            # Rule: domain never points at an attribute; its target is always a node
            elif edge_name == 'domain' and is_attrib:
                edge_name = edge_name_it.get_next()
            # Rule: polarity attributes always have '-' for a value
            elif edge_name == 'polarity' and target != '-':
                edge_name = edge_name_it.get_next()
            # Rule: all "name" edges lead into "name" nodes (the reverse is not always true)
            elif edge_name == 'name' and target != 'name':
                edge_name = edge_name_it.get_next()
            # Rule: mode always points at an attribute
            elif edge_name == 'mode' and not is_attrib:
                edge_name = edge_name_it.get_next()
        # Keep track of used arcs so they are not repeated for this node
        self.used_arcs[entry.source_id].add(edge_name)
        return edge_name
# Helper class to loop through relation probabilities and get the best / next_best edge name
class EdgeNameIterator(object):
    """Yield candidate edge names from most to least probable.

    Sorts the relation-probability vector once and then serves candidates
    one at a time via get_next(); `was_advanced` records whether a new
    candidate was pulled since the flag was last cleared.
    """

    def __init__(self, rel_vocab, rel_probs):
        self.rel_vocab = rel_vocab
        # Probability indices sorted in descending order of likelihood.
        self.indices = np.argsort(rel_probs)[::-1]
        self.ptr = 0
        self.was_advanced = False

    def get_next(self):
        """Return the next-most-probable edge name (IndexError once exhausted)."""
        candidate = self.indices[self.ptr]
        # An exhausted iterator raises IndexError on purpose — callers treat it as fatal.
        self.ptr += 1
        self.was_advanced = True
        return self.rel_vocab.idx2token(candidate)
|
en
| 0.876286
|
# Note that penman triples typically have a colon in front of the relationship but # it appears to add these automatically when creating the graph. # List of node names (concets and attributes) # list of (target_id, source_id, arc_prob, rel_prob:list(vocab)) # names for concepts (1:1) ie.. n1, n2, p1, Ohio@attr4@ , .. # Threshold of arc probability to add an edge **1 # **1: Experimentally 0.5 is about optimal, though increasing to 0.9 doesn't decrease the score # and decreasing to 0.1 only drops the smatch score by 0.014 smatch # Convert a list of concepts and relations into a penman graph # concept is a list of concepts # relation is a list of (target_id, source_id, arc_prob, rel_prob:list(vocab)) # keep track of edge names aready seen (key is source_id) # add self.names # Strip the uniqueness post tag (ie.. 2007@attr1@ -> 2007) # Create instance triples from a list of concepts (nodes and attributes) # This must be the first call because self.names is set here # unique variable or attribute with tag # Loop through the concepts # strings patterns match concept forms and thus require and instance variable # The penman library has an issue parsing concepts with parens or tildes # These characters shouldn't be present but parsing errors can lead to this. # I found 11 instances in 55,635 training data samples # The Smatch scoring AMR reader looks to have an additional issue when it sees a quote in a concept # get the enumerated graph variable and add a triple for it # Attributes # AMRGraph.py adds an underscore to the end of any string attributes # Trade the underscore for quotes so we can put it back in the penman format # The penman library has an issue parsing an attribute with a quote inside it # and, practially this probably doesn't make sense anyway, so remove it. # Numbers or other things which don't get double-quotes applied to them # Add a temporary tag to attribute names to gaurentee uniqueness. These will be stripped later. 
# Turn concept into a unique variable name (ie.. name -> n, n1, n2,...) # first letter # de-facto standard is to not append a 0 on the first instance but this # seems to cause an issue with my version of the smatch scorer's AMR reader, # so for now always append a number since a unique value is all that the spec requires. # Create edge and attribute triples from concepts/names and relations # Put relations n a little more readable format and create a dictionary of them based on target_id # Loop through an index for every concepts except the first, to find the best relation # Note that this is iterating target id backwards, which is not the way the original code was. # This produces much better results when combined with enforcing the rule that ARGx can not be # repeated for any source node. When iterating forward, smatch drops ~0.15 points # Look at all relations attached to this target concept. # Add triples for any non-attribute relation with a probability greater than 50%. # If none are above 50%, add the best one. # For attributes, add the best one (attribs only have 1 source connection) # Attribtes are never sources, they are always terminal nodes. # If greater than a 50% arc probability and it's not an attribute, add a triple for it # Keep track of the max probabilities # If the max probability is less than 50% or if the target is an attibute then the code above # didn't add any triples so add the best one. 
# Form a a triple(source, relation, target) for an edge or attribute triple # If the edge is a reverse type, form the triple backwards to show this # Use some common-sense rules to select the most-probable edge name # Generally this is the argmax(rel_probs) but there are cases that are illegal (or at least un-heard of) for AMR # Since only the edge_names can be changed, loop through the most probably ones until we find one that passes # Rules that exactly dictate the edge name # Rule: imperative and expressive attributes always have mode as the edge # edge_name # Loop until all rules are satisfied # Rule: don't repeat ARGx egdes # Rule: edges for attributes should not be reversed (X-of type) # Rule: domain is never an attribute, the target is always a node # Rule: polarity attributes are always have '-' for a value # Rule: All "name" edges end lead into "name" nodes (but the reverse is not always true) # Rule: mode is always an attribute # Keep track of used arcs and don't repeat them for the node # Helper class to loop through relation probabilities and get the best / next_best edge name # index of the probabilities, sorted high to low # let this through an exception if we exhaust all available edges
| 2.888338
| 3
|
cogs/moderation.py
|
Skullknight011/Cyanmaton
| 1
|
6626985
|
<filename>cogs/moderation.py
import discord
from discord.ext import commands
import os
import requests
import json
import time
from math import *
import time
import asyncio
from datetime import datetime
import dateparser
# NOTE(review): this Client instance appears unused — the bot object is passed
# into the cog by the extension loader; confirm before removing.
client = discord.Client()
class ModerationCog(commands.Cog):
    """Moderation commands: role inspection/management and bulk message deletion."""

    def __init__(self, bot):
        self.bot = bot
        self._last_member = None

    @commands.command(name='highrole', aliases=['top_role', 'toprole', 'high_role'])
    @commands.cooldown(1, 2, commands.BucketType.user)
    @commands.guild_only()
    async def show_toprole(self, ctx, *, member: discord.Member = None):
        """Show the highest role of *member* (defaults to the invoker)."""
        if member is None:
            member = ctx.author
        embed = discord.Embed(title='Highest role for:',
                              description=member.name, colour=member.colour)
        embed.set_author(icon_url=member.avatar_url, name=str(member))
        embed.add_field(
            name='\uFEFF', value=f'The highest role for {member.display_name} is {member.top_role.name}')
        await ctx.send(content=None, embed=embed)

    @commands.command(name='perms', aliases=['permissions'])
    @commands.cooldown(1, 2, commands.BucketType.user)
    @commands.guild_only()
    async def check_permissions(self, ctx, *, member: discord.Member = None):
        """List the guild permissions of *member* (defaults to the invoker)."""
        if not member:
            member = ctx.author
        perms = '\n'.join(
            perm for perm, value in member.guild_permissions if value)
        embed = discord.Embed(title='Permissions for:',
                              description=ctx.guild.name, colour=member.colour)
        embed.set_author(icon_url=member.avatar_url, name=str(member))
        embed.add_field(name='\uFEFF', value=perms)
        await ctx.send(content=None, embed=embed)

    @commands.command(name='addrole', aliases=['roleadd'], pass_context=True)
    @commands.cooldown(1, 2, commands.BucketType.user)
    @commands.has_permissions(manage_roles=True)
    async def addrole(self, ctx, user: discord.Member, *, role: discord.Role):
        """Give *role* to *user* (requires Manage Roles)."""
        await user.add_roles(role)
        # Fixed typo in the confirmation message ("sucesfully").
        await ctx.send(f"Hey {ctx.author.name}, I have successfully given {user.name} the role {role.name}!")

    @commands.command(name='removerole', aliases=['remrole', 'rem', 'remove'], pass_context=True)
    @commands.cooldown(1, 2, commands.BucketType.user)
    @commands.has_permissions(manage_roles=True)
    async def removerole(self, ctx, user: discord.Member, *, role: discord.Role):
        """Remove *role* from *user* (requires Manage Roles)."""
        await user.remove_roles(role, reason=None, atomic=True)
        await ctx.send(f"Hey {ctx.author.name}, I have successfully removed the role {role.name} from {user.name}!")

    @commands.command(name="clear")
    # Security fix: the original let ANY user wipe a channel; gate it like the
    # other moderation commands.
    @commands.has_permissions(manage_messages=True)
    async def clear(self, ctx, limit: int = None):
        """Delete up to *limit* messages in this channel (all history when omitted),
        report the result, then remove the report itself after 2 seconds.

        NOTE: messages are deleted one at a time; TextChannel.purge() would be
        faster, but changes which messages are deletable (bulk-delete limits).
        """
        passed = 0
        failed = 0
        async for msg in ctx.message.channel.history(limit=limit):
            try:
                await msg.delete()
                passed += 1
            # Narrowed from a bare except: only API failures count as "failed".
            except discord.HTTPException:
                failed += 1
        await ctx.send(f"Removed {passed} messages with {failed} fails")
        # Bug fix: time.sleep() blocked the entire event loop for 2 seconds;
        # asyncio.sleep() yields control instead.
        await asyncio.sleep(2)
        async for msg in ctx.message.channel.history(limit=1):
            try:
                await msg.delete()
            except discord.HTTPException:
                await ctx.send("failed to delete bot's last words")
def setup(bot):
    """Extension entry point: register the moderation cog with *bot*."""
    cog = ModerationCog(bot)
    bot.add_cog(cog)
|
<filename>cogs/moderation.py
import discord
from discord.ext import commands
import os
import requests
import json
import time
from math import *
import time
import asyncio
from datetime import datetime
import dateparser
# NOTE(review): apparently unused Client instance — the loader supplies the bot
# to the cog directly; confirm before removing.
client = discord.Client()
class ModerationCog(commands.Cog):
    """Moderation commands: role inspection/management and bulk message deletion."""

    def __init__(self, bot):
        self.bot = bot
        self._last_member = None

    @commands.command(name='highrole', aliases=['top_role', 'toprole', 'high_role'])
    @commands.cooldown(1, 2, commands.BucketType.user)
    @commands.guild_only()
    async def show_toprole(self, ctx, *, member: discord.Member = None):
        """Show the highest role of *member* (defaults to the invoker)."""
        if member is None:
            member = ctx.author
        embed = discord.Embed(title='Highest role for:',
                              description=member.name, colour=member.colour)
        embed.set_author(icon_url=member.avatar_url, name=str(member))
        embed.add_field(
            name='\uFEFF', value=f'The highest role for {member.display_name} is {member.top_role.name}')
        await ctx.send(content=None, embed=embed)

    @commands.command(name='perms', aliases=['permissions'])
    @commands.cooldown(1, 2, commands.BucketType.user)
    @commands.guild_only()
    async def check_permissions(self, ctx, *, member: discord.Member = None):
        """List the guild permissions of *member* (defaults to the invoker)."""
        if not member:
            member = ctx.author
        perms = '\n'.join(
            perm for perm, value in member.guild_permissions if value)
        embed = discord.Embed(title='Permissions for:',
                              description=ctx.guild.name, colour=member.colour)
        embed.set_author(icon_url=member.avatar_url, name=str(member))
        embed.add_field(name='\uFEFF', value=perms)
        await ctx.send(content=None, embed=embed)

    @commands.command(name='addrole', aliases=['roleadd'], pass_context=True)
    @commands.cooldown(1, 2, commands.BucketType.user)
    @commands.has_permissions(manage_roles=True)
    async def addrole(self, ctx, user: discord.Member, *, role: discord.Role):
        """Give *role* to *user* (requires Manage Roles)."""
        await user.add_roles(role)
        await ctx.send(f"Hey {ctx.author.name}, I have sucesfully given {user.name} the role {role.name}!")

    @commands.command(name='removerole', aliases=['remrole', 'rem', 'remove'], pass_context=True)
    @commands.cooldown(1, 2, commands.BucketType.user)
    @commands.has_permissions(manage_roles=True)
    async def removerole(self, ctx, user: discord.Member, *, role: discord.Role):
        """Remove *role* from *user* (requires Manage Roles)."""
        await user.remove_roles(role, reason=None, atomic=True)
        await ctx.send(f"Hey {ctx.author.name}, I have sucesfully removed the role {role.name} from {user.name}!")

    @commands.command(name="clear")
    async def clear(self, ctx, limit: int = None):
        """Delete up to *limit* messages (all history when omitted), report the
        count, then delete the report message itself.

        NOTE(review): no permission check — any user can wipe the channel.
        NOTE(review): time.sleep() blocks the whole event loop; asyncio.sleep()
        would be the non-blocking equivalent.
        """
        passed = 0
        failed = 0
        async for msg in ctx.message.channel.history(limit=limit):
            try:
                await msg.delete()
                passed += 1
            except:  # NOTE(review): bare except hides unrelated errors
                failed += 1
        await ctx.send(f"Removed {passed} messages with {failed} fails")
        time.sleep(2)
        async for msg in ctx.message.channel.history(limit=1):
            try:
                await msg.delete()
            except:  # NOTE(review): bare except hides unrelated errors
                await ctx.send(f"failed to delete bot's last words")
def setup(bot):
    """Extension entry point used by discord.py's load_extension()."""
    bot.add_cog(ModerationCog(bot))
|
none
| 1
| 2.499318
| 2
|
|
notebooks/regression/bbp_homo_ccpp.py
|
Neronjust2017/Bayesian-neural-networks
| 4
|
6626986
|
<reponame>Neronjust2017/Bayesian-neural-networks
# %%
import GPy
import time
import copy
import math
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
from torchvision.utils import make_grid
from tqdm import tqdm, trange
# %%
# Probe the GPU: select device 0 and report its name (fails on CPU-only hosts).
# NOTE(review): torch.cuda.device(0) as a bare statement constructs a context
# manager without entering it, so it has no lasting effect — confirm intent.
torch.cuda.device(0)
torch.cuda.get_device_name(torch.cuda.current_device())
# %%
def to_variable(var=(), cuda=True, volatile=False):
    """Convert a sequence of arrays/tensors to autograd Variables.

    numpy arrays become FloatTensors; tensors are moved to the GPU when
    ``cuda`` is true; anything not already a Variable is wrapped in one.

    NOTE(review): ``Variable(v, volatile=...)`` is the legacy pre-0.4 PyTorch
    API — confirm the target torch version before reuse.
    """
    out = []
    for v in var:
        if isinstance(v, np.ndarray):
            v = torch.from_numpy(v).type(torch.FloatTensor)
        if not v.is_cuda and cuda:
            v = v.cuda()
        if not isinstance(v, Variable):
            v = Variable(v, volatile=volatile)
        out.append(v)
    return out
# %%
def log_gaussian_loss(output, target, sigma, no_dim):
    """Summed negative log-likelihood of `target` under N(output, sigma^2).

    The constant (2*pi) normalization term is dropped, so the loss is the
    NLL up to an additive constant.
    """
    norm_term = -no_dim * torch.log(sigma)
    residual_term = -0.5 * (target - output) ** 2 / sigma ** 2
    return -torch.sum(norm_term + residual_term)
def get_kl_divergence(weights, prior, varpost):
    """Single-sample estimate of KL(varpost || prior) evaluated at `weights`.

    Both distributions must expose loglik(weights); the variational
    likelihood weights the log-ratio.
    """
    log_p = prior.loglik(weights)
    log_q = varpost.loglik(weights)
    return (log_q.exp() * (log_q - log_p)).sum()
class gaussian:
    """Isotropic Gaussian with scalar mean and std; evaluates summed log-density."""

    def __init__(self, mu, sigma):
        self.mu = mu
        self.sigma = sigma

    def loglik(self, weights):
        """Sum over elements of log N(weights | mu, sigma^2)."""
        sq_term = -0.5 * ((weights - self.mu) / self.sigma) ** 2
        norm_const = -0.5 * np.log(2 * np.pi * self.sigma ** 2)
        return (sq_term + norm_const).sum()
# %%
class BayesLinear_Normalq(nn.Module):
    """Bayes-by-Backprop linear layer with a fully-factorized Gaussian posterior.

    Weights/biases are parameterized by means (mus) and softplus-transformed
    stds (rhos); `prior` supplies the (mu, sigma) of the Gaussian prior used
    in the closed-form KL term.
    """

    def __init__(self, input_dim, output_dim, prior):
        super(BayesLinear_Normalq, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.prior = prior

        # NOTE(review): `scale` and `rho_init` are computed but never used —
        # presumably leftovers from an earlier initialization scheme.
        scale = (2 / self.input_dim) ** 0.5
        rho_init = np.log(np.exp((2 / self.input_dim) ** 0.5) - 1)
        self.weight_mus = nn.Parameter(torch.Tensor(self.input_dim, self.output_dim).uniform_(-0.05, 0.05))
        self.weight_rhos = nn.Parameter(torch.Tensor(self.input_dim, self.output_dim).uniform_(-2, -1))

        self.bias_mus = nn.Parameter(torch.Tensor(self.output_dim).uniform_(-0.05, 0.05))
        self.bias_rhos = nn.Parameter(torch.Tensor(self.output_dim).uniform_(-2, -1))

    def forward(self, x, sample=True):
        """Stochastic forward pass.

        With sample=True, returns (output, KL) using a reparameterized weight
        sample; with sample=False, returns only the mean-weight output.
        """
        if sample:
            # sample gaussian noise for each weight and each bias
            weight_epsilons = Variable(self.weight_mus.data.new(self.weight_mus.size()).normal_())
            bias_epsilons = Variable(self.bias_mus.data.new(self.bias_mus.size()).normal_())

            # calculate the weight and bias stds from the rho parameters (softplus)
            weight_stds = torch.log(1 + torch.exp(self.weight_rhos))
            bias_stds = torch.log(1 + torch.exp(self.bias_rhos))

            # calculate samples from the posterior from the sampled noise and mus/stds
            weight_sample = self.weight_mus + weight_epsilons * weight_stds
            bias_sample = self.bias_mus + bias_epsilons * bias_stds

            output = torch.mm(x, weight_sample) + bias_sample

            # Closed-form KL between the diagonal-Gaussian posterior and prior,
            # accumulated over weights then biases.
            prior_cov, varpost_cov = self.prior.sigma ** 2, weight_stds ** 2
            KL_loss = 0.5 * (torch.log(prior_cov / varpost_cov)).sum() - 0.5 * weight_stds.numel()
            KL_loss = KL_loss + 0.5 * (varpost_cov / prior_cov).sum()
            KL_loss = KL_loss + 0.5 * ((self.weight_mus - self.prior.mu) ** 2 / prior_cov).sum()

            prior_cov, varpost_cov = self.prior.sigma ** 2, bias_stds ** 2
            KL_loss = KL_loss + 0.5 * (torch.log(prior_cov / varpost_cov)).sum() - 0.5 * bias_stds.numel()
            KL_loss = KL_loss + 0.5 * (varpost_cov / prior_cov).sum()
            KL_loss = KL_loss + 0.5 * ((self.bias_mus - self.prior.mu) ** 2 / prior_cov).sum()

            return output, KL_loss

        else:
            # Deterministic pass through the posterior means (no KL returned).
            output = torch.mm(x, self.weight_mus) + self.bias_mus
            # return output, KL_loss
            return output

    def sample_layer(self, no_samples):
        """Draw `no_samples` weight samples and return them as one flat list."""
        all_samples = []
        for i in range(no_samples):
            # sample gaussian noise for each weight
            weight_epsilons = Variable(self.weight_mus.data.new(self.weight_mus.size()).normal_())

            # calculate the weight stds from the rho parameters (softplus)
            weight_stds = torch.log(1 + torch.exp(self.weight_rhos))

            # calculate samples from the posterior from the sampled noise and mus/stds
            weight_sample = self.weight_mus + weight_epsilons * weight_stds

            all_samples += weight_sample.view(-1).cpu().data.numpy().tolist()

        return all_samples
# %%
class BBP_Homoscedastic_Model(nn.Module):
    """Two-layer Bayes-by-Backprop network with a learned homoscedastic noise level.

    forward() returns (predictions, summed KL of both Bayesian layers);
    `log_noise` holds the log observation-noise std shared by all inputs.
    """

    def __init__(self, input_dim, output_dim, no_units, init_log_noise):
        super(BBP_Homoscedastic_Model, self).__init__()

        self.input_dim = input_dim
        self.output_dim = output_dim

        # Two Bayesian linear layers with a standard-normal prior on the weights.
        self.layer1 = BayesLinear_Normalq(input_dim, no_units, gaussian(0, 1))
        self.layer2 = BayesLinear_Normalq(no_units, output_dim, gaussian(0, 1))

        # activation to be used between hidden layers
        self.activation = nn.ReLU(inplace=True)

        # Bug fix: the original used torch.cuda.FloatTensor here, which crashes
        # on CPU-only machines at construction time.  Creating the parameter on
        # CPU is equivalent — the training wrapper's .cuda() call moves it.
        self.log_noise = nn.Parameter(torch.FloatTensor([init_log_noise]))

    def forward(self, x):
        """Run one stochastic forward pass; returns (output, total KL)."""
        KL_loss_total = 0
        x = x.view(-1, self.input_dim)

        x, KL_loss = self.layer1(x)
        KL_loss_total = KL_loss_total + KL_loss
        x = self.activation(x)

        x, KL_loss = self.layer2(x)
        KL_loss_total = KL_loss_total + KL_loss

        return x, KL_loss_total
# %%
class BBP_Homoscedastic_Model_Wrapper:
    """Training harness: builds the BBP network on GPU and optimizes the ELBO with SGD."""

    def __init__(self, input_dim, output_dim, no_units, learn_rate, batch_size, no_batches, init_log_noise):
        self.learn_rate = learn_rate
        self.batch_size = batch_size
        self.no_batches = no_batches

        self.network = BBP_Homoscedastic_Model(input_dim=input_dim, output_dim=output_dim,
                                               no_units=no_units, init_log_noise=init_log_noise)
        self.network.cuda()

        self.optimizer = torch.optim.SGD(self.network.parameters(), lr=self.learn_rate)
        self.loss_func = log_gaussian_loss

    def fit(self, x, y, no_samples):
        """One optimization step over `no_samples` stochastic forward passes.

        Returns (summed fit loss over samples, KL scaled by 1/no_batches).
        """
        x, y = to_variable(var=(x, y), cuda=True)

        # reset gradient and total loss
        self.optimizer.zero_grad()
        fit_loss_total = 0

        for i in range(no_samples):
            output, KL_loss_total = self.network(x)

            # Gaussian NLL under the current homoscedastic noise estimate.
            fit_loss_total = fit_loss_total + self.loss_func(output, y, self.network.log_noise.exp(),
                                                             self.network.output_dim)

        # Scale KL for minibatching.  Only the KL from the last sampled forward
        # pass is used; its expectation equals the average over samples.
        # (Removed the original's no-op `KL_loss_total = KL_loss_total` line.)
        KL_loss_total = KL_loss_total / self.no_batches
        total_loss = (fit_loss_total + KL_loss_total) / (no_samples * x.shape[0])

        total_loss.backward()
        self.optimizer.step()

        return fit_loss_total / no_samples, KL_loss_total
# %%
from openpyxl import load_workbook

# Load the UCI Combined Cycle Power Plant dataset: 4 features + 1 target per row.
workbook = load_workbook(filename='/data/weiyuhua/Bayesian-Neural-Networks/datasets/CCPP/Folds5x2_pp.xlsx')
# get_sheet_by_name() is deprecated in openpyxl; index the workbook directly.
sheet = workbook["Sheet1"]

data = []
row_num = 2  # row 1 holds the column headers
while row_num <= 9569:
    sample = np.array([sheet.cell(row=row_num, column=i + 1).value for i in range(5)])
    data.append(sample)
    row_num = row_num + 1
data = np.array(data)

# 90/10 train/test split, no shuffling.
N = data.shape[0]
ind = int(N * 0.9)
train_data = data[:ind]
test_data = data[ind:]

x_train = train_data[:, :4]
y_train = train_data[:, 4]
x_test = test_data[:, :4]
y_test = test_data[:, 4]

inputs = 4
outputs = 1

# Full-batch training: the single "batch" is the whole training set.
num_epochs, batch_size, nb_train = 2000, len(x_train), len(x_train)

net = BBP_Homoscedastic_Model_Wrapper(input_dim=4, output_dim=1, no_units=100, learn_rate=1e-1,
                                      batch_size=batch_size, no_batches=1, init_log_noise=0)

fit_loss_train = np.zeros(num_epochs)
KL_loss_train = np.zeros(num_epochs)
total_loss = np.zeros(num_epochs)

best_net, best_loss = None, float('inf')

for i in range(num_epochs):
    fit_loss, KL_loss = net.fit(x_train, y_train, no_samples=10)
    fit_loss_train[i] += fit_loss.cpu().data.numpy()
    KL_loss_train[i] += KL_loss.cpu().data.numpy()
    total_loss[i] = fit_loss_train[i] + KL_loss_train[i]

    # Snapshot the network with the lowest fit loss seen so far.
    if fit_loss < best_loss:
        best_loss = fit_loss
        best_net = copy.deepcopy(net.network)

    if i % 100 == 0 or i == num_epochs - 1:
        print("Epoch: %5d/%5d, Fit loss = %8.3f, KL loss = %8.3f, noise = %6.3f" %
              (i + 1, num_epochs, fit_loss_train[i], KL_loss_train[i], net.network.log_noise.exp().cpu().data.numpy()))

# Draw 100 stochastic predictions over a 1-D grid.
# NOTE(review): the network expects input_dim == 4, so forward()'s view()
# reshapes the 200-point linspace into 50 rows of 4 — confirm this is intended.
# (Removed the large commented-out plotting block that followed; it referenced
# x_mean/x_std variables never defined in this script.)
samples = []
for i in range(100):
    preds = net.network.forward(torch.linspace(-3, 3, 200).cuda())[0]
    samples.append(preds.cpu().data.numpy()[:, 0])
|
# %%
import GPy
import time
import copy
import math
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
from torchvision.utils import make_grid
from tqdm import tqdm, trange
# %%
# Probe the GPU: select device 0 and report its name (fails on CPU-only hosts).
# NOTE(review): torch.cuda.device(0) used as a bare statement builds a context
# manager without entering it, so it has no lasting effect — confirm intent.
torch.cuda.device(0)
torch.cuda.get_device_name(torch.cuda.current_device())
# %%
def to_variable(var=(), cuda=True, volatile=False):
    """Convert a sequence of arrays/tensors to autograd Variables.

    numpy arrays become FloatTensors; tensors move to the GPU when ``cuda``
    is true; anything not already a Variable is wrapped in one.

    NOTE(review): ``Variable(v, volatile=...)`` is the legacy pre-0.4 PyTorch
    API — confirm the target torch version before reuse.
    """
    out = []
    for v in var:
        if isinstance(v, np.ndarray):
            v = torch.from_numpy(v).type(torch.FloatTensor)
        if not v.is_cuda and cuda:
            v = v.cuda()
        if not isinstance(v, Variable):
            v = Variable(v, volatile=volatile)
        out.append(v)
    return out
# %%
def log_gaussian_loss(output, target, sigma, no_dim):
    """Summed negative log-likelihood of `target` under N(output, sigma^2),
    with the constant 2*pi normalization term dropped."""
    exponent = -0.5 * (target - output) ** 2 / sigma ** 2
    log_coeff = -no_dim * torch.log(sigma)
    return - (log_coeff + exponent).sum()
def get_kl_divergence(weights, prior, varpost):
    """Single-sample estimate of KL(varpost || prior) evaluated at `weights`.
    Both arguments must expose a loglik(weights) method."""
    prior_loglik = prior.loglik(weights)
    varpost_loglik = varpost.loglik(weights)
    varpost_lik = varpost_loglik.exp()
    return (varpost_lik * (varpost_loglik - prior_loglik)).sum()
class gaussian:
    """Isotropic Gaussian with scalar mean and std; evaluates summed log-density."""

    def __init__(self, mu, sigma):
        self.mu = mu
        self.sigma = sigma

    def loglik(self, weights):
        """Sum over elements of log N(weights | mu, sigma^2)."""
        exponent = -0.5 * (weights - self.mu) ** 2 / self.sigma ** 2
        log_coeff = -0.5 * (np.log(2 * np.pi) + 2 * np.log(self.sigma))
        return (exponent + log_coeff).sum()
# %%
class BayesLinear_Normalq(nn.Module):
    """Bayes-by-Backprop linear layer with a fully-factorized Gaussian posterior.

    Weights/biases are parameterized by means (mus) and softplus-transformed
    stds (rhos); `prior` supplies the (mu, sigma) of the Gaussian prior used
    in the closed-form KL term.
    """

    def __init__(self, input_dim, output_dim, prior):
        super(BayesLinear_Normalq, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.prior = prior

        # NOTE(review): `scale` and `rho_init` are computed but never used —
        # presumably leftovers from an earlier initialization scheme.
        scale = (2 / self.input_dim) ** 0.5
        rho_init = np.log(np.exp((2 / self.input_dim) ** 0.5) - 1)
        self.weight_mus = nn.Parameter(torch.Tensor(self.input_dim, self.output_dim).uniform_(-0.05, 0.05))
        self.weight_rhos = nn.Parameter(torch.Tensor(self.input_dim, self.output_dim).uniform_(-2, -1))

        self.bias_mus = nn.Parameter(torch.Tensor(self.output_dim).uniform_(-0.05, 0.05))
        self.bias_rhos = nn.Parameter(torch.Tensor(self.output_dim).uniform_(-2, -1))

    def forward(self, x, sample=True):
        """Stochastic forward pass.

        With sample=True, returns (output, KL) using a reparameterized weight
        sample; with sample=False, returns only the mean-weight output.
        """
        if sample:
            # sample gaussian noise for each weight and each bias
            weight_epsilons = Variable(self.weight_mus.data.new(self.weight_mus.size()).normal_())
            bias_epsilons = Variable(self.bias_mus.data.new(self.bias_mus.size()).normal_())

            # calculate the weight and bias stds from the rho parameters (softplus)
            weight_stds = torch.log(1 + torch.exp(self.weight_rhos))
            bias_stds = torch.log(1 + torch.exp(self.bias_rhos))

            # calculate samples from the posterior from the sampled noise and mus/stds
            weight_sample = self.weight_mus + weight_epsilons * weight_stds
            bias_sample = self.bias_mus + bias_epsilons * bias_stds

            output = torch.mm(x, weight_sample) + bias_sample

            # Closed-form KL between the diagonal-Gaussian posterior and prior,
            # accumulated over weights then biases.
            prior_cov, varpost_cov = self.prior.sigma ** 2, weight_stds ** 2
            KL_loss = 0.5 * (torch.log(prior_cov / varpost_cov)).sum() - 0.5 * weight_stds.numel()
            KL_loss = KL_loss + 0.5 * (varpost_cov / prior_cov).sum()
            KL_loss = KL_loss + 0.5 * ((self.weight_mus - self.prior.mu) ** 2 / prior_cov).sum()

            prior_cov, varpost_cov = self.prior.sigma ** 2, bias_stds ** 2
            KL_loss = KL_loss + 0.5 * (torch.log(prior_cov / varpost_cov)).sum() - 0.5 * bias_stds.numel()
            KL_loss = KL_loss + 0.5 * (varpost_cov / prior_cov).sum()
            KL_loss = KL_loss + 0.5 * ((self.bias_mus - self.prior.mu) ** 2 / prior_cov).sum()

            return output, KL_loss

        else:
            # Deterministic pass through the posterior means (no KL returned).
            output = torch.mm(x, self.weight_mus) + self.bias_mus
            # return output, KL_loss
            return output

    def sample_layer(self, no_samples):
        """Draw `no_samples` weight samples and return them as one flat list."""
        all_samples = []
        for i in range(no_samples):
            # sample gaussian noise for each weight
            weight_epsilons = Variable(self.weight_mus.data.new(self.weight_mus.size()).normal_())

            # calculate the weight stds from the rho parameters (softplus)
            weight_stds = torch.log(1 + torch.exp(self.weight_rhos))

            # calculate samples from the posterior from the sampled noise and mus/stds
            weight_sample = self.weight_mus + weight_epsilons * weight_stds

            all_samples += weight_sample.view(-1).cpu().data.numpy().tolist()

        return all_samples
# %%
class BBP_Homoscedastic_Model(nn.Module):
    """Two-layer Bayes-by-Backprop network with a learned homoscedastic noise level."""

    def __init__(self, input_dim, output_dim, no_units, init_log_noise):
        super(BBP_Homoscedastic_Model, self).__init__()

        self.input_dim = input_dim
        self.output_dim = output_dim

        # network with two hidden and one output layer (standard-normal prior)
        self.layer1 = BayesLinear_Normalq(input_dim, no_units, gaussian(0, 1))
        self.layer2 = BayesLinear_Normalq(no_units, output_dim, gaussian(0, 1))
        # self.layer3 = BayesLinear_Normalq(no_units, output_dim, gaussian(0, 1))
        # self.layer4 = BayesLinear_Normalq(no_units, no_units, gaussian(0, 1))
        # self.layer5 = BayesLinear_Normalq(no_units, no_units, gaussian(0, 1))
        # self.layer6 = BayesLinear_Normalq(no_units, no_units, gaussian(0, 1))
        # self.layer7 = BayesLinear_Normalq(no_units, no_units, gaussian(0, 1))
        # self.layer8 = BayesLinear_Normalq(no_units, output_dim, gaussian(0, 1))

        # activation to be used between hidden layers
        self.activation = nn.ReLU(inplace=True)

        # NOTE(review): torch.cuda.FloatTensor crashes at construction time on
        # CPU-only machines; a plain FloatTensor moved by .cuda() is equivalent.
        self.log_noise = nn.Parameter(torch.cuda.FloatTensor([init_log_noise]))

    def forward(self, x):
        """Run one stochastic forward pass; returns (output, total KL)."""
        KL_loss_total = 0
        x = x.view(-1, self.input_dim)

        x, KL_loss = self.layer1(x)
        KL_loss_total = KL_loss_total + KL_loss
        x = self.activation(x)

        x, KL_loss = self.layer2(x)
        KL_loss_total = KL_loss_total + KL_loss

        return x, KL_loss_total
# %%
class BBP_Homoscedastic_Model_Wrapper:
    """Owns a BBP_Homoscedastic_Model plus its SGD optimizer and provides a
    single-step `fit` method implementing the Bayes-by-Backprop ELBO."""

    def __init__(self, input_dim, output_dim, no_units, learn_rate, batch_size, no_batches, init_log_noise):
        self.learn_rate = learn_rate
        self.batch_size = batch_size
        # Number of minibatches the dataset is split into; used below to
        # scale the KL term so the full-dataset KL is spread across batches.
        self.no_batches = no_batches

        self.network = BBP_Homoscedastic_Model(input_dim=input_dim, output_dim=output_dim,
            no_units=no_units, init_log_noise=init_log_noise)
        self.network.cuda()

        self.optimizer = torch.optim.SGD(self.network.parameters(), lr=self.learn_rate)
        # self.optimizer = torch.optim.Adam(self.network.parameters(), lr = self.learn_rate)
        self.loss_func = log_gaussian_loss

    def fit(self, x, y, no_samples):
        """One optimisation step: sum the Gaussian NLL over `no_samples`
        Monte Carlo forward passes, add the batch-scaled KL term,
        backpropagate and step the optimizer.

        Returns ``(fit_loss_total / no_samples, KL_loss_total)`` for logging.
        """
        x, y = to_variable(var=(x, y), cuda=True)

        # reset gradient and total loss
        self.optimizer.zero_grad()
        fit_loss_total = 0

        for i in range(no_samples):
            output, KL_loss_total = self.network(x)

            # calculate fit loss based on mean and standard deviation of output
            fit_loss_total = fit_loss_total + self.loss_func(output, y, self.network.log_noise.exp(),
                self.network.output_dim)

        # NOTE(review): only the KL from the *last* forward pass is used here
        # (KL_loss_total is overwritten on every loop iteration) -- confirm
        # this is intended rather than averaging KL over the MC samples.
        KL_loss_total = KL_loss_total / self.no_batches
        KL_loss_total = KL_loss_total
        total_loss = (fit_loss_total + KL_loss_total) / (no_samples * x.shape[0])

        total_loss.backward()
        self.optimizer.step()

        return fit_loss_total / no_samples, KL_loss_total
# %%
from openpyxl import load_workbook

# Load the Combined Cycle Power Plant (CCPP) spreadsheet: each data row has
# 4 input features and 1 target, spanning spreadsheet rows 2..9569.
workbook = load_workbook(filename='/data/weiyuhua/Bayesian-Neural-Networks/datasets/CCPP/Folds5x2_pp.xlsx')
sheet = workbook.get_sheet_by_name("Sheet1")

data = []
row_num = 2
while row_num <= 9569:
    sample = []
    for i in range(5):
        sample.append(sheet.cell(row=row_num, column=i+1).value)
    sample = np.array(sample)
    data.append(sample)
    row_num = row_num + 1

data = np.array(data)

# 90/10 train/test split.  NOTE(review): rows are taken in file order with no
# shuffling -- confirm the spreadsheet rows are not ordered.
N = data.shape[0]
ind = int(N * 0.9)
train_data = data[:ind]
test_data = data[ind:]

# First four columns are features, the fifth is the regression target.
x_train = train_data[:,:4]
y_train = train_data[:,4]
x_test = test_data[:,:4]
y_test = test_data[:,4]

inputs = 4
outputs = 1

# Full-batch training: a single batch containing the whole training set.
num_epochs, batch_size, nb_train = 2000, len(x_train), len(x_train)

net = BBP_Homoscedastic_Model_Wrapper(input_dim=4, output_dim=1, no_units=100, learn_rate=1e-1,
    batch_size=batch_size, no_batches=1, init_log_noise=0)

# Per-epoch logging buffers.
fit_loss_train = np.zeros(num_epochs)
KL_loss_train = np.zeros(num_epochs)
total_loss = np.zeros(num_epochs)

best_net, best_loss = None, float('inf')

for i in range(num_epochs):
    # 10 Monte Carlo samples per ELBO estimate.
    fit_loss, KL_loss = net.fit(x_train, y_train, no_samples=10)
    fit_loss_train[i] += fit_loss.cpu().data.numpy()
    KL_loss_train[i] += KL_loss.cpu().data.numpy()
    total_loss[i] = fit_loss_train[i] + KL_loss_train[i]

    # Keep a deep copy of the network with the lowest fit loss seen so far.
    if fit_loss < best_loss:
        best_loss = fit_loss
        best_net = copy.deepcopy(net.network)

    if i % 100 == 0 or i == num_epochs - 1:
        print("Epoch: %5d/%5d, Fit loss = %8.3f, KL loss = %8.3f, noise = %6.3f" %
            (i + 1, num_epochs, fit_loss_train[i], KL_loss_train[i], net.network.log_noise.exp().cpu().data.numpy()))

samples = []
for i in range(100):
    # NOTE(review): the network was trained on 4-D inputs; a 200-element
    # linspace will be reshaped by view(-1, 4) into 50 rows of 4 features --
    # confirm this predictive-sampling block is intended for this dataset
    # (it looks carried over from a 1-D toy example).
    preds = net.network.forward(torch.linspace(-3, 3, 200).cuda())[0]
    samples.append(preds.cpu().data.numpy()[:, 0])
# %%
# samples = []
# for i in range(100):
# preds = (best_net.forward(torch.linspace(-5, 5, 200).cuda())[0] * y_std) + y_mean
# samples.append(preds.cpu().data.numpy()[:, 0])
#
# samples = np.array(samples)
# means = samples.mean(axis=0)
#
# aleatoric = best_net.log_noise.exp().cpu().data.numpy()
# epistemic = samples.var(axis=0) ** 0.5
# total_unc = (aleatoric ** 2 + epistemic ** 2) ** 0.5
#
# c = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
# '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
#
# plt.figure(figsize=(6, 5))
# plt.style.use('default')
# plt.scatter((x_train * x_std) + x_mean, (y_train * y_std) + y_mean, s=10, marker='x', color='black', alpha=0.5)
# plt.fill_between(np.linspace(-5, 5, 200) * x_std + x_mean, means + aleatoric, means + total_unc, color=c[0], alpha=0.3,
# label=r'$\sigma(y^*|x^*)$')
# plt.fill_between(np.linspace(-5, 5, 200) * x_std + x_mean, means - total_unc, means - aleatoric, color=c[0], alpha=0.3)
# plt.fill_between(np.linspace(-5, 5, 200) * x_std + x_mean, means - aleatoric, means + aleatoric, color=c[1], alpha=0.4,
# label=r'$\EX[\sigma^2]^{1/2}$')
# plt.plot(np.linspace(-5, 5, 200) * x_std + x_mean, means, color='black', linewidth=1)
# plt.xlim([-5, 5])
# plt.ylim([-5, 7])
# plt.xlabel('$x$', fontsize=30)
# plt.title('BBP', fontsize=40)
# plt.tick_params(labelsize=30)
# plt.xticks(np.arange(-4, 5, 2))
# plt.gca().set_yticklabels([])
# plt.gca().yaxis.grid(alpha=0.3)
# plt.gca().xaxis.grid(alpha=0.3)
# plt.savefig('bbp_homo.pdf', bbox_inches='tight')
#
# # files.download("bbp_homo.pdf")
#
# plt.show()
#
# # %%
|
en
| 0.416016
|
# %% # %% # %% # %% # %% # sample gaussian noise for each weight and each bias # calculate the weight and bias stds from the rho parameters # calculate samples from the posterior from the sampled noise and mus/stds # computing the KL loss term # return output, KL_loss # sample gaussian noise for each weight and each bias # calculate the weight and bias stds from the rho parameters # calculate samples from the posterior from the sampled noise and mus/stds # %% # network with two hidden and one output layer # self.layer3 = BayesLinear_Normalq(no_units, output_dim, gaussian(0, 1)) # self.layer4 = BayesLinear_Normalq(no_units, no_units, gaussian(0, 1)) # self.layer5 = BayesLinear_Normalq(no_units, no_units, gaussian(0, 1)) # self.layer6 = BayesLinear_Normalq(no_units, no_units, gaussian(0, 1)) # self.layer7 = BayesLinear_Normalq(no_units, no_units, gaussian(0, 1)) # self.layer8 = BayesLinear_Normalq(no_units, output_dim, gaussian(0, 1)) # activation to be used between hidden layers # %% # self.optimizer = torch.optim.Adam(self.network.parameters(), lr = self.learn_rate) # reset gradient and total loss # calculate fit loss based on mean and standard deviation of output # %% # %% # samples = [] # for i in range(100): # preds = (best_net.forward(torch.linspace(-5, 5, 200).cuda())[0] * y_std) + y_mean # samples.append(preds.cpu().data.numpy()[:, 0]) # # samples = np.array(samples) # means = samples.mean(axis=0) # # aleatoric = best_net.log_noise.exp().cpu().data.numpy() # epistemic = samples.var(axis=0) ** 0.5 # total_unc = (aleatoric ** 2 + epistemic ** 2) ** 0.5 # # c = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', # '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'] # # plt.figure(figsize=(6, 5)) # plt.style.use('default') # plt.scatter((x_train * x_std) + x_mean, (y_train * y_std) + y_mean, s=10, marker='x', color='black', alpha=0.5) # plt.fill_between(np.linspace(-5, 5, 200) * x_std + x_mean, means + aleatoric, means + total_unc, color=c[0], alpha=0.3, # 
label=r'$\sigma(y^*|x^*)$') # plt.fill_between(np.linspace(-5, 5, 200) * x_std + x_mean, means - total_unc, means - aleatoric, color=c[0], alpha=0.3) # plt.fill_between(np.linspace(-5, 5, 200) * x_std + x_mean, means - aleatoric, means + aleatoric, color=c[1], alpha=0.4, # label=r'$\EX[\sigma^2]^{1/2}$') # plt.plot(np.linspace(-5, 5, 200) * x_std + x_mean, means, color='black', linewidth=1) # plt.xlim([-5, 5]) # plt.ylim([-5, 7]) # plt.xlabel('$x$', fontsize=30) # plt.title('BBP', fontsize=40) # plt.tick_params(labelsize=30) # plt.xticks(np.arange(-4, 5, 2)) # plt.gca().set_yticklabels([]) # plt.gca().yaxis.grid(alpha=0.3) # plt.gca().xaxis.grid(alpha=0.3) # plt.savefig('bbp_homo.pdf', bbox_inches='tight') # # # files.download("bbp_homo.pdf") # # plt.show() # # # %%
| 2.173267
| 2
|
maintainers/scripts/update.py
|
aidalgol/nixpkgs
| 2
|
6626987
|
<reponame>aidalgol/nixpkgs
from __future__ import annotations
from typing import Dict, Generator, List, Optional, Tuple
import argparse
import asyncio
import contextlib
import json
import os
import re
import subprocess
import sys
import tempfile
class CalledProcessError(Exception):
    """Raised by check_subprocess() when a child process exits non-zero.

    The failed asyncio subprocess is attached so callers can read its stderr.
    """
    # The asyncio subprocess whose exit status was non-zero.
    process: asyncio.subprocess.Process
def eprint(*args, **kwargs):
    """Like :func:`print`, but writes to standard error."""
    print(*args, **kwargs, file=sys.stderr)
async def check_subprocess(*args, **kwargs):
    """Spawn a subprocess and wait for it to finish, raising
    CalledProcessError on a non-zero exit status.

    Mirrors the ``check=True`` behaviour of :func:`subprocess.run` for
    :func:`asyncio.create_subprocess_exec`; returns the finished process.
    """
    proc = await asyncio.create_subprocess_exec(*args, **kwargs)
    if await proc.wait() == 0:
        return proc

    # Non-zero exit: attach the process so callers can inspect its output.
    failure = CalledProcessError()
    failure.process = proc
    raise failure
async def run_update_script(nixpkgs_root: str, merge_lock: asyncio.Lock, temp_dir: Optional[Tuple[str, str]], package: Dict, keep_going: bool):
    """Run one package's updateScript and merge any resulting changes.

    When `temp_dir` is a (worktree, branch) pair the script is executed inside
    that git worktree so commits do not touch the main checkout.  On script
    failure the stderr is echoed and saved to <pname>.log; unless
    `keep_going` is set, the whole update run is cancelled.
    """
    worktree: Optional[str] = None

    update_script_command = package['updateScript']

    if temp_dir is not None:
        worktree, _branch = temp_dir

        # Ensure the worktree is clean before update.
        await check_subprocess('git', 'reset', '--hard', '--quiet', 'HEAD', cwd=worktree)

        # Update scripts can use $(dirname $0) to get their location but we want to run
        # their clones in the git worktree, not in the main nixpkgs repo.
        update_script_command = map(lambda arg: re.sub(r'^{0}'.format(re.escape(nixpkgs_root)), worktree, arg), update_script_command)

    eprint(f" - {package['name']}: UPDATING ...")

    try:
        update_process = await check_subprocess('env', f"UPDATE_NIX_ATTR_PATH={package['attrPath']}", *update_script_command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, cwd=worktree)
        update_info = await update_process.stdout.read()

        await merge_changes(merge_lock, package, update_info, temp_dir)
    except KeyboardInterrupt as e:
        eprint('Cancelling…')
        raise asyncio.exceptions.CancelledError()
    except CalledProcessError as e:
        eprint(f" - {package['name']}: ERROR")
        eprint()
        eprint(f"--- SHOWING ERROR LOG FOR {package['name']} ----------------------")
        eprint()
        stderr = await e.process.stderr.read()
        eprint(stderr.decode('utf-8'))
        # Keep a per-package log file so failures can be inspected after the run.
        with open(f"{package['pname']}.log", 'wb') as logfile:
            logfile.write(stderr)
        eprint()
        eprint(f"--- SHOWING ERROR LOG FOR {package['name']} ----------------------")

        if not keep_going:
            raise asyncio.exceptions.CancelledError()
@contextlib.contextmanager
def make_worktree() -> Generator[Tuple[str, str], None, None]:
    """Create a temporary git worktree on a fresh branch and yield
    (worktree_path, branch_name).

    The worktree and its branch are forcibly removed when the context exits.
    """
    with tempfile.TemporaryDirectory() as wt:
        # Branch named after the temp dir so concurrent workers don't collide.
        branch_name = f'update-{os.path.basename(wt)}'
        target_directory = f'{wt}/nixpkgs'

        subprocess.run(['git', 'worktree', 'add', '-b', branch_name, target_directory])
        yield (target_directory, branch_name)
        subprocess.run(['git', 'worktree', 'remove', '--force', target_directory])
        subprocess.run(['git', 'branch', '-D', branch_name])
async def commit_changes(name: str, merge_lock: asyncio.Lock, worktree: str, branch: str, changes: List[Dict]) -> None:
    """Commit each change on the worktree branch and cherry-pick it back
    into the main checkout's current branch.

    Each change dict must provide 'files', 'attrPath', 'oldVersion' and
    'newVersion'; 'commitMessage' / 'commitBody' override the default message.
    """
    for change in changes:
        # Git can only handle a single index operation at a time
        async with merge_lock:
            await check_subprocess('git', 'add', *change['files'], cwd=worktree)
            commit_message = '{attrPath}: {oldVersion} → {newVersion}'.format(**change)
            if 'commitMessage' in change:
                commit_message = change['commitMessage']
            elif 'commitBody' in change:
                commit_message = commit_message + '\n\n' + change['commitBody']
            await check_subprocess('git', 'commit', '--quiet', '-m', commit_message, cwd=worktree)
            # Cherry-pick runs in the main checkout (no cwd), pulling the
            # worktree branch's tip commit into the current branch.
            await check_subprocess('git', 'cherry-pick', branch)
async def check_changes(package: Dict, worktree: str, update_info: str):
    """Normalise updater output into a list of change dicts.

    Updaters with the 'commit' feature emit JSON on stdout; otherwise a single
    empty change is assumed and missing fields (attrPath, oldVersion,
    newVersion, changed files) are filled in from package metadata and the
    worktree state.  Returns [] when the worktree shows no modified files.
    """
    if 'commit' in package['supportedFeatures']:
        changes = json.loads(update_info)
    else:
        changes = [{}]

    # Try to fill in missing attributes when there is just a single change.
    if len(changes) == 1:
        # Dynamic data from updater take precedence over static data from passthru.updateScript.
        if 'attrPath' not in changes[0]:
            # update.nix is always passing attrPath
            changes[0]['attrPath'] = package['attrPath']
        if 'oldVersion' not in changes[0]:
            # update.nix is always passing oldVersion
            changes[0]['oldVersion'] = package['oldVersion']

        if 'newVersion' not in changes[0]:
            attr_path = changes[0]['attrPath']
            # Evaluate the updated expression to learn the new version string.
            obtain_new_version_process = await check_subprocess('nix-instantiate', '--expr', f'with import ./. {{}}; lib.getVersion {attr_path}', '--eval', '--strict', '--json', stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, cwd=worktree)
            changes[0]['newVersion'] = json.loads((await obtain_new_version_process.stdout.read()).decode('utf-8'))

        if 'files' not in changes[0]:
            # NOTE(review): stdout.read() yields bytes, so these file names are
            # bytes objects; subprocess args accept bytes, but confirm all
            # downstream consumers do too.
            changed_files_process = await check_subprocess('git', 'diff', '--name-only', 'HEAD', stdout=asyncio.subprocess.PIPE, cwd=worktree)
            changed_files = (await changed_files_process.stdout.read()).splitlines()
            changes[0]['files'] = changed_files

            if len(changed_files) == 0:
                return []

    return changes
async def merge_changes(merge_lock: asyncio.Lock, package: Dict, update_info: str, temp_dir: Optional[Tuple[str, str]]) -> None:
    """Inspect updater output and, when running with a worktree, commit any
    resulting changes; otherwise just report completion."""
    if temp_dir is not None:
        worktree, branch = temp_dir
        changes = await check_changes(package, worktree, update_info)

        if len(changes) > 0:
            await commit_changes(package['name'], merge_lock, worktree, branch, changes)
        else:
            eprint(f" - {package['name']}: DONE, no changes.")
    else:
        eprint(f" - {package['name']}: DONE.")
async def updater(nixpkgs_root: str, temp_dir: Optional[Tuple[str, str]], merge_lock: asyncio.Lock, packages_to_update: asyncio.Queue[Optional[Dict]], keep_going: bool, commit: bool):
    """Worker coroutine: pull packages off the queue and run their update
    scripts until a ``None`` sentinel is received.

    `temp_dir` is this worker's (worktree, branch) pair when auto-commit is
    enabled, otherwise None.
    """
    while True:
        package = await packages_to_update.get()
        if package is None:
            # A sentinel received, we are done.
            return

        # Decide per package whether the worktree can be used.  This must not
        # overwrite the worker-level `temp_dir`: the previous code assigned
        # `temp_dir = None` here, which permanently disabled committing for
        # every subsequent package handled by this worker.
        package_temp_dir = temp_dir
        if not ('commit' in package['supportedFeatures'] or 'attrPath' in package):
            package_temp_dir = None

        await run_update_script(nixpkgs_root, merge_lock, package_temp_dir, package, keep_going)
async def start_updates(max_workers: int, keep_going: bool, commit: bool, packages: List[Dict]):
    """Spawn up to `max_workers` updater workers over the package list.

    With `commit` enabled, each worker gets its own temporary git worktree so
    commits can be made in parallel and cherry-picked back under a shared
    lock.  Returns once all workers have drained the queue (or after a
    cancellation has propagated).
    """
    merge_lock = asyncio.Lock()
    packages_to_update: asyncio.Queue[Optional[Dict]] = asyncio.Queue()

    with contextlib.ExitStack() as stack:
        temp_dirs: List[Optional[Tuple[str, str]]] = []

        # Do not create more workers than there are packages.
        num_workers = min(max_workers, len(packages))

        nixpkgs_root_process = await check_subprocess('git', 'rev-parse', '--show-toplevel', stdout=asyncio.subprocess.PIPE)
        nixpkgs_root = (await nixpkgs_root_process.stdout.read()).decode('utf-8').strip()

        # Set up temporary directories when using auto-commit.
        for i in range(num_workers):
            temp_dir = stack.enter_context(make_worktree()) if commit else None
            temp_dirs.append(temp_dir)

        # Fill up an update queue,
        for package in packages:
            await packages_to_update.put(package)

        # Add sentinels, one for each worker.
        # A workers will terminate when it gets sentinel from the queue.
        for i in range(num_workers):
            await packages_to_update.put(None)

        # Prepare updater workers for each temp_dir directory.
        # At most `num_workers` instances of `run_update_script` will be running at one time.
        updaters = asyncio.gather(*[updater(nixpkgs_root, temp_dir, merge_lock, packages_to_update, keep_going, commit) for temp_dir in temp_dirs])

        try:
            # Start updater workers.
            await updaters
        except asyncio.exceptions.CancelledError as e:
            # When one worker is cancelled, cancel the others too.
            updaters.cancel()
def main(max_workers: int, keep_going: bool, commit: bool, packages_path: str) -> None:
    """Load the package list from `packages_path`, show it, and after the
    user confirms run the async update machinery.

    Always exits the process: status 0 on completion, 130 on abort.
    """
    with open(packages_path) as f:
        packages = json.load(f)

    eprint()
    eprint('Going to be running update for following packages:')
    for package in packages:
        eprint(f" - {package['name']}")
    eprint()

    # A bare Enter confirms; any other input aborts.
    confirm = input('Press Enter key to continue...')
    if confirm == '':
        eprint()
        eprint('Running update for:')

        asyncio.run(start_updates(max_workers, keep_going, commit, packages))

        eprint()
        eprint('Packages updated!')
        sys.exit()
    else:
        eprint('Aborting!')
        sys.exit(130)
# Command-line interface; parsing happens in the __main__ guard below.
parser = argparse.ArgumentParser(description='Update packages')
parser.add_argument('--max-workers', '-j', dest='max_workers', type=int, help='Number of updates to run concurrently', nargs='?', default=4)
parser.add_argument('--keep-going', '-k', dest='keep_going', action='store_true', help='Do not stop after first failure')
parser.add_argument('--commit', '-c', dest='commit', action='store_true', help='Commit the changes')
parser.add_argument('packages', help='JSON file containing the list of package names and their update scripts')

if __name__ == '__main__':
    args = parser.parse_args()
    try:
        main(args.max_workers, args.keep_going, args.commit, args.packages)
    except KeyboardInterrupt as e:
        # Let’s cancel outside of the main loop too.
        sys.exit(130)
|
from __future__ import annotations
from typing import Dict, Generator, List, Optional, Tuple
import argparse
import asyncio
import contextlib
import json
import os
import re
import subprocess
import sys
import tempfile
class CalledProcessError(Exception):
process: asyncio.subprocess.Process
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
async def check_subprocess(*args, **kwargs):
"""
Emulate check argument of subprocess.run function.
"""
process = await asyncio.create_subprocess_exec(*args, **kwargs)
returncode = await process.wait()
if returncode != 0:
error = CalledProcessError()
error.process = process
raise error
return process
async def run_update_script(nixpkgs_root: str, merge_lock: asyncio.Lock, temp_dir: Optional[Tuple[str, str]], package: Dict, keep_going: bool):
worktree: Optional[str] = None
update_script_command = package['updateScript']
if temp_dir is not None:
worktree, _branch = temp_dir
# Ensure the worktree is clean before update.
await check_subprocess('git', 'reset', '--hard', '--quiet', 'HEAD', cwd=worktree)
# Update scripts can use $(dirname $0) to get their location but we want to run
# their clones in the git worktree, not in the main nixpkgs repo.
update_script_command = map(lambda arg: re.sub(r'^{0}'.format(re.escape(nixpkgs_root)), worktree, arg), update_script_command)
eprint(f" - {package['name']}: UPDATING ...")
try:
update_process = await check_subprocess('env', f"UPDATE_NIX_ATTR_PATH={package['attrPath']}", *update_script_command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, cwd=worktree)
update_info = await update_process.stdout.read()
await merge_changes(merge_lock, package, update_info, temp_dir)
except KeyboardInterrupt as e:
eprint('Cancelling…')
raise asyncio.exceptions.CancelledError()
except CalledProcessError as e:
eprint(f" - {package['name']}: ERROR")
eprint()
eprint(f"--- SHOWING ERROR LOG FOR {package['name']} ----------------------")
eprint()
stderr = await e.process.stderr.read()
eprint(stderr.decode('utf-8'))
with open(f"{package['pname']}.log", 'wb') as logfile:
logfile.write(stderr)
eprint()
eprint(f"--- SHOWING ERROR LOG FOR {package['name']} ----------------------")
if not keep_going:
raise asyncio.exceptions.CancelledError()
@contextlib.contextmanager
def make_worktree() -> Generator[Tuple[str, str], None, None]:
with tempfile.TemporaryDirectory() as wt:
branch_name = f'update-{os.path.basename(wt)}'
target_directory = f'{wt}/nixpkgs'
subprocess.run(['git', 'worktree', 'add', '-b', branch_name, target_directory])
yield (target_directory, branch_name)
subprocess.run(['git', 'worktree', 'remove', '--force', target_directory])
subprocess.run(['git', 'branch', '-D', branch_name])
async def commit_changes(name: str, merge_lock: asyncio.Lock, worktree: str, branch: str, changes: List[Dict]) -> None:
for change in changes:
# Git can only handle a single index operation at a time
async with merge_lock:
await check_subprocess('git', 'add', *change['files'], cwd=worktree)
commit_message = '{attrPath}: {oldVersion} → {newVersion}'.format(**change)
if 'commitMessage' in change:
commit_message = change['commitMessage']
elif 'commitBody' in change:
commit_message = commit_message + '\n\n' + change['commitBody']
await check_subprocess('git', 'commit', '--quiet', '-m', commit_message, cwd=worktree)
await check_subprocess('git', 'cherry-pick', branch)
async def check_changes(package: Dict, worktree: str, update_info: str):
if 'commit' in package['supportedFeatures']:
changes = json.loads(update_info)
else:
changes = [{}]
# Try to fill in missing attributes when there is just a single change.
if len(changes) == 1:
# Dynamic data from updater take precedence over static data from passthru.updateScript.
if 'attrPath' not in changes[0]:
# update.nix is always passing attrPath
changes[0]['attrPath'] = package['attrPath']
if 'oldVersion' not in changes[0]:
# update.nix is always passing oldVersion
changes[0]['oldVersion'] = package['oldVersion']
if 'newVersion' not in changes[0]:
attr_path = changes[0]['attrPath']
obtain_new_version_process = await check_subprocess('nix-instantiate', '--expr', f'with import ./. {{}}; lib.getVersion {attr_path}', '--eval', '--strict', '--json', stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, cwd=worktree)
changes[0]['newVersion'] = json.loads((await obtain_new_version_process.stdout.read()).decode('utf-8'))
if 'files' not in changes[0]:
changed_files_process = await check_subprocess('git', 'diff', '--name-only', 'HEAD', stdout=asyncio.subprocess.PIPE, cwd=worktree)
changed_files = (await changed_files_process.stdout.read()).splitlines()
changes[0]['files'] = changed_files
if len(changed_files) == 0:
return []
return changes
async def merge_changes(merge_lock: asyncio.Lock, package: Dict, update_info: str, temp_dir: Optional[Tuple[str, str]]) -> None:
if temp_dir is not None:
worktree, branch = temp_dir
changes = await check_changes(package, worktree, update_info)
if len(changes) > 0:
await commit_changes(package['name'], merge_lock, worktree, branch, changes)
else:
eprint(f" - {package['name']}: DONE, no changes.")
else:
eprint(f" - {package['name']}: DONE.")
async def updater(nixpkgs_root: str, temp_dir: Optional[Tuple[str, str]], merge_lock: asyncio.Lock, packages_to_update: asyncio.Queue[Optional[Dict]], keep_going: bool, commit: bool):
while True:
package = await packages_to_update.get()
if package is None:
# A sentinel received, we are done.
return
if not ('commit' in package['supportedFeatures'] or 'attrPath' in package):
temp_dir = None
await run_update_script(nixpkgs_root, merge_lock, temp_dir, package, keep_going)
async def start_updates(max_workers: int, keep_going: bool, commit: bool, packages: List[Dict]):
merge_lock = asyncio.Lock()
packages_to_update: asyncio.Queue[Optional[Dict]] = asyncio.Queue()
with contextlib.ExitStack() as stack:
temp_dirs: List[Optional[Tuple[str, str]]] = []
# Do not create more workers than there are packages.
num_workers = min(max_workers, len(packages))
nixpkgs_root_process = await check_subprocess('git', 'rev-parse', '--show-toplevel', stdout=asyncio.subprocess.PIPE)
nixpkgs_root = (await nixpkgs_root_process.stdout.read()).decode('utf-8').strip()
# Set up temporary directories when using auto-commit.
for i in range(num_workers):
temp_dir = stack.enter_context(make_worktree()) if commit else None
temp_dirs.append(temp_dir)
# Fill up an update queue,
for package in packages:
await packages_to_update.put(package)
# Add sentinels, one for each worker.
# A workers will terminate when it gets sentinel from the queue.
for i in range(num_workers):
await packages_to_update.put(None)
# Prepare updater workers for each temp_dir directory.
# At most `num_workers` instances of `run_update_script` will be running at one time.
updaters = asyncio.gather(*[updater(nixpkgs_root, temp_dir, merge_lock, packages_to_update, keep_going, commit) for temp_dir in temp_dirs])
try:
# Start updater workers.
await updaters
except asyncio.exceptions.CancelledError as e:
# When one worker is cancelled, cancel the others too.
updaters.cancel()
def main(max_workers: int, keep_going: bool, commit: bool, packages_path: str) -> None:
with open(packages_path) as f:
packages = json.load(f)
eprint()
eprint('Going to be running update for following packages:')
for package in packages:
eprint(f" - {package['name']}")
eprint()
confirm = input('Press Enter key to continue...')
if confirm == '':
eprint()
eprint('Running update for:')
asyncio.run(start_updates(max_workers, keep_going, commit, packages))
eprint()
eprint('Packages updated!')
sys.exit()
else:
eprint('Aborting!')
sys.exit(130)
parser = argparse.ArgumentParser(description='Update packages')
parser.add_argument('--max-workers', '-j', dest='max_workers', type=int, help='Number of updates to run concurrently', nargs='?', default=4)
parser.add_argument('--keep-going', '-k', dest='keep_going', action='store_true', help='Do not stop after first failure')
parser.add_argument('--commit', '-c', dest='commit', action='store_true', help='Commit the changes')
parser.add_argument('packages', help='JSON file containing the list of package names and their update scripts')
if __name__ == '__main__':
args = parser.parse_args()
try:
main(args.max_workers, args.keep_going, args.commit, args.packages)
except KeyboardInterrupt as e:
# Let’s cancel outside of the main loop too.
sys.exit(130)
|
en
| 0.83892
|
Emulate check argument of subprocess.run function. # Ensure the worktree is clean before update. # Update scripts can use $(dirname $0) to get their location but we want to run # their clones in the git worktree, not in the main nixpkgs repo. # Git can only handle a single index operation at a time # Try to fill in missing attributes when there is just a single change. # Dynamic data from updater take precedence over static data from passthru.updateScript. # update.nix is always passing attrPath # update.nix is always passing oldVersion # A sentinel received, we are done. # Do not create more workers than there are packages. # Set up temporary directories when using auto-commit. # Fill up an update queue, # Add sentinels, one for each worker. # A workers will terminate when it gets sentinel from the queue. # Prepare updater workers for each temp_dir directory. # At most `num_workers` instances of `run_update_script` will be running at one time. # Start updater workers. # When one worker is cancelled, cancel the others too. # Let’s cancel outside of the main loop too.
| 2.307205
| 2
|
src/tests/test_normalize.py
|
Shashvatb/pythia
| 84
|
6626988
|
import py.test
from src.utils import normalize
import subprocess
import os
'''
TEST FROM PYTHIA
'''
def test_empty_string():
    # Normalising an empty string is a no-op.
    assert normalize.normalize_and_remove_stop_words("") == ""

def test_links():
    # Only the scheme fragment of a URL survives link removal.
    assert normalize.remove_links("my link is <http://link.com>") == "my link is <http:>"

def test_HTML():
    # HTML tags (including attribute-style tags) are stripped.
    assert normalize.normalize_and_remove_stop_words("<p> <title=cats> cats pounce </p>") == "cats pounce"

def test_letters():
    # Digits and punctuation are dropped; the apostrophe splits "don't".
    assert normalize.normalize_and_remove_stop_words("19 cats&dogs don't eat?") == "cats dogs don t eat"

def test_lower_case():
    # All text is lower-cased.
    assert normalize.normalize_and_remove_stop_words("Hi BillyBob JOE") == "hi billybob joe"

def test_stop_words_text():
    # Stop-word removal applied to full text.
    assert normalize.normalize_and_remove_stop_words("the cat has name") == "cat"

def test_stop_words():
    # Token-list form; a custom stop-word list may be supplied as 2nd argument.
    assert normalize.remove_stop_words(["the", "fool", "is", "mine"]) == ["fool"]
    assert normalize.remove_stop_words(["the", "fool", "is", "mine"], ["fool"]) == ['the', 'is', 'mine']

def test_xml_normalize():
    # xml_normalize lower-cases and strips links but keeps sentence punctuation.
    assert normalize.xml_normalize("my link is <http://link.com>. Enough.") == 'my link is . enough.'

def test_combo():
    # End-to-end: tags, links, digits, case and stop words all handled at once.
    assert normalize.normalize_and_remove_stop_words("<p> <title=cats> <body> Cats pounce all the time! <http://catlink.com> is a video of cats JUMPING 10 times!! cool, right? </body></p>") == "cats pounce time video cats jumping times cool right"
|
import py.test
from src.utils import normalize
import subprocess
import os
'''
TEST FROM PYTHIA
'''
def test_empty_string():
assert normalize.normalize_and_remove_stop_words("") == ""
def test_links():
assert normalize.remove_links("my link is <http://link.com>") == "my link is <http:>"
def test_HTML():
assert normalize.normalize_and_remove_stop_words("<p> <title=cats> cats pounce </p>") == "cats pounce"
def test_letters():
assert normalize.normalize_and_remove_stop_words("19 cats&dogs don't eat?") == "cats dogs don t eat"
def test_lower_case():
assert normalize.normalize_and_remove_stop_words("Hi BillyBob JOE") == "hi billybob joe"
def test_stop_words_text():
assert normalize.normalize_and_remove_stop_words("the cat has name") == "cat"
def test_stop_words():
assert normalize.remove_stop_words(["the", "fool", "is", "mine"]) == ["fool"]
assert normalize.remove_stop_words(["the", "fool", "is", "mine"], ["fool"]) == ['the', 'is', 'mine']
def test_xml_normalize():
assert normalize.xml_normalize("my link is <http://link.com>. Enough.") == 'my link is . enough.'
def test_combo():
assert normalize.normalize_and_remove_stop_words("<p> <title=cats> <body> Cats pounce all the time! <http://catlink.com> is a video of cats JUMPING 10 times!! cool, right? </body></p>") == "cats pounce time video cats jumping times cool right"
|
en
| 0.39053
|
TEST FROM PYTHIA
| 2.84235
| 3
|
multi_agent_sac/src/multi_agent_sac/test.py
|
redvinaa/multiagent-path-finding-continuous
| 0
|
6626989
|
#! /usr/bin/env python3.6
import argparse
import torch
import os
import numpy as np
from multi_agent_sac.algorithm import MASAC
from multi_agent_sac.misc import from_unit_actions
from multi_agent_sac.env_wrapper import ParallelEnv
from rospkg import RosPack
import json
class TestMASACRos:
    """Load a trained multi-agent SAC model for a given run and roll it out
    in the MAPF environment with rendering (greedy actions, no exploration).
    """

    def __init__(self, config: dict):
        # load config
        pkg_path = RosPack().get_path('multi_agent_sac')
        self.run_dir = os.path.join(pkg_path, 'runs', config['run_name'])
        self.model_dir = os.path.join(self.run_dir, f'run_{config["run_index"]}', 'models')
        self.log_dir = os.path.join(self.run_dir, f'run_{config["run_index"]}', 'logs')

        with open(os.path.join(self.run_dir, 'config.json'), 'r') as f:
            self.c = json.load(f)

        # seed from config file
        torch.manual_seed(self.c['seed'])
        np.random.seed(self.c['seed'])

        # CLI options (run name/index, seed) override the stored run config.
        self.c.update(config)

        # create env
        maps_dir_path = os.path.join(RosPack().get_path('mapf_environment'), 'maps')
        image = os.path.join(maps_dir_path, self.c['map_image'] + '.jpg')
        # Single-threaded evaluation; map name replaced by its image path.
        self.c.update({'n_threads': 1, 'map_image': image})
        self.env = ParallelEnv(self.c, np.random.default_rng())

        # create model, load weights
        device = torch.device('cuda' if self.c['device']=='cuda' and
            torch.cuda.is_available() else 'cpu')

        self.model = MASAC( # most of these are not used
            n_agents = self.c['n_agents'],
            obs_space = self.env.get_observation_space(),
            act_space = self.env.get_action_space(),
            gamma = self.c['gamma'],
            tau = self.c['tau'],
            auto_entropy = self.c['auto_entropy'],
            actor_hidden = [self.c['actor_hidden_dim']],
            critic_hidden = [self.c['critic_hidden_dim']],
            model_dir = self.model_dir,
            device = device)
        self.model.load()

    def run(self) -> type(None):
        """Run the loaded policy forever, rendering every step.

        Actions are taken greedily (explore=False) and converted from the
        policy's unit range to real linear/angular speeds before stepping.
        """
        obs = self.env.reset()
        while True:
            # get actions
            act = np.stack([self.model.step(o, explore=False) for o in obs[0]])

            # render
            self.env.render(500, False)

            # step
            obs, _, _, _ = \
                self.env.step(
                    from_unit_actions(act, \
                        self.c['min_linear_speed'],
                        self.c['max_linear_speed'], \
                        self.c['max_angular_speed']))
if __name__ == '__main__':
    # CLI entry point: run name/index select which trained model to load.
    parser = argparse.ArgumentParser()
    parser.add_argument('run_name', default='empty_4x4', nargs='?', type=str,
        help='Name of the run to load')
    parser.add_argument('run_index', default=0, nargs='?', type=int,
        help='Index of the run to load')
    parser.add_argument('--seed', default=0, type=int)
    config = parser.parse_args()

    test = TestMASACRos(vars(config))
    test.run()
|
#! /usr/bin/env python3.6
import argparse
import torch
import os
import numpy as np
from multi_agent_sac.algorithm import MASAC
from multi_agent_sac.misc import from_unit_actions
from multi_agent_sac.env_wrapper import ParallelEnv
from rospkg import RosPack
import json
class TestMASACRos:
    """Load a trained MASAC model for a given run and replay it, with
    rendering, in the MAPF ROS environment."""

    def __init__(self, config: dict):
        """Read the run's stored config.json, seed the RNGs, build the
        environment and load the trained model weights.

        config keys used here: 'run_name', 'run_index', 'seed'; the CLI
        dict overrides the stored config via dict.update below.
        """
        # load config
        pkg_path = RosPack().get_path('multi_agent_sac')
        self.run_dir = os.path.join(pkg_path, 'runs', config['run_name'])
        self.model_dir = os.path.join(self.run_dir, f'run_{config["run_index"]}', 'models')
        self.log_dir = os.path.join(self.run_dir, f'run_{config["run_index"]}', 'logs')
        with open(os.path.join(self.run_dir, 'config.json'), 'r') as f:
            self.c = json.load(f)
        # seed from config file
        torch.manual_seed(self.c['seed'])
        np.random.seed(self.c['seed'])
        self.c.update(config)  # CLI arguments win over the stored config
        # create env (single-threaded for interactive replay)
        maps_dir_path = os.path.join(RosPack().get_path('mapf_environment'), 'maps')
        image = os.path.join(maps_dir_path, self.c['map_image'] + '.jpg')
        self.c.update({'n_threads': 1, 'map_image': image})
        self.env = ParallelEnv(self.c, np.random.default_rng())
        # create model, load weights
        device = torch.device('cuda' if self.c['device']=='cuda' and
            torch.cuda.is_available() else 'cpu')
        self.model = MASAC( # most of these are not used
            n_agents      = self.c['n_agents'],
            obs_space     = self.env.get_observation_space(),
            act_space     = self.env.get_action_space(),
            gamma         = self.c['gamma'],
            tau           = self.c['tau'],
            auto_entropy  = self.c['auto_entropy'],
            actor_hidden  = [self.c['actor_hidden_dim']],
            critic_hidden = [self.c['critic_hidden_dim']],
            model_dir     = self.model_dir,
            device        = device)
        self.model.load()

    def run(self) -> None:
        """Roll out the loaded policy forever, rendering each step.

        NOTE(review): only obs[0] (the single env thread) is used --
        n_threads is forced to 1 in __init__.
        """
        obs = self.env.reset()
        while True:
            # get actions (deterministic: explore=False at evaluation time)
            act = np.stack([self.model.step(o, explore=False) for o in obs[0]])
            # render
            self.env.render(500, False)
            # step: map unit-range actions back to physical speeds
            obs, _, _, _ = \
                self.env.step(
                    from_unit_actions(act, \
                        self.c['min_linear_speed'],
                        self.c['max_linear_speed'], \
                        self.c['max_angular_speed']))
if __name__ == '__main__':
    # CLI: pick which trained run/model to replay (run_name/run_index are
    # positional but optional; --seed is accepted for config override).
    parser = argparse.ArgumentParser()
    parser.add_argument('run_name', default='empty_4x4', nargs='?', type=str,
        help='Name of the run to load')
    parser.add_argument('run_index', default=0, nargs='?', type=int,
        help='Index of the run to load')
    parser.add_argument('--seed', default=0, type=int)
    config = parser.parse_args()
    # vars() turns the Namespace into the plain dict the class expects
    test = TestMASACRos(vars(config))
    test.run()
|
en
| 0.638332
|
#! /usr/bin/env python3.6 # load config # seed from config file # create env # create model, load weights # most of these are not used # get actions # render # step
| 1.864146
| 2
|
graphics/dotplot.py
|
yanxueqing621/jcvi
| 1
|
6626990
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
%prog [anchorfile|ksfile] --qbed query.bed --sbed subject.bed
visualize the anchorfile in a dotplot. anchorfile contains two columns
indicating gene pairs, followed by an optional column (e.g. Ks value).
The option --colormap specifies the block color to highlight certain blocks in
a file. Block ids are 1-based (non-digit chars will be removed). For example, below
requests that the 7th blocks to be colored red.
rice-sigma07 sigma
rice-sigma10 tau
Before running this script it is recommended to check/install
TeX Live (http://www.tug.org/texlive/) and
Ghostscript (http://www.ghostscript.com/)
see more here: http://matplotlib.sourceforge.net/users/usetex.html
"""
import os.path as op
import sys
import logging
import string
from random import sample
from jcvi.compara.synteny import AnchorFile, batch_scan, check_beds
from jcvi.utils.cbook import seqid_parse, thousands
from jcvi.apps.base import OptionParser, need_update
from jcvi.graphics.base import plt, Rectangle, set_human_axis, savefig, \
draw_cmap, TextHandler, latex, markup, normalize_axes
class Palette (dict):
    """Map 1-based block ids to single-letter matplotlib colors.

    Each line of *palettefile* contains a block id (non-digit characters
    are stripped, e.g. "rice-sigma07" -> 7) and a category name. Distinct
    categories are assigned colors from a fixed cycle, and every block id
    is finally remapped from its category to that category's color.
    """
    def __init__(self, palettefile):
        pal = "rbcygmk"
        # BUGFIX: use a context manager; the original leaked the open
        # file handle.
        with open(palettefile) as fp:
            for row in fp:
                a, b = row.split()
                a = "".join(x for x in a if x in string.digits)
                a = int(a)
                self[a] = b
        self.categories = sorted(set(self.values()))
        self.colors = dict(zip(self.categories, pal))
        logging.debug("Color info ({0} categories) imported for {1} blocks.".\
                format(len(self.colors), len(self)))
        logging.debug(str(self.colors))
        for k, v in self.items():  # Update from categories to colors
            self[k] = self.colors[v]
def draw_box(clusters, ax, color="b"):
    """Outline every cluster of (x, y) points with a translucent yellow box."""
    for cluster in clusters:
        xs, ys = zip(*cluster)
        x0, y0 = min(xs), min(ys)
        width = max(xs) - x0
        height = max(ys) - y0
        ax.add_patch(Rectangle((x0, y0), width, height,
                               ec=color, fc='y', alpha=.5))
def plot_breaks_and_labels(fig, root, ax, gx, gy, xsize, ysize,
                           qbreaks, sbreaks, sep=True, chrlw=.1,
                           sepcolor="g", minfont=5, stdpf=True):
    """Draw chromosome boundary lines, chromosome labels, the plot frame
    and the axis labels; return the (xlim, ylim) used.

    qbreaks/sbreaks are (seqid, begin, end) spans in gene-rank coordinates
    for the x and y axes respectively. Labels smaller than *minfont* are
    suppressed.
    """
    xlim = (0, xsize)
    ylim = (ysize, 0)  # invert the y-axis
    # Tag to mark whether to plot chr name (skip small ones)
    xchr_labels, ychr_labels = [], []
    th = TextHandler(fig)
    # plot the chromosome breaks
    for (seqid, beg, end) in qbreaks:
        xsize_ratio = abs(end - beg) * .8 / xsize
        fontsize = th.select_fontsize(xsize_ratio)
        seqid = "".join(seqid_parse(seqid, stdpf=stdpf)[:2])
        xchr_labels.append((seqid, (beg + end) / 2, fontsize))
        if sep:
            ax.plot([beg, beg], ylim, "-", lw=chrlw, color=sepcolor)
    for (seqid, beg, end) in sbreaks:
        ysize_ratio = abs(end - beg) * .8 / ysize
        fontsize = th.select_fontsize(ysize_ratio)
        seqid = "".join(seqid_parse(seqid, stdpf=stdpf)[:2])
        ychr_labels.append((seqid, (beg + end) / 2, fontsize))
        if sep:
            ax.plot(xlim, [beg, beg], "-", lw=chrlw, color=sepcolor)
    # plot the chromosome labels (positions mapped to the [.1, .9] canvas band)
    for label, pos, fontsize in xchr_labels:
        pos = .1 + pos * .8 / xsize
        if fontsize >= minfont:
            root.text(pos, .91, latex(label), size=fontsize,
                ha="center", va="bottom", rotation=45, color="grey")
    # remember y labels are inverted
    for label, pos, fontsize in ychr_labels:
        pos = .9 - pos * .8 / ysize
        if fontsize >= minfont:
            root.text(.91, pos, latex(label), size=fontsize,
                va="center", color="grey")
    # Plot the frame
    ax.plot(xlim, [0, 0], "-", lw=chrlw, color=sepcolor)
    ax.plot(xlim, [ysize, ysize], "-", lw=chrlw, color=sepcolor)
    ax.plot([0, 0], ylim, "-", lw=chrlw, color=sepcolor)
    ax.plot([xsize, xsize], ylim, "-", lw=chrlw, color=sepcolor)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_xlabel(gx, size=16)
    ax.set_ylabel(gy, size=16)
    # beautify the numeric axis
    for tick in ax.get_xticklines() + ax.get_yticklines():
        tick.set_visible(False)
    set_human_axis(ax)
    plt.setp(ax.get_xticklabels() + ax.get_yticklabels(),
             color='gray', size=10)
    return xlim, ylim
def downsample(data, sample_number=10000):
    """Downsample *data* in place to at most *sample_number* points.

    Returns the ORIGINAL number of pairs (before sampling) so that callers
    can still report the true total.
    """
    npairs = len(data)
    # Only show random subset
    if npairs > sample_number:
        logging.debug("Showing a random subset of {0} data points (total {1}) " \
                "for clarity.".format(sample_number, npairs))
        # BUGFIX: the original did `data = sample(...)`, which only rebound
        # the local name -- the caller's list was never downsampled. Slice
        # assignment mutates the caller's list as intended.
        data[:] = sample(data, sample_number)
    return npairs
def dotplot(anchorfile, qbed, sbed, fig, root, ax, vmin=0, vmax=1,
        is_self=False, synteny=False, cmap_text=None, cmap="copper",
        genomenames=None, sample_number=10000, minfont=5, palette=None,
        chrlw=.1, title=None, sep=True, sepcolor="g", stdpf=True):
    """Scatter-plot anchor gene pairs from *anchorfile* on gene-rank axes.

    Each anchor row holds a query and subject gene id plus an optional
    trailing numeric value (e.g. Ks) used for coloring when *cmap_text*
    is given. Rows starting with "#" separate blocks and advance the
    1-based block id looked up in *palette*. For self comparisons
    (is_self=True) every pair is mirrored across the diagonal.
    """
    fp = open(anchorfile)
    # add genome names
    if genomenames:
        gx, gy = genomenames.split("_")
    else:
        to_ax_label = lambda fname: op.basename(fname).split(".")[0]
        gx, gy = [to_ax_label(x.filename) for x in (qbed, sbed)]
    gx, gy = markup(gx), markup(gy)
    qorder = qbed.order
    sorder = sbed.order
    data = []
    if cmap_text:
        logging.debug("Capping values within [{0:.1f}, {1:.1f}]"\
                .format(vmin, vmax))
    block_id = 0
    for row in fp:
        atoms = row.split()
        block_color = None
        if row[0] == "#":
            block_id += 1
            if palette:
                block_color = palette.get(block_id, "k")
            continue
        # first two columns are query and subject, and an optional third column
        if len(atoms) < 2:
            continue
        query, subject = atoms[:2]
        value = atoms[-1]
        if cmap_text:
            try:
                value = float(value)
            except ValueError:
                value = vmax
            if value < vmin:
                continue
            if value > vmax:
                continue
        else:
            value = 0
        # skip pairs whose genes are not present in the bed orderings
        if query not in qorder:
            continue
        if subject not in sorder:
            continue
        qi, q = qorder[query]
        si, s = sorder[subject]
        nv = value if block_color is None else block_color
        data.append((qi, si, nv))
        if is_self:  # Mirror image
            data.append((si, qi, nv))
    npairs = downsample(data, sample_number=sample_number)
    x, y, c = zip(*data)
    if palette:
        # c already holds literal color letters from the palette
        ax.scatter(x, y, c=c, edgecolors="none", s=2, lw=0)
    else:
        ax.scatter(x, y, c=c, edgecolors="none", s=2, lw=0, cmap=cmap,
                vmin=vmin, vmax=vmax)
    if synteny:
        clusters = batch_scan(data, qbed, sbed)
        draw_box(clusters, ax)
    if cmap_text:
        draw_cmap(root, cmap_text, vmin, vmax, cmap=cmap)
    xsize, ysize = len(qbed), len(sbed)
    logging.debug("xsize=%d ysize=%d" % (xsize, ysize))
    qbreaks = qbed.get_breaks()
    sbreaks = sbed.get_breaks()
    xlim, ylim = plot_breaks_and_labels(fig, root, ax, gx, gy, xsize, ysize,
                           qbreaks, sbreaks, sep=sep, chrlw=chrlw,
                           sepcolor=sepcolor, minfont=minfont, stdpf=stdpf)
    # create a diagonal to separate mirror image for self comparison
    if is_self:
        ax.plot(xlim, (0, ysize), 'm-', alpha=.5, lw=2)
    if palette:  # bottom-left has the palette, if available
        colors = palette.colors
        xstart, ystart = .1, .05
        for category, c in sorted(colors.items()):
            root.add_patch(Rectangle((xstart, ystart), .03, .02, lw=0, fc=c))
            root.text(xstart + .04, ystart, category, color=c)
            xstart += .1
    if title is None:
        title = "Inter-genomic comparison: {0} vs {1}".format(gx, gy)
        if is_self:
            title = "Intra-genomic comparison within {0}".format(gx)
            npairs /= 2  # each pair was counted twice when mirrored
        title += " ({0} gene pairs)".format(thousands(npairs))
        root.set_title(title, x=.5, y=.96, color="k")
    if title:
        logging.debug("Dot plot title: {}".format(title))
    normalize_axes(root)
def subset_bed(bed, seqids):
    """Return a copy of *bed* keeping only the features whose seqid is in
    *seqids* (same container type as *bed*)."""
    from copy import deepcopy
    newbed = deepcopy(bed)
    del newbed[:]
    newbed.extend(feat for feat in bed if feat.seqid in seqids)
    return newbed
def dotplot_main(args):
    """Entry point: parse CLI options, load the anchor/bed files (converting
    a .ks file to anchors if needed) and render the dot plot to an image."""
    p = OptionParser(__doc__)
    p.set_beds()
    p.add_option("--synteny", default=False, action="store_true",
                 help="Run a fast synteny scan and display blocks [default: %default]")
    p.add_option("--cmaptext", help="Draw colormap box on the bottom-left corner")
    p.add_option("--vmin", dest="vmin", type="float", default=0,
                 help="Minimum value in the colormap [default: %default]")
    p.add_option("--vmax", dest="vmax", type="float", default=2,
                 help="Maximum value in the colormap [default: %default]")
    p.add_option("--genomenames", type="string", default=None,
                 help="genome names for labeling axes in the form of qname_sname, " \
                 "eg. \"Vitis vinifera_Oryza sativa\"")
    p.add_option("--nmax", dest="sample_number", type="int", default=10000,
                 help="Maximum number of data points to plot [default: %default]")
    p.add_option("--minfont", type="int", default=4,
                 help="Do not render labels with size smaller than")
    p.add_option("--colormap",
                 help="Two column file, block id to color mapping [default: %default]")
    p.add_option("--nosort", default=False, action="store_true",
                 help="Do not sort the seqids along the axes")
    p.add_option("--nosep", default=False, action="store_true",
                 help="Do not add contig lines")
    p.add_option("--nostdpf", default=False, action="store_true",
                 help="Do not standardize contig names")
    p.add_option("--skipempty", default=False, action="store_true",
                 help="Skip seqids that do not have matches")
    p.add_option("--title", help="Title of the dot plot")
    p.set_outfile(outfile=None)
    opts, args, iopts = p.set_image_options(args, figsize="8x8",
                                            style="dark", dpi=90, cmap="copper")
    if len(args) != 1:
        sys.exit(not p.print_help())
    palette = opts.colormap
    if palette:
        palette = Palette(palette)
    anchorfile, = args
    cmaptext = opts.cmaptext
    # a .ks input is first converted to an .anchors file with Ks values
    if anchorfile.endswith(".ks"):
        from jcvi.apps.ks import KsFile
        logging.debug("Anchors contain Ks values")
        cmaptext = cmaptext or "*Ks* values"
        anchorksfile = anchorfile + ".anchors"
        if need_update(anchorfile, anchorksfile):
            ksfile = KsFile(anchorfile)
            ksfile.print_to_anchors(anchorksfile)
        anchorfile = anchorksfile
    qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts,
                                                    sorted=(not opts.nosort))
    if opts.skipempty:
        # restrict the axes to seqids that actually carry anchor pairs
        ac = AnchorFile(anchorfile)
        if is_self:
            qseqids = sseqids = set()
        else:
            qseqids, sseqids = set(), set()
        for pair in ac.iter_pairs():
            q, s = pair[:2]
            qi, q = qorder[q]
            si, s = sorder[s]
            qseqids.add(q.seqid)
            sseqids.add(s.seqid)
        if is_self:
            qbed = sbed = subset_bed(qbed, qseqids)
        else:
            qbed = subset_bed(qbed, qseqids)
            sbed = subset_bed(sbed, sseqids)
    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])  # the whole canvas
    ax = fig.add_axes([.1, .1, .8, .8])  # the dot plot
    dotplot(anchorfile, qbed, sbed, fig, root, ax,
            vmin=opts.vmin, vmax=opts.vmax, is_self=is_self,
            synteny=opts.synteny, cmap_text=opts.cmaptext, cmap=iopts.cmap,
            genomenames=opts.genomenames, sample_number=opts.sample_number,
            minfont=opts.minfont, palette=palette, sep=(not opts.nosep),
            title=opts.title, stdpf=(not opts.nostdpf))
    image_name = opts.outfile or \
            (op.splitext(anchorfile)[0] + "." + opts.format)
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
    fig.clear()
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
%prog [anchorfile|ksfile] --qbed query.bed --sbed subject.bed
visualize the anchorfile in a dotplot. anchorfile contains two columns
indicating gene pairs, followed by an optional column (e.g. Ks value).
The option --colormap specifies the block color to highlight certain blocks in
a file. Block ids are 1-based (non-digit chars will be removed). For example, below
requests that the 7th blocks to be colored red.
rice-sigma07 sigma
rice-sigma10 tau
Before running this script it is recommended to check/install
TeX Live (http://www.tug.org/texlive/) and
Ghostscript (http://www.ghostscript.com/)
see more here: http://matplotlib.sourceforge.net/users/usetex.html
"""
import os.path as op
import sys
import logging
import string
from random import sample
from jcvi.compara.synteny import AnchorFile, batch_scan, check_beds
from jcvi.utils.cbook import seqid_parse, thousands
from jcvi.apps.base import OptionParser, need_update
from jcvi.graphics.base import plt, Rectangle, set_human_axis, savefig, \
draw_cmap, TextHandler, latex, markup, normalize_axes
class Palette (dict):
    """Map 1-based block ids to single-letter matplotlib colors.

    Each line of *palettefile* contains a block id (non-digit characters
    are stripped, e.g. "rice-sigma07" -> 7) and a category name. Distinct
    categories are assigned colors from a fixed cycle, and every block id
    is finally remapped from its category to that category's color.
    """
    def __init__(self, palettefile):
        pal = "rbcygmk"
        # BUGFIX: use a context manager; the original leaked the open
        # file handle.
        with open(palettefile) as fp:
            for row in fp:
                a, b = row.split()
                a = "".join(x for x in a if x in string.digits)
                a = int(a)
                self[a] = b
        self.categories = sorted(set(self.values()))
        self.colors = dict(zip(self.categories, pal))
        logging.debug("Color info ({0} categories) imported for {1} blocks.".\
                format(len(self.colors), len(self)))
        logging.debug(str(self.colors))
        for k, v in self.items():  # Update from categories to colors
            self[k] = self.colors[v]
def draw_box(clusters, ax, color="b"):
    """Outline every cluster of (x, y) points with a translucent yellow box."""
    for cluster in clusters:
        xs, ys = zip(*cluster)
        x0, y0 = min(xs), min(ys)
        width = max(xs) - x0
        height = max(ys) - y0
        ax.add_patch(Rectangle((x0, y0), width, height,
                               ec=color, fc='y', alpha=.5))
def plot_breaks_and_labels(fig, root, ax, gx, gy, xsize, ysize,
                           qbreaks, sbreaks, sep=True, chrlw=.1,
                           sepcolor="g", minfont=5, stdpf=True):
    """Draw chromosome boundary lines, chromosome labels, the plot frame
    and the axis labels; return the (xlim, ylim) used.

    qbreaks/sbreaks are (seqid, begin, end) spans in gene-rank coordinates
    for the x and y axes respectively. Labels smaller than *minfont* are
    suppressed.
    """
    xlim = (0, xsize)
    ylim = (ysize, 0)  # invert the y-axis
    # Tag to mark whether to plot chr name (skip small ones)
    xchr_labels, ychr_labels = [], []
    th = TextHandler(fig)
    # plot the chromosome breaks
    for (seqid, beg, end) in qbreaks:
        xsize_ratio = abs(end - beg) * .8 / xsize
        fontsize = th.select_fontsize(xsize_ratio)
        seqid = "".join(seqid_parse(seqid, stdpf=stdpf)[:2])
        xchr_labels.append((seqid, (beg + end) / 2, fontsize))
        if sep:
            ax.plot([beg, beg], ylim, "-", lw=chrlw, color=sepcolor)
    for (seqid, beg, end) in sbreaks:
        ysize_ratio = abs(end - beg) * .8 / ysize
        fontsize = th.select_fontsize(ysize_ratio)
        seqid = "".join(seqid_parse(seqid, stdpf=stdpf)[:2])
        ychr_labels.append((seqid, (beg + end) / 2, fontsize))
        if sep:
            ax.plot(xlim, [beg, beg], "-", lw=chrlw, color=sepcolor)
    # plot the chromosome labels (positions mapped to the [.1, .9] canvas band)
    for label, pos, fontsize in xchr_labels:
        pos = .1 + pos * .8 / xsize
        if fontsize >= minfont:
            root.text(pos, .91, latex(label), size=fontsize,
                ha="center", va="bottom", rotation=45, color="grey")
    # remember y labels are inverted
    for label, pos, fontsize in ychr_labels:
        pos = .9 - pos * .8 / ysize
        if fontsize >= minfont:
            root.text(.91, pos, latex(label), size=fontsize,
                va="center", color="grey")
    # Plot the frame
    ax.plot(xlim, [0, 0], "-", lw=chrlw, color=sepcolor)
    ax.plot(xlim, [ysize, ysize], "-", lw=chrlw, color=sepcolor)
    ax.plot([0, 0], ylim, "-", lw=chrlw, color=sepcolor)
    ax.plot([xsize, xsize], ylim, "-", lw=chrlw, color=sepcolor)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_xlabel(gx, size=16)
    ax.set_ylabel(gy, size=16)
    # beautify the numeric axis
    for tick in ax.get_xticklines() + ax.get_yticklines():
        tick.set_visible(False)
    set_human_axis(ax)
    plt.setp(ax.get_xticklabels() + ax.get_yticklabels(),
             color='gray', size=10)
    return xlim, ylim
def downsample(data, sample_number=10000):
    """Downsample *data* in place to at most *sample_number* points.

    Returns the ORIGINAL number of pairs (before sampling) so that callers
    can still report the true total.
    """
    npairs = len(data)
    # Only show random subset
    if npairs > sample_number:
        logging.debug("Showing a random subset of {0} data points (total {1}) " \
                "for clarity.".format(sample_number, npairs))
        # BUGFIX: the original did `data = sample(...)`, which only rebound
        # the local name -- the caller's list was never downsampled. Slice
        # assignment mutates the caller's list as intended.
        data[:] = sample(data, sample_number)
    return npairs
def dotplot(anchorfile, qbed, sbed, fig, root, ax, vmin=0, vmax=1,
        is_self=False, synteny=False, cmap_text=None, cmap="copper",
        genomenames=None, sample_number=10000, minfont=5, palette=None,
        chrlw=.1, title=None, sep=True, sepcolor="g", stdpf=True):
    """Scatter-plot anchor gene pairs from *anchorfile* on gene-rank axes.

    Each anchor row holds a query and subject gene id plus an optional
    trailing numeric value (e.g. Ks) used for coloring when *cmap_text*
    is given. Rows starting with "#" separate blocks and advance the
    1-based block id looked up in *palette*. For self comparisons
    (is_self=True) every pair is mirrored across the diagonal.
    """
    fp = open(anchorfile)
    # add genome names
    if genomenames:
        gx, gy = genomenames.split("_")
    else:
        to_ax_label = lambda fname: op.basename(fname).split(".")[0]
        gx, gy = [to_ax_label(x.filename) for x in (qbed, sbed)]
    gx, gy = markup(gx), markup(gy)
    qorder = qbed.order
    sorder = sbed.order
    data = []
    if cmap_text:
        logging.debug("Capping values within [{0:.1f}, {1:.1f}]"\
                .format(vmin, vmax))
    block_id = 0
    for row in fp:
        atoms = row.split()
        block_color = None
        if row[0] == "#":
            block_id += 1
            if palette:
                block_color = palette.get(block_id, "k")
            continue
        # first two columns are query and subject, and an optional third column
        if len(atoms) < 2:
            continue
        query, subject = atoms[:2]
        value = atoms[-1]
        if cmap_text:
            try:
                value = float(value)
            except ValueError:
                value = vmax
            if value < vmin:
                continue
            if value > vmax:
                continue
        else:
            value = 0
        # skip pairs whose genes are not present in the bed orderings
        if query not in qorder:
            continue
        if subject not in sorder:
            continue
        qi, q = qorder[query]
        si, s = sorder[subject]
        nv = value if block_color is None else block_color
        data.append((qi, si, nv))
        if is_self:  # Mirror image
            data.append((si, qi, nv))
    npairs = downsample(data, sample_number=sample_number)
    x, y, c = zip(*data)
    if palette:
        # c already holds literal color letters from the palette
        ax.scatter(x, y, c=c, edgecolors="none", s=2, lw=0)
    else:
        ax.scatter(x, y, c=c, edgecolors="none", s=2, lw=0, cmap=cmap,
                vmin=vmin, vmax=vmax)
    if synteny:
        clusters = batch_scan(data, qbed, sbed)
        draw_box(clusters, ax)
    if cmap_text:
        draw_cmap(root, cmap_text, vmin, vmax, cmap=cmap)
    xsize, ysize = len(qbed), len(sbed)
    logging.debug("xsize=%d ysize=%d" % (xsize, ysize))
    qbreaks = qbed.get_breaks()
    sbreaks = sbed.get_breaks()
    xlim, ylim = plot_breaks_and_labels(fig, root, ax, gx, gy, xsize, ysize,
                           qbreaks, sbreaks, sep=sep, chrlw=chrlw,
                           sepcolor=sepcolor, minfont=minfont, stdpf=stdpf)
    # create a diagonal to separate mirror image for self comparison
    if is_self:
        ax.plot(xlim, (0, ysize), 'm-', alpha=.5, lw=2)
    if palette:  # bottom-left has the palette, if available
        colors = palette.colors
        xstart, ystart = .1, .05
        for category, c in sorted(colors.items()):
            root.add_patch(Rectangle((xstart, ystart), .03, .02, lw=0, fc=c))
            root.text(xstart + .04, ystart, category, color=c)
            xstart += .1
    if title is None:
        title = "Inter-genomic comparison: {0} vs {1}".format(gx, gy)
        if is_self:
            title = "Intra-genomic comparison within {0}".format(gx)
            npairs /= 2  # each pair was counted twice when mirrored
        title += " ({0} gene pairs)".format(thousands(npairs))
        root.set_title(title, x=.5, y=.96, color="k")
    if title:
        logging.debug("Dot plot title: {}".format(title))
    normalize_axes(root)
def subset_bed(bed, seqids):
    """Return a copy of *bed* keeping only the features whose seqid is in
    *seqids* (same container type as *bed*)."""
    from copy import deepcopy
    newbed = deepcopy(bed)
    del newbed[:]
    newbed.extend(feat for feat in bed if feat.seqid in seqids)
    return newbed
def dotplot_main(args):
    """Entry point: parse CLI options, load the anchor/bed files (converting
    a .ks file to anchors if needed) and render the dot plot to an image."""
    p = OptionParser(__doc__)
    p.set_beds()
    p.add_option("--synteny", default=False, action="store_true",
                 help="Run a fast synteny scan and display blocks [default: %default]")
    p.add_option("--cmaptext", help="Draw colormap box on the bottom-left corner")
    p.add_option("--vmin", dest="vmin", type="float", default=0,
                 help="Minimum value in the colormap [default: %default]")
    p.add_option("--vmax", dest="vmax", type="float", default=2,
                 help="Maximum value in the colormap [default: %default]")
    p.add_option("--genomenames", type="string", default=None,
                 help="genome names for labeling axes in the form of qname_sname, " \
                 "eg. \"Vitis vinifera_Oryza sativa\"")
    p.add_option("--nmax", dest="sample_number", type="int", default=10000,
                 help="Maximum number of data points to plot [default: %default]")
    p.add_option("--minfont", type="int", default=4,
                 help="Do not render labels with size smaller than")
    p.add_option("--colormap",
                 help="Two column file, block id to color mapping [default: %default]")
    p.add_option("--nosort", default=False, action="store_true",
                 help="Do not sort the seqids along the axes")
    p.add_option("--nosep", default=False, action="store_true",
                 help="Do not add contig lines")
    p.add_option("--nostdpf", default=False, action="store_true",
                 help="Do not standardize contig names")
    p.add_option("--skipempty", default=False, action="store_true",
                 help="Skip seqids that do not have matches")
    p.add_option("--title", help="Title of the dot plot")
    p.set_outfile(outfile=None)
    opts, args, iopts = p.set_image_options(args, figsize="8x8",
                                            style="dark", dpi=90, cmap="copper")
    if len(args) != 1:
        sys.exit(not p.print_help())
    palette = opts.colormap
    if palette:
        palette = Palette(palette)
    anchorfile, = args
    cmaptext = opts.cmaptext
    # a .ks input is first converted to an .anchors file with Ks values
    if anchorfile.endswith(".ks"):
        from jcvi.apps.ks import KsFile
        logging.debug("Anchors contain Ks values")
        cmaptext = cmaptext or "*Ks* values"
        anchorksfile = anchorfile + ".anchors"
        if need_update(anchorfile, anchorksfile):
            ksfile = KsFile(anchorfile)
            ksfile.print_to_anchors(anchorksfile)
        anchorfile = anchorksfile
    qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts,
                                                    sorted=(not opts.nosort))
    if opts.skipempty:
        # restrict the axes to seqids that actually carry anchor pairs
        ac = AnchorFile(anchorfile)
        if is_self:
            qseqids = sseqids = set()
        else:
            qseqids, sseqids = set(), set()
        for pair in ac.iter_pairs():
            q, s = pair[:2]
            qi, q = qorder[q]
            si, s = sorder[s]
            qseqids.add(q.seqid)
            sseqids.add(s.seqid)
        if is_self:
            qbed = sbed = subset_bed(qbed, qseqids)
        else:
            qbed = subset_bed(qbed, qseqids)
            sbed = subset_bed(sbed, sseqids)
    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])  # the whole canvas
    ax = fig.add_axes([.1, .1, .8, .8])  # the dot plot
    dotplot(anchorfile, qbed, sbed, fig, root, ax,
            vmin=opts.vmin, vmax=opts.vmax, is_self=is_self,
            synteny=opts.synteny, cmap_text=opts.cmaptext, cmap=iopts.cmap,
            genomenames=opts.genomenames, sample_number=opts.sample_number,
            minfont=opts.minfont, palette=palette, sep=(not opts.nosep),
            title=opts.title, stdpf=(not opts.nostdpf))
    image_name = opts.outfile or \
            (op.splitext(anchorfile)[0] + "." + opts.format)
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
    fig.clear()
dotplot_main(sys.argv[1:])
|
en
| 0.592183
|
#!/usr/bin/env python # -*- coding: UTF-8 -*- %prog [anchorfile|ksfile] --qbed query.bed --sbed subject.bed visualize the anchorfile in a dotplot. anchorfile contains two columns indicating gene pairs, followed by an optional column (e.g. Ks value). The option --colormap specifies the block color to highlight certain blocks in a file. Block ids are 1-based (non-digit chars will be removed). For example, below requests that the 7th blocks to be colored red. rice-sigma07 sigma rice-sigma10 tau Before running this script it is recommended to check/install TeX Live (http://www.tug.org/texlive/) and Ghostscript (http://www.ghostscript.com/) see more here: http://matplotlib.sourceforge.net/users/usetex.html # Update from categories to colors # invert the y-axis # Tag to mark whether to plot chr name (skip small ones) # plot the chromosome breaks # plot the chromosome labels # remember y labels are inverted # Plot the frame # beautify the numeric axis # Only show random subset # add genome names # first two columns are query and subject, and an optional third column # Mirror image # create a diagonal to separate mirror image for self comparison # bottom-left has the palette, if available # the whole canvas # the dot plot
| 2.80923
| 3
|
core.py
|
gizemiskender/MalwareDetector
| 43
|
6626991
|
from androguard.core.bytecodes.dvm import DalvikVMFormat
from androguard.core.analysis.analysis import VMAnalysis
from androguard.decompiler.decompiler import DecompilerDAD
from androguard.core.bytecodes.apk import APK
from androguard.core.analysis import analysis
from androguard.core.bytecodes import dvm
from constants import SPECIAL_STRINGS, DB_REGEX, API_CALLS, PERMISSIONS
import math
import hashlib
def extract_features(file_path):
    """Statically analyze the APK at *file_path* with androguard.

    Returns a dict of manifest metadata, raw artifacts (strings, class /
    method / field names) and binary feature vectors (api_calls,
    permissions, special_strings, others), or None if the file cannot be
    parsed as an APK/DEX.
    """
    result = {}
    try:
        a = APK(file_path)
        d = DalvikVMFormat(a.get_dex())
        dx = VMAnalysis(d)
        vm = dvm.DalvikVMFormat(a.get_dex())
        vmx = analysis.uVMAnalysis(vm)
        d.set_vmanalysis(dx)
        d.set_decompiler(DecompilerDAD(d, dx))
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; any androguard parse failure
        # still yields None.
        return None
    # manifest / package metadata
    result['android_version_code'] = a.get_androidversion_code()
    result['android_version_name'] = a.get_androidversion_name()
    result['max_sdk'] = a.get_max_sdk_version()
    result['min_sdk'] = a.get_min_sdk_version()
    result['libraries'] = a.get_libraries()
    result['filename'] = a.get_filename()
    result['target_sdk'] = a.get_target_sdk_version()
    result['md5'] = hashlib.md5(a.get_raw()).hexdigest()
    result['sha256'] = hashlib.sha256(a.get_raw()).hexdigest()
    result['permissions'] = a.get_permissions()
    result['activities'] = a.get_activities()
    result['providers'] = a.get_providers()
    result['services'] = a.get_services()
    result['strings'] = d.get_strings()
    result['class_names'] = [c.get_name() for c in d.get_classes()]
    result['method_names'] = [m.get_name() for m in d.get_methods()]
    result['field_names'] = [f.get_name() for f in d.get_fields()]
    # boolean static-analysis flags (stored as 0/1)
    result['is_native_code'] = 1 if analysis.is_native_code(dx) else 0
    result['is_obfuscation'] = 1 if analysis.is_ascii_obfuscation(d) else 0
    result['is_crypto_code'] = 1 if analysis.is_crypto_code(dx) else 0
    result['is_dyn_code'] = 1 if analysis.is_dyn_code(dx) else 0
    result['is_reflection_code'] = 1 if analysis.is_reflection_code(vmx) else 0
    result['is_database'] = 1 if d.get_regex_strings(DB_REGEX) else 0
    # entropy over all identifier names (class + method + field)
    s_list = []
    s_list.extend(result['class_names'])
    s_list.extend(result['method_names'])
    s_list.extend(result['field_names'])
    result['entropy_rate'] = entropy_rate(s_list)
    # binary feature vectors aligned with the constant lists
    result['feature_vectors'] = {}
    result['feature_vectors']['api_calls'] = []
    for call in API_CALLS:
        status = 1 if dx.tainted_packages.search_methods(".", call, ".") else 0
        result['feature_vectors']['api_calls'].append(status)
    result['feature_vectors']['permissions'] = []
    for permission in PERMISSIONS:
        status = 1 if permission in result['permissions'] else 0
        result['feature_vectors']['permissions'].append(status)
    result['feature_vectors']['special_strings'] = []
    for word in SPECIAL_STRINGS:
        status = 1 if d.get_regex_strings(word) else 0
        result['feature_vectors']['special_strings'].append(status)
    result['feature_vectors']['others'] = [
        result['is_reflection_code'],
        result['is_crypto_code'],
        result['is_native_code'],
        result['is_obfuscation'],
        result['is_database'],
        result['is_dyn_code']
    ]
    return result
def entropy_rate(data):
    """Return a normalized entropy score for a list of identifier strings.

    The Shannon entropy of the character distribution is compared against
    the "ideal" uniform entropy for len(data) symbols and the relative
    gap is returned, rounded to 2 decimals.

    NOTE(review): each loop iteration overwrites ``entropy``, so only the
    LAST string in *data* contributes -- presumably an average over all
    strings was intended; confirm before changing the semantics.
    """
    if not data:
        return 0.0  # BUGFIX: empty input previously raised (unbound name / div by zero)
    entropy = 0.0
    for s in data:
        if not s:
            # empty string: no characters, zero entropy (avoids div by zero)
            entropy = 0.0
            continue
        prob = [float(s.count(c)) / len(s) for c in dict.fromkeys(list(s))]
        entropy = - sum([p * math.log(p) / math.log(2.0) for p in prob])
    p = 1.0 / len(data)
    idealize = -1.0 * len(data) * p * math.log(p) / math.log(2.0)
    if idealize == 0.0:
        return 0.0  # BUGFIX: single-element data gave log(1) == 0 -> ZeroDivisionError
    return round((abs(idealize) - entropy) / idealize, 2)
def create_vector_multiple(data):
    """Build (feature_vectors, target_labels) for a list of APK records.

    Each record contributes its permissions + api_calls + special_strings
    binary vectors followed by three scalar features; the label is 1 for
    malware, 0 otherwise.
    """
    feature_vector, target_vector = [], []
    for apk in data:
        fv = apk['feature_vectors']
        vec = fv['permissions'] + fv['api_calls'] + fv['special_strings']
        # scalar features appended after the binary vectors
        vec.append(int(apk['entropy_rate']))
        vec.append(int(apk['is_crypto_code']))
        vec.append(int(apk['is_database']))
        feature_vector.append(vec)
        target_vector.append(1 if apk['data_type'] == 'malware' else 0)
    return feature_vector, target_vector
def create_vector_single(apk):
    """Build the flat feature vector for a single APK record (same layout
    as one row of create_vector_multiple)."""
    fv = apk['feature_vectors']
    vec = fv['permissions'] + fv['api_calls'] + fv['special_strings']
    # scalar features appended after the binary vectors
    vec.extend([int(apk['entropy_rate']),
                int(apk['is_crypto_code']),
                int(apk['is_database'])])
    return vec
|
from androguard.core.bytecodes.dvm import DalvikVMFormat
from androguard.core.analysis.analysis import VMAnalysis
from androguard.decompiler.decompiler import DecompilerDAD
from androguard.core.bytecodes.apk import APK
from androguard.core.analysis import analysis
from androguard.core.bytecodes import dvm
from constants import SPECIAL_STRINGS, DB_REGEX, API_CALLS, PERMISSIONS
import math
import hashlib
def extract_features(file_path):
    """Statically analyze the APK at *file_path* with androguard.

    Returns a dict of manifest metadata, raw artifacts (strings, class /
    method / field names) and binary feature vectors (api_calls,
    permissions, special_strings, others), or None if the file cannot be
    parsed as an APK/DEX.
    """
    result = {}
    try:
        a = APK(file_path)
        d = DalvikVMFormat(a.get_dex())
        dx = VMAnalysis(d)
        vm = dvm.DalvikVMFormat(a.get_dex())
        vmx = analysis.uVMAnalysis(vm)
        d.set_vmanalysis(dx)
        d.set_decompiler(DecompilerDAD(d, dx))
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; any androguard parse failure
        # still yields None.
        return None
    # manifest / package metadata
    result['android_version_code'] = a.get_androidversion_code()
    result['android_version_name'] = a.get_androidversion_name()
    result['max_sdk'] = a.get_max_sdk_version()
    result['min_sdk'] = a.get_min_sdk_version()
    result['libraries'] = a.get_libraries()
    result['filename'] = a.get_filename()
    result['target_sdk'] = a.get_target_sdk_version()
    result['md5'] = hashlib.md5(a.get_raw()).hexdigest()
    result['sha256'] = hashlib.sha256(a.get_raw()).hexdigest()
    result['permissions'] = a.get_permissions()
    result['activities'] = a.get_activities()
    result['providers'] = a.get_providers()
    result['services'] = a.get_services()
    result['strings'] = d.get_strings()
    result['class_names'] = [c.get_name() for c in d.get_classes()]
    result['method_names'] = [m.get_name() for m in d.get_methods()]
    result['field_names'] = [f.get_name() for f in d.get_fields()]
    # boolean static-analysis flags (stored as 0/1)
    result['is_native_code'] = 1 if analysis.is_native_code(dx) else 0
    result['is_obfuscation'] = 1 if analysis.is_ascii_obfuscation(d) else 0
    result['is_crypto_code'] = 1 if analysis.is_crypto_code(dx) else 0
    result['is_dyn_code'] = 1 if analysis.is_dyn_code(dx) else 0
    result['is_reflection_code'] = 1 if analysis.is_reflection_code(vmx) else 0
    result['is_database'] = 1 if d.get_regex_strings(DB_REGEX) else 0
    # entropy over all identifier names (class + method + field)
    s_list = []
    s_list.extend(result['class_names'])
    s_list.extend(result['method_names'])
    s_list.extend(result['field_names'])
    result['entropy_rate'] = entropy_rate(s_list)
    # binary feature vectors aligned with the constant lists
    result['feature_vectors'] = {}
    result['feature_vectors']['api_calls'] = []
    for call in API_CALLS:
        status = 1 if dx.tainted_packages.search_methods(".", call, ".") else 0
        result['feature_vectors']['api_calls'].append(status)
    result['feature_vectors']['permissions'] = []
    for permission in PERMISSIONS:
        status = 1 if permission in result['permissions'] else 0
        result['feature_vectors']['permissions'].append(status)
    result['feature_vectors']['special_strings'] = []
    for word in SPECIAL_STRINGS:
        status = 1 if d.get_regex_strings(word) else 0
        result['feature_vectors']['special_strings'].append(status)
    result['feature_vectors']['others'] = [
        result['is_reflection_code'],
        result['is_crypto_code'],
        result['is_native_code'],
        result['is_obfuscation'],
        result['is_database'],
        result['is_dyn_code']
    ]
    return result
def entropy_rate(data):
    """Return a normalized entropy score for a list of identifier strings.

    The Shannon entropy of the character distribution is compared against
    the "ideal" uniform entropy for len(data) symbols and the relative
    gap is returned, rounded to 2 decimals.

    NOTE(review): each loop iteration overwrites ``entropy``, so only the
    LAST string in *data* contributes -- presumably an average over all
    strings was intended; confirm before changing the semantics.
    """
    if not data:
        return 0.0  # BUGFIX: empty input previously raised (unbound name / div by zero)
    entropy = 0.0
    for s in data:
        if not s:
            # empty string: no characters, zero entropy (avoids div by zero)
            entropy = 0.0
            continue
        prob = [float(s.count(c)) / len(s) for c in dict.fromkeys(list(s))]
        entropy = - sum([p * math.log(p) / math.log(2.0) for p in prob])
    p = 1.0 / len(data)
    idealize = -1.0 * len(data) * p * math.log(p) / math.log(2.0)
    if idealize == 0.0:
        return 0.0  # BUGFIX: single-element data gave log(1) == 0 -> ZeroDivisionError
    return round((abs(idealize) - entropy) / idealize, 2)
def create_vector_multiple(data):
    """Build parallel (features, targets) lists for a collection of APK records.

    Each record yields one flat numeric row -- permission bits, API-call bits,
    special-string bits, then the integer entropy rate, crypto-code flag and
    database flag -- plus one binary label (1 for 'malware', 0 otherwise).
    """
    feature_vector, target_vector = [], []
    for record in data:
        bits = record['feature_vectors']
        row = []
        row += bits['permissions']
        row += bits['api_calls']
        row += bits['special_strings']
        # row += bits['others']  # kept disabled, as in the original
        row += [int(record['entropy_rate']),
                int(record['is_crypto_code']),
                int(record['is_database'])]
        feature_vector.append(row)
        target_vector.append(1 if record['data_type'] == 'malware' else 0)
    return feature_vector, target_vector
def create_vector_single(apk):
    """Flatten one APK record into a single numeric feature row.

    Layout matches create_vector_multiple(): permission bits, API-call bits,
    special-string bits, then int entropy rate, crypto-code flag and
    database flag.
    """
    bits = apk['feature_vectors']
    return (
        list(bits['permissions'])
        + list(bits['api_calls'])
        + list(bits['special_strings'])
        # + list(bits['others'])  # kept disabled, as in the original
        + [int(apk['entropy_rate']),
           int(apk['is_crypto_code']),
           int(apk['is_database'])]
    )
|
en
| 0.420202
|
# apk_vector.extend(apk['feature_vectors']['others']) # feature_vector.extend(apk['feature_vectors']['others'])
| 1.999262
| 2
|
tutorials/12_NAS_LSTM/dh_project/dh_project/lstm_search/search_space.py
|
sjiang87/tutorials
| 0
|
6626992
|
<reponame>sjiang87/tutorials
import collections
import tensorflow as tf
from deephyper.nas.space import KSearchSpace, AutoKSearchSpace
from deephyper.nas.space.node import ConstantNode, VariableNode
from deephyper.nas.space.op.basic import Tensor
from deephyper.nas.space.op.connect import Connect
from deephyper.nas.space.op.merge import AddByProjecting, AddByPadding, Concatenate
from deephyper.nas.space.op.op1d import Dense, Identity
def add_dense_to_(node):
    """Populate *node* with an Identity op plus a grid of Dense ops.

    The grid covers units in {16, 32, ..., 96} crossed with four activations
    (linear, relu, tanh, sigmoid).  Identity lets the search skip the layer.
    """
    node.add_op(Identity())  # "no layer" remains a valid choice for the search
    unit_choices = range(16, 97, 16)  # 16, 32, 48, 64, 80, 96
    act_choices = (None, tf.nn.relu, tf.nn.tanh, tf.nn.sigmoid)
    for n_units in unit_choices:
        for act in act_choices:
            node.add_op(Dense(units=n_units, activation=act))
def add_lstm_seq_(node):
    """Attach Identity plus sequence-returning LSTM ops (16..96 units) to *node*."""
    node.add_op(Identity())  # "skip this layer" option for the search
    for n_units in (16, 32, 48, 64, 80, 96):
        node.add_op(tf.keras.layers.LSTM(units=n_units, return_sequences=True))
def add_lstm_oplayer_(node, units):
    """Fix *node* to a single sequence-returning LSTM with *units* units."""
    lstm = tf.keras.layers.LSTM(units=units, return_sequences=True)
    node.set_op(lstm)
def add_lstm_(node):
    """Attach Identity plus final (non-sequence) LSTM ops (16..96 units) to *node*."""
    node.add_op(Identity())  # "skip this layer" option for the search
    for n_units in (16, 32, 48, 64, 80, 96):
        node.add_op(tf.keras.layers.LSTM(units=n_units, return_sequences=False))
def create_search_space(input_shape=(8,5,),
                        output_shape=(8,5,),
                        num_layers=10,
                        *args, **kwargs):
    """Build a KSearchSpace of stacked variable LSTM cells with skip connections.

    Each of the *num_layers* cells is a VariableNode offering several
    sequence-returning LSTM sizes (or Identity), merged via AddByProjecting
    with up to two optional skip connections from previous anchor points.
    A final constant LSTM(5) layer produces the 5-feature output.

    :param input_shape: shape of the network input (timesteps, features).
    :param output_shape: shape of the network output.
    :param num_layers: number of variable LSTM cells to stack.
    :return: the populated KSearchSpace.
    """
    arch = KSearchSpace(input_shape, output_shape, regression=True)
    source = prev_input = arch.input_nodes[0]

    # look over skip connections within a range of the 2 previous nodes
    anchor_points = collections.deque([source], maxlen=2)

    for _ in range(num_layers):
        vnode = VariableNode()
        add_lstm_seq_(vnode)
        arch.connect(prev_input, vnode)

        # * Cell output
        cell_output = vnode

        cmerge = ConstantNode()
        cmerge.set_op(AddByProjecting(arch, [cell_output], activation='relu'))
        # cmerge.set_op(Concatenate(arch, [cell_output]))

        # Each anchor may optionally feed the merge node: the search chooses
        # between an empty Tensor (no connection) and a Connect op.
        for anchor in anchor_points:
            skipco = VariableNode()
            skipco.add_op(Tensor([]))
            skipco.add_op(Connect(arch, anchor))
            arch.connect(skipco, cmerge)

        # ! for next iter
        prev_input = cmerge
        anchor_points.append(prev_input)
        # prev_input = cell_output

    # Constant output layer: LSTM with 5 units maps back to the feature size.
    cnode = ConstantNode()
    add_lstm_oplayer_(cnode,5)
    arch.connect(prev_input,cnode)

    return arch
def test_create_search_space():
    """Generate a random neural network from the search_space definition.

    Seeds both Python's and TensorFlow's RNGs so the sampled architecture is
    reproducible, then materializes and summarizes the resulting Keras model.

    Fixed: removed the unused ``plot_model`` import, the redundant local
    ``import tensorflow`` (already imported at module level) and the
    confusing ``import random`` followed by ``from random import random``
    name shadowing -- the sampled sequence is unchanged.
    """
    import random

    random.seed(10)
    tf.random.set_seed(10)

    search_space = create_search_space(num_layers=5)
    # One uniform sample in [0, 1) per decision variable of the search space.
    ops = [random.random() for _ in range(search_space.num_nodes)]
    search_space.set_ops(ops)

    model = search_space.create_model()
    model.summary()

    print(f'This search_space needs {len(ops)} choices to generate a neural network.')
if __name__ == '__main__':
    # Smoke-test: build and summarize one randomly sampled architecture.
    test_create_search_space()
|
import collections
import tensorflow as tf
from deephyper.nas.space import KSearchSpace, AutoKSearchSpace
from deephyper.nas.space.node import ConstantNode, VariableNode
from deephyper.nas.space.op.basic import Tensor
from deephyper.nas.space.op.connect import Connect
from deephyper.nas.space.op.merge import AddByProjecting, AddByPadding, Concatenate
from deephyper.nas.space.op.op1d import Dense, Identity
def add_dense_to_(node):
node.add_op(Identity()) # we do not want to create a layer in this case
activations = [None, tf.nn.relu, tf.nn.tanh, tf.nn.sigmoid]
for units in range(16, 97, 16):
for activation in activations:
node.add_op(Dense(units=units, activation=activation))
def add_lstm_seq_(node):
node.add_op(Identity()) # we do not want to create a layer in this case
#activations = [None, tf.nn.relu, tf.nn.tanh, tf.nn.sigmoid]
for units in range(16, 97, 16):
node.add_op(tf.keras.layers.LSTM(units=units, return_sequences=True))
def add_lstm_oplayer_(node,units):
node.set_op(tf.keras.layers.LSTM(units=units, return_sequences=True))
def add_lstm_(node):
node.add_op(Identity()) # we do not want to create a layer in this case
#activations = [None, tf.nn.relu, tf.nn.tanh, tf.nn.sigmoid]
for units in range(16, 97, 16):
node.add_op(tf.keras.layers.LSTM(units=units, return_sequences=False))
def create_search_space(input_shape=(8,5,),
output_shape=(8,5,),
num_layers=10,
*args, **kwargs):
arch = KSearchSpace(input_shape, output_shape, regression=True)
source = prev_input = arch.input_nodes[0]
# look over skip connections within a range of the 2 previous nodes
anchor_points = collections.deque([source], maxlen=2)
for _ in range(num_layers):
vnode = VariableNode()
add_lstm_seq_(vnode)
arch.connect(prev_input, vnode)
# * Cell output
cell_output = vnode
cmerge = ConstantNode()
cmerge.set_op(AddByProjecting(arch, [cell_output], activation='relu'))
# cmerge.set_op(Concatenate(arch, [cell_output]))
for anchor in anchor_points:
skipco = VariableNode()
skipco.add_op(Tensor([]))
skipco.add_op(Connect(arch, anchor))
arch.connect(skipco, cmerge)
# ! for next iter
prev_input = cmerge
anchor_points.append(prev_input)
# prev_input = cell_output
cnode = ConstantNode()
add_lstm_oplayer_(cnode,5)
arch.connect(prev_input,cnode)
return arch
def test_create_search_space():
"""Generate a random neural network from the search_space definition.
"""
import random
random.seed(10)
from random import random
from tensorflow.keras.utils import plot_model
import tensorflow as tf
tf.random.set_seed(10)
search_space = create_search_space(num_layers=5)
ops = [random() for _ in range(search_space.num_nodes)]
search_space.set_ops(ops)
model = search_space.create_model()
model.summary()
print(f'This search_space needs {len(ops)} choices to generate a neural network.')
if __name__ == '__main__':
test_create_search_space()
|
en
| 0.595036
|
# we do not want to create a layer in this case # we do not want to create a layer in this case #activations = [None, tf.nn.relu, tf.nn.tanh, tf.nn.sigmoid] # we do not want to create a layer in this case #activations = [None, tf.nn.relu, tf.nn.tanh, tf.nn.sigmoid] # look over skip connections within a range of the 2 previous nodes # * Cell output # cmerge.set_op(Concatenate(arch, [cell_output])) # ! for next iter # prev_input = cell_output Generate a random neural network from the search_space definition.
| 2.487682
| 2
|
hexmap/render.py
|
yogurt-company/gaia_ai
| 0
|
6626993
|
from abc import ABCMeta, abstractmethod
import pygame
import math
from hexmap.map import Grid, Map, MapUnit
import sys
SQRT3 = math.sqrt( 3 )
class Render( pygame.Surface, metaclass=ABCMeta ):
    """Abstract base Surface for hex-map renderers.

    The surface is sized from the map's rows/cols and the hex *radius*;
    concrete subclasses implement draw() to paint themselves.
    """

    def __init__( self, map, radius=24, *args, **keywords ):
        """Store the map and radius, then size the underlying pygame Surface."""
        self.map = map
        self.radius = radius

        # Colors for the map
        self.GRID_COLOR = pygame.Color( 50, 50, 50 )

        super().__init__( ( self.width, self.height ), *args, **keywords )

        # Canonical outline of one hex cell, relative to its bounding box.
        self.cell = [( .5 * self.radius, 0 ),
                     ( 1.5 * self.radius, 0 ),
                     ( 2 * self.radius, SQRT3 / 2 * self.radius ),
                     ( 1.5 * self.radius, SQRT3 * self.radius ),
                     ( .5 * self.radius, SQRT3 * self.radius ),
                     ( 0, SQRT3 / 2 * self.radius )
                     ]

    @property
    def width( self ):
        """Pixel width of the full map surface."""
        return self.map.cols * self.radius * 1.5 + self.radius / 2.0

    @property
    def height( self ):
        """Pixel height of the full map surface."""
        return ( self.map.rows + .5 ) * self.radius * SQRT3 + 1

    def get_surface( self, grid_size ):
        """
        Returns a subsurface corresponding to the surface, hopefully with
        trim_cell wrapped around the blit method.
        """
        ( row, col ) = grid_size

        width = 2 * self.radius
        height = self.radius * SQRT3

        # Odd columns sit half a cell lower than even ones.
        top = ( row - math.ceil( col / 2.0 ) ) * height + ( height / 2 if col % 2 == 1 else 0 )
        left = 1.5 * self.radius * col

        return self.subsurface( pygame.Rect( left, top, width, height ) )

    # Draw methods
    @abstractmethod
    def draw( self ):
        """
        An abstract base method for various render objects to call to paint
        themselves.  If called via super, it fills the screen with the
        colorkey; if the colorkey is not set, it sets the colorkey to magenta
        (#FF00FF) and fills this surface.
        """
        color = self.get_colorkey()
        if not color:
            magenta = pygame.Color( 255, 0, 255 )
            self.set_colorkey( magenta )
            color = magenta
        self.fill( color )

    # Identify cell
    def get_cell( self, grid_size ):
        """
        Identify the cell clicked in terms of row and column from the pixel
        position *grid_size*; returns None outside the map.
        """
        ( x, y ) = grid_size

        # Coarse row/column from the bounding rectangles.
        row = math.floor( y / ( SQRT3 * self.radius ) )
        col = math.floor( x / ( 1.5 * self.radius ) )

        # Determine if cell outside cell centered in this grid.
        x = x - col * 1.5 * self.radius
        y = y - row * SQRT3 * self.radius

        # Transform row to match our hex coordinates, approximately
        row = row + math.floor( ( col + 1 ) / 2.0 )

        # Correct row and col for boundaries of a hex grid
        if col % 2 == 0:
            if y < SQRT3 * self.radius / 2 and x < .5 * self.radius and \
               y < SQRT3 * self.radius / 2 - x:
                row, col = row - 1, col - 1
            elif y > SQRT3 * self.radius / 2 and x < .5 * self.radius and \
                 y > SQRT3 * self.radius / 2 + x:
                row, col = row, col - 1
        else:
            if x < .5 * self.radius and abs( y - SQRT3 * self.radius / 2 ) < SQRT3 * self.radius / 2 - x:
                row, col = row - 1 , col - 1
            elif y < SQRT3 * self.radius / 2:
                row, col = row - 1, col

        return ( row, col ) if self.map.valid_cell( ( row, col ) ) else None

    def fit_window( self, window ):
        """Return the (top, left) offset that keeps this surface inside *window*.

        Fixed: the original read ``map.width`` -- ``map`` here resolves to the
        *builtin* function, which has no ``width`` and raised AttributeError;
        it clearly meant this surface's own width.
        """
        top = max( window.get_height() - self.height, 0 )
        left = max( window.get_width() - self.width, 0 )
        return ( top, left )
class RenderUnits( Render ):
    """
    A premade render object that will automatically draw the Units from the map
    """

    def __init__( self, map, *args, **keywords ):
        super().__init__( map, *args, **keywords )
        # Lazily give the map a unit grid if it does not have one yet.
        if not hasattr( self.map, 'units' ):
            self.map.units = Grid()

    def draw( self ):
        """
        Calls unit.paint for all units on self.map
        """
        super().draw()
        # Paint each unit onto the subsurface of the cell it occupies.
        for cell, occupant in list( self.map.units.items() ):
            occupant.paint( self.get_surface( cell ) )
class RenderGrid( Render ):
    """Renderer that draws the hex grid outline for every cell of the map."""

    def draw( self ):
        """
        Draws a hex grid, based on the map object, onto this Surface
        """
        super().draw()
        for col in range( self.map.cols ):
            # Odd columns are shifted down by half a cell height.
            y_shift = self.radius * SQRT3 / 2 if col % 2 else 0
            x_origin = 1.5 * col * self.radius
            for row in range( self.map.rows ):
                y_origin = y_shift + SQRT3 * row * self.radius
                # Translate the canonical cell outline to this cell's origin.
                outline = [( px + x_origin, py + y_origin ) for ( px, py ) in self.cell]
                # Width 1 draws just the hex border.
                pygame.draw.polygon( self, self.GRID_COLOR, outline, 1 )
class RenderFog( Render ):
    """Renderer for the fog-of-war overlay (per-cell alpha-blended polygons)."""

    OBSCURED = pygame.Color( 00, 00, 00, 255 )
    SEEN = pygame.Color( 00, 00, 00, 100 )
    VISIBLE = pygame.Color( 00, 00, 00, 0 )
    HIGHLIGHTED = pygame.Color( 00, 255, 00, 110 )

    def __init__( self, map, *args, **keywords ):
        # SRCALPHA so per-cell alpha values actually blend over the scene.
        super().__init__( map, *args, flags=pygame.SRCALPHA, **keywords )
        if not hasattr( self.map, 'fog' ):
            self.map.fog = Grid( default=self.OBSCURED )

    def draw( self ):
        """Fill with OBSCURED, then repaint each cell with its fog color."""
        # Some constants for the math
        height = self.radius * SQRT3
        width = 1.5 * self.radius
        offset = height / 2

        self.fill( self.OBSCURED )

        for cell in self.map.cells():
            row, col = cell
            # Fixed: dropped the original's dead `surface = self.get_cell(cell)`
            # call -- its result was never used, and it passed (row, col) where
            # get_cell() expects pixel coordinates anyway.

            # Calculate the position of the cell
            top = row * height - offset * col
            left = width * col

            # Translate the canonical cell outline to this position.
            points = [( x + left, y + top ) for ( x, y ) in self.cell]
            # Width 0 draws a filled polygon.
            pygame.draw.polygon( self, self.map.fog[ cell ], points, 0 )
def trim_cell( surface ):
    # Placeholder: intended to mask a cell subsurface to its hexagonal
    # outline before blitting (see get_surface's docstring); not implemented.
    pass
if __name__ == '__main__':

    # Demo: a minimal unit that paints itself as a grey circle in its cell.
    class Unit( MapUnit ):
        color = pygame.Color( 200, 200, 200 )

        def paint( self, surface ):
            # Circle centered in the cell, sized relative to the cell width.
            radius = surface.get_width() / 2
            pygame.draw.circle( surface, self.color, ( radius, int( SQRT3 / 2 * radius ) ), int( radius - radius * .3 ) )

    # Build a 5x5 map with three stacked render layers: grid, units, fog.
    m = Map( ( 5, 5 ) )
    grid = RenderGrid( m, radius=32 )
    units = RenderUnits( m, radius=32 )
    fog = RenderFog( m, radius=32 )

    # Place a few demo units.  NOTE(review): (5, 3) and (5, 4) look out of
    # range for a 5x5 map -- confirm Map's coordinate bounds.
    m.units[( 0, 0 ) ] = Unit( m )
    m.units[( 3, 2 ) ] = Unit( m )
    m.units[( 5, 3 ) ] = Unit( m )
    m.units[( 5, 4 ) ] = Unit( m )

    # Reveal fog around (3, 2): SEEN in a radius of 2, VISIBLE adjacent.
    for cell in m.spread( ( 3, 2 ), radius=2 ):
        m.fog[cell] = fog.SEEN

    for cell in m.spread( ( 3, 2 ) ):
        m.fog[cell] = fog.VISIBLE

    print(( m.ascii() ))

    try:
        pygame.init()
        fpsClock = pygame.time.Clock()

        window = pygame.display.set_mode( ( 640, 480 ), 1 )
        from pygame.locals import QUIT, MOUSEBUTTONDOWN

        #Leave it running until exit
        while True:
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    sys.exit()
                if event.type == MOUSEBUTTONDOWN:
                    # Report which cell was clicked.
                    print(( units.get_cell( event.pos ) ))

            # Repaint and composite the three layers every frame.
            window.fill( pygame.Color( 'white' ) )
            grid.draw()
            units.draw()
            fog.draw()
            window.blit( grid, ( 0, 0 ) )
            window.blit( units, ( 0, 0 ) )
            window.blit( fog, ( 0, 0 ) )
            pygame.display.update()
            fpsClock.tick( 10 )
    finally:
        pygame.quit()
|
from abc import ABCMeta, abstractmethod
import pygame
import math
from hexmap.map import Grid, Map, MapUnit
import sys
SQRT3 = math.sqrt( 3 )
class Render( pygame.Surface, metaclass=ABCMeta ):
def __init__( self, map, radius=24, *args, **keywords ):
self.map = map
self.radius = radius
# Colors for the map
self.GRID_COLOR = pygame.Color( 50, 50, 50 )
super().__init__( ( self.width, self.height ), *args, **keywords )
self.cell = [( .5 * self.radius, 0 ),
( 1.5 * self.radius, 0 ),
( 2 * self.radius, SQRT3 / 2 * self.radius ),
( 1.5 * self.radius, SQRT3 * self.radius ),
( .5 * self.radius, SQRT3 * self.radius ),
( 0, SQRT3 / 2 * self.radius )
]
@property
def width( self ):
return self.map.cols * self.radius * 1.5 + self.radius / 2.0
@property
def height( self ):
return ( self.map.rows + .5 ) * self.radius * SQRT3 + 1
def get_surface( self, grid_size ):
"""
Returns a subsurface corresponding to the surface, hopefully with trim_cell wrapped around the blit method.
"""
( row, col ) = grid_size
width = 2 * self.radius
height = self.radius * SQRT3
top = ( row - math.ceil( col / 2.0 ) ) * height + ( height / 2 if col % 2 == 1 else 0 )
left = 1.5 * self.radius * col
return self.subsurface( pygame.Rect( left, top, width, height ) )
# Draw methods
@abstractmethod
def draw( self ):
"""
An abstract base method for various render objects to call to paint
themselves. If called via super, it fills the screen with the colorkey,
if the colorkey is not set, it sets the colorkey to magenta (#FF00FF)
and fills this surface.
"""
color = self.get_colorkey()
if not color:
magenta = pygame.Color( 255, 0, 255 )
self.set_colorkey( magenta )
color = magenta
self.fill( color )
# Identify cell
def get_cell( self, grid_size ):
"""
Identify the cell clicked in terms of row and column
"""
( x, y ) = grid_size
row = math.floor( y / ( SQRT3 * self.radius ) )
col = math.floor( x / ( 1.5 * self.radius ) )
# Determine if cell outside cell centered in this grid.
x = x - col * 1.5 * self.radius
y = y - row * SQRT3 * self.radius
# Transform row to match our hex coordinates, approximately
row = row + math.floor( ( col + 1 ) / 2.0 )
# Correct row and col for boundaries of a hex grid
if col % 2 == 0:
if y < SQRT3 * self.radius / 2 and x < .5 * self.radius and \
y < SQRT3 * self.radius / 2 - x:
row, col = row - 1, col - 1
elif y > SQRT3 * self.radius / 2 and x < .5 * self.radius and \
y > SQRT3 * self.radius / 2 + x:
row, col = row, col - 1
else:
if x < .5 * self.radius and abs( y - SQRT3 * self.radius / 2 ) < SQRT3 * self.radius / 2 - x:
row, col = row - 1 , col - 1
elif y < SQRT3 * self.radius / 2:
row, col = row - 1, col
return ( row, col ) if self.map.valid_cell( ( row, col ) ) else None
def fit_window( self, window ):
top = max( window.get_height() - self.height, 0 )
left = max( window.get_width() - map.width, 0 )
return ( top, left )
class RenderUnits( Render ):
"""
A premade render object that will automatically draw the Units from the map
"""
def __init__( self, map, *args, **keywords ):
super().__init__( map, *args, **keywords )
if not hasattr( self.map, 'units' ):
self.map.units = Grid()
def draw( self ):
"""
Calls unit.paint for all units on self.map
"""
super().draw()
units = self.map.units
for position, unit in list(units.items()):
surface = self.get_surface( position )
unit.paint( surface )
class RenderGrid( Render ):
def draw( self ):
"""
Draws a hex grid, based on the map object, onto this Surface
"""
super().draw()
# A point list describing a single cell, based on the radius of each hex
for col in range( self.map.cols ):
# Alternate the offset of the cells based on column
offset = self.radius * SQRT3 / 2 if col % 2 else 0
for row in range( self.map.rows ):
# Calculate the offset of the cell
top = offset + SQRT3 * row * self.radius
left = 1.5 * col * self.radius
# Create a point list containing the offset cell
points = [( x + left, y + top ) for ( x, y ) in self.cell]
# Draw the polygon onto the surface
pygame.draw.polygon( self, self.GRID_COLOR, points, 1 )
class RenderFog( Render ):
OBSCURED = pygame.Color( 00, 00, 00, 255 )
SEEN = pygame.Color( 00, 00, 00, 100 )
VISIBLE = pygame.Color( 00, 00, 00, 0 )
HIGHLIGHTED = pygame.Color( 00, 255, 00, 110 )
def __init__( self, map, *args, **keywords ):
super().__init__( map, *args, flags=pygame.SRCALPHA, **keywords )
if not hasattr( self.map, 'fog' ):
self.map.fog = Grid( default=self.OBSCURED )
def draw( self ):
#Some constants for the math
height = self.radius * SQRT3
width = 1.5 * self.radius
offset = height / 2
self.fill( self.OBSCURED )
for cell in self.map.cells():
row, col = cell
surface = self.get_cell( cell )
# Calculate the position of the cell
top = row * height - offset * col
left = width * col
#Determine the points that corresponds with
points = [( x + left, y + top ) for ( x, y ) in self.cell]
# Draw the polygon onto the surface
pygame.draw.polygon( self, self.map.fog[ cell ], points, 0 )
def trim_cell( surface ):
pass
if __name__ == '__main__':
class Unit( MapUnit ):
color = pygame.Color( 200, 200, 200 )
def paint( self, surface ):
radius = surface.get_width() / 2
pygame.draw.circle( surface, self.color, ( radius, int( SQRT3 / 2 * radius ) ), int( radius - radius * .3 ) )
m = Map( ( 5, 5 ) )
grid = RenderGrid( m, radius=32 )
units = RenderUnits( m, radius=32 )
fog = RenderFog( m, radius=32 )
m.units[( 0, 0 ) ] = Unit( m )
m.units[( 3, 2 ) ] = Unit( m )
m.units[( 5, 3 ) ] = Unit( m )
m.units[( 5, 4 ) ] = Unit( m )
for cell in m.spread( ( 3, 2 ), radius=2 ):
m.fog[cell] = fog.SEEN
for cell in m.spread( ( 3, 2 ) ):
m.fog[cell] = fog.VISIBLE
print(( m.ascii() ))
try:
pygame.init()
fpsClock = pygame.time.Clock()
window = pygame.display.set_mode( ( 640, 480 ), 1 )
from pygame.locals import QUIT, MOUSEBUTTONDOWN
#Leave it running until exit
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == MOUSEBUTTONDOWN:
print(( units.get_cell( event.pos ) ))
window.fill( pygame.Color( 'white' ) )
grid.draw()
units.draw()
fog.draw()
window.blit( grid, ( 0, 0 ) )
window.blit( units, ( 0, 0 ) )
window.blit( fog, ( 0, 0 ) )
pygame.display.update()
fpsClock.tick( 10 )
finally:
pygame.quit()
|
en
| 0.837968
|
# Colors for the map Returns a subsurface corresponding to the surface, hopefully with trim_cell wrapped around the blit method. # Draw methods An abstract base method for various render objects to call to paint themselves. If called via super, it fills the screen with the colorkey, if the colorkey is not set, it sets the colorkey to magenta (#FF00FF) and fills this surface. # Identify cell Identify the cell clicked in terms of row and column # Determine if cell outside cell centered in this grid. # Transform row to match our hex coordinates, approximately # Correct row and col for boundaries of a hex grid A premade render object that will automatically draw the Units from the map Calls unit.paint for all units on self.map Draws a hex grid, based on the map object, onto this Surface # A point list describing a single cell, based on the radius of each hex # Alternate the offset of the cells based on column # Calculate the offset of the cell # Create a point list containing the offset cell # Draw the polygon onto the surface #Some constants for the math # Calculate the position of the cell #Determine the points that corresponds with # Draw the polygon onto the surface #Leave it running until exit
| 3.552565
| 4
|
cinder/tests/unit/api/contrib/test_consistencygroups.py
|
potsmaster/cinder
| 0
|
6626994
|
<reponame>potsmaster/cinder<gh_stars>0
# Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for consistency group code.
"""
import json
from xml.dom import minidom
import mock
import webob
import cinder.consistencygroup
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import stubs
from cinder.tests.unit import utils
from cinder.volume import api as volume_api
class ConsistencyGroupsAPITestCase(test.TestCase):
"""Test Case for consistency groups API."""
    def setUp(self):
        """Create the consistency-group API handle and an authenticated context."""
        super(ConsistencyGroupsAPITestCase, self).setUp()
        self.cg_api = cinder.consistencygroup.API()
        self.ctxt = context.RequestContext('fake', 'fake', auth_token=True)
    def _create_consistencygroup(
            self,
            ctxt=None,
            name='test_consistencygroup',
            description='this is a test consistency group',
            volume_type_id='123456',
            availability_zone='az1',
            host='fakehost',
            status='creating'):
        """Create and persist a ConsistencyGroup object as a test fixture.

        All keyword defaults match the values the assertions in this class
        expect; *ctxt* falls back to the authenticated context from setUp().
        """
        ctxt = ctxt or self.ctxt
        consistencygroup = objects.ConsistencyGroup(ctxt)
        consistencygroup.user_id = 'fake'
        consistencygroup.project_id = 'fake'
        consistencygroup.availability_zone = availability_zone
        consistencygroup.name = name
        consistencygroup.description = description
        consistencygroup.volume_type_id = volume_type_id
        consistencygroup.host = host
        consistencygroup.status = status
        consistencygroup.create()
        return consistencygroup
    def test_show_consistencygroup(self):
        """GET one group returns 200 with its AZ, description, name and status."""
        consistencygroup = self._create_consistencygroup()
        req = webob.Request.blank('/v2/fake/consistencygroups/%s' %
                                  consistencygroup.id)
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)

        self.assertEqual(200, res.status_int)
        self.assertEqual('az1',
                         res_dict['consistencygroup']['availability_zone'])
        self.assertEqual('this is a test consistency group',
                         res_dict['consistencygroup']['description'])
        self.assertEqual('test_consistencygroup',
                         res_dict['consistencygroup']['name'])
        self.assertEqual('creating',
                         res_dict['consistencygroup']['status'])

        consistencygroup.destroy()
    def test_show_consistencygroup_xml_content_type(self):
        """GET one group with XML Accept returns 200 and the group's name."""
        consistencygroup = self._create_consistencygroup()
        req = webob.Request.blank('/v2/fake/consistencygroups/%s' %
                                  consistencygroup.id)
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/xml'
        req.headers['Accept'] = 'application/xml'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(200, res.status_int)
        dom = minidom.parseString(res.body)
        consistencygroups = dom.getElementsByTagName('consistencygroup')
        name = consistencygroups.item(0).getAttribute('name')
        self.assertEqual("test_consistencygroup", name.strip())
        consistencygroup.destroy()
def test_show_consistencygroup_with_consistencygroup_NotFound(self):
req = webob.Request.blank('/v2/fake/consistencygroups/9999')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
self.assertEqual(404, res_dict['itemNotFound']['code'])
self.assertEqual('ConsistencyGroup 9999 could not be found.',
res_dict['itemNotFound']['message'])
def test_list_consistencygroups_json(self):
consistencygroup1 = self._create_consistencygroup()
consistencygroup2 = self._create_consistencygroup()
consistencygroup3 = self._create_consistencygroup()
req = webob.Request.blank('/v2/fake/consistencygroups')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual(consistencygroup1.id,
res_dict['consistencygroups'][0]['id'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][0]['name'])
self.assertEqual(consistencygroup2.id,
res_dict['consistencygroups'][1]['id'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][1]['name'])
self.assertEqual(consistencygroup3.id,
res_dict['consistencygroups'][2]['id'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][2]['name'])
consistencygroup1.destroy()
consistencygroup2.destroy()
consistencygroup3.destroy()
def test_list_consistencygroups_xml(self):
consistencygroup1 = self._create_consistencygroup()
consistencygroup2 = self._create_consistencygroup()
consistencygroup3 = self._create_consistencygroup()
req = webob.Request.blank('/v2/fake/consistencygroups')
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
dom = minidom.parseString(res.body)
consistencygroup_list = dom.getElementsByTagName('consistencygroup')
self.assertEqual(consistencygroup1.id,
consistencygroup_list.item(0).getAttribute('id'))
self.assertEqual(consistencygroup2.id,
consistencygroup_list.item(1).getAttribute('id'))
self.assertEqual(consistencygroup3.id,
consistencygroup_list.item(2).getAttribute('id'))
consistencygroup3.destroy()
consistencygroup2.destroy()
consistencygroup1.destroy()
def test_list_consistencygroups_detail_json(self):
consistencygroup1 = self._create_consistencygroup()
consistencygroup2 = self._create_consistencygroup()
consistencygroup3 = self._create_consistencygroup()
req = webob.Request.blank('/v2/fake/consistencygroups/detail')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('az1',
res_dict['consistencygroups'][0]['availability_zone'])
self.assertEqual('this is a test consistency group',
res_dict['consistencygroups'][0]['description'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][0]['name'])
self.assertEqual(consistencygroup1.id,
res_dict['consistencygroups'][0]['id'])
self.assertEqual('creating',
res_dict['consistencygroups'][0]['status'])
self.assertEqual('az1',
res_dict['consistencygroups'][1]['availability_zone'])
self.assertEqual('this is a test consistency group',
res_dict['consistencygroups'][1]['description'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][1]['name'])
self.assertEqual(consistencygroup2.id,
res_dict['consistencygroups'][1]['id'])
self.assertEqual('creating',
res_dict['consistencygroups'][1]['status'])
self.assertEqual('az1',
res_dict['consistencygroups'][2]['availability_zone'])
self.assertEqual('this is a test consistency group',
res_dict['consistencygroups'][2]['description'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][2]['name'])
self.assertEqual(consistencygroup3.id,
res_dict['consistencygroups'][2]['id'])
self.assertEqual('creating',
res_dict['consistencygroups'][2]['status'])
consistencygroup1.destroy()
consistencygroup2.destroy()
consistencygroup3.destroy()
def test_list_consistencygroups_detail_xml(self):
consistencygroup1 = self._create_consistencygroup()
consistencygroup2 = self._create_consistencygroup()
consistencygroup3 = self._create_consistencygroup()
req = webob.Request.blank('/v2/fake/consistencygroups/detail')
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
dom = minidom.parseString(res.body)
consistencygroup_detail = dom.getElementsByTagName('consistencygroup')
self.assertEqual(
'az1',
consistencygroup_detail.item(0).getAttribute('availability_zone'))
self.assertEqual(
'this is a test consistency group',
consistencygroup_detail.item(0).getAttribute('description'))
self.assertEqual(
'test_consistencygroup',
consistencygroup_detail.item(0).getAttribute('name'))
self.assertEqual(
consistencygroup1.id,
consistencygroup_detail.item(0).getAttribute('id'))
self.assertEqual(
'creating',
consistencygroup_detail.item(0).getAttribute('status'))
self.assertEqual(
'az1',
consistencygroup_detail.item(1).getAttribute('availability_zone'))
self.assertEqual(
'this is a test consistency group',
consistencygroup_detail.item(1).getAttribute('description'))
self.assertEqual(
'test_consistencygroup',
consistencygroup_detail.item(1).getAttribute('name'))
self.assertEqual(
consistencygroup2.id,
consistencygroup_detail.item(1).getAttribute('id'))
self.assertEqual(
'creating',
consistencygroup_detail.item(1).getAttribute('status'))
self.assertEqual(
'az1',
consistencygroup_detail.item(2).getAttribute('availability_zone'))
self.assertEqual(
'this is a test consistency group',
consistencygroup_detail.item(2).getAttribute('description'))
self.assertEqual(
'test_consistencygroup',
consistencygroup_detail.item(2).getAttribute('name'))
self.assertEqual(
consistencygroup3.id,
consistencygroup_detail.item(2).getAttribute('id'))
self.assertEqual(
'creating',
consistencygroup_detail.item(2).getAttribute('status'))
consistencygroup3.destroy()
consistencygroup2.destroy()
consistencygroup1.destroy()
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_create_consistencygroup_json(self, mock_validate):
group_id = "1"
# Create volume type
vol_type = 'test'
db.volume_type_create(context.get_admin_context(),
{'name': vol_type, 'extra_specs': {}})
body = {"consistencygroup": {"name": "cg1",
"volume_types": vol_type,
"description":
"Consistency Group 1", }}
req = webob.Request.blank('/v2/fake/consistencygroups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(202, res.status_int)
self.assertIn('id', res_dict['consistencygroup'])
self.assertTrue(mock_validate.called)
group_id = res_dict['consistencygroup']['id']
cg = objects.ConsistencyGroup.get_by_id(context.get_admin_context(),
group_id)
cg.destroy()
def test_create_consistencygroup_with_no_body(self):
    """A POST whose body is JSON null must yield a 400 badRequest."""
    request = webob.Request.blank('/v2/fake/consistencygroups')
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.headers['Accept'] = 'application/json'
    # Deliberately send a null body instead of a consistencygroup element.
    request.body = json.dumps(None)

    response = request.get_response(fakes.wsgi_app())
    fault = json.loads(response.body)['badRequest']

    self.assertEqual(400, response.status_int)
    self.assertEqual(400, fault['code'])
    self.assertEqual("Missing required element 'consistencygroup' in "
                     "request body.",
                     fault['message'])
def test_delete_consistencygroup_available(self):
    """Force-delete an available CG; expect 202 and 'deleting' status."""
    consistencygroup = self._create_consistencygroup(status='available')
    req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
                              consistencygroup.id)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    body = {"consistencygroup": {"force": True}}
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    # Re-read the group to observe the status transition made by the API.
    consistencygroup = objects.ConsistencyGroup.get_by_id(
        self.ctxt, consistencygroup.id)
    self.assertEqual(202, res.status_int)
    self.assertEqual('deleting', consistencygroup.status)
    consistencygroup.destroy()
def test_delete_consistencygroup_with_consistencygroup_NotFound(self):
    """Deleting a nonexistent CG id returns a 404 itemNotFound fault."""
    request = webob.Request.blank('/v2/fake/consistencygroups/9999/delete')
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = json.dumps(None)

    response = request.get_response(fakes.wsgi_app())
    fault = json.loads(response.body)['itemNotFound']

    self.assertEqual(404, response.status_int)
    self.assertEqual(404, fault['code'])
    self.assertEqual('ConsistencyGroup 9999 could not be found.',
                     fault['message'])
def test_delete_consistencygroup_with_Invalidconsistencygroup(self):
    """Non-forced delete of a CG in a bad status must fail with a 400."""
    consistencygroup = self._create_consistencygroup(status='invalid')
    req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
                              consistencygroup.id)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    body = {"consistencygroup": {"force": False}}
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(400, res.status_int)
    self.assertEqual(400, res_dict['badRequest']['code'])
    msg = (_('Invalid ConsistencyGroup: Consistency group status must be '
             'available or error, but current status is: invalid'))
    self.assertEqual(msg, res_dict['badRequest']['message'])
    consistencygroup.destroy()
def test_delete_consistencygroup_no_host(self):
    """Force-deleting an errored, hostless CG removes it immediately."""
    consistencygroup = self._create_consistencygroup(
        host=None,
        status='error')
    req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
                              consistencygroup.id)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    body = {"consistencygroup": {"force": True}}
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    self.assertEqual(202, res.status_int)
    # With no host the group is marked 'deleted' directly;
    # read_deleted='yes' lets the test still fetch the row to verify.
    cg = objects.ConsistencyGroup.get_by_id(
        context.get_admin_context(read_deleted='yes'),
        consistencygroup.id)
    self.assertEqual('deleted', cg.status)
    self.assertIsNone(cg.host)
def test_create_delete_consistencygroup_update_quota(self):
    """Quota is adjusted by +1 on CG create and -1 on CG delete."""
    name = 'mycg'
    description = 'consistency group 1'
    fake_type = {'id': '1', 'name': 'fake_type'}
    self.stubs.Set(db, 'volume_types_get_by_name_or_id',
                   mock.Mock(return_value=[fake_type]))
    # Stub out the RPC cast and the quota call so only the API-layer
    # bookkeeping is exercised.
    self.stubs.Set(self.cg_api,
                   '_cast_create_consistencygroup',
                   mock.Mock())
    self.stubs.Set(self.cg_api, 'update_quota',
                   mock.Mock())
    cg = self.cg_api.create(self.ctxt, name, description,
                            fake_type['name'])
    self.cg_api.update_quota.assert_called_once_with(
        self.ctxt, cg, 1)
    self.assertEqual('creating', cg.status)
    self.assertIsNone(cg.host)
    self.cg_api.update_quota.reset_mock()
    # 'error' status makes the group eligible for deletion.
    cg.status = 'error'
    self.cg_api.delete(self.ctxt, cg)
    self.cg_api.update_quota.assert_called_once_with(
        self.ctxt, cg, -1, self.ctxt.project_id)
    cg = objects.ConsistencyGroup.get_by_id(
        context.get_admin_context(read_deleted='yes'),
        cg.id)
    self.assertEqual('deleted', cg.status)
def test_delete_consistencygroup_with_invalid_body(self):
    """A delete body missing the 'consistencygroup' element gets a 400."""
    group = self._create_consistencygroup(status='available')

    request = webob.Request.blank(
        '/v2/fake/consistencygroups/%s/delete' % group.id)
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = json.dumps({"invalid_request_element": {"force": False}})

    response = request.get_response(fakes.wsgi_app())
    self.assertEqual(400, response.status_int)
def test_delete_consistencygroup_with_invalid_force_value_in_body(self):
    """A non-boolean 'force' value in the delete body is rejected (400)."""
    group = self._create_consistencygroup(status='available')

    request = webob.Request.blank(
        '/v2/fake/consistencygroups/%s/delete' % group.id)
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = json.dumps({"consistencygroup": {"force": "abcd"}})

    response = request.get_response(fakes.wsgi_app())
    self.assertEqual(400, response.status_int)
def test_delete_consistencygroup_with_empty_force_value_in_body(self):
    """An empty-string 'force' value in the delete body is a 400."""
    group = self._create_consistencygroup(status='available')

    request = webob.Request.blank(
        '/v2/fake/consistencygroups/%s/delete' % group.id)
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = json.dumps({"consistencygroup": {"force": ""}})

    response = request.get_response(fakes.wsgi_app())
    self.assertEqual(400, response.status_int)
def test_create_consistencygroup_failed_no_volume_type(self):
    """Creating a CG without volume_types must fail with a 400."""
    name = 'cg1'
    body = {"consistencygroup": {"name": name,
                                 "description":
                                 "Consistency Group 1", }}
    req = webob.Request.blank('/v2/fake/consistencygroups')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(400, res.status_int)
    self.assertEqual(400, res_dict['badRequest']['code'])
    msg = (_('volume_types must be provided to create '
             'consistency group %s.') % name)
    self.assertEqual(msg, res_dict['badRequest']['message'])
@mock.patch(
    'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_update_consistencygroup_success(self, mock_validate):
    """Update name/description and add/remove volumes in one PUT."""
    volume_type_id = '123456'
    consistencygroup = self._create_consistencygroup(status='available',
                                                     host='test_host')
    # Two volumes start inside the group (to be removed below) ...
    remove_volume_id = utils.create_volume(
        self.ctxt,
        volume_type_id=volume_type_id,
        consistencygroup_id=consistencygroup.id)['id']
    remove_volume_id2 = utils.create_volume(
        self.ctxt,
        volume_type_id=volume_type_id,
        consistencygroup_id=consistencygroup.id)['id']
    self.assertEqual('available', consistencygroup.status)
    cg_volumes = db.volume_get_all_by_group(self.ctxt.elevated(),
                                            consistencygroup.id)
    cg_vol_ids = [cg_vol['id'] for cg_vol in cg_volumes]
    self.assertIn(remove_volume_id, cg_vol_ids)
    self.assertIn(remove_volume_id2, cg_vol_ids)
    # ... and two volumes start outside the group (to be added).
    add_volume_id = utils.create_volume(
        self.ctxt,
        volume_type_id=volume_type_id)['id']
    add_volume_id2 = utils.create_volume(
        self.ctxt,
        volume_type_id=volume_type_id)['id']
    req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
                              consistencygroup.id)
    req.method = 'PUT'
    req.headers['Content-Type'] = 'application/json'
    name = 'newcg'
    description = 'New Consistency Group Description'
    # The API accepts comma-separated lists of volume ids.
    add_volumes = add_volume_id + "," + add_volume_id2
    remove_volumes = remove_volume_id + "," + remove_volume_id2
    body = {"consistencygroup": {"name": name,
                                 "description": description,
                                 "add_volumes": add_volumes,
                                 "remove_volumes": remove_volumes, }}
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    consistencygroup = objects.ConsistencyGroup.get_by_id(
        self.ctxt, consistencygroup.id)
    self.assertEqual(202, res.status_int)
    self.assertTrue(mock_validate.called)
    self.assertEqual('updating', consistencygroup.status)
    consistencygroup.destroy()
def test_update_consistencygroup_add_volume_not_found(self):
    """Adding an unknown volume id to a CG must fail with a 400."""
    consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
                                                     status='available')
    req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
                              consistencygroup.id)
    req.method = 'PUT'
    req.headers['Content-Type'] = 'application/json'
    body = {"consistencygroup": {"name": None,
                                 "description": None,
                                 "add_volumes": "fake-volume-uuid",
                                 "remove_volumes": None, }}
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(400, res.status_int)
    self.assertEqual(400, res_dict['badRequest']['code'])
    msg = (_("Invalid volume: Cannot add volume fake-volume-uuid "
             "to consistency group %(group_id)s because volume cannot "
             "be found.") %
           {'group_id': consistencygroup.id})
    self.assertEqual(msg, res_dict['badRequest']['message'])
    consistencygroup.destroy()
def test_update_consistencygroup_remove_volume_not_found(self):
    """Removing a volume id that is not in the CG must fail with 400."""
    consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
                                                     status='available')
    req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
                              consistencygroup.id)
    req.method = 'PUT'
    req.headers['Content-Type'] = 'application/json'
    body = {"consistencygroup": {"name": None,
                                 "description": "new description",
                                 "add_volumes": None,
                                 "remove_volumes": "fake-volume-uuid", }}
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(400, res.status_int)
    self.assertEqual(400, res_dict['badRequest']['code'])
    msg = (_("Invalid volume: Cannot remove volume fake-volume-uuid "
             "from consistency group %(group_id)s because it is not "
             "in the group.") %
           {'group_id': consistencygroup.id})
    self.assertEqual(msg, res_dict['badRequest']['message'])
    consistencygroup.destroy()
def test_update_consistencygroup_empty_parameters(self):
    """An update carrying only empty fields is rejected with a 400."""
    group = self._create_consistencygroup(ctxt=self.ctxt,
                                          status='available')
    payload = {"consistencygroup": {"name": "",
                                    "description": "",
                                    "add_volumes": None,
                                    "remove_volumes": None, }}

    request = webob.Request.blank(
        '/v2/fake/consistencygroups/%s/update' % group.id)
    request.method = 'PUT'
    request.headers['Content-Type'] = 'application/json'
    request.body = json.dumps(payload)

    response = request.get_response(fakes.wsgi_app())
    fault = json.loads(response.body)['badRequest']

    self.assertEqual(400, response.status_int)
    self.assertEqual(400, fault['code'])
    group.destroy()
def test_update_consistencygroup_add_volume_invalid_state(self):
    """Adding a volume whose status is invalid must fail with a 400."""
    volume_type_id = '123456'
    consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
                                                     status='available')
    # Volume deliberately created in a status the API will not accept.
    add_volume_id = utils.create_volume(
        self.ctxt,
        volume_type_id=volume_type_id,
        status='wrong_status')['id']
    req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
                              consistencygroup.id)
    req.method = 'PUT'
    req.headers['Content-Type'] = 'application/json'
    add_volumes = add_volume_id
    body = {"consistencygroup": {"name": "cg1",
                                 "description": "",
                                 "add_volumes": add_volumes,
                                 "remove_volumes": None, }}
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(400, res.status_int)
    self.assertEqual(400, res_dict['badRequest']['code'])
    msg = (_("Invalid volume: Cannot add volume %(volume_id)s "
             "to consistency group %(group_id)s because volume is in an "
             "invalid state: %(status)s. Valid states are: ('available', "
             "'in-use').") %
           {'volume_id': add_volume_id,
            'group_id': consistencygroup.id,
            'status': 'wrong_status'})
    self.assertEqual(msg, res_dict['badRequest']['message'])
    consistencygroup.destroy()
def test_update_consistencygroup_add_volume_invalid_volume_type(self):
    """Adding a volume of a type the CG does not support fails (400)."""
    consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
                                                     status='available')
    # The group's fixture default type is '123456'; this one differs.
    wrong_type = 'wrong-volume-type-id'
    add_volume_id = utils.create_volume(
        self.ctxt,
        volume_type_id=wrong_type)['id']
    req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
                              consistencygroup.id)
    req.method = 'PUT'
    req.headers['Content-Type'] = 'application/json'
    add_volumes = add_volume_id
    body = {"consistencygroup": {"name": "cg1",
                                 "description": "",
                                 "add_volumes": add_volumes,
                                 "remove_volumes": None, }}
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(400, res.status_int)
    self.assertEqual(400, res_dict['badRequest']['code'])
    msg = (_("Invalid volume: Cannot add volume %(volume_id)s "
             "to consistency group %(group_id)s because volume type "
             "%(volume_type)s is not supported by the group.") %
           {'volume_id': add_volume_id,
            'group_id': consistencygroup.id,
            'volume_type': wrong_type})
    self.assertEqual(msg, res_dict['badRequest']['message'])
    consistencygroup.destroy()
def test_update_consistencygroup_add_volume_already_in_cg(self):
    """Adding a volume that belongs to another CG must fail with 400."""
    consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
                                                     status='available')
    add_volume_id = utils.create_volume(
        self.ctxt,
        consistencygroup_id='some_other_cg')['id']
    req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
                              consistencygroup.id)
    req.method = 'PUT'
    req.headers['Content-Type'] = 'application/json'
    add_volumes = add_volume_id
    body = {"consistencygroup": {"name": "cg1",
                                 "description": "",
                                 "add_volumes": add_volumes,
                                 "remove_volumes": None, }}
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(400, res.status_int)
    self.assertEqual(400, res_dict['badRequest']['code'])
    self.assertIsNotNone(res_dict['badRequest']['message'])
    consistencygroup.destroy()
def test_update_consistencygroup_invalid_state(self):
    """Updating a CG whose status is not 'available' fails with 400."""
    wrong_status = 'wrong_status'
    consistencygroup = self._create_consistencygroup(status=wrong_status,
                                                     ctxt=self.ctxt)
    req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
                              consistencygroup.id)
    req.method = 'PUT'
    req.headers['Content-Type'] = 'application/json'
    body = {"consistencygroup": {"name": "new name",
                                 "description": None,
                                 "add_volumes": None,
                                 "remove_volumes": None, }}
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(400, res.status_int)
    self.assertEqual(400, res_dict['badRequest']['code'])
    msg = _("Invalid ConsistencyGroup: Consistency group status must be "
            "available, but current status is: %s.") % wrong_status
    self.assertEqual(msg, res_dict['badRequest']['message'])
    consistencygroup.destroy()
@mock.patch(
    'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_create_consistencygroup_from_src(self, mock_validate):
    """Create a CG from a cgsnapshot; expect 202 and name validation."""
    self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
    # Source fixtures: a CG with one volume, snapshotted into an
    # available cgsnapshot that the new group is built from.
    consistencygroup = utils.create_consistencygroup(self.ctxt)
    volume_id = utils.create_volume(
        self.ctxt,
        consistencygroup_id=consistencygroup.id)['id']
    cgsnapshot_id = utils.create_cgsnapshot(
        self.ctxt,
        consistencygroup_id=consistencygroup.id)['id']
    snapshot_id = utils.create_snapshot(
        self.ctxt,
        volume_id,
        cgsnapshot_id=cgsnapshot_id,
        status='available')['id']
    test_cg_name = 'test cg'
    body = {"consistencygroup-from-src": {"name": test_cg_name,
                                          "description":
                                          "Consistency Group 1",
                                          "cgsnapshot_id": cgsnapshot_id}}
    req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(202, res.status_int)
    self.assertIn('id', res_dict['consistencygroup'])
    self.assertEqual(test_cg_name, res_dict['consistencygroup']['name'])
    self.assertTrue(mock_validate.called)
    # Tear down the created group and all source fixtures.
    cg_ref = objects.ConsistencyGroup.get_by_id(
        self.ctxt.elevated(), res_dict['consistencygroup']['id'])
    cg_ref.destroy()
    db.snapshot_destroy(self.ctxt.elevated(), snapshot_id)
    db.cgsnapshot_destroy(self.ctxt.elevated(), cgsnapshot_id)
    db.volume_destroy(self.ctxt.elevated(), volume_id)
    consistencygroup.destroy()
def test_create_consistencygroup_from_src_cg(self):
    """Create a CG cloned from a source CG; expect a 202 response."""
    self.mock_object(volume_api.API, "create", stubs.stub_volume_create)

    # Source fixtures: a CG holding one volume.
    source_cg = utils.create_consistencygroup(self.ctxt)
    volume_id = utils.create_volume(
        self.ctxt,
        consistencygroup_id=source_cg.id)['id']

    test_cg_name = 'test cg'
    body = {"consistencygroup-from-src": {"name": test_cg_name,
                                          "description":
                                          "Consistency Group 1",
                                          "source_cgid": source_cg.id}}
    req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(202, res.status_int)
    self.assertIn('id', res_dict['consistencygroup'])
    self.assertEqual(test_cg_name, res_dict['consistencygroup']['name'])

    # Tear down.  The original code referenced ``cg.destroy`` without
    # calling it, so the created group was never cleaned up; invoke the
    # method properly here.
    cg = objects.ConsistencyGroup.get_by_id(
        self.ctxt, res_dict['consistencygroup']['id'])
    cg.destroy()
    db.volume_destroy(self.ctxt.elevated(), volume_id)
    source_cg.destroy()
def test_create_consistencygroup_from_src_both_snap_cg(self):
    """Supplying both cgsnapshot_id and source_cgid is rejected (400)."""
    self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
    consistencygroup = utils.create_consistencygroup(self.ctxt)
    volume_id = utils.create_volume(
        self.ctxt,
        consistencygroup_id=consistencygroup.id)['id']
    cgsnapshot_id = utils.create_cgsnapshot(
        self.ctxt,
        consistencygroup_id=consistencygroup.id)['id']
    snapshot_id = utils.create_snapshot(
        self.ctxt,
        volume_id,
        cgsnapshot_id=cgsnapshot_id,
        status='available')['id']
    test_cg_name = 'test cg'
    # Mutually exclusive source identifiers supplied together.
    body = {"consistencygroup-from-src": {"name": test_cg_name,
                                          "description":
                                          "Consistency Group 1",
                                          "cgsnapshot_id": cgsnapshot_id,
                                          "source_cgid":
                                          consistencygroup.id}}
    req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(400, res.status_int)
    self.assertEqual(400, res_dict['badRequest']['code'])
    self.assertIsNotNone(res_dict['badRequest']['message'])
    db.snapshot_destroy(self.ctxt.elevated(), snapshot_id)
    db.cgsnapshot_destroy(self.ctxt.elevated(), cgsnapshot_id)
    db.volume_destroy(self.ctxt.elevated(), volume_id)
    consistencygroup.destroy()
def test_create_consistencygroup_from_src_invalid_body(self):
    """A create_from_src body without its root element yields a 400."""
    # The root key should be 'consistencygroup-from-src'.
    payload = {"invalid": {"name": 'cg1',
                           "description":
                           "Consistency Group 1", }}

    request = webob.Request.blank(
        '/v2/fake/consistencygroups/create_from_src')
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = json.dumps(payload)

    response = request.get_response(fakes.wsgi_app())
    fault = json.loads(response.body)['badRequest']

    self.assertEqual(400, response.status_int)
    self.assertEqual(400, fault['code'])
    self.assertIsNotNone(fault['message'])
def test_create_consistencygroup_from_src_no_source_id(self):
    """create_from_src without cgsnapshot_id or source_cgid gets a 400."""
    payload = {"consistencygroup-from-src": {"name": 'cg1',
                                             "description":
                                             "Consistency Group 1", }}

    request = webob.Request.blank(
        '/v2/fake/consistencygroups/create_from_src')
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = json.dumps(payload)

    response = request.get_response(fakes.wsgi_app())
    fault = json.loads(response.body)['badRequest']

    self.assertEqual(400, response.status_int)
    self.assertEqual(400, fault['code'])
    self.assertIsNotNone(fault['message'])
def test_create_consistencygroup_from_src_no_host(self):
    """Cloning from a snapshot of a hostless CG must fail with a 400."""
    consistencygroup = utils.create_consistencygroup(self.ctxt, host=None)
    volume_id = utils.create_volume(
        self.ctxt,
        consistencygroup_id=consistencygroup.id)['id']
    cgsnapshot_id = utils.create_cgsnapshot(
        self.ctxt,
        consistencygroup_id=consistencygroup.id)['id']
    snapshot_id = utils.create_snapshot(
        self.ctxt,
        volume_id,
        cgsnapshot_id=cgsnapshot_id,
        status='available')['id']
    test_cg_name = 'test cg'
    body = {"consistencygroup-from-src": {"name": test_cg_name,
                                          "description":
                                          "Consistency Group 1",
                                          "cgsnapshot_id": cgsnapshot_id}}
    req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(400, res.status_int)
    self.assertEqual(400, res_dict['badRequest']['code'])
    msg = _('Invalid ConsistencyGroup: No host to create consistency '
            'group')
    self.assertIn(msg, res_dict['badRequest']['message'])
    db.snapshot_destroy(self.ctxt.elevated(), snapshot_id)
    db.cgsnapshot_destroy(self.ctxt.elevated(), cgsnapshot_id)
    db.volume_destroy(self.ctxt.elevated(), volume_id)
    consistencygroup.destroy()
def test_create_consistencygroup_from_src_cgsnapshot_empty(self):
    """Cloning from a cgsnapshot with no member snapshots fails (400)."""
    consistencygroup = utils.create_consistencygroup(self.ctxt)
    volume_id = utils.create_volume(
        self.ctxt,
        consistencygroup_id=consistencygroup.id)['id']
    # Note: no snapshot is created inside this cgsnapshot on purpose.
    cgsnapshot_id = utils.create_cgsnapshot(
        self.ctxt,
        consistencygroup_id=consistencygroup.id)['id']
    test_cg_name = 'test cg'
    body = {"consistencygroup-from-src": {"name": test_cg_name,
                                          "description":
                                          "Consistency Group 1",
                                          "cgsnapshot_id": cgsnapshot_id}}
    req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(400, res.status_int)
    self.assertEqual(400, res_dict['badRequest']['code'])
    self.assertIsNotNone(res_dict['badRequest']['message'])
    db.cgsnapshot_destroy(self.ctxt.elevated(), cgsnapshot_id)
    db.volume_destroy(self.ctxt.elevated(), volume_id)
    consistencygroup.destroy()
def test_create_consistencygroup_from_src_source_cg_empty(self):
    """Cloning from a source CG that holds no volumes must fail (400)."""
    source_cg = utils.create_consistencygroup(self.ctxt)
    payload = {"consistencygroup-from-src": {"name": 'test cg',
                                             "description":
                                             "Consistency Group 1",
                                             "source_cgid": source_cg.id}}

    request = webob.Request.blank(
        '/v2/fake/consistencygroups/create_from_src')
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = json.dumps(payload)

    response = request.get_response(fakes.wsgi_app())
    fault = json.loads(response.body)['badRequest']

    self.assertEqual(400, response.status_int)
    self.assertEqual(400, fault['code'])
    self.assertIsNotNone(fault['message'])
    source_cg.destroy()
def test_create_consistencygroup_from_src_cgsnapshot_notfound(self):
    """Cloning from a nonexistent cgsnapshot id yields a 404."""
    consistencygroup = utils.create_consistencygroup(self.ctxt)
    volume_id = utils.create_volume(
        self.ctxt,
        consistencygroup_id=consistencygroup.id)['id']
    test_cg_name = 'test cg'
    body = {"consistencygroup-from-src": {"name": test_cg_name,
                                          "description":
                                          "Consistency Group 1",
                                          "cgsnapshot_id": "fake_cgsnap"}}
    req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(404, res.status_int)
    self.assertEqual(404, res_dict['itemNotFound']['code'])
    self.assertIsNotNone(res_dict['itemNotFound']['message'])
    db.volume_destroy(self.ctxt.elevated(), volume_id)
    consistencygroup.destroy()
def test_create_consistencygroup_from_src_source_cg_notfound(self):
    """Cloning from a nonexistent source CG id yields a 404."""
    payload = {"consistencygroup-from-src":
               {"name": 'test cg',
                "description": "Consistency Group 1",
                "source_cgid": "fake_source_cg"}}

    request = webob.Request.blank(
        '/v2/fake/consistencygroups/create_from_src')
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = json.dumps(payload)

    response = request.get_response(fakes.wsgi_app())
    fault = json.loads(response.body)['itemNotFound']

    self.assertEqual(404, response.status_int)
    self.assertEqual(404, fault['code'])
    self.assertIsNotNone(fault['message'])
@mock.patch.object(volume_api.API, 'create',
                   side_effect=exception.CinderException(
                       'Create volume failed.'))
def test_create_consistencygroup_from_src_cgsnapshot_create_volume_failed(
        self, mock_create):
    """A volume-create failure during clone-from-snapshot maps to 400."""
    consistencygroup = utils.create_consistencygroup(self.ctxt)
    volume_id = utils.create_volume(
        self.ctxt,
        consistencygroup_id=consistencygroup.id)['id']
    cgsnapshot_id = utils.create_cgsnapshot(
        self.ctxt,
        consistencygroup_id=consistencygroup.id)['id']
    snapshot_id = utils.create_snapshot(
        self.ctxt,
        volume_id,
        cgsnapshot_id=cgsnapshot_id,
        status='available')['id']
    test_cg_name = 'test cg'
    body = {"consistencygroup-from-src": {"name": test_cg_name,
                                          "description":
                                          "Consistency Group 1",
                                          "cgsnapshot_id": cgsnapshot_id}}
    req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(400, res.status_int)
    self.assertEqual(400, res_dict['badRequest']['code'])
    # The raised CinderException's text surfaces as the fault message.
    msg = _("Create volume failed.")
    self.assertEqual(msg, res_dict['badRequest']['message'])
    db.snapshot_destroy(self.ctxt.elevated(), snapshot_id)
    db.cgsnapshot_destroy(self.ctxt.elevated(), cgsnapshot_id)
    db.volume_destroy(self.ctxt.elevated(), volume_id)
    consistencygroup.destroy()
@mock.patch.object(volume_api.API, 'create',
                   side_effect=exception.CinderException(
                       'Create volume failed.'))
def test_create_consistencygroup_from_src_cg_create_volume_failed(
        self, mock_create):
    """A volume-create failure during clone-from-CG maps to a 400."""
    source_cg = utils.create_consistencygroup(self.ctxt)
    volume_id = utils.create_volume(
        self.ctxt,
        consistencygroup_id=source_cg.id)['id']
    test_cg_name = 'test cg'
    body = {"consistencygroup-from-src": {"name": test_cg_name,
                                          "description":
                                          "Consistency Group 1",
                                          "source_cgid": source_cg.id}}
    req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(400, res.status_int)
    self.assertEqual(400, res_dict['badRequest']['code'])
    self.assertIsNotNone(res_dict['badRequest']['message'])
    db.volume_destroy(self.ctxt.elevated(), volume_id)
    source_cg.destroy()
|
# Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for consistency group code.
"""
import json
from xml.dom import minidom
import mock
import webob
import cinder.consistencygroup
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import stubs
from cinder.tests.unit import utils
from cinder.volume import api as volume_api
class ConsistencyGroupsAPITestCase(test.TestCase):
"""Test Case for consistency groups API."""
def setUp(self):
    """Build a consistencygroup API handle and an authenticated context."""
    super(ConsistencyGroupsAPITestCase, self).setUp()
    self.cg_api = cinder.consistencygroup.API()
    self.ctxt = context.RequestContext('fake', 'fake', auth_token=True)
def _create_consistencygroup(
        self,
        ctxt=None,
        name='test_consistencygroup',
        description='this is a test consistency group',
        volume_type_id='123456',
        availability_zone='az1',
        host='fakehost',
        status='creating'):
    """Create and persist a consistency group object.

    Builds an objects.ConsistencyGroup from the given fields (defaulting
    to self.ctxt when no context is supplied) and saves it.  Callers are
    responsible for calling destroy() on the returned object.
    """
    ctxt = ctxt or self.ctxt
    consistencygroup = objects.ConsistencyGroup(ctxt)
    consistencygroup.user_id = 'fake'
    consistencygroup.project_id = 'fake'
    consistencygroup.availability_zone = availability_zone
    consistencygroup.name = name
    consistencygroup.description = description
    consistencygroup.volume_type_id = volume_type_id
    consistencygroup.host = host
    consistencygroup.status = status
    consistencygroup.create()
    return consistencygroup
def test_show_consistencygroup(self):
    """GET a single CG and verify the JSON view of its fields."""
    consistencygroup = self._create_consistencygroup()
    req = webob.Request.blank('/v2/fake/consistencygroups/%s' %
                              consistencygroup.id)
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    self.assertEqual(200, res.status_int)
    # Values mirror the fixture defaults in _create_consistencygroup.
    self.assertEqual('az1',
                     res_dict['consistencygroup']['availability_zone'])
    self.assertEqual('this is a test consistency group',
                     res_dict['consistencygroup']['description'])
    self.assertEqual('test_consistencygroup',
                     res_dict['consistencygroup']['name'])
    self.assertEqual('creating',
                     res_dict['consistencygroup']['status'])
    consistencygroup.destroy()
def test_show_consistencygroup_xml_content_type(self):
    """GET a single CG as XML and verify the serialized name attribute."""
    consistencygroup = self._create_consistencygroup()
    req = webob.Request.blank('/v2/fake/consistencygroups/%s' %
                              consistencygroup.id)
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/xml'
    req.headers['Accept'] = 'application/xml'
    res = req.get_response(fakes.wsgi_app())
    self.assertEqual(200, res.status_int)
    dom = minidom.parseString(res.body)
    consistencygroups = dom.getElementsByTagName('consistencygroup')
    name = consistencygroups.item(0).getAttribute('name')
    self.assertEqual("test_consistencygroup", name.strip())
    consistencygroup.destroy()
def test_show_consistencygroup_with_consistencygroup_NotFound(self):
    """GET on an unknown CG id yields a 404 itemNotFound fault."""
    request = webob.Request.blank('/v2/fake/consistencygroups/9999')
    request.method = 'GET'
    request.headers['Content-Type'] = 'application/json'

    response = request.get_response(fakes.wsgi_app())
    fault = json.loads(response.body)['itemNotFound']

    self.assertEqual(404, response.status_int)
    self.assertEqual(404, fault['code'])
    self.assertEqual('ConsistencyGroup 9999 could not be found.',
                     fault['message'])
def test_list_consistencygroups_json(self):
    """GET the index and check id and name of the three seeded groups."""
    groups = [self._create_consistencygroup() for _ in range(3)]

    request = webob.Request.blank('/v2/fake/consistencygroups')
    request.method = 'GET'
    request.headers['Content-Type'] = 'application/json'
    response = request.get_response(fakes.wsgi_app())
    listing = json.loads(response.body)['consistencygroups']

    self.assertEqual(200, response.status_int)
    for position, group in enumerate(groups):
        self.assertEqual(group.id, listing[position]['id'])
        self.assertEqual('test_consistencygroup',
                         listing[position]['name'])
    for group in groups:
        group.destroy()
def test_list_consistencygroups_xml(self):
    """GET the index as XML and verify each group's id attribute."""
    groups = [self._create_consistencygroup() for _ in range(3)]

    request = webob.Request.blank('/v2/fake/consistencygroups')
    request.method = 'GET'
    request.headers['Content-Type'] = 'application/xml'
    request.headers['Accept'] = 'application/xml'
    response = request.get_response(fakes.wsgi_app())

    self.assertEqual(200, response.status_int)
    dom = minidom.parseString(response.body)
    nodes = dom.getElementsByTagName('consistencygroup')
    for position, group in enumerate(groups):
        self.assertEqual(group.id,
                         nodes.item(position).getAttribute('id'))
    # Tear down in reverse creation order, as the original test did.
    for group in reversed(groups):
        group.destroy()
def test_list_consistencygroups_detail_json(self):
consistencygroup1 = self._create_consistencygroup()
consistencygroup2 = self._create_consistencygroup()
consistencygroup3 = self._create_consistencygroup()
req = webob.Request.blank('/v2/fake/consistencygroups/detail')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('az1',
res_dict['consistencygroups'][0]['availability_zone'])
self.assertEqual('this is a test consistency group',
res_dict['consistencygroups'][0]['description'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][0]['name'])
self.assertEqual(consistencygroup1.id,
res_dict['consistencygroups'][0]['id'])
self.assertEqual('creating',
res_dict['consistencygroups'][0]['status'])
self.assertEqual('az1',
res_dict['consistencygroups'][1]['availability_zone'])
self.assertEqual('this is a test consistency group',
res_dict['consistencygroups'][1]['description'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][1]['name'])
self.assertEqual(consistencygroup2.id,
res_dict['consistencygroups'][1]['id'])
self.assertEqual('creating',
res_dict['consistencygroups'][1]['status'])
self.assertEqual('az1',
res_dict['consistencygroups'][2]['availability_zone'])
self.assertEqual('this is a test consistency group',
res_dict['consistencygroups'][2]['description'])
self.assertEqual('test_consistencygroup',
res_dict['consistencygroups'][2]['name'])
self.assertEqual(consistencygroup3.id,
res_dict['consistencygroups'][2]['id'])
self.assertEqual('creating',
res_dict['consistencygroups'][2]['status'])
consistencygroup1.destroy()
consistencygroup2.destroy()
consistencygroup3.destroy()
def test_list_consistencygroups_detail_xml(self):
consistencygroup1 = self._create_consistencygroup()
consistencygroup2 = self._create_consistencygroup()
consistencygroup3 = self._create_consistencygroup()
req = webob.Request.blank('/v2/fake/consistencygroups/detail')
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
dom = minidom.parseString(res.body)
consistencygroup_detail = dom.getElementsByTagName('consistencygroup')
self.assertEqual(
'az1',
consistencygroup_detail.item(0).getAttribute('availability_zone'))
self.assertEqual(
'this is a test consistency group',
consistencygroup_detail.item(0).getAttribute('description'))
self.assertEqual(
'test_consistencygroup',
consistencygroup_detail.item(0).getAttribute('name'))
self.assertEqual(
consistencygroup1.id,
consistencygroup_detail.item(0).getAttribute('id'))
self.assertEqual(
'creating',
consistencygroup_detail.item(0).getAttribute('status'))
self.assertEqual(
'az1',
consistencygroup_detail.item(1).getAttribute('availability_zone'))
self.assertEqual(
'this is a test consistency group',
consistencygroup_detail.item(1).getAttribute('description'))
self.assertEqual(
'test_consistencygroup',
consistencygroup_detail.item(1).getAttribute('name'))
self.assertEqual(
consistencygroup2.id,
consistencygroup_detail.item(1).getAttribute('id'))
self.assertEqual(
'creating',
consistencygroup_detail.item(1).getAttribute('status'))
self.assertEqual(
'az1',
consistencygroup_detail.item(2).getAttribute('availability_zone'))
self.assertEqual(
'this is a test consistency group',
consistencygroup_detail.item(2).getAttribute('description'))
self.assertEqual(
'test_consistencygroup',
consistencygroup_detail.item(2).getAttribute('name'))
self.assertEqual(
consistencygroup3.id,
consistencygroup_detail.item(2).getAttribute('id'))
self.assertEqual(
'creating',
consistencygroup_detail.item(2).getAttribute('status'))
consistencygroup3.destroy()
consistencygroup2.destroy()
consistencygroup1.destroy()
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_create_consistencygroup_json(self, mock_validate):
group_id = "1"
# Create volume type
vol_type = 'test'
db.volume_type_create(context.get_admin_context(),
{'name': vol_type, 'extra_specs': {}})
body = {"consistencygroup": {"name": "cg1",
"volume_types": vol_type,
"description":
"Consistency Group 1", }}
req = webob.Request.blank('/v2/fake/consistencygroups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(202, res.status_int)
self.assertIn('id', res_dict['consistencygroup'])
self.assertTrue(mock_validate.called)
group_id = res_dict['consistencygroup']['id']
cg = objects.ConsistencyGroup.get_by_id(context.get_admin_context(),
group_id)
cg.destroy()
def test_create_consistencygroup_with_no_body(self):
# omit body from the request
req = webob.Request.blank('/v2/fake/consistencygroups')
req.body = json.dumps(None)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertEqual("Missing required element 'consistencygroup' in "
"request body.",
res_dict['badRequest']['message'])
def test_delete_consistencygroup_available(self):
consistencygroup = self._create_consistencygroup(status='available')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
consistencygroup.id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"force": True}}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
consistencygroup = objects.ConsistencyGroup.get_by_id(
self.ctxt, consistencygroup.id)
self.assertEqual(202, res.status_int)
self.assertEqual('deleting', consistencygroup.status)
consistencygroup.destroy()
def test_delete_consistencygroup_with_consistencygroup_NotFound(self):
req = webob.Request.blank('/v2/fake/consistencygroups/9999/delete')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(None)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
self.assertEqual(404, res_dict['itemNotFound']['code'])
self.assertEqual('ConsistencyGroup 9999 could not be found.',
res_dict['itemNotFound']['message'])
def test_delete_consistencygroup_with_Invalidconsistencygroup(self):
consistencygroup = self._create_consistencygroup(status='invalid')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
consistencygroup.id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"force": False}}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = (_('Invalid ConsistencyGroup: Consistency group status must be '
'available or error, but current status is: invalid'))
self.assertEqual(msg, res_dict['badRequest']['message'])
consistencygroup.destroy()
def test_delete_consistencygroup_no_host(self):
consistencygroup = self._create_consistencygroup(
host=None,
status='error')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
consistencygroup.id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"force": True}}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(202, res.status_int)
cg = objects.ConsistencyGroup.get_by_id(
context.get_admin_context(read_deleted='yes'),
consistencygroup.id)
self.assertEqual('deleted', cg.status)
self.assertIsNone(cg.host)
def test_create_delete_consistencygroup_update_quota(self):
name = 'mycg'
description = 'consistency group 1'
fake_type = {'id': '1', 'name': 'fake_type'}
self.stubs.Set(db, 'volume_types_get_by_name_or_id',
mock.Mock(return_value=[fake_type]))
self.stubs.Set(self.cg_api,
'_cast_create_consistencygroup',
mock.Mock())
self.stubs.Set(self.cg_api, 'update_quota',
mock.Mock())
cg = self.cg_api.create(self.ctxt, name, description,
fake_type['name'])
self.cg_api.update_quota.assert_called_once_with(
self.ctxt, cg, 1)
self.assertEqual('creating', cg.status)
self.assertIsNone(cg.host)
self.cg_api.update_quota.reset_mock()
cg.status = 'error'
self.cg_api.delete(self.ctxt, cg)
self.cg_api.update_quota.assert_called_once_with(
self.ctxt, cg, -1, self.ctxt.project_id)
cg = objects.ConsistencyGroup.get_by_id(
context.get_admin_context(read_deleted='yes'),
cg.id)
self.assertEqual('deleted', cg.status)
def test_delete_consistencygroup_with_invalid_body(self):
consistencygroup = self._create_consistencygroup(status='available')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
consistencygroup.id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
body = {"invalid_request_element": {"force": False}}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_delete_consistencygroup_with_invalid_force_value_in_body(self):
consistencygroup = self._create_consistencygroup(status='available')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
consistencygroup.id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"force": "abcd"}}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_delete_consistencygroup_with_empty_force_value_in_body(self):
consistencygroup = self._create_consistencygroup(status='available')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/delete' %
consistencygroup.id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"force": ""}}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_create_consistencygroup_failed_no_volume_type(self):
name = 'cg1'
body = {"consistencygroup": {"name": name,
"description":
"Consistency Group 1", }}
req = webob.Request.blank('/v2/fake/consistencygroups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = (_('volume_types must be provided to create '
'consistency group %s.') % name)
self.assertEqual(msg, res_dict['badRequest']['message'])
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_update_consistencygroup_success(self, mock_validate):
volume_type_id = '123456'
consistencygroup = self._create_consistencygroup(status='available',
host='test_host')
remove_volume_id = utils.create_volume(
self.ctxt,
volume_type_id=volume_type_id,
consistencygroup_id=consistencygroup.id)['id']
remove_volume_id2 = utils.create_volume(
self.ctxt,
volume_type_id=volume_type_id,
consistencygroup_id=consistencygroup.id)['id']
self.assertEqual('available', consistencygroup.status)
cg_volumes = db.volume_get_all_by_group(self.ctxt.elevated(),
consistencygroup.id)
cg_vol_ids = [cg_vol['id'] for cg_vol in cg_volumes]
self.assertIn(remove_volume_id, cg_vol_ids)
self.assertIn(remove_volume_id2, cg_vol_ids)
add_volume_id = utils.create_volume(
self.ctxt,
volume_type_id=volume_type_id)['id']
add_volume_id2 = utils.create_volume(
self.ctxt,
volume_type_id=volume_type_id)['id']
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
name = 'newcg'
description = 'New Consistency Group Description'
add_volumes = add_volume_id + "," + add_volume_id2
remove_volumes = remove_volume_id + "," + remove_volume_id2
body = {"consistencygroup": {"name": name,
"description": description,
"add_volumes": add_volumes,
"remove_volumes": remove_volumes, }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
consistencygroup = objects.ConsistencyGroup.get_by_id(
self.ctxt, consistencygroup.id)
self.assertEqual(202, res.status_int)
self.assertTrue(mock_validate.called)
self.assertEqual('updating', consistencygroup.status)
consistencygroup.destroy()
def test_update_consistencygroup_add_volume_not_found(self):
consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
status='available')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"name": None,
"description": None,
"add_volumes": "fake-volume-uuid",
"remove_volumes": None, }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = (_("Invalid volume: Cannot add volume fake-volume-uuid "
"to consistency group %(group_id)s because volume cannot "
"be found.") %
{'group_id': consistencygroup.id})
self.assertEqual(msg, res_dict['badRequest']['message'])
consistencygroup.destroy()
def test_update_consistencygroup_remove_volume_not_found(self):
consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
status='available')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"name": None,
"description": "new description",
"add_volumes": None,
"remove_volumes": "fake-volume-uuid", }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = (_("Invalid volume: Cannot remove volume fake-volume-uuid "
"from consistency group %(group_id)s because it is not "
"in the group.") %
{'group_id': consistencygroup.id})
self.assertEqual(msg, res_dict['badRequest']['message'])
consistencygroup.destroy()
def test_update_consistencygroup_empty_parameters(self):
consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
status='available')
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"name": "",
"description": "",
"add_volumes": None,
"remove_volumes": None, }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
consistencygroup.destroy()
def test_update_consistencygroup_add_volume_invalid_state(self):
volume_type_id = '123456'
consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
status='available')
add_volume_id = utils.create_volume(
self.ctxt,
volume_type_id=volume_type_id,
status='wrong_status')['id']
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
add_volumes = add_volume_id
body = {"consistencygroup": {"name": "cg1",
"description": "",
"add_volumes": add_volumes,
"remove_volumes": None, }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = (_("Invalid volume: Cannot add volume %(volume_id)s "
"to consistency group %(group_id)s because volume is in an "
"invalid state: %(status)s. Valid states are: ('available', "
"'in-use').") %
{'volume_id': add_volume_id,
'group_id': consistencygroup.id,
'status': 'wrong_status'})
self.assertEqual(msg, res_dict['badRequest']['message'])
consistencygroup.destroy()
def test_update_consistencygroup_add_volume_invalid_volume_type(self):
consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
status='available')
wrong_type = 'wrong-volume-type-id'
add_volume_id = utils.create_volume(
self.ctxt,
volume_type_id=wrong_type)['id']
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
add_volumes = add_volume_id
body = {"consistencygroup": {"name": "cg1",
"description": "",
"add_volumes": add_volumes,
"remove_volumes": None, }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = (_("Invalid volume: Cannot add volume %(volume_id)s "
"to consistency group %(group_id)s because volume type "
"%(volume_type)s is not supported by the group.") %
{'volume_id': add_volume_id,
'group_id': consistencygroup.id,
'volume_type': wrong_type})
self.assertEqual(msg, res_dict['badRequest']['message'])
consistencygroup.destroy()
def test_update_consistencygroup_add_volume_already_in_cg(self):
consistencygroup = self._create_consistencygroup(ctxt=self.ctxt,
status='available')
add_volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id='some_other_cg')['id']
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
add_volumes = add_volume_id
body = {"consistencygroup": {"name": "cg1",
"description": "",
"add_volumes": add_volumes,
"remove_volumes": None, }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertIsNotNone(res_dict['badRequest']['message'])
consistencygroup.destroy()
def test_update_consistencygroup_invalid_state(self):
wrong_status = 'wrong_status'
consistencygroup = self._create_consistencygroup(status=wrong_status,
ctxt=self.ctxt)
req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
consistencygroup.id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/json'
body = {"consistencygroup": {"name": "new name",
"description": None,
"add_volumes": None,
"remove_volumes": None, }}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = _("Invalid ConsistencyGroup: Consistency group status must be "
"available, but current status is: %s.") % wrong_status
self.assertEqual(msg, res_dict['badRequest']['message'])
consistencygroup.destroy()
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_create_consistencygroup_from_src(self, mock_validate):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
consistencygroup = utils.create_consistencygroup(self.ctxt)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
cgsnapshot_id = utils.create_cgsnapshot(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
snapshot_id = utils.create_snapshot(
self.ctxt,
volume_id,
cgsnapshot_id=cgsnapshot_id,
status='available')['id']
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"cgsnapshot_id": cgsnapshot_id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(202, res.status_int)
self.assertIn('id', res_dict['consistencygroup'])
self.assertEqual(test_cg_name, res_dict['consistencygroup']['name'])
self.assertTrue(mock_validate.called)
cg_ref = objects.ConsistencyGroup.get_by_id(
self.ctxt.elevated(), res_dict['consistencygroup']['id'])
cg_ref.destroy()
db.snapshot_destroy(self.ctxt.elevated(), snapshot_id)
db.cgsnapshot_destroy(self.ctxt.elevated(), cgsnapshot_id)
db.volume_destroy(self.ctxt.elevated(), volume_id)
consistencygroup.destroy()
def test_create_consistencygroup_from_src_cg(self):
self.mock_object(volume_api.API, "create", stubs.stub_volume_create)
source_cg = utils.create_consistencygroup(self.ctxt)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=source_cg.id)['id']
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"source_cgid": source_cg.id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(202, res.status_int)
self.assertIn('id', res_dict['consistencygroup'])
self.assertEqual(test_cg_name, res_dict['consistencygroup']['name'])
cg = objects.ConsistencyGroup.get_by_id(
self.ctxt, res_dict['consistencygroup']['id'])
cg.destroy
db.volume_destroy(self.ctxt.elevated(), volume_id)
source_cg.destroy()
def test_create_consistencygroup_from_src_both_snap_cg(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
consistencygroup = utils.create_consistencygroup(self.ctxt)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
cgsnapshot_id = utils.create_cgsnapshot(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
snapshot_id = utils.create_snapshot(
self.ctxt,
volume_id,
cgsnapshot_id=cgsnapshot_id,
status='available')['id']
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"cgsnapshot_id": cgsnapshot_id,
"source_cgid":
consistencygroup.id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertIsNotNone(res_dict['badRequest']['message'])
db.snapshot_destroy(self.ctxt.elevated(), snapshot_id)
db.cgsnapshot_destroy(self.ctxt.elevated(), cgsnapshot_id)
db.volume_destroy(self.ctxt.elevated(), volume_id)
consistencygroup.destroy()
def test_create_consistencygroup_from_src_invalid_body(self):
name = 'cg1'
body = {"invalid": {"name": name,
"description":
"Consistency Group 1", }}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
# Missing 'consistencygroup-from-src' in the body.
self.assertIsNotNone(res_dict['badRequest']['message'])
def test_create_consistencygroup_from_src_no_source_id(self):
name = 'cg1'
body = {"consistencygroup-from-src": {"name": name,
"description":
"Consistency Group 1", }}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertIsNotNone(res_dict['badRequest']['message'])
def test_create_consistencygroup_from_src_no_host(self):
consistencygroup = utils.create_consistencygroup(self.ctxt, host=None)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
cgsnapshot_id = utils.create_cgsnapshot(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
snapshot_id = utils.create_snapshot(
self.ctxt,
volume_id,
cgsnapshot_id=cgsnapshot_id,
status='available')['id']
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"cgsnapshot_id": cgsnapshot_id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = _('Invalid ConsistencyGroup: No host to create consistency '
'group')
self.assertIn(msg, res_dict['badRequest']['message'])
db.snapshot_destroy(self.ctxt.elevated(), snapshot_id)
db.cgsnapshot_destroy(self.ctxt.elevated(), cgsnapshot_id)
db.volume_destroy(self.ctxt.elevated(), volume_id)
consistencygroup.destroy()
def test_create_consistencygroup_from_src_cgsnapshot_empty(self):
consistencygroup = utils.create_consistencygroup(self.ctxt)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
cgsnapshot_id = utils.create_cgsnapshot(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"cgsnapshot_id": cgsnapshot_id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertIsNotNone(res_dict['badRequest']['message'])
db.cgsnapshot_destroy(self.ctxt.elevated(), cgsnapshot_id)
db.volume_destroy(self.ctxt.elevated(), volume_id)
consistencygroup.destroy()
def test_create_consistencygroup_from_src_source_cg_empty(self):
source_cg = utils.create_consistencygroup(self.ctxt)
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"source_cgid": source_cg.id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertIsNotNone(res_dict['badRequest']['message'])
source_cg.destroy()
def test_create_consistencygroup_from_src_cgsnapshot_notfound(self):
consistencygroup = utils.create_consistencygroup(self.ctxt)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"cgsnapshot_id": "fake_cgsnap"}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
self.assertEqual(404, res_dict['itemNotFound']['code'])
self.assertIsNotNone(res_dict['itemNotFound']['message'])
db.volume_destroy(self.ctxt.elevated(), volume_id)
consistencygroup.destroy()
def test_create_consistencygroup_from_src_source_cg_notfound(self):
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"source_cgid": "fake_source_cg"}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
self.assertEqual(404, res_dict['itemNotFound']['code'])
self.assertIsNotNone(res_dict['itemNotFound']['message'])
@mock.patch.object(volume_api.API, 'create',
side_effect=exception.CinderException(
'Create volume failed.'))
def test_create_consistencygroup_from_src_cgsnapshot_create_volume_failed(
self, mock_create):
consistencygroup = utils.create_consistencygroup(self.ctxt)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
cgsnapshot_id = utils.create_cgsnapshot(
self.ctxt,
consistencygroup_id=consistencygroup.id)['id']
snapshot_id = utils.create_snapshot(
self.ctxt,
volume_id,
cgsnapshot_id=cgsnapshot_id,
status='available')['id']
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"cgsnapshot_id": cgsnapshot_id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
msg = _("Create volume failed.")
self.assertEqual(msg, res_dict['badRequest']['message'])
db.snapshot_destroy(self.ctxt.elevated(), snapshot_id)
db.cgsnapshot_destroy(self.ctxt.elevated(), cgsnapshot_id)
db.volume_destroy(self.ctxt.elevated(), volume_id)
consistencygroup.destroy()
@mock.patch.object(volume_api.API, 'create',
side_effect=exception.CinderException(
'Create volume failed.'))
def test_create_consistencygroup_from_src_cg_create_volume_failed(
self, mock_create):
source_cg = utils.create_consistencygroup(self.ctxt)
volume_id = utils.create_volume(
self.ctxt,
consistencygroup_id=source_cg.id)['id']
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
"description":
"Consistency Group 1",
"source_cgid": source_cg.id}}
req = webob.Request.blank('/v2/fake/consistencygroups/create_from_src')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(400, res.status_int)
self.assertEqual(400, res_dict['badRequest']['code'])
self.assertIsNotNone(res_dict['badRequest']['message'])
db.volume_destroy(self.ctxt.elevated(), volume_id)
source_cg.destroy()
|
en
| 0.831794
|
# Copyright (C) 2012 - 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Tests for consistency group code. Test Case for consistency groups API. Create a consistency group object. # Create volume type # omit body from the request # Missing 'consistencygroup-from-src' in the body.
| 1.971326
| 2
|
build/lib/gains/adaptive.py
|
wesleybeckner/gains
| 7
|
6626995
|
<gh_stars>1-10
from math import sqrt
from scipy.spatial import ConvexHull
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KernelDensity
import numpy as np
from os.path import dirname, join
import pandas as pd
from rdkit.Chem import AllChem as Chem
import re
import salty
from rdkit.ML.Descriptors.MoleculeDescriptors import \
MolecularDescriptorCalculator as Calculator
from sklearn.preprocessing import StandardScaler
from math import log
def build_model_from_md(df, property_to_model, temperature=None,
                        pressure=None, output_ranges=None,
                        md_temperature=298.15, md_pressure=101.325):
    """
    creates new qspr models using md data

    Parameters
    ----------
    df : pandas DataFrame
        salt_log data from the genetic algorithm. Contains
        the headers 'Salt Smiles' and 'MD Calculation'. Current
        support is only for cpt and density
    property_to_model : list of str
        current support is for ['cpt'], ['density'], or
        ['cpt', 'density']
    temperature : array, optional
        temperature bounds on experimental data to add. Default
        298.1, 299 K
    pressure : array, optional
        pressure bounds on experimental data to add. Default
        101, 102 kPa
    output_ranges : array, optional
        property bounds on experimental data to add. Default
        200, 3000 (kg/m3 or kj/molK)
    md_temperature : float, optional
        temperature used to generate the md data. Default
        298.15 K
    md_pressure : float, optional
        pressure used to generate the md data. Default
        101.325 kPa

    Returns
    -------
    new_model : salty dev_model object
    new_MD_data_index : int
        start index of the newly incorporated MD data

    Summary
    -------
    Create 4 lists from df: cation/anion smiles, cpt, density.
    Nans will be used for cation/anion name in the newmodel
    output
    """
    # Avoid mutable default arguments; these substitutions are equivalent
    # to the historical defaults, so the interface is unchanged.
    if temperature is None:
        temperature = [298.1, 299]
    if pressure is None:
        pressure = [101, 102]
    if output_ranges is None:
        output_ranges = [[200, 3000]]

    # Each "MD Calculation" string carries two floats: cpt first, then
    # density. Compile the pattern once (raw string) and scan each row once
    # instead of twice.
    float_re = re.compile(r"\d+\.\d+")
    cpt = []
    density = []
    cation_smi = []
    anion_smi = []
    for i in range(df.shape[0]):
        calculation = df["MD Calculation"][i]
        values = float_re.findall(calculation)
        cpt.append(values[0])
        density.append(values[1])
        cation_smi.append(df['Salt Smiles'][i].split(".")[0])
        anion_smi.append(df['Salt Smiles'][i].split(".")[1])
    module_path = dirname(__file__)
    data = df
    n = data.shape[0]
    # Use a context manager so the descriptor-list file is always closed
    # (the previous version leaked the handle).
    Deslist = []
    with open(join(module_path, 'data', 'Deslist'), 'r') as f:
        for line in f:
            Deslist.append(line.strip('\n\t'))
    calc = Calculator(Deslist)
    D = len(Deslist)
    d = len(Deslist) * 2 + 8
    # Feature matrix layout: cation descriptors, anion descriptors, then 8
    # trailing columns (T, P, cpt, density, and 4 name/smiles slots).
    X = np.zeros((n, d))
    X[:, -8] = md_temperature
    X[:, -7] = md_pressure
    for i in range(n):
        cation = Chem.MolFromSmiles(cation_smi[i])
        anion = Chem.MolFromSmiles(anion_smi[i])
        X[i][:D] = calc.CalcDescriptors(cation)
        X[i][D:2 * D] = calc.CalcDescriptors(anion)
    X[:, -5] = density
    X[:, -6] = cpt
    cols_cat = [s + "-cation" for s in Deslist]
    cols_ani = [s + "-anion" for s in Deslist]
    # NOTE(review): the implicit string concatenation below yields
    # "Heat capacity at constant pressure,J/K/mol" (no space), which differs
    # from the spaced column name used later in this function — confirm
    # whether that mismatch is intended before changing it.
    cols = cols_cat + cols_ani + ["Temperature, K", "Pressure, kPa",
                                  "Heat capacity at constant pressure,"
                                  "J/K/mol",
                                  "Specific density, kg/m<SUP>3</SUP>",
                                  "name-anion", "smiles-anion", "name-cation",
                                  "smiles-cation"]
    X = pd.DataFrame(X, columns=cols)
    # Names are unknown for MD-generated entries; only smiles are filled in.
    X.iloc[:, -4] = np.nan
    X.iloc[:, -2] = np.nan
    X.iloc[:, -3] = anion_smi
    X.iloc[:, -1] = cation_smi  # X is the df with the new simulation data
    new_MD_data_index = X.shape[0]  # plot new predictions after re-training
    devmodel = salty.aggregate_data(property_to_model, T=temperature,
                                    P=pressure, data_ranges=output_ranges,
                                    scale_center=False)
    cols = devmodel.Data.columns
    new_data = pd.concat([devmodel.Data, X])  # have to sort in future version
    if property_to_model == ['density']:
        prop = "Specific density, kg/m<SUP>3</SUP>"
        to_drop = "Heat capacity at constant pressure, J/K/mol"
    elif property_to_model == ['cpt']:
        to_drop = "Specific density, kg/m<SUP>3</SUP>"
        prop = "Heat capacity at constant pressure, J/K/mol"
    elif property_to_model == ["cpt", "density"]:
        prop = ["Heat capacity at constant pressure, J/K/mol",
                "Specific density, kg/m<SUP>3</SUP>"]
    if property_to_model != ["cpt", "density"]:
        new_data.drop(columns=[to_drop], inplace=True)
    new_data = new_data[cols]
    new_data.reset_index(inplace=True, drop=True)
    if property_to_model == ["cpt", "density"]:
        exp_data = [prop[0], prop[1], "Temperature, K", "Pressure, kPa"]
    else:
        exp_data = [prop, "Temperature, K", "Pressure, kPa"]
    merged = new_data
    # Build the human-readable summary of the merged (experimental + MD) set.
    unique_salts = merged["smiles-cation"] + merged["smiles-anion"]
    unique_cations = repr(merged["smiles-cation"].unique())
    unique_anions = repr(merged["smiles-anion"].unique())
    actual_data_ranges = []
    for i in range(len(exp_data)):
        actual_data_ranges.append("{} - {}".format(
            str(merged[exp_data[i]].min()), str(merged[exp_data[i]].max())))
    a = np.array([len(unique_salts.unique()), unique_cations, unique_anions,
                  len(unique_salts)])
    a = np.concatenate((a, actual_data_ranges))
    cols1 = ["Unique salts", "Cations", "Anions", "Total datapoints"]
    cols = cols1 + exp_data
    data_summary = pd.DataFrame(a, cols)
    merged = new_data
    metaDf = merged.select_dtypes(include=["object"])
    dataDf = merged.select_dtypes(include=[np.number])
    cols = dataDf.columns.tolist()
    # Log-transform the target column(s), then standardize the features only;
    # the fitted mean/scale are returned with the model for later inversion.
    instance = StandardScaler()
    for i in range(1, len(property_to_model) + 1):
        dataDf.iloc[:, -i] = dataDf.iloc[:, -i].apply(lambda x: log(float(x)))
    scaled_data = pd.DataFrame(instance.fit_transform(
        dataDf.iloc[:, :-len(property_to_model)]),
        columns=cols[:-len(property_to_model)])
    df = pd.concat([scaled_data, dataDf.iloc[:, -len(property_to_model):],
                    metaDf],
                   axis=1)  # may have to sort in future version
    mean_std_of_coeffs = pd.DataFrame([instance.mean_, instance.scale_],
                                      columns=cols[:-len(property_to_model)])
    new_model = salty.dev_model(mean_std_of_coeffs, data_summary, df)
    print(new_model.Data_summary)
    return new_model, new_MD_data_index
def calculate_minimum_distances(data, x3, y3):
    """
    calculates the minimum distance of x3,y3 from any
    boundary of the convex hull

    Parameters
    ----------
    data : pandas DataFrame
        2-column DataFrame comprising the convex hull
    x3 : float
        data point associated with the first column
    y3 : float
        data point associated with the second column

    Returns
    -------
    minimum distance : float
        percent distance from the nearest edge of the convex hull;
        negative when (x3, y3) lies inside the hull, positive outside
    """
    # Rescale the hull and the query point into the same (0.1, 0.9) box so
    # distances are comparable across feature scales.
    instance = MinMaxScaler(feature_range=(0.1, 0.9))
    data = instance.fit_transform(data)
    [[x3, y3]] = instance.transform([[x3, y3]])
    hull = ConvexHull(data)
    distances = []
    for simplex_all in hull.simplices:
        x1_a, x2_a = data[simplex_all, 0]
        y1_a, y2_a = data[simplex_all, 1]
        # Point-to-line distance in the two-point (general) form:
        # |(y2-y1)x - (x2-x1)y + x2*y1 - y2*x1| / hypot(y2-y1, x2-x1).
        # Unlike the slope/intercept form this is robust to vertical edges
        # (x2 == x1), which previously raised ZeroDivisionError; for all
        # other edges it is mathematically identical.
        numerator = abs((y2_a - y1_a) * x3 - (x2_a - x1_a) * y3
                        + x2_a * y1_a - y2_a * x1_a)
        denominator = sqrt((y2_a - y1_a) ** 2 + (x2_a - x1_a) ** 2)
        distances.append(float(numerator) / float(denominator))
    # If adding the point does not grow the hull, the point was inside it.
    new_hull = ConvexHull(
        np.append(np.array([[x3, y3]]), data, axis=0))
    if hull.area >= new_hull.area:
        return (-np.min(distances))
    else:
        return (np.min(distances))
def gaussian_pdf(column, bandwidth=0.01, num_points=10000):
    """Fit a Gaussian kernel density estimate over a 1-D pandas Series.

    Parameters
    ----------
    column : pandas Series
        values to estimate the density of
    bandwidth : float, optional
        KDE kernel bandwidth (default 0.01, the historical hard-coded value)
    num_points : int, optional
        number of evaluation points spanning [min, max] (default 10000)

    Returns
    -------
    (log_density, grid) : tuple of ndarray
        log of the estimated probability density evaluated on the grid,
        and the grid itself
    """
    x = column.values
    x_d = np.linspace(min(x), max(x), num_points)
    # instantiate and fit the KDE model
    kde = KernelDensity(bandwidth=bandwidth, kernel='gaussian')
    kde.fit(x[:, None])
    # score_samples returns the log of the probability density
    return kde.score_samples(x_d[:, None]), x_d
|
from math import sqrt
from scipy.spatial import ConvexHull
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KernelDensity
import numpy as np
from os.path import dirname, join
import pandas as pd
from rdkit.Chem import AllChem as Chem
import re
import salty
from rdkit.ML.Descriptors.MoleculeDescriptors import \
MolecularDescriptorCalculator as Calculator
from sklearn.preprocessing import StandardScaler
from math import log
def build_model_from_md(df, property_to_model, temperature=[298.1, 299],
pressure=[101, 102], output_ranges=[[200, 3000]],
md_temperature=298.15, md_pressure=101.325):
"""
creates new qspr models using md data
Parameters
----------
df : pandas DataFrame
salt_log data from the genetic algorithm. Contains
the headers 'Salt Smiles' and 'MD Calculation'. Current
support is only for cpt and density
property_to_model : str
current support is for 'cpt' or 'density'
temperature : array, optional
temperature bounds on experimental data to add. Default
297, 316 K
pressure : array, optional
pressure bounds on experimental data to add. Default
99, 102 kpa
output_ranges : array, optional
property bounds on experimental data to add. Default
200, 3000 (kg/m3 or kj/molK)
md_temperature : float, optional
temperature used to generate the md data. Default
298.15 K
md_pressure : float, optional
pressure used to generate the md data. Dfault
101.325 kPa
Returns
-------
newmodel : salt dev_model object
new_MD_data_index : int
start index of the newly incorporated MD data
Summary
-------
Create 4 lists from df: cation/anion smiles, cpt, density
Nans will be used for cation/anion name in the newmodel
output
"""
cpt = []
density = []
cation_smi = []
anion_smi = []
for i in range(df.shape[0]):
calculation = df["MD Calculation"][i]
cpt.append(re.findall("\d+\.\d+", calculation)[0])
density.append(re.findall("\d+\.\d+", calculation)[1])
cation_smi.append(df['Salt Smiles'][i].split(".")[0])
anion_smi.append(df['Salt Smiles'][i].split(".")[1])
module_path = dirname(__file__)
data = df
n = data.shape[0]
f = open(join(module_path, 'data', 'Deslist'), 'r')
Deslist = []
for line in f:
Deslist.append(line.strip('\n\t'))
calc = Calculator(Deslist)
D = len(Deslist)
d = len(Deslist) * 2 + 8
X = np.zeros((n, d))
X[:, -8] = md_temperature
X[:, -7] = md_pressure
for i in range(n):
cation = Chem.MolFromSmiles(cation_smi[i])
anion = Chem.MolFromSmiles(anion_smi[i])
X[i][:D] = calc.CalcDescriptors(cation)
X[i][D:2 * D] = calc.CalcDescriptors(anion)
X[:, -5] = density
X[:, -6] = cpt
cols_cat = [s + "-cation" for s in Deslist]
cols_ani = [s + "-anion" for s in Deslist]
cols = cols_cat + cols_ani + ["Temperature, K", "Pressure, kPa",
"Heat capacity at constant pressure,"
"J/K/mol",
"Specific density, kg/m<SUP>3</SUP>",
"name-anion", "smiles-anion", "name-cation",
"smiles-cation"]
X = pd.DataFrame(X, columns=cols)
X.iloc[:, -4] = np.nan
X.iloc[:, -2] = np.nan
X.iloc[:, -3] = anion_smi
X.iloc[:, -1] = cation_smi # X is the df with the new simulation data
new_MD_data_index = X.shape[0] # plot new predictions after re-training
devmodel = salty.aggregate_data(property_to_model, T=temperature,
P=pressure, data_ranges=output_ranges,
scale_center=False)
cols = devmodel.Data.columns
new_data = pd.concat([devmodel.Data, X]) # have to sort in future version
if property_to_model == ['density']:
prop = "Specific density, kg/m<SUP>3</SUP>"
to_drop = "Heat capacity at constant pressure, J/K/mol"
elif property_to_model == ['cpt']:
to_drop = "Specific density, kg/m<SUP>3</SUP>"
prop = "Heat capacity at constant pressure, J/K/mol"
elif property_to_model == ["cpt", "density"]:
prop = ["Heat capacity at constant pressure, J/K/mol",
"Specific density, kg/m<SUP>3</SUP>"]
if property_to_model != ["cpt", "density"]:
new_data.drop(columns=[to_drop], inplace=True)
new_data = new_data[cols]
new_data.reset_index(inplace=True, drop=True)
if property_to_model == ["cpt", "density"]:
exp_data = [prop[0], prop[1], "Temperature, K", "Pressure, kPa"]
else:
exp_data = [prop, "Temperature, K", "Pressure, kPa"]
merged = new_data
unique_salts = merged["smiles-cation"] + merged["smiles-anion"]
unique_cations = repr(merged["smiles-cation"].unique())
unique_anions = repr(merged["smiles-anion"].unique())
actual_data_ranges = []
for i in range(len(exp_data)):
actual_data_ranges.append("{} - {}".format(
str(merged[exp_data[i]].min()), str(merged[exp_data[i]].max())))
a = np.array([len(unique_salts.unique()), unique_cations, unique_anions,
len(unique_salts)])
a = np.concatenate((a, actual_data_ranges))
cols1 = ["Unique salts", "Cations", "Anions", "Total datapoints"]
cols = cols1 + exp_data
data_summary = pd.DataFrame(a, cols)
merged = new_data
metaDf = merged.select_dtypes(include=["object"])
dataDf = merged.select_dtypes(include=[np.number])
cols = dataDf.columns.tolist()
instance = StandardScaler()
for i in range(1, len(property_to_model) + 1):
dataDf.iloc[:, -i] = dataDf.iloc[:, -i].apply(lambda x: log(float(x)))
scaled_data = pd.DataFrame(instance.fit_transform(
dataDf.iloc[:, :-len(property_to_model)]),
columns=cols[:-len(property_to_model)])
df = pd.concat([scaled_data, dataDf.iloc[:, -len(property_to_model):],
metaDf],
axis=1) # may have to sort in future version
mean_std_of_coeffs = pd.DataFrame([instance.mean_, instance.scale_],
columns=cols[:-len(property_to_model)])
new_model = salty.dev_model(mean_std_of_coeffs, data_summary, df)
print(new_model.Data_summary)
return new_model, new_MD_data_index
def calculate_minimum_distances(data, x3, y3):
"""
calculates the minimum distance of x3,y3 from any
boundary of the convex hull
Parameters
----------
data : pandas DataFrame
2-column DataFrame comprising the convex hull
x3 : float
data point associated with the first column
y3 : float
data point associated with the second column
Returns
-------
minimum distance : float
percent distance from the nearest edge of the convex hull
"""
instance = MinMaxScaler(feature_range=(0.1, 0.9))
data = instance.fit_transform(data)
[[x3, y3]] = instance.transform([[x3, y3]])
hull = ConvexHull(data)
distances = []
for simplex_all in hull.simplices:
x1_a, x2_a = data[simplex_all, 0]
y1_a, y2_a = data[simplex_all, 1]
m_a = (y2_a - y1_a) / (x2_a - x1_a) # slope
b_a = y2_a - (x2_a * m_a) # intercept
distances.append(
float(abs(m_a * x3 - y3 + b_a)) / float(sqrt(m_a ** 2 + 1)))
new_hull = ConvexHull(
np.append(np.array([[x3, y3]]), data, axis=0))
if hull.area >= new_hull.area:
return (-np.min(distances))
else:
return (np.min(distances))
def gaussian_pdf(column):
x = column.values
x_d = np.linspace(min(x), max(x), 10000)
# instantiate and fit the KDE model
kde = KernelDensity(bandwidth=0.01, kernel='gaussian')
kde.fit(x[:, None])
# score_samples returns the log of the probability density
return kde.score_samples(x_d[:, None]), x_d
|
en
| 0.620833
|
creates new qspr models using md data Parameters ---------- df : pandas DataFrame salt_log data from the genetic algorithm. Contains the headers 'Salt Smiles' and 'MD Calculation'. Current support is only for cpt and density property_to_model : str current support is for 'cpt' or 'density' temperature : array, optional temperature bounds on experimental data to add. Default 297, 316 K pressure : array, optional pressure bounds on experimental data to add. Default 99, 102 kpa output_ranges : array, optional property bounds on experimental data to add. Default 200, 3000 (kg/m3 or kj/molK) md_temperature : float, optional temperature used to generate the md data. Default 298.15 K md_pressure : float, optional pressure used to generate the md data. Dfault 101.325 kPa Returns ------- newmodel : salt dev_model object new_MD_data_index : int start index of the newly incorporated MD data Summary ------- Create 4 lists from df: cation/anion smiles, cpt, density Nans will be used for cation/anion name in the newmodel output # X is the df with the new simulation data # plot new predictions after re-training # have to sort in future version # may have to sort in future version calculates the minimum distance of x3,y3 from any boundary of the convex hull Parameters ---------- data : pandas DataFrame 2-column DataFrame comprising the convex hull x3 : float data point associated with the first column y3 : float data point associated with the second column Returns ------- minimum distance : float percent distance from the nearest edge of the convex hull # slope # intercept # instantiate and fit the KDE model # score_samples returns the log of the probability density
| 2.559829
| 3
|
archeomagnetic_data/create_data_set.py
|
mingzhaochina/AH-RJMCMC
| 1
|
6626996
|
<reponame>mingzhaochina/AH-RJMCMC
import numpy as np

# Age window (inclusive) used to filter both input catalogues.
min_age = 1000.0
max_age = 2000.0


def _copy_rows_in_range(input_path, output):
    """Append whitespace-delimited rows from *input_path* whose age
    (third column) lies within [min_age, max_age].

    Skips the single header line, comment rows starting with '#', and
    blank lines (the original readline loop crashed on blank lines).
    Row format is preserved exactly: fields joined by single spaces
    with a trailing " \\n".
    """
    with open(input_path, 'r') as f:
        f.readline()  # discard the header line
        for line in f:
            fields = line.split()
            if not fields or fields[0] == "#":
                continue
            if min_age <= float(fields[2]) <= max_age:
                output.write(" ".join(fields + ['\n']))


# Merge the Lubeck and Paris700 catalogues into one filtered data set.
# Context managers guarantee the files are closed even on error.
with open('Lubeck_Paris700.txt', "w") as output:
    output.write('# Lubeck+Paris, age range ' + str(min_age) + ' - '
                 + str(max_age) + ' Lat Long Age dt Int-Paris sd rep\n')
    _copy_rows_in_range('Lubeck.txt', output)
    _copy_rows_in_range('Paris700.txt', output)
|
import numpy as np
min_age = 1000.0
max_age = 2000.0
#
f = open('Lubeck.txt', 'r')
a=f.readline()
output = open('Lubeck_Paris700.txt', "w")
output.write('# Lubeck+Paris, age range ' + str(min_age)+ ' - ' + str(max_age) + ' Lat Long Age dt Int-Paris sd rep\n')
while 1:
a=f.readline()
if a=='': break
a=a.split()
a.append('\n')
if a[0] != "#":
if float(a[2]) <= max_age and float(a[2]) >= min_age:
output.writelines( " ".join(a) )
f.close()
f = open('Paris700.txt', 'r')
a=f.readline()
while 1:
a=f.readline()
if a=='': break
a=a.split()
a.append('\n')
if a[0] != "#":
if float(a[2]) <= max_age and float(a[2]) >= min_age:
output.writelines( " ".join(a) )
f.close()
output.close()
|
none
| 1
| 3.087345
| 3
|
|
tests/test_murmurhash2.py
|
messense/murmurhash2-py
| 1
|
6626997
|
<reponame>messense/murmurhash2-py
# -*- coding: utf-8 -*-
import pytest
from murmurhash2 import murmurhash2, murmurhash3

SEED = 3242157231

# Known-good (input string, expected digest) vectors for each algorithm.
MURMUR2_VECTORS = [
    ("", 3632506080),
    ("a", 455683869),
    ("ab", 2448092234),
    ("abc", 2066295634),
    ("abcd", 2588571162),
    ("abcde", 2988696942),
    ("abcdefghijklmnop", 2350868870),
]

MURMUR3_VECTORS = [
    ("", 36859204),
    ("a", 3144985375),
    ("ab", 3262304301),
    ("abc", 476091040),
    ("abcd", 412992581),
    ("abcde", 2747833956),
    ("abcdefghijklmnop", 2078305053),
]


@pytest.mark.parametrize("text, digest", MURMUR2_VECTORS)
def test_murmurhash2(text, digest):
    """murmurhash2 reproduces the reference digests at the fixed seed."""
    assert murmurhash2(text.encode("utf-8"), SEED) == digest


@pytest.mark.parametrize("text, digest", MURMUR3_VECTORS)
def test_murmurhash3(text, digest):
    """murmurhash3 reproduces the reference digests at the fixed seed."""
    assert murmurhash3(text.encode("utf-8"), SEED) == digest
|
# -*- coding: utf-8 -*-
import pytest
from murmurhash2 import murmurhash2, murmurhash3
SEED = 3242157231
@pytest.mark.parametrize(
"key, expected",
[
("", 3632506080),
("a", 455683869),
("ab", 2448092234),
("abc", 2066295634),
("abcd", 2588571162),
("abcde", 2988696942),
("abcdefghijklmnop", 2350868870),
],
)
def test_murmurhash2(key, expected):
assert murmurhash2(key.encode("utf-8"), SEED) == expected
@pytest.mark.parametrize(
"key, expected",
[
("", 36859204),
("a", 3144985375),
("ab", 3262304301),
("abc", 476091040),
("abcd", 412992581),
("abcde", 2747833956),
("abcdefghijklmnop", 2078305053),
],
)
def test_murmurhash3(key, expected):
assert murmurhash3(key.encode("utf-8"), SEED) == expected
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 2.484738
| 2
|
succinctly/multi_class/crammer_singer.py
|
syncfusioncontent/Support-Vector-Machines-Succinctly
| 59
|
6626998
|
<reponame>syncfusioncontent/Support-Vector-Machines-Succinctly
from succinctly.multi_class import load_X, load_y
from sklearn.svm import LinearSVC
import numpy as np
# Load the example training set: feature matrix X and class labels y.
X = load_X()
y = load_y()
# Train a linear SVM using the Crammer-Singer multi-class formulation
# (one joint optimization over all classes, rather than one-vs-rest).
clf = LinearSVC(C=1000, multi_class='crammer_singer')
clf.fit(X,y)
# Make predictions on two data points
X_to_predict = np.array([[5,5],[2,5]])
print(clf.predict(X_to_predict)) # prints [4 1]
|
from succinctly.multi_class import load_X, load_y
from sklearn.svm import LinearSVC
import numpy as np
X = load_X()
y = load_y()
clf = LinearSVC(C=1000, multi_class='crammer_singer')
clf.fit(X,y)
# Make predictions on two data points
X_to_predict = np.array([[5,5],[2,5]])
print(clf.predict(X_to_predict)) # prints [4 1]
|
en
| 0.727178
|
# Make predictions on two data points # prints [4 1]
| 3.018105
| 3
|
fuzzing_cli/fuzz/ide/truffle.py
|
ConsenSys/diligence-fuzzing
| 8
|
6626999
|
<filename>fuzzing_cli/fuzz/ide/truffle.py
import json
from os.path import abspath
from pathlib import Path
from subprocess import Popen, TimeoutExpired
from tempfile import TemporaryFile
from typing import Any, Dict, List
from fuzzing_cli.fuzz.exceptions import BuildArtifactsError
from fuzzing_cli.fuzz.ide.generic import IDEArtifacts, JobBuilder
from fuzzing_cli.util import LOGGER, sol_files_by_directory
class TruffleArtifacts(IDEArtifacts):
    """Collects contract and source build artifacts from a Truffle project,
    resolving source-path ordering through the Truffle DB."""

    def __init__(self, project_dir: str, build_dir=None, targets=None):
        self._include: List[str] = []
        if targets:
            include = []
            for target in targets:
                # targets could be specified using relative path. But sourcePath in truffle artifacts
                # will use absolute paths, so we need to use absolute paths in targets as well
                include.extend(
                    [abspath(file_path) for file_path in sol_files_by_directory(target)]
                )
            self._include = include
        self._build_dir = build_dir or Path("./build/contracts")
        build_files_by_source_file = self._get_build_artifacts(self._build_dir)
        project_sources = self._get_project_sources(project_dir)
        self._contracts, self._sources = self.fetch_data(
            build_files_by_source_file, project_sources
        )

    def fetch_data(
        self, build_files_by_source_file, project_sources: Dict[str, List[str]]
    ):
        """Reshape raw Truffle build artifacts into the contract/source
        dictionaries consumed by the FaaS payload builder.

        Returns a tuple ``(contracts_by_source_file, sources_by_path)``.
        Raises BuildArtifactsError when an artifact is missing an expected key.
        """
        result_contracts = {}
        result_sources = {}
        for source_file, contracts in build_files_by_source_file.items():
            if source_file not in self._include:
                continue
            result_contracts[source_file] = []
            for contract in contracts:
                # We get the build items from truffle and rename them into the properties used by the FaaS
                try:
                    result_contracts[source_file] += [
                        {
                            "sourcePaths": {
                                i: k
                                for i, k in enumerate(
                                    project_sources[contract["contractName"]]
                                )
                            },
                            "deployedSourceMap": contract["deployedSourceMap"],
                            "deployedBytecode": contract["deployedBytecode"],
                            "sourceMap": contract["sourceMap"],
                            "bytecode": contract["bytecode"],
                            "contractName": contract["contractName"],
                            "mainSourceFile": contract["sourcePath"],
                        }
                    ]
                except KeyError as e:
                    raise BuildArtifactsError(
                        f"Build artifact did not contain expected key. Contract: {contract}: \n{e}"
                    )
                for file_index, source_file_dep in enumerate(
                    project_sources[contract["contractName"]]
                ):
                    if source_file_dep in result_sources.keys():
                        continue
                    if source_file_dep not in build_files_by_source_file:
                        # Bug fix: log the dependency that is actually missing
                        # (source_file_dep), not the file being processed.
                        LOGGER.debug(f"{source_file_dep} not found.")
                        continue
                    # We can select any dict on the build_files_by_source_file[source_file] array
                    # because the .source and .ast values will be the same in all.
                    target_file = build_files_by_source_file[source_file_dep][0]
                    result_sources[source_file_dep] = {
                        "fileIndex": file_index,
                        "source": target_file["source"],
                        "ast": target_file["ast"],
                    }
        return result_contracts, result_sources

    @staticmethod
    def query_truffle_db(query: str, project_dir: str) -> Dict[str, Any]:
        """Run ``truffle db query`` in *project_dir* and return the "data"
        field of the JSON response.

        Raises BuildArtifactsError on empty output, timeout, any other
        subprocess/JSON failure, or a response missing the "data" field.
        """
        try:
            # here we're using the tempfile to overcome the subprocess.PIPE's buffer size limit (65536 bytes).
            # This limit becomes a problem on a large sized output which will be truncated, resulting to an invalid json
            with TemporaryFile() as stdout_file, TemporaryFile() as stderr_file:
                with Popen(
                    ["truffle", "db", "query", f"{query}"],
                    stdout=stdout_file,
                    stderr=stderr_file,
                    cwd=project_dir,
                ) as p:
                    p.communicate(timeout=3 * 60)
                if stdout_file.tell() == 0:
                    error = ""
                    if stderr_file.tell() > 0:
                        stderr_file.seek(0)
                        error = f"\nError: {str(stderr_file.read())}"
                    raise BuildArtifactsError(
                        f'Empty response from the Truffle DB.\nQuery: "{query}"{error}'
                    )
                stdout_file.seek(0)
                result = json.load(stdout_file)
        except BuildArtifactsError:
            # Re-raise as-is; `raise e` would needlessly rebuild the traceback.
            raise
        except TimeoutExpired:
            raise BuildArtifactsError(f'Truffle DB query timeout.\nQuery: "{query}"')
        except Exception as e:
            raise BuildArtifactsError(
                f'Truffle DB query error.\nQuery: "{query}"'
            ) from e
        if not result.get("data"):
            raise BuildArtifactsError(
                f'"data" field is not found in the query result.\n Result: "{json.dumps(result)}".\nQuery: "{query}"'
            )
        return result.get("data")

    @staticmethod
    def _get_project_sources(project_dir: str) -> Dict[str, List[str]]:
        """Return a mapping of contract name -> ordered list of source paths,
        resolved via two Truffle DB GraphQL queries (project id, then sources).

        Raises BuildArtifactsError when the project or its contracts are absent.
        """
        result = TruffleArtifacts.query_truffle_db(
            f'query {{ projectId(input: {{ directory: "{project_dir}" }}) }}',
            project_dir,
        )
        project_id = result.get("projectId")
        if not project_id:
            raise BuildArtifactsError(
                f'No project artifacts found. Path: "{project_dir}"'
            )
        result = TruffleArtifacts.query_truffle_db(
            f"""
            {{
                project(id:"{project_id}") {{
                    contracts {{
                        name
                        compilation {{
                            processedSources {{
                                source {{
                                    sourcePath
                                }}
                            }}
                        }}
                    }}
                }}
            }}
            """,
            project_dir,
        )
        contracts = {}
        if not result.get("project") or not result["project"]["contracts"]:
            raise BuildArtifactsError(
                f'No project artifacts found. Path: "{project_dir}". Project ID "{project_id}"'
            )
        for contract in result["project"]["contracts"]:
            contracts[contract["name"]] = list(
                map(
                    lambda x: x["source"]["sourcePath"],
                    contract["compilation"]["processedSources"],
                )
            )
        return contracts

    @property
    def contracts(self):
        """Contracts payload keyed by source file path."""
        return self._contracts

    @property
    def sources(self):
        """Sources payload keyed by source file path."""
        return self._sources
class TruffleJob:
    """Assembles a fuzzing job payload from a Truffle project's artifacts."""
    def __init__(self, project_dir: str, target: List[str], build_dir: Path):
        # Gather contracts/sources for the requested targets and hand them
        # to the generic JobBuilder for payload assembly.
        artifacts = TruffleArtifacts(project_dir, build_dir, targets=target)
        self._jb = JobBuilder(artifacts)
        self.payload = None  # populated by generate_payload()
    def generate_payload(self):
        """Build and cache the job payload from the collected artifacts."""
        self.payload = self._jb.payload()
|
<filename>fuzzing_cli/fuzz/ide/truffle.py
import json
from os.path import abspath
from pathlib import Path
from subprocess import Popen, TimeoutExpired
from tempfile import TemporaryFile
from typing import Any, Dict, List
from fuzzing_cli.fuzz.exceptions import BuildArtifactsError
from fuzzing_cli.fuzz.ide.generic import IDEArtifacts, JobBuilder
from fuzzing_cli.util import LOGGER, sol_files_by_directory
class TruffleArtifacts(IDEArtifacts):
def __init__(self, project_dir: str, build_dir=None, targets=None):
self._include: List[str] = []
if targets:
include = []
for target in targets:
# targets could be specified using relative path. But sourcePath in truffle artifacts
# will use absolute paths, so we need to use absolute paths in targets as well
include.extend(
[abspath(file_path) for file_path in sol_files_by_directory(target)]
)
self._include = include
self._build_dir = build_dir or Path("./build/contracts")
build_files_by_source_file = self._get_build_artifacts(self._build_dir)
project_sources = self._get_project_sources(project_dir)
self._contracts, self._sources = self.fetch_data(
build_files_by_source_file, project_sources
)
def fetch_data(
self, build_files_by_source_file, project_sources: Dict[str, List[str]]
):
result_contracts = {}
result_sources = {}
for source_file, contracts in build_files_by_source_file.items():
if source_file not in self._include:
continue
result_contracts[source_file] = []
for contract in contracts:
# We get the build items from truffle and rename them into the properties used by the FaaS
try:
result_contracts[source_file] += [
{
"sourcePaths": {
i: k
for i, k in enumerate(
project_sources[contract["contractName"]]
)
},
"deployedSourceMap": contract["deployedSourceMap"],
"deployedBytecode": contract["deployedBytecode"],
"sourceMap": contract["sourceMap"],
"bytecode": contract["bytecode"],
"contractName": contract["contractName"],
"mainSourceFile": contract["sourcePath"],
}
]
except KeyError as e:
raise BuildArtifactsError(
f"Build artifact did not contain expected key. Contract: {contract}: \n{e}"
)
for file_index, source_file_dep in enumerate(
project_sources[contract["contractName"]]
):
if source_file_dep in result_sources.keys():
continue
if source_file_dep not in build_files_by_source_file:
LOGGER.debug(f"{source_file} not found.")
continue
# We can select any dict on the build_files_by_source_file[source_file] array
# because the .source and .ast values will be the same in all.
target_file = build_files_by_source_file[source_file_dep][0]
result_sources[source_file_dep] = {
"fileIndex": file_index,
"source": target_file["source"],
"ast": target_file["ast"],
}
return result_contracts, result_sources
@staticmethod
def query_truffle_db(query: str, project_dir: str) -> Dict[str, Any]:
try:
# here we're using the tempfile to overcome the subprocess.PIPE's buffer size limit (65536 bytes).
# This limit becomes a problem on a large sized output which will be truncated, resulting to an invalid json
with TemporaryFile() as stdout_file, TemporaryFile() as stderr_file:
with Popen(
["truffle", "db", "query", f"{query}"],
stdout=stdout_file,
stderr=stderr_file,
cwd=project_dir,
) as p:
p.communicate(timeout=3 * 60)
if stdout_file.tell() == 0:
error = ""
if stderr_file.tell() > 0:
stderr_file.seek(0)
error = f"\nError: {str(stderr_file.read())}"
raise BuildArtifactsError(
f'Empty response from the Truffle DB.\nQuery: "{query}"{error}'
)
stdout_file.seek(0)
result = json.load(stdout_file)
except BuildArtifactsError as e:
raise e
except TimeoutExpired:
raise BuildArtifactsError(f'Truffle DB query timeout.\nQuery: "{query}"')
except Exception as e:
raise BuildArtifactsError(
f'Truffle DB query error.\nQuery: "{query}"'
) from e
if not result.get("data"):
raise BuildArtifactsError(
f'"data" field is not found in the query result.\n Result: "{json.dumps(result)}".\nQuery: "{query}"'
)
return result.get("data")
@staticmethod
def _get_project_sources(project_dir: str) -> Dict[str, List[str]]:
result = TruffleArtifacts.query_truffle_db(
f'query {{ projectId(input: {{ directory: "{project_dir}" }}) }}',
project_dir,
)
project_id = result.get("projectId")
if not project_id:
raise BuildArtifactsError(
f'No project artifacts found. Path: "{project_dir}"'
)
result = TruffleArtifacts.query_truffle_db(
f"""
{{
project(id:"{project_id}") {{
contracts {{
name
compilation {{
processedSources {{
source {{
sourcePath
}}
}}
}}
}}
}}
}}
""",
project_dir,
)
contracts = {}
if not result.get("project") or not result["project"]["contracts"]:
raise BuildArtifactsError(
f'No project artifacts found. Path: "{project_dir}". Project ID "{project_id}"'
)
for contract in result["project"]["contracts"]:
contracts[contract["name"]] = list(
map(
lambda x: x["source"]["sourcePath"],
contract["compilation"]["processedSources"],
)
)
return contracts
@property
def contracts(self):
return self._contracts
@property
def sources(self):
return self._sources
class TruffleJob:
def __init__(self, project_dir: str, target: List[str], build_dir: Path):
artifacts = TruffleArtifacts(project_dir, build_dir, targets=target)
self._jb = JobBuilder(artifacts)
self.payload = None
def generate_payload(self):
self.payload = self._jb.payload()
|
en
| 0.800508
|
# targets could be specified using relative path. But sourcePath in truffle artifacts # will use absolute paths, so we need to use absolute paths in targets as well # We get the build items from truffle and rename them into the properties used by the FaaS # We can select any dict on the build_files_by_source_file[source_file] array # because the .source and .ast values will be the same in all. # here we're using the tempfile to overcome the subprocess.PIPE's buffer size limit (65536 bytes). # This limit becomes a problem on a large sized output which will be truncated, resulting to an invalid json {{ project(id:"{project_id}") {{ contracts {{ name compilation {{ processedSources {{ source {{ sourcePath }} }} }} }} }} }}
| 2.069638
| 2
|
contact/views.py
|
robtagg/django-contact
| 0
|
6627000
|
from django.conf import settings
from django.shortcuts import redirect, render
from django.urls import reverse_lazy
from . import forms
def index(request):
    """
    Main contact message view
    """
    form = forms.ContactForm(data=request.POST or None)
    if not form.is_valid():
        # Initial GET or invalid submission: re-render the form, flagging a
        # prior successful send via the ?thanks query parameter.
        return render(
            request,
            "contact/contact.html",
            {"form": form, "thanks": "thanks" in request.GET},
        )
    form.process(ip_address=request.META["REMOTE_ADDR"])
    success_url = getattr(settings, "CONTACT_SUCCESS_URL", "contact:index")
    return redirect(reverse_lazy(success_url) + "?thanks")
|
from django.conf import settings
from django.shortcuts import redirect, render
from django.urls import reverse_lazy
from . import forms
def index(request):
"""
Main contact message view
"""
form = forms.ContactForm(data=request.POST or None)
if form.is_valid():
form.process(ip_address=request.META["REMOTE_ADDR"])
return redirect(
reverse_lazy(getattr(settings, "CONTACT_SUCCESS_URL", "contact:index")) + "?thanks"
)
return render(
request, "contact/contact.html", {"form": form, "thanks": "thanks" in request.GET}
)
|
en
| 0.441959
|
Main contact message view
| 1.96772
| 2
|
participant/queryset.py
|
Arpit8081/Phishtray_Edited_Version
| 2
|
6627001
|
from django.apps import apps
from django.db.models import QuerySet
from users.models import User
class ParticipantQuerySet(QuerySet):
    def filter_by_user(self, user):
        """Restrict participants to organizations the given user may see;
        superusers see everything, invalid users see nothing."""
        from .models import Organization
        if not user or not isinstance(user, User):
            return self.none()
        if user.is_superuser:
            return self
        visible_orgs = Organization.objects.filter_by_user(user=user)
        return self.filter(organization__in=visible_orgs)
class OrganizationQuerySet(QuerySet):
    def filter_by_user(self, user):
        """Limit organizations to those visible to *user*: all for a
        superuser, only their own organization otherwise."""
        User = apps.get_model('users', 'User')
        if not user or not isinstance(user, User):
            return self.none()
        return self if user.is_superuser else self.filter(id=user.organization_id)
|
from django.apps import apps
from django.db.models import QuerySet
from users.models import User
class ParticipantQuerySet(QuerySet):
    """Queryset with per-user visibility filtering for participants."""

    def filter_by_user(self, user):
        """Return only the participants visible to ``user``."""
        # Imported here to avoid a circular import at module load time.
        from .models import Organization
        if not user or not isinstance(user, User):
            return self.none()
        if not user.is_superuser:
            # Non-superusers only see participants of organizations that
            # are visible to them.
            return self.filter(
                organization__in=Organization.objects.filter_by_user(user=user)
            )
        return self


class OrganizationQuerySet(QuerySet):
    """Queryset with per-user visibility filtering for organizations."""

    def filter_by_user(self, user):
        """Return only the organizations visible to ``user``."""
        # apps.get_model avoids importing users.models at module load time.
        User = apps.get_model('users', 'User')
        if not user or not isinstance(user, User):
            return self.none()
        if user.is_superuser:
            return self
        return self.filter(id=user.organization_id)
|
none
| 1
| 2.233503
| 2
|
|
examples/amplpy/simplest_examples/netflow.py
|
adampkehoe/ticdat
| 15
|
6627002
|
# Simplest multi-commodity flow example using amplpy and ticdat
from ticdat import PanDatFactory, standard_main
try: # if you don't have amplpy installed, the code will still load and then fail on solve
from amplpy import AMPL
except:
AMPL = None
# Input schema: for each table, primary-key fields first, then data fields.
input_schema = PanDatFactory (
    commodities=[["Name"], ["Volume"]],
    nodes=[["Name"], []],
    arcs=[["Source", "Destination"] ,["Capacity"]],
    cost=[["Commodity", "Source", "Destination"], ["Cost"]],
    inflow=[["Commodity", "Node"], ["Quantity"]]
)

# Solution schema: optimal flows plus a generic KPI parameters table.
solution_schema = PanDatFactory(
    flow=[["Commodity", "Source", "Destination"], ["Quantity"]],
    parameters=[["Parameter"],["Value"]])
def solve(dat):
    """Solve the multi-commodity network flow model held in ``dat``.

    :param dat: a PanDat object conforming to ``input_schema``
    :return: a PanDat object conforming to ``solution_schema`` containing the
             optimal flows and the total-cost KPI, or ``None`` when the
             model is infeasible.
    """
    # Create the AMPL session exactly once. (Fixes a defect where AMPL()
    # was instantiated twice back to back, leaking the first interpreter.)
    ampl = AMPL()
    ampl.setOption('solver', 'gurobi')

    # The AMPL formulation of the multi-commodity min-cost flow model.
    ampl.eval("""
    set NODES;
    set ARCS within {i in NODES, j in NODES: i <> j};
    set COMMODITIES;

    param volume {COMMODITIES} > 0, < Infinity;
    param capacity {ARCS} >= 0;
    param cost {COMMODITIES,ARCS} >= 0, < Infinity;
    param inflow {COMMODITIES,NODES} > -Infinity, < Infinity;

    var Flow {COMMODITIES,ARCS} >= 0;

    minimize TotalCost:
        sum {h in COMMODITIES, (i,j) in ARCS} cost[h,i,j] * Flow[h,i,j];

    subject to Capacity {(i,j) in ARCS}:
        sum {h in COMMODITIES} Flow[h,i,j] * volume[h] <= capacity[i,j];

    subject to Conservation {h in COMMODITIES, j in NODES}:
        sum {(i,j) in ARCS} Flow[h,i,j] + inflow[h,j] = sum {(j,i) in ARCS} Flow[h,j,i];
    """)

    # copy the tables to amplpy.DataFrame objects, renaming the data fields as needed
    dat = input_schema.copy_to_ampl(dat, field_renamings={("commodities", "Volume"): "volume",
            ("arcs", "Capacity"): "capacity", ("cost", "Cost"): "cost", ("inflow", "Quantity"): "inflow"})
    # load the amplpy.DataFrame objects into the AMPL model, explicitly identifying how to populate the AMPL sets
    input_schema.set_ampl_data(dat, ampl, {"nodes": "NODES", "arcs": "ARCS",
                                           "commodities": "COMMODITIES"})
    ampl.solve()

    if ampl.getValue("solve_result") != "infeasible":
        # solution tables are populated by mapping solution (table, field) to AMPL variable
        sln = solution_schema.copy_from_ampl_variables(
            {('flow' ,'Quantity'):ampl.getVariable("Flow")})
        # append the solution KPI results to the solution parameters table
        sln.parameters.loc[0] = ['Total Cost', ampl.getObjective('TotalCost').value()]
        return sln

# when run from the command line, will read/write xls/csv/json/db files
if __name__ == "__main__":
    standard_main(input_schema, solution_schema, solve)
|
# Simplest multi-commodity flow example using amplpy and ticdat
from ticdat import PanDatFactory, standard_main
try: # if you don't have amplpy installed, the code will still load and then fail on solve
from amplpy import AMPL
except:
AMPL = None
# Input schema: for each table, primary-key fields first, then data fields.
input_schema = PanDatFactory (
    commodities=[["Name"], ["Volume"]],
    nodes=[["Name"], []],
    arcs=[["Source", "Destination"] ,["Capacity"]],
    cost=[["Commodity", "Source", "Destination"], ["Cost"]],
    inflow=[["Commodity", "Node"], ["Quantity"]]
)

# Solution schema: optimal flows plus a generic KPI parameters table.
solution_schema = PanDatFactory(
    flow=[["Commodity", "Source", "Destination"], ["Quantity"]],
    parameters=[["Parameter"],["Value"]])

def solve(dat):
    """Solve the multi-commodity network flow model; returns a solution
    PanDat object, or None when the model is infeasible."""
    ampl = AMPL()
    ampl.setOption('solver', 'gurobi')
    # NOTE(review): the AMPL session is constructed twice here and the
    # first instance is discarded -- looks unintentional, confirm and
    # remove the duplicated pair of lines.
    ampl = AMPL()
    ampl.setOption('solver', 'gurobi')

    # The AMPL formulation of the multi-commodity min-cost flow model.
    ampl.eval("""
    set NODES;
    set ARCS within {i in NODES, j in NODES: i <> j};
    set COMMODITIES;

    param volume {COMMODITIES} > 0, < Infinity;
    param capacity {ARCS} >= 0;
    param cost {COMMODITIES,ARCS} >= 0, < Infinity;
    param inflow {COMMODITIES,NODES} > -Infinity, < Infinity;

    var Flow {COMMODITIES,ARCS} >= 0;

    minimize TotalCost:
        sum {h in COMMODITIES, (i,j) in ARCS} cost[h,i,j] * Flow[h,i,j];

    subject to Capacity {(i,j) in ARCS}:
        sum {h in COMMODITIES} Flow[h,i,j] * volume[h] <= capacity[i,j];

    subject to Conservation {h in COMMODITIES, j in NODES}:
        sum {(i,j) in ARCS} Flow[h,i,j] + inflow[h,j] = sum {(j,i) in ARCS} Flow[h,j,i];
    """)

    # copy the tables to amplpy.DataFrame objects, renaming the data fields as needed
    dat = input_schema.copy_to_ampl(dat, field_renamings={("commodities", "Volume"): "volume",
            ("arcs", "Capacity"): "capacity", ("cost", "Cost"): "cost", ("inflow", "Quantity"): "inflow"})
    # load the amplpy.DataFrame objects into the AMPL model, explicitly identifying how to populate the AMPL sets
    input_schema.set_ampl_data(dat, ampl, {"nodes": "NODES", "arcs": "ARCS",
                                           "commodities": "COMMODITIES"})
    ampl.solve()

    if ampl.getValue("solve_result") != "infeasible":
        # solution tables are populated by mapping solution (table, field) to AMPL variable
        sln = solution_schema.copy_from_ampl_variables(
            {('flow' ,'Quantity'):ampl.getVariable("Flow")})
        # append the solution KPI results to the solution parameters table
        sln.parameters.loc[0] = ['Total Cost', ampl.getObjective('TotalCost').value()]
        return sln

# when run from the command line, will read/write xls/csv/json/db files
if __name__ == "__main__":
    standard_main(input_schema, solution_schema, solve)
|
en
| 0.748584
|
# Simplest multi-commodity flow example using amplpy and ticdat # if you don't have amplpy installed, the code will still load and then fail on solve set NODES; set ARCS within {i in NODES, j in NODES: i <> j}; set COMMODITIES; param volume {COMMODITIES} > 0, < Infinity; param capacity {ARCS} >= 0; param cost {COMMODITIES,ARCS} >= 0, < Infinity; param inflow {COMMODITIES,NODES} > -Infinity, < Infinity; var Flow {COMMODITIES,ARCS} >= 0; minimize TotalCost: sum {h in COMMODITIES, (i,j) in ARCS} cost[h,i,j] * Flow[h,i,j]; subject to Capacity {(i,j) in ARCS}: sum {h in COMMODITIES} Flow[h,i,j] * volume[h] <= capacity[i,j]; subject to Conservation {h in COMMODITIES, j in NODES}: sum {(i,j) in ARCS} Flow[h,i,j] + inflow[h,j] = sum {(j,i) in ARCS} Flow[h,j,i]; # copy the tables to amplpy.DataFrame objects, renaming the data fields as needed # load the amplpy.DataFrame objects into the AMPL model, explicitly identifying how to populate the AMPL sets # solution tables are populated by mapping solution (table, field) to AMPL variable # append the solution KPI results to the solution parameters table # when run from the command line, will read/write xls/csv/json/db files
| 2.764125
| 3
|
eval_retrieval_video.py
|
MikeWangWZHL/BLIP
| 473
|
6627003
|
<reponame>MikeWangWZHL/BLIP
'''
* Copyright (c) 2022, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
* By <NAME>
'''
import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from torch.utils.data import DataLoader
from models.blip_retrieval import blip_retrieval
import utils
from data.video_dataset import VideoDataset
@torch.no_grad()
def evaluation(model, data_loader, tokenizer, device, config):
    """Score every (video, text) pair of the test set for retrieval.

    Stage 1 computes a coarse similarity matrix from the unimodal text and
    video embeddings; stage 2 rescores only the top ``config['k_test']``
    candidates per query with the cross-attention ITM head. Stage-2 rows
    are sharded across distributed ranks and summed back via all_reduce.

    Returns ``(scores_v2t, scores_t2v)`` as numpy arrays.
    """
    model.eval()

    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Evaluation:'

    print('Computing features for evaluation...')
    start_time = time.time()

    # --- text side: encode all captions in batches of text_bs ---
    texts = data_loader.dataset.text
    num_text = len(texts)
    text_bs = 256
    text_ids = []
    text_embeds = []
    text_atts = []
    for i in range(0, num_text, text_bs):
        text = texts[i: min(num_text, i+text_bs)]
        text_input = tokenizer(text, padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(device)
        text_output = model.text_encoder(text_input.input_ids, attention_mask = text_input.attention_mask, mode='text')
        text_embed = F.normalize(model.text_proj(text_output.last_hidden_state[:,0,:]))
        text_embeds.append(text_embed)
        text_ids.append(text_input.input_ids)
        text_atts.append(text_input.attention_mask)
    text_embeds = torch.cat(text_embeds,dim=0)
    text_ids = torch.cat(text_ids,dim=0)
    text_atts = torch.cat(text_atts,dim=0)
    # Overwrite the first token id with the model's extra special token
    # before the later cross-attention (matching) passes.
    text_ids[:,0] = tokenizer.additional_special_tokens_ids[0]

    # --- video side: per-frame features; frame embeddings mean-pooled ---
    video_feats = []
    video_embeds = []
    for video, video_id in data_loader:
        # video: (batch B, frames N, C, W, H) -> flatten frames into batch
        B,N,C,W,H = video.size()
        video = video.view(-1,C,W,H)
        video = video.to(device,non_blocking=True)
        video_feat = model.visual_encoder(video)
        video_embed = model.vision_proj(video_feat[:,0,:])
        video_embed = video_embed.view(B,N,-1).mean(dim=1)
        video_embed = F.normalize(video_embed,dim=-1)
        video_feat = video_feat.view(B,-1,video_feat.shape[-1])
        video_feats.append(video_feat.cpu())  # held on CPU to bound GPU memory
        video_embeds.append(video_embed)
    video_feats = torch.cat(video_feats,dim=0)
    video_embeds = torch.cat(video_embeds,dim=0)

    # Coarse similarity between every video and every text embedding.
    sims_matrix = video_embeds @ text_embeds.t()
    # NOTE(review): shaped (len(texts), len(texts)); assumes a one-to-one
    # video/text pairing in the test set -- confirm for the dataset used.
    score_matrix_v2t = torch.full((len(texts),len(texts)),-100.0).to(device)

    # Shard the expensive rescoring rows across distributed workers.
    num_tasks = utils.get_world_size()
    rank = utils.get_rank()
    step = sims_matrix.size(0)//num_tasks + 1
    start = rank*step
    end = min(sims_matrix.size(0),start+step)

    # --- video -> text: ITM-rescore the k_test best captions per video ---
    for i,sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
        topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)
        encoder_output = video_feats[start+i].repeat(config['k_test'],1,1).to(device,non_blocking=True)
        encoder_att = torch.ones(encoder_output.size()[:-1],dtype=torch.long).to(device,non_blocking=True)
        output = model.text_encoder(text_ids[topk_idx],
                                    attention_mask = text_atts[topk_idx],
                                    encoder_hidden_states = encoder_output,
                                    encoder_attention_mask = encoder_att,
                                    return_dict = True,
                                   )
        score = model.itm_head(output.last_hidden_state[:,0,:])[:,1]
        score_matrix_v2t[start+i,topk_idx] = score + topk_sim

    # --- text -> video: same procedure on the transposed similarities ---
    sims_matrix = sims_matrix.t()
    score_matrix_t2v = torch.full((len(texts),len(texts)),-100.0).to(device)

    step = sims_matrix.size(0)//num_tasks + 1
    start = rank*step
    end = min(sims_matrix.size(0),start+step)

    for i,sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
        topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)
        encoder_output = video_feats[topk_idx].to(device,non_blocking=True)
        encoder_att = torch.ones(encoder_output.size()[:-1],dtype=torch.long).to(device,non_blocking=True)
        output = model.text_encoder(text_ids[start+i].repeat(config['k_test'],1),
                                    attention_mask = text_atts[start+i].repeat(config['k_test'],1),
                                    encoder_hidden_states = encoder_output,
                                    encoder_attention_mask = encoder_att,
                                    return_dict = True,
                                   )
        score = model.itm_head(output.last_hidden_state[:,0,:])[:,1]
        score_matrix_t2v[start+i,topk_idx] = score + topk_sim

    # NOTE(review): reads the module-level ``args`` rather than a parameter;
    # this function must run in a context where ``args`` is defined.
    if args.distributed:
        dist.barrier()
        torch.distributed.all_reduce(score_matrix_v2t, op=torch.distributed.ReduceOp.SUM)
        torch.distributed.all_reduce(score_matrix_t2v, op=torch.distributed.ReduceOp.SUM)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Evaluation time {}'.format(total_time_str))

    return score_matrix_v2t.cpu().numpy(), score_matrix_t2v.cpu().numpy()
@torch.no_grad()
def itm_eval(scores_v2t, scores_t2v, txt2vmg, vid2txt):
    """Compute retrieval recall metrics from the score matrices.

    ``scores_v2t``/``scores_t2v`` hold one query per row; ``vid2txt`` and
    ``txt2vmg`` map each query index to the index of its gold item.
    Returns a dict of recall@{1,5,10} per direction, their means, and the
    median text-to-video rank.
    """
    def _gold_positions(score_matrix, gold):
        # For each query row, the position of the gold item in the
        # descending-score ordering (0 == retrieved first).
        positions = np.zeros(score_matrix.shape[0])
        for row_idx, row_scores in enumerate(score_matrix):
            order = np.argsort(row_scores)[::-1]
            positions[row_idx] = np.where(order == gold[row_idx])[0][0]
        return positions

    def _recall_at(positions, k):
        # Percentage of queries whose gold item appears within the top k.
        return 100.0 * len(np.where(positions < k)[0]) / len(positions)

    # Video -> Text retrieval
    v2t_positions = _gold_positions(scores_v2t, vid2txt)
    tr1, tr5, tr10 = (_recall_at(v2t_positions, k) for k in (1, 5, 10))

    # Text -> Video retrieval
    t2v_positions = _gold_positions(scores_t2v, txt2vmg)
    mdR = np.median(t2v_positions + 1)
    vr1, vr5, vr10 = (_recall_at(t2v_positions, k) for k in (1, 5, 10))

    tr_mean = (tr1 + tr5 + tr10) / 3
    vr_mean = (vr1 + vr5 + vr10) / 3
    r_mean = (tr_mean + vr_mean) / 2

    return {'txt_r1': tr1,
            'txt_r5': tr5,
            'txt_r10': tr10,
            'txt_r_mean': tr_mean,
            'vid_r1': vr1,
            'vid_r5': vr5,
            'vid_r10': vr10,
            'vid_r_mean': vr_mean,
            'vid_mdR': mdR,
            'r_mean': r_mean}
def main(args, config):
    """Build the test dataset and model, run retrieval evaluation, and (on
    the main process only) append the metrics to <output_dir>/test_result.txt.
    """
    utils.init_distributed_mode(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility (offset per rank)
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    #### Dataset ####
    print("Creating retrieval dataset")
    test_dataset = VideoDataset(config['video_root'],config['ann_root'],num_frm=config['num_frm_test'],
                                max_img_size=config['image_size'], frm_sampling_strategy='uniform')

    test_loader = DataLoader(
        test_dataset,
        batch_size=config['batch_size'],
        num_workers=4,
        pin_memory=True,
        drop_last=False,
        shuffle=False,
    )

    #### Model ####
    print("Creating model")
    model = blip_retrieval(pretrained=config['pretrained'], image_size=config['image_size'], vit=config['vit'])
    model = model.to(device)

    # Keep an unwrapped handle so evaluation can reach model attributes
    # (e.g. .tokenizer) through the DDP wrapper.
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    score_v2t, score_t2v, = evaluation(model_without_ddp, test_loader, model_without_ddp.tokenizer, device, config)

    if utils.is_main_process():
        # Only the main process computes and persists the retrieval metrics.
        test_result = itm_eval(score_v2t, score_t2v, test_loader.dataset.txt2video, test_loader.dataset.video2txt)
        print(test_result)

        log_stats = {**{f'{k}': v for k, v in test_result.items()},}
        with open(os.path.join(args.output_dir, "test_result.txt"),"a") as f:
            f.write(json.dumps(log_stats) + "\n")
if __name__ == '__main__':
    # Command-line entry point: parse args, load the YAML config, mirror it
    # into the output directory, then run the evaluation.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/retrieval_msrvtt.yaml')
    parser.add_argument('--output_dir', default='output/Retrieval_msrvtt')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    # NOTE(review): type=bool converts any non-empty string (including
    # "False") to True, so this flag cannot be disabled from the command
    # line -- confirm whether explicit parsing was intended.
    parser.add_argument('--distributed', default=True, type=bool)
    args = parser.parse_args()

    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)

    Path(args.output_dir).mkdir(parents=True, exist_ok=True)

    yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))

    main(args, config)
|
'''
* Copyright (c) 2022, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
* By <NAME>
'''
import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from torch.utils.data import DataLoader
from models.blip_retrieval import blip_retrieval
import utils
from data.video_dataset import VideoDataset
# NOTE(review): this is a second, duplicated rendering of the same module
# content (evaluation / itm_eval / main / CLI entry point).
@torch.no_grad()
def evaluation(model, data_loader, tokenizer, device, config):
    """Score every (video, text) pair: coarse embedding similarity, then
    ITM rescoring of the top k_test candidates per query, sharded across
    distributed ranks. Returns (scores_v2t, scores_t2v) as numpy arrays."""
    # test
    model.eval()

    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Evaluation:'

    print('Computing features for evaluation...')
    start_time = time.time()

    texts = data_loader.dataset.text
    num_text = len(texts)
    text_bs = 256
    text_ids = []
    text_embeds = []
    text_atts = []
    for i in range(0, num_text, text_bs):
        text = texts[i: min(num_text, i+text_bs)]
        text_input = tokenizer(text, padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(device)
        text_output = model.text_encoder(text_input.input_ids, attention_mask = text_input.attention_mask, mode='text')
        text_embed = F.normalize(model.text_proj(text_output.last_hidden_state[:,0,:]))
        text_embeds.append(text_embed)
        text_ids.append(text_input.input_ids)
        text_atts.append(text_input.attention_mask)
    text_embeds = torch.cat(text_embeds,dim=0)
    text_ids = torch.cat(text_ids,dim=0)
    text_atts = torch.cat(text_atts,dim=0)
    text_ids[:,0] = tokenizer.additional_special_tokens_ids[0]

    video_feats = []
    video_embeds = []
    for video, video_id in data_loader:
        B,N,C,W,H = video.size()
        video = video.view(-1,C,W,H)
        video = video.to(device,non_blocking=True)
        video_feat = model.visual_encoder(video)
        video_embed = model.vision_proj(video_feat[:,0,:])
        video_embed = video_embed.view(B,N,-1).mean(dim=1)
        video_embed = F.normalize(video_embed,dim=-1)
        video_feat = video_feat.view(B,-1,video_feat.shape[-1])
        video_feats.append(video_feat.cpu())
        video_embeds.append(video_embed)
    video_feats = torch.cat(video_feats,dim=0)
    video_embeds = torch.cat(video_embeds,dim=0)

    sims_matrix = video_embeds @ text_embeds.t()
    score_matrix_v2t = torch.full((len(texts),len(texts)),-100.0).to(device)

    num_tasks = utils.get_world_size()
    rank = utils.get_rank()
    step = sims_matrix.size(0)//num_tasks + 1
    start = rank*step
    end = min(sims_matrix.size(0),start+step)

    for i,sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
        topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)
        encoder_output = video_feats[start+i].repeat(config['k_test'],1,1).to(device,non_blocking=True)
        encoder_att = torch.ones(encoder_output.size()[:-1],dtype=torch.long).to(device,non_blocking=True)
        output = model.text_encoder(text_ids[topk_idx],
                                    attention_mask = text_atts[topk_idx],
                                    encoder_hidden_states = encoder_output,
                                    encoder_attention_mask = encoder_att,
                                    return_dict = True,
                                   )
        score = model.itm_head(output.last_hidden_state[:,0,:])[:,1]
        score_matrix_v2t[start+i,topk_idx] = score + topk_sim

    sims_matrix = sims_matrix.t()
    score_matrix_t2v = torch.full((len(texts),len(texts)),-100.0).to(device)

    step = sims_matrix.size(0)//num_tasks + 1
    start = rank*step
    end = min(sims_matrix.size(0),start+step)

    for i,sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
        topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)
        encoder_output = video_feats[topk_idx].to(device,non_blocking=True)
        encoder_att = torch.ones(encoder_output.size()[:-1],dtype=torch.long).to(device,non_blocking=True)
        output = model.text_encoder(text_ids[start+i].repeat(config['k_test'],1),
                                    attention_mask = text_atts[start+i].repeat(config['k_test'],1),
                                    encoder_hidden_states = encoder_output,
                                    encoder_attention_mask = encoder_att,
                                    return_dict = True,
                                   )
        score = model.itm_head(output.last_hidden_state[:,0,:])[:,1]
        score_matrix_t2v[start+i,topk_idx] = score + topk_sim

    # NOTE(review): relies on the module-level ``args``.
    if args.distributed:
        dist.barrier()
        torch.distributed.all_reduce(score_matrix_v2t, op=torch.distributed.ReduceOp.SUM)
        torch.distributed.all_reduce(score_matrix_t2v, op=torch.distributed.ReduceOp.SUM)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Evaluation time {}'.format(total_time_str))

    return score_matrix_v2t.cpu().numpy(), score_matrix_t2v.cpu().numpy()


@torch.no_grad()
def itm_eval(scores_v2t, scores_t2v, txt2vmg, vid2txt):
    """Compute recall@{1,5,10} per direction, their means, and the median
    text-to-video rank from the score matrices."""
    #Video->Text
    ranks = np.zeros(scores_v2t.shape[0])
    for index,score in enumerate(scores_v2t):
        inds = np.argsort(score)[::-1]
        ranks[index] = np.where(inds == vid2txt[index])[0][0]

    # Compute metrics
    tr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
    tr5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    tr10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)

    #Text->Video
    ranks = np.zeros(scores_t2v.shape[0])
    for index,score in enumerate(scores_t2v):
        inds = np.argsort(score)[::-1]
        ranks[index] = np.where(inds == txt2vmg[index])[0][0]

    mdR = np.median(ranks+1)

    # Compute metrics
    vr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
    vr5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    vr10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)

    tr_mean = (tr1 + tr5 + tr10) / 3
    vr_mean = (vr1 + vr5 + vr10) / 3
    r_mean = (tr_mean + vr_mean) / 2

    eval_result = {'txt_r1': tr1,
                   'txt_r5': tr5,
                   'txt_r10': tr10,
                   'txt_r_mean': tr_mean,
                   'vid_r1': vr1,
                   'vid_r5': vr5,
                   'vid_r10': vr10,
                   'vid_r_mean': vr_mean,
                   'vid_mdR': mdR,
                   'r_mean': r_mean}
    return eval_result


def main(args, config):
    """Build dataset/model, evaluate, and write metrics on the main process."""
    utils.init_distributed_mode(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    #### Dataset ####
    print("Creating retrieval dataset")
    test_dataset = VideoDataset(config['video_root'],config['ann_root'],num_frm=config['num_frm_test'],
                                max_img_size=config['image_size'], frm_sampling_strategy='uniform')

    test_loader = DataLoader(
        test_dataset,
        batch_size=config['batch_size'],
        num_workers=4,
        pin_memory=True,
        drop_last=False,
        shuffle=False,
    )

    #### Model ####
    print("Creating model")
    model = blip_retrieval(pretrained=config['pretrained'], image_size=config['image_size'], vit=config['vit'])
    model = model.to(device)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    score_v2t, score_t2v, = evaluation(model_without_ddp, test_loader, model_without_ddp.tokenizer, device, config)

    if utils.is_main_process():
        test_result = itm_eval(score_v2t, score_t2v, test_loader.dataset.txt2video, test_loader.dataset.video2txt)
        print(test_result)

        log_stats = {**{f'{k}': v for k, v in test_result.items()},}
        with open(os.path.join(args.output_dir, "test_result.txt"),"a") as f:
            f.write(json.dumps(log_stats) + "\n")


if __name__ == '__main__':
    # CLI entry point: parse args, load config, mirror it, then evaluate.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/retrieval_msrvtt.yaml')
    parser.add_argument('--output_dir', default='output/Retrieval_msrvtt')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--distributed', default=True, type=bool)
    args = parser.parse_args()

    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)

    Path(args.output_dir).mkdir(parents=True, exist_ok=True)

    yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))

    main(args, config)
|
en
| 0.515081
|
* Copyright (c) 2022, salesforce.com, inc. * All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause * By <NAME> # test #Video->Text # Compute metrics #Text->Video # Compute metrics # fix the seed for reproducibility #### Dataset #### #### Model ####
| 1.950219
| 2
|
pype/hosts/maya/plugins/publish/validate_no_default_camera.py
|
simonebarbieri/pype
| 0
|
6627004
|
from maya import cmds
import pyblish.api
import pype.api
import pype.hosts.maya.api.action
class ValidateNoDefaultCameras(pyblish.api.InstancePlugin):
    """Ensure no default (startup) cameras are in the instance.

    This might be unnecessary: in the past, referencing/importing files
    that contained the startup cameras sometimes overrode settings on load
    and was sometimes skipped, so published camera instances must not
    include them.
    """

    order = pype.api.ValidateContentsOrder
    hosts = ['maya']
    families = ['camera']
    version = (0, 1, 0)
    label = "No Default Cameras"
    actions = [pype.hosts.maya.api.action.SelectInvalidAction]

    @staticmethod
    def get_invalid(instance):
        """Return the startup cameras contained in the instance."""
        invalid = []
        for camera in cmds.ls(instance, type='camera', long=True):
            if cmds.camera(camera, query=True, startupCamera=True):
                invalid.append(camera)
        return invalid

    def process(self, instance):
        """Validate that the instance contains no startup cameras."""
        invalid = self.get_invalid(instance)
        assert not invalid, "Default cameras found: {0}".format(invalid)
|
from maya import cmds
import pyblish.api
import pype.api
import pype.hosts.maya.api.action
class ValidateNoDefaultCameras(pyblish.api.InstancePlugin):
    """Ensure no default (startup) cameras are in the instance.

    This might be unnecessary. In the past there were some issues with
    referencing/importing files that contained the start up cameras overriding
    settings when being loaded and sometimes being skipped.
    """

    # Pyblish plugin registration data.
    order = pype.api.ValidateContentsOrder
    hosts = ['maya']
    families = ['camera']
    version = (0, 1, 0)
    label = "No Default Cameras"
    actions = [pype.hosts.maya.api.action.SelectInvalidAction]

    @staticmethod
    def get_invalid(instance):
        """Return any Maya startup cameras found in the instance."""
        cameras = cmds.ls(instance, type='camera', long=True)
        return [cam for cam in cameras if
                cmds.camera(cam, query=True, startupCamera=True)]

    def process(self, instance):
        """Process all the cameras in the instance"""
        invalid = self.get_invalid(instance)
        # NOTE(review): ``assert`` is stripped under ``python -O``; raising
        # an explicit exception may be safer -- confirm pyblish convention.
        assert not invalid, "Default cameras found: {0}".format(invalid)
|
en
| 0.951596
|
Ensure no default (startup) cameras are in the instance. This might be unnecessary. In the past there were some issues with referencing/importing files that contained the start up cameras overriding settings when being loaded and sometimes being skipped. Process all the cameras in the instance
| 2.49577
| 2
|
frontmatter/default_handlers.py
|
notslang/python-frontmatter
| 0
|
6627005
|
<reponame>notslang/python-frontmatter
# -*- coding: utf-8 -*-
"""
.. testsetup:: handlers
import frontmatter
By default, ``frontmatter`` reads and writes YAML metadata. But maybe
you don't like YAML. Maybe enjoy writing metadata in JSON, or TOML, or
some other exotic markup not yet invented. For this, there are handlers.
This module includes handlers for YAML, JSON and TOML, as well as a
:py:class:`BaseHandler <frontmatter.default_handlers.BaseHandler>` that
outlines the basic API and can be subclassed to deal with new formats.
Handlers
--------
Handlers do most of the underlying work parsing and exporting front matter.
When you call :py:func:`frontmatter.loads <frontmatter.loads>`, frontmatter first needs to figure out the
best handler for the format you're using (YAML, JSON, TOML, etc), then call
methods to read or write metadata.
A handler needs to do four things:
- detect whether it can parse the given piece of text
- split front matter from content, returning both as a two-tuple
- parse front matter into a Python dictionary
- export a dictionary back into text
An example:
Calling :py:func:`frontmatter.load <frontmatter.load>` (or :py:func:`loads <frontmatter.loads>`)
with the ``handler`` argument tells frontmatter which handler to use.
The handler instance gets saved as an attribute on the returned post
object. By default, calling :py:func:`frontmatter.dumps <frontmatter.dumps>`
on the post will use the attached handler.
::
>>> import frontmatter
>>> from frontmatter.default_handlers import YAMLHandler, TOMLHandler
>>> post = frontmatter.load('tests/toml/hello-toml.md', handler=TOMLHandler())
>>> post.handler #doctest: +ELLIPSIS
<frontmatter.default_handlers.TOMLHandler object at 0x...>
>>> print(frontmatter.dumps(post)) # doctest: +SKIP
+++
test = "tester"
something = "else"
author = "bob"
+++
<BLANKLINE>
Title
=====
<BLANKLINE>
title2
------
<BLANKLINE>
Hello.
<BLANKLINE>
Just need three dashes
---
<BLANKLINE>
And this shouldn't break.
Passing a new handler to :py:func:`frontmatter.dumps <frontmatter.dumps>`
(or :py:func:`dump <frontmatter.dump>`) changes the export format:
::
>>> print(frontmatter.dumps(post, handler=YAMLHandler())) # doctest: +NORMALIZE_WHITESPACE
---
author: bob
something: else
test: tester
---
<BLANKLINE>
Title
=====
<BLANKLINE>
title2
------
<BLANKLINE>
Hello.
<BLANKLINE>
Just need three dashes
---
<BLANKLINE>
And this shouldn't break.
Changing the attached ``handler`` on a post has the same effect. Setting ``handler``
to ``None`` will default the post back to :py:class:`YAMLHandler <frontmatter.default_handlers.YAMLHandler>`.
These three variations will produce the same export:
::
# set YAML format when dumping, but the old handler attached
>>> t1 = frontmatter.dumps(post, handler=YAMLHandler())
>>> post.handler = YAMLHandler() # set a new handler, changing all future exports
>>> t2 = frontmatter.dumps(post)
>>> post.handler = None # remove handler, defaulting back to YAML
>>> t3 = frontmatter.dumps(post)
>>> t1 == t2 == t3
True
All handlers use the interface defined on ``BaseHandler``. Each handler needs to know how to:
- split metadata and content, based on a boundary pattern (``handler.split``)
- parse plain text metadata into a Python dictionary (``handler.load``)
- export a dictionary back into plain text (``handler.export``)
- format exported metadata and content into a single string (``handler.format``)
"""
import json
import re
import yaml
try:
from yaml import CSafeDumper as SafeDumper
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeDumper
from yaml import SafeLoader
try:
import toml
except ImportError:
toml = None
from .util import u
__all__ = ["BaseHandler", "YAMLHandler", "JSONHandler"]
if toml:
__all__.append("TOMLHandler")
DEFAULT_POST_TEMPLATE = """\
{start_delimiter}
{metadata}
{end_delimiter}
{content}
"""
class BaseHandler:
    """
    Skeleton for front matter handlers: boundary detection, splitting,
    parsing and exporting. Subclasses supply ``load`` and ``export``; all
    bundled handlers derive from this class.
    """

    FM_BOUNDARY = None
    START_DELIMITER = None
    END_DELIMITER = None

    def __init__(self, fm_boundary=None, start_delimiter=None, end_delimiter=None):
        # Any truthy constructor argument overrides the class-level default.
        for attr, value in (("FM_BOUNDARY", fm_boundary),
                            ("START_DELIMITER", start_delimiter),
                            ("END_DELIMITER", end_delimiter)):
            setattr(self, attr, value or getattr(self, attr))

        if self.FM_BOUNDARY is None:
            raise NotImplementedError(
                "No frontmatter boundary defined. "
                "Please set {}.FM_BOUNDARY to a regular expression".format(
                    self.__class__.__name__
                )
            )

    def detect(self, text):
        """
        Return True when this handler's boundary pattern matches ``text``.

        Not consulted when a handler instance is passed explicitly to
        :py:func:`frontmatter.load <frontmatter.load>` or :py:func:`loads <frontmatter.loads>`.
        """
        return self.FM_BOUNDARY.match(text) is not None

    def split(self, text):
        """
        Split ``text`` into a (front matter, content) pair.
        """
        pieces = self.FM_BOUNDARY.split(text, 2)
        return pieces[1], pieces[2]

    def load(self, fm):
        """
        Parse front matter into a dict; implemented by subclasses.
        """
        raise NotImplementedError

    def export(self, metadata, **kwargs):
        """
        Serialize ``metadata`` back into text; implemented by subclasses.
        """
        raise NotImplementedError

    def format(self, post, **kwargs):
        """
        Render ``post`` to a single string, used in ``frontmatter.dumps``.
        """
        start = kwargs.pop("start_delimiter", self.START_DELIMITER)
        end = kwargs.pop("end_delimiter", self.END_DELIMITER)
        rendered = DEFAULT_POST_TEMPLATE.format(
            metadata=self.export(post.metadata, **kwargs),
            content=post.content,
            start_delimiter=start,
            end_delimiter=end,
        )
        return rendered.strip()
class YAMLHandler(BaseHandler):
    """
    Handle YAML front matter, parsing and dumping in YAML's "safe" mode
    unless the caller overrides the Loader/Dumper.
    """

    FM_BOUNDARY = re.compile(r"^-{3,}\s*$", re.MULTILINE)
    START_DELIMITER = END_DELIMITER = "---"

    def load(self, fm, **kwargs):
        """
        Parse YAML front matter (uses yaml.SafeLoader unless overridden).
        """
        options = dict(kwargs)
        options.setdefault("Loader", SafeLoader)
        return yaml.load(fm, **options)

    def export(self, metadata, **kwargs):
        """
        Export metadata as block-style YAML (yaml.SafeDumper unless overridden).
        """
        options = {"Dumper": SafeDumper,
                   "default_flow_style": False,
                   "allow_unicode": True}
        options.update(kwargs)
        dumped = yaml.dump(metadata, **options).strip()
        # Guarantee a unicode string across Python versions.
        return u(dumped)
class JSONHandler(BaseHandler):
    """
    Handle JSON front matter, delimited by the metadata object's own braces.

    Note that changing ``START_DELIMITER`` or ``END_DELIMITER`` may break JSON parsing.
    """

    FM_BOUNDARY = re.compile(r"^(?:{|})$", re.MULTILINE)
    START_DELIMITER = ""
    END_DELIMITER = ""

    def split(self, text):
        """Split on the outer braces, restoring them around the metadata."""
        pieces = self.FM_BOUNDARY.split(text, 2)
        return "{{{}}}".format(pieces[1]), pieces[2]

    def load(self, fm, **kwargs):
        """Parse the JSON front matter into a dict."""
        return json.loads(fm, **kwargs)

    def export(self, metadata, **kwargs):
        "Turn metadata into JSON"
        options = dict(kwargs)
        options.setdefault("indent", 4)
        return u(json.dumps(metadata, **options))
if toml:
class TOMLHandler(BaseHandler):
"""
Load and export TOML metadata.
By default, split based on ``+++``.
"""
FM_BOUNDARY = re.compile(r"^\+{3,}\s*$", re.MULTILINE)
START_DELIMITER = END_DELIMITER = "+++"
def load(self, fm, **kwargs):
return toml.loads(fm, **kwargs)
def export(self, metadata, **kwargs):
"Turn metadata into TOML"
metadata = toml.dumps(metadata)
return u(metadata)
else:
TOMLHandler = None
|
# -*- coding: utf-8 -*-
"""
.. testsetup:: handlers
import frontmatter
By default, ``frontmatter`` reads and writes YAML metadata. But maybe
you don't like YAML. Maybe enjoy writing metadata in JSON, or TOML, or
some other exotic markup not yet invented. For this, there are handlers.
This module includes handlers for YAML, JSON and TOML, as well as a
:py:class:`BaseHandler <frontmatter.default_handlers.BaseHandler>` that
outlines the basic API and can be subclassed to deal with new formats.
Handlers
--------
Handlers do most of the underlying work parsing and exporting front matter.
When you call :py:func:`frontmatter.loads <frontmatter.loads>`, frontmatter first needs to figure out the
best handler for the format you're using (YAML, JSON, TOML, etc), then call
methods to read or write metadata.
A handler needs to do four things:
- detect whether it can parse the given piece of text
- split front matter from content, returning both as a two-tuple
- parse front matter into a Python dictionary
- export a dictionary back into text
An example:
Calling :py:func:`frontmatter.load <frontmatter.load>` (or :py:func:`loads <frontmatter.loads>`)
with the ``handler`` argument tells frontmatter which handler to use.
The handler instance gets saved as an attribute on the returned post
object. By default, calling :py:func:`frontmatter.dumps <frontmatter.dumps>`
on the post will use the attached handler.
::
>>> import frontmatter
>>> from frontmatter.default_handlers import YAMLHandler, TOMLHandler
>>> post = frontmatter.load('tests/toml/hello-toml.md', handler=TOMLHandler())
>>> post.handler #doctest: +ELLIPSIS
<frontmatter.default_handlers.TOMLHandler object at 0x...>
>>> print(frontmatter.dumps(post)) # doctest: +SKIP
+++
test = "tester"
something = "else"
author = "bob"
+++
<BLANKLINE>
Title
=====
<BLANKLINE>
title2
------
<BLANKLINE>
Hello.
<BLANKLINE>
Just need three dashes
---
<BLANKLINE>
And this shouldn't break.
Passing a new handler to :py:func:`frontmatter.dumps <frontmatter.dumps>`
(or :py:func:`dump <frontmatter.dump>`) changes the export format:
::
>>> print(frontmatter.dumps(post, handler=YAMLHandler())) # doctest: +NORMALIZE_WHITESPACE
---
author: bob
something: else
test: tester
---
<BLANKLINE>
Title
=====
<BLANKLINE>
title2
------
<BLANKLINE>
Hello.
<BLANKLINE>
Just need three dashes
---
<BLANKLINE>
And this shouldn't break.
Changing the attached ``handler`` on a post has the same effect. Setting ``handler``
to ``None`` will default the post back to :py:class:`YAMLHandler <frontmatter.default_handlers.YAMLHandler>`.
These three variations will produce the same export:
::
# set YAML format when dumping, but the old handler attached
>>> t1 = frontmatter.dumps(post, handler=YAMLHandler())
>>> post.handler = YAMLHandler() # set a new handler, changing all future exports
>>> t2 = frontmatter.dumps(post)
>>> post.handler = None # remove handler, defaulting back to YAML
>>> t3 = frontmatter.dumps(post)
>>> t1 == t2 == t3
True
All handlers use the interface defined on ``BaseHandler``. Each handler needs to know how to:
- split metadata and content, based on a boundary pattern (``handler.split``)
- parse plain text metadata into a Python dictionary (``handler.load``)
- export a dictionary back into plain text (``handler.export``)
- format exported metadata and content into a single string (``handler.format``)
"""
import json
import re
import yaml
try:
from yaml import CSafeDumper as SafeDumper
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeDumper
from yaml import SafeLoader
try:
import toml
except ImportError:
toml = None
from .util import u
__all__ = ["BaseHandler", "YAMLHandler", "JSONHandler"]
if toml:
__all__.append("TOMLHandler")
DEFAULT_POST_TEMPLATE = """\
{start_delimiter}
{metadata}
{end_delimiter}
{content}
"""
class BaseHandler:
    """
    Base class for front matter handlers.

    A handler knows how to detect, split, parse and export front matter
    metadata; every default handler subclasses BaseHandler.
    """

    FM_BOUNDARY = None
    START_DELIMITER = None
    END_DELIMITER = None

    def __init__(self, fm_boundary=None, start_delimiter=None, end_delimiter=None):
        # Constructor arguments override the class-level defaults when given.
        self.FM_BOUNDARY = fm_boundary if fm_boundary else self.FM_BOUNDARY
        self.START_DELIMITER = start_delimiter if start_delimiter else self.START_DELIMITER
        self.END_DELIMITER = end_delimiter if end_delimiter else self.END_DELIMITER
        if self.FM_BOUNDARY is None:
            # A handler is unusable without a boundary pattern.
            message = (
                "No frontmatter boundary defined. "
                "Please set {}.FM_BOUNDARY to a regular expression".format(
                    type(self).__name__))
            raise NotImplementedError(message)

    def detect(self, text):
        """
        Decide whether this handler can parse the given ``text``,
        and return True or False.

        Note that this is *not* called when passing a handler instance to
        :py:func:`frontmatter.load <frontmatter.load>` or :py:func:`loads <frontmatter.loads>`.
        """
        return self.FM_BOUNDARY.match(text) is not None

    def split(self, text):
        """
        Split text into frontmatter and content.
        """
        pieces = self.FM_BOUNDARY.split(text, 2)
        return pieces[1], pieces[2]

    def load(self, fm):
        """
        Parse frontmatter and return a dict.  Subclasses must override.
        """
        raise NotImplementedError

    def export(self, metadata, **kwargs):
        """
        Turn metadata back into text.  Subclasses must override.
        """
        raise NotImplementedError

    def format(self, post, **kwargs):
        """
        Turn a post into a string, used in ``frontmatter.dumps``.
        """
        start = kwargs.pop("start_delimiter", self.START_DELIMITER)
        end = kwargs.pop("end_delimiter", self.END_DELIMITER)
        rendered = DEFAULT_POST_TEMPLATE.format(
            metadata=self.export(post.metadata, **kwargs),
            content=post.content,
            start_delimiter=start,
            end_delimiter=end,
        )
        return rendered.strip()
class YAMLHandler(BaseHandler):
    """
    Load and export YAML metadata. By default, this handler uses YAML's
    "safe" mode, though it's possible to override that.
    """

    FM_BOUNDARY = re.compile(r"^-{3,}\s*$", re.MULTILINE)
    START_DELIMITER = END_DELIMITER = "---"

    def load(self, fm, **kwargs):
        """
        Parse YAML front matter. This uses yaml.SafeLoader by default.
        """
        if "Loader" not in kwargs:
            kwargs["Loader"] = SafeLoader
        return yaml.load(fm, **kwargs)

    def export(self, metadata, **kwargs):
        """
        Export metadata as YAML. This uses yaml.SafeDumper by default.
        """
        defaults = {
            "Dumper": SafeDumper,
            "default_flow_style": False,
            "allow_unicode": True,
        }
        for option, value in defaults.items():
            kwargs.setdefault(option, value)
        dumped = yaml.dump(metadata, **kwargs).strip()
        return u(dumped)  # ensure unicode
class JSONHandler(BaseHandler):
    """
    Load and export JSON metadata.

    Note that changing ``START_DELIMITER`` or ``END_DELIMITER`` may break JSON parsing.
    """

    FM_BOUNDARY = re.compile(r"^(?:{|})$", re.MULTILINE)
    START_DELIMITER = ""
    END_DELIMITER = ""

    def split(self, text):
        """Split on the outer braces, restoring them around the metadata."""
        pieces = self.FM_BOUNDARY.split(text, 2)
        return "{" + pieces[1] + "}", pieces[2]

    def load(self, fm, **kwargs):
        """Parse JSON front matter into a dict."""
        return json.loads(fm, **kwargs)

    def export(self, metadata, **kwargs):
        """Turn metadata into JSON."""
        if "indent" not in kwargs:
            kwargs["indent"] = 4
        return u(json.dumps(metadata, **kwargs))
if toml is None:
    # TOML support is optional; without the toml package there is no handler.
    TOMLHandler = None
else:

    class TOMLHandler(BaseHandler):
        """
        Load and export TOML metadata.

        By default, split based on ``+++``.
        """

        FM_BOUNDARY = re.compile(r"^\+{3,}\s*$", re.MULTILINE)
        START_DELIMITER = END_DELIMITER = "+++"

        def load(self, fm, **kwargs):
            """Parse TOML front matter into a dict."""
            return toml.loads(fm, **kwargs)

        def export(self, metadata, **kwargs):
            """Turn metadata into TOML."""
            dumped = toml.dumps(metadata)
            return u(dumped)
|
en
| 0.629243
|
# -*- coding: utf-8 -*- .. testsetup:: handlers import frontmatter By default, ``frontmatter`` reads and writes YAML metadata. But maybe you don't like YAML. Maybe enjoy writing metadata in JSON, or TOML, or some other exotic markup not yet invented. For this, there are handlers. This module includes handlers for YAML, JSON and TOML, as well as a :py:class:`BaseHandler <frontmatter.default_handlers.BaseHandler>` that outlines the basic API and can be subclassed to deal with new formats. Handlers -------- Handlers do most of the underlying work parsing and exporting front matter. When you call :py:func:`frontmatter.loads <frontmatter.loads>`, frontmatter first needs to figure out the best handler for the format you're using (YAML, JSON, TOML, etc), then call methods to read or write metadata. A handler needs to do four things: - detect whether it can parse the given piece of text - split front matter from content, returning both as a two-tuple - parse front matter into a Python dictionary - export a dictionary back into text An example: Calling :py:func:`frontmatter.load <frontmatter.load>` (or :py:func:`loads <frontmatter.loads>`) with the ``handler`` argument tells frontmatter which handler to use. The handler instance gets saved as an attribute on the returned post object. By default, calling :py:func:`frontmatter.dumps <frontmatter.dumps>` on the post will use the attached handler. :: >>> import frontmatter >>> from frontmatter.default_handlers import YAMLHandler, TOMLHandler >>> post = frontmatter.load('tests/toml/hello-toml.md', handler=TOMLHandler()) >>> post.handler #doctest: +ELLIPSIS <frontmatter.default_handlers.TOMLHandler object at 0x...> >>> print(frontmatter.dumps(post)) # doctest: +SKIP +++ test = "tester" something = "else" author = "bob" +++ <BLANKLINE> Title ===== <BLANKLINE> title2 ------ <BLANKLINE> Hello. <BLANKLINE> Just need three dashes --- <BLANKLINE> And this shouldn't break. 
Passing a new handler to :py:func:`frontmatter.dumps <frontmatter.dumps>` (or :py:func:`dump <frontmatter.dump>`) changes the export format: :: >>> print(frontmatter.dumps(post, handler=YAMLHandler())) # doctest: +NORMALIZE_WHITESPACE --- author: bob something: else test: tester --- <BLANKLINE> Title ===== <BLANKLINE> title2 ------ <BLANKLINE> Hello. <BLANKLINE> Just need three dashes --- <BLANKLINE> And this shouldn't break. Changing the attached ``handler`` on a post has the same effect. Setting ``handler`` to ``None`` will default the post back to :py:class:`YAMLHandler <frontmatter.default_handlers.YAMLHandler>`. These three variations will produce the same export: :: # set YAML format when dumping, but the old handler attached >>> t1 = frontmatter.dumps(post, handler=YAMLHandler()) >>> post.handler = YAMLHandler() # set a new handler, changing all future exports >>> t2 = frontmatter.dumps(post) >>> post.handler = None # remove handler, defaulting back to YAML >>> t3 = frontmatter.dumps(post) >>> t1 == t2 == t3 True All handlers use the interface defined on ``BaseHandler``. Each handler needs to know how to: - split metadata and content, based on a boundary pattern (``handler.split``) - parse plain text metadata into a Python dictionary (``handler.load``) - export a dictionary back into plain text (``handler.export``) - format exported metadata and content into a single string (``handler.format``) \ {start_delimiter} {metadata} {end_delimiter} {content} BaseHandler lays out all the steps to detecting, splitting, parsing and exporting front matter metadata. All default handlers are subclassed from BaseHandler. Decide whether this handler can parse the given ``text``, and return True or False. Note that this is *not* called when passing a handler instance to :py:func:`frontmatter.load <frontmatter.load>` or :py:func:`loads <frontmatter.loads>`. 
Split text into frontmatter and content Parse frontmatter and return a dict Turn metadata back into text Turn a post into a string, used in ``frontmatter.dumps`` Load and export YAML metadata. By default, this handler uses YAML's "safe" mode, though it's possible to override that. Parse YAML front matter. This uses yaml.SafeLoader by default. Export metadata as YAML. This uses yaml.SafeDumper by default. # ensure unicode Load and export JSON metadata. Note that changing ``START_DELIMITER`` or ``END_DELIMITER`` may break JSON parsing. Load and export TOML metadata. By default, split based on ``+++``.
| 2.718309
| 3
|
hyperbo/gp_utils/slice_sampling_test.py
|
google-research/hyperbo
| 3
|
6627006
|
<filename>hyperbo/gp_utils/slice_sampling_test.py
# coding=utf-8
# Copyright 2022 HyperBO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test gp.py.
Use the following to debug nan.
from jax.config import config
config.update('jax_debug_nans', True)
"""
import logging
import time
from absl.testing import absltest
from absl.testing import parameterized
from hyperbo.basics import definitions as defs
from hyperbo.basics import params_utils
from hyperbo.gp_utils import basis_functions as bf
from hyperbo.gp_utils import gp
from hyperbo.gp_utils import kernel
from hyperbo.gp_utils import mean
from hyperbo.gp_utils import priors
from hyperbo.gp_utils import utils
import jax
import jax.numpy as jnp
DEFAULT_WARP_FUNC = utils.DEFAULT_WARP_FUNC
GPParams = defs.GPParams
retrieve_params = params_utils.retrieve_params
class GPTest(parameterized.TestCase):
    """Tests for slice sampling method in gp.py.

    Each named parameter below reruns the same inference test with a
    different covariance (kernel) function.
    """

    @parameterized.named_parameters(
        ('squared_exponential', kernel.squared_exponential),
        ('matern32', kernel.matern32),
        ('matern52', kernel.matern52),
        ('matern32_mlp', kernel.matern32_mlp),
        ('matern52_mlp', kernel.matern52_mlp),
        ('squared_exponential_mlp', kernel.squared_exponential_mlp),
        ('dot_product_mlp', kernel.dot_product_mlp),
    )
    def test_slice_sampling(self, cov_func):
        """Test that GP parameters can be inferred correctly.

        Args:
          cov_func: covariance function under test, injected by the
            parameterized decorator.
        """
        key = jax.random.PRNGKey(0)
        key, init_key = jax.random.split(key)
        # n training inputs and nq query inputs, each 2-dimensional.
        n, nq = 6, 3
        vx = jax.random.normal(key, (n, 2))
        key, _ = jax.random.split(key)
        qx = jax.random.normal(key, (nq, 2))
        # Ground-truth parameters used to generate the synthetic datasets.
        params = GPParams(
            model={
                'constant': 5.,
                'lengthscale': .1,
                'signal_variance': 1.0,
                'noise_variance': 0.01,
            })
        # MLP-based kernels additionally need their network weights
        # initialized to match the input shape.
        if cov_func in [
            kernel.squared_exponential_mlp, kernel.matern32_mlp, kernel.matern52_mlp
        ]:
            params.config['mlp_features'] = (8,)
            key, _ = jax.random.split(key)
            bf.init_mlp_with_shape(key, params, vx.shape)
        elif cov_func == kernel.dot_product_mlp:
            key, _ = jax.random.split(key)
            params.model['dot_prod_sigma'] = jax.random.normal(key, (8, 8 * 2))
            params.model['dot_prod_bias'] = 0.
            params.config['mlp_features'] = (8,)
            key, _ = jax.random.split(key)
            bf.init_mlp_with_shape(key, params, vx.shape)
        mean_func = mean.constant
        logging.info(msg=f'params = {params}')

        def sample_from_gp(seed):
            # Draw one function sample at vx from the ground-truth GP.
            return gp.sample_from_gp(
                jax.random.PRNGKey(seed), mean_func, cov_func, params, vx)

        # 10 sub-datasets sharing the same inputs vx, each with its own
        # sampled set of observations.
        dataset = [(vx, sample_from_gp(i)) for i in range(10)]
        # Minimize sample_mean_cov_regularizer.
        nsamples = 1
        # Deliberately mis-specified starting point for inference.
        init_params = GPParams(
            model={
                'constant': 5.1,
                'lengthscale': jnp.array([0., 0.]),
                'signal_variance': 0.,
                'noise_variance': -4.
            },
            config={
                'method': 'slice_sample',
                'burnin': nsamples,
                'nsamples': nsamples,
                'maxiter': 0,
                'logging_interval': 1,
                'priors': priors.DEFAULT_PRIORS,
                'mlp_features': (8,),
                'batch_size': 100,
            })
        init_key, _ = jax.random.split(init_key)
        # bf.init_kumar_warp_with_shape(init_key, init_params, vx.shape)
        if cov_func in [
            kernel.squared_exponential_mlp, kernel.matern32_mlp, kernel.matern52_mlp
        ]:
            init_params.model['lengthscale'] = jnp.array([0.] * 8)
        elif cov_func == kernel.dot_product_mlp:
            init_params.model['dot_prod_sigma'] = jax.random.normal(
                init_key, (8, 8 * 2))
            init_params.model['dot_prod_bias'] = 0.
        warp_func = DEFAULT_WARP_FUNC
        # NOTE(review): the data was sampled with mean.constant but the
        # model trains with mean.linear_mlp -- confirm this mismatch is
        # intentional (e.g. to test model flexibility).
        model = gp.HGP(
            dataset=dataset,
            mean_func=mean.linear_mlp,
            cov_func=cov_func,
            params=init_params,
            warp_func=warp_func)
        model.initialize_params(init_key)
        init_nll, _, _ = model.stats()
        start_time = time.time()
        logging.info(msg=f'init_params={init_params}')
        inferred_params = model.train()
        logging.info(msg=f'Elapsed training time = {time.time() - start_time}')
        inferred_nll, _, _ = model.stats()
        # Unwarp the inferred parameters for human-readable logging.
        keys = params.model.keys()
        retrieved_inferred_params = dict(
            zip(keys, retrieve_params(inferred_params, keys, warp_func=warp_func)))
        logging.info(msg=f'params.model = {retrieved_inferred_params}')
        # Training must improve (lower) the negative log likelihood.
        self.assertGreater(init_nll, inferred_nll)
        predictions = model.predict(qx, 0, True, True)
        logging.info(msg=f'predictions = {predictions}')
        # Presumably one (mean, covariance) pair per kept sample, with
        # burnin samples retained too (burnin == nsamples above) -- TODO
        # confirm against gp.HGP.predict.
        self.assertLen(predictions, nsamples * 2)
        for i in range(nsamples * 2):
            self.assertEqual(predictions[i][0].shape, (nq, 1))
            self.assertEqual(predictions[i][1].shape, (nq, nq))
if __name__ == '__main__':
absltest.main()
|
<filename>hyperbo/gp_utils/slice_sampling_test.py
# coding=utf-8
# Copyright 2022 HyperBO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test gp.py.
Use the following to debug nan.
from jax.config import config
config.update('jax_debug_nans', True)
"""
import logging
import time
from absl.testing import absltest
from absl.testing import parameterized
from hyperbo.basics import definitions as defs
from hyperbo.basics import params_utils
from hyperbo.gp_utils import basis_functions as bf
from hyperbo.gp_utils import gp
from hyperbo.gp_utils import kernel
from hyperbo.gp_utils import mean
from hyperbo.gp_utils import priors
from hyperbo.gp_utils import utils
import jax
import jax.numpy as jnp
DEFAULT_WARP_FUNC = utils.DEFAULT_WARP_FUNC
GPParams = defs.GPParams
retrieve_params = params_utils.retrieve_params
class GPTest(parameterized.TestCase):
"""Tests for slice sampling method in gp.py."""
@parameterized.named_parameters(
('squared_exponential', kernel.squared_exponential),
('matern32', kernel.matern32),
('matern52', kernel.matern52),
('matern32_mlp', kernel.matern32_mlp),
('matern52_mlp', kernel.matern52_mlp),
('squared_exponential_mlp', kernel.squared_exponential_mlp),
('dot_product_mlp', kernel.dot_product_mlp),
)
def test_slice_sampling(self, cov_func):
"""Test that GP parameters can be inferred correctly."""
key = jax.random.PRNGKey(0)
key, init_key = jax.random.split(key)
n, nq = 6, 3
vx = jax.random.normal(key, (n, 2))
key, _ = jax.random.split(key)
qx = jax.random.normal(key, (nq, 2))
params = GPParams(
model={
'constant': 5.,
'lengthscale': .1,
'signal_variance': 1.0,
'noise_variance': 0.01,
})
if cov_func in [
kernel.squared_exponential_mlp, kernel.matern32_mlp, kernel.matern52_mlp
]:
params.config['mlp_features'] = (8,)
key, _ = jax.random.split(key)
bf.init_mlp_with_shape(key, params, vx.shape)
elif cov_func == kernel.dot_product_mlp:
key, _ = jax.random.split(key)
params.model['dot_prod_sigma'] = jax.random.normal(key, (8, 8 * 2))
params.model['dot_prod_bias'] = 0.
params.config['mlp_features'] = (8,)
key, _ = jax.random.split(key)
bf.init_mlp_with_shape(key, params, vx.shape)
mean_func = mean.constant
logging.info(msg=f'params = {params}')
def sample_from_gp(seed):
return gp.sample_from_gp(
jax.random.PRNGKey(seed), mean_func, cov_func, params, vx)
dataset = [(vx, sample_from_gp(i)) for i in range(10)]
# Minimize sample_mean_cov_regularizer.
nsamples = 1
init_params = GPParams(
model={
'constant': 5.1,
'lengthscale': jnp.array([0., 0.]),
'signal_variance': 0.,
'noise_variance': -4.
},
config={
'method': 'slice_sample',
'burnin': nsamples,
'nsamples': nsamples,
'maxiter': 0,
'logging_interval': 1,
'priors': priors.DEFAULT_PRIORS,
'mlp_features': (8,),
'batch_size': 100,
})
init_key, _ = jax.random.split(init_key)
# bf.init_kumar_warp_with_shape(init_key, init_params, vx.shape)
if cov_func in [
kernel.squared_exponential_mlp, kernel.matern32_mlp, kernel.matern52_mlp
]:
init_params.model['lengthscale'] = jnp.array([0.] * 8)
elif cov_func == kernel.dot_product_mlp:
init_params.model['dot_prod_sigma'] = jax.random.normal(
init_key, (8, 8 * 2))
init_params.model['dot_prod_bias'] = 0.
warp_func = DEFAULT_WARP_FUNC
model = gp.HGP(
dataset=dataset,
mean_func=mean.linear_mlp,
cov_func=cov_func,
params=init_params,
warp_func=warp_func)
model.initialize_params(init_key)
init_nll, _, _ = model.stats()
start_time = time.time()
logging.info(msg=f'init_params={init_params}')
inferred_params = model.train()
logging.info(msg=f'Elapsed training time = {time.time() - start_time}')
inferred_nll, _, _ = model.stats()
keys = params.model.keys()
retrieved_inferred_params = dict(
zip(keys, retrieve_params(inferred_params, keys, warp_func=warp_func)))
logging.info(msg=f'params.model = {retrieved_inferred_params}')
self.assertGreater(init_nll, inferred_nll)
predictions = model.predict(qx, 0, True, True)
logging.info(msg=f'predictions = {predictions}')
self.assertLen(predictions, nsamples * 2)
for i in range(nsamples * 2):
self.assertEqual(predictions[i][0].shape, (nq, 1))
self.assertEqual(predictions[i][1].shape, (nq, nq))
if __name__ == '__main__':
absltest.main()
|
en
| 0.737064
|
# coding=utf-8 # Copyright 2022 HyperBO Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Test gp.py. Use the following to debug nan. from jax.config import config config.update('jax_debug_nans', True) Tests for slice sampling method in gp.py. Test that GP parameters can be inferred correctly. # Minimize sample_mean_cov_regularizer. # bf.init_kumar_warp_with_shape(init_key, init_params, vx.shape)
| 2.053267
| 2
|
population_estimator/geography.py
|
cruzanta/population-estimator
| 1
|
6627007
|
<reponame>cruzanta/population-estimator<gh_stars>1-10
#!/usr/bin/env python
class Geography:
    """Class for storing a geography's population data.

    Attributes:
        name: A string containing the name of a geography.
        annual_pop_ests: A list of population estimates represented as integers.
        first_pop_est: An integer that represents a geography's first population
            estimate.
        most_recent_pop_est: An integer that represents a geography's most
            recent population estimate.
        cagr: A float that represents a geography's compound annual growth rate.
    """

    def __init__(self, name, annual_population_estimates):
        self.name = name
        self.annual_pop_ests = annual_population_estimates
        self.first_pop_est = self.annual_pop_ests[0]
        self.most_recent_pop_est = self.annual_pop_ests[-1]
        self.cagr = self.get_compound_annual_growth_rate()

    def get_compound_annual_growth_rate(self):
        """Return the compound annual growth rate implied by the estimates.

        NOTE(review): the exponent uses len(annual_pop_ests) periods; a CAGR
        over N annual estimates conventionally spans N - 1 years -- confirm
        whether this is intentional before changing it.
        """
        growth_ratio = self.most_recent_pop_est / float(self.first_pop_est)
        periods = float(len(self.annual_pop_ests))
        return growth_ratio ** (1 / periods) - 1

    def get_projected_population(self, most_recent_year, projected_year):
        """Project the population for a future year by compounding the CAGR."""
        growth_factor = self.cagr + 1
        elapsed_years = projected_year - most_recent_year
        projection = self.most_recent_pop_est * growth_factor ** elapsed_years
        return int(round(projection, 0))
|
#!/usr/bin/env python
class Geography:
"""Class for storing a geography's population data.
Attributes:
name: A string containing the name of a geography.
annual_pop_ests: A list of population estimates represented as integers.
first_pop_est: An integer that represents a geography's first population
estimate.
most_recent_pop_est: An integer that represents a geography's most
recent population estimate.
cagr: A float that represents a geography's compound annual growth rate.
"""
def __init__(self, name, annual_population_estimates):
self.name = name
self.annual_pop_ests = annual_population_estimates
self.first_pop_est = self.annual_pop_ests[0]
self.most_recent_pop_est = self.annual_pop_ests[-1]
self.cagr = self.get_compound_annual_growth_rate()
def get_compound_annual_growth_rate(self):
# Calculates and returns a geography's compound annual growth rate.
beginning_pop = float(self.first_pop_est)
ending_pop = self.most_recent_pop_est
num_years = float(len(self.annual_pop_ests))
return (ending_pop / beginning_pop)**(1 / num_years) - 1
def get_projected_population(self, most_recent_year, projected_year):
# Calculates and returns a geography's projected population estimate for
# a future year.
starting_pop = self.most_recent_pop_est
cagr_plus_one = self.cagr + 1
num_years = projected_year - most_recent_year
future_pop = starting_pop * (cagr_plus_one)**num_years
return int(round(future_pop, 0))
|
en
| 0.833116
|
#!/usr/bin/env python Class for storing a geography's population data. Attributes: name: A string containing the name of a geography. annual_pop_ests: A list of population estimates represented as integers. first_pop_est: An integer that represents a geography's first population estimate. most_recent_pop_est: An integer that represents a geography's most recent population estimate. cagr: A float that represents a geography's compound annual growth rate. # Calculates and returns a geography's compound annual growth rate. # Calculates and returns a geography's projected population estimate for # a future year.
| 4.134895
| 4
|
LeetcodeAlgorithms/091. Decode Ways/decode-ways.py
|
Fenghuapiao/PyLeetcode
| 3
|
6627008
|
<gh_stars>1-10
class Solution(object):
    def numDecodings(self, s):
        """Count the ways to decode a digit string (LeetCode 91).

        Letters A-Z map to "1".."26".  dp[i] holds the number of ways to
        decode the first i characters; each new character extends either a
        one-digit code (nonzero digit) or a two-digit code in 10..26.

        Bug fix: the original used ``xrange``, which is Python 2-only and
        raises NameError on Python 3; replaced with ``range``.

        :type s: str
        :rtype: int
        """
        if len(s) == 0:
            return 0
        dp = [0] * (len(s) + 1)
        dp[0] = 1  # Empty prefix decodes exactly one way.
        dp[1] = 0 if s[0] == "0" else 1  # A leading "0" cannot start a code.
        for i in range(1, len(s)):
            prev_digit = int(s[i - 1])
            cur_digit = int(s[i])
            two_digit = prev_digit * 10 + cur_digit
            if cur_digit != 0:
                # s[i] alone is a valid one-digit code (1-9).
                dp[i + 1] += dp[i]
            if prev_digit != 0 and 0 < two_digit <= 26:
                # s[i-1:i+1] is a valid two-digit code (10-26).
                dp[i + 1] += dp[i - 1]
        return dp[-1]
|
class Solution(object):
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
if len(s) == 0:
return 0
dp = [0] * (len(s) + 1)
dp[0] = 1
dp[1] = 0 if s[0] == "0" else 1
for i in xrange(1, len(s)):
pre = int(s[i-1])
cur = int(s[i])
num = pre * 10 + cur
if cur != 0:
dp[i+1] += dp[i]
if pre != 0 and 0 < num <= 26:
dp[i+1] += dp[i - 1]
return dp[-1]
|
en
| 0.328618
|
:type s: str
:rtype: int
| 2.723337
| 3
|
mosfit/modules/arrays/kernel.py
|
klukosiute/MOSFiT
| 0
|
6627009
|
"""Definitions for the `Kernel` class."""
from collections import OrderedDict
import numpy as np
from six import string_types
from mosfit.constants import ANG_CGS, C_CGS
from mosfit.modules.arrays.array import Array
# Important: Only define one ``Module`` class per file.
class Kernel(Array):
    """Calculate the kernel for use in computing the likelihood score.

    The kernel (covariance) matrix is built from per-band variances and,
    when the correlation-length parameters are enabled, Gaussian falloff
    terms in time and wavelength.  ``type`` selects which observation sets
    the two matrix axes refer to: 'full' (all x all), 'oa' (observed x
    all), 'ao' (all x observed), or the default (observed x observed).
    """

    # Floor for covariance terms; not referenced within this class --
    # presumably used by subclasses or callers. TODO confirm.
    MIN_COV_TERM = 1.0e-30

    def __init__(self, **kwargs):
        """Initialize module."""
        super(Kernel, self).__init__(**kwargs)
        self._times = np.array([])
        # Negative values mean "disabled" for both correlation lengths.
        self._codeltatime = -1
        self._codeltalambda = -1
        self._type = kwargs.get('type', False)

    def process(self, **kwargs):
        """Process module.

        Returns an OrderedDict holding either the kernel matrix (under a
        key determined by ``self._type``) or, when both correlation
        lengths are disabled, the raw band-variance arrays.
        """
        self.preprocess(**kwargs)
        ret = OrderedDict()
        # If we are trying to krig between observations, we need an array with
        # dimensions equal to the number of intermediate observations.
        if self._type == 'full':
            kskey = 'kfmat'
        elif self._type == 'oa':
            kskey = 'koamat'
        elif self._type == 'ao':
            kskey = 'kaomat'
        else:
            kskey = 'kmat'
        # Get band variances
        self._variance = kwargs.get(self.key('variance'), 0.0)
        # Get array of real observations.
        self._observations = np.array([
            ct if (t == 'countrate' or t == 'magcount') else y if (
                t == 'magnitude') else fd if t == 'fluxdensity' else None
            for y, ct, fd, t in zip(
                self._mags, self._cts, self._fds, self._o_otypes)
        ])
        # Get array of model observations.
        self._model_observations = kwargs.get('model_observations', [])
        # Handle band-specific variances if that option is enabled.
        self._band_v_vars = OrderedDict()
        for key in kwargs:
            if key.startswith('variance-band-'):
                self._band_v_vars[key.split('-')[-1]] = kwargs[key]
        if self._variance_bands:
            self._o_variance_bands = [
                self._variance_bands[i] for i in self._all_band_indices]
            # Entries are either a band name (string) or a (weight, bands)
            # pair blending two bands.
            # NOTE(review): both blend terms look up i[1][0]; the second
            # term probably should use i[1][1] -- confirm upstream.
            self._band_vs = np.array([
                self._band_v_vars.get(i, self._variance) if
                isinstance(i, string_types)
                else (i[0] * self._band_v_vars.get(i[1][0], self._variance) +
                      (1.0 - i[0]) * self._band_v_vars.get(
                          i[1][0], self._variance))
                for i in self._o_variance_bands])
        else:
            self._band_vs = np.full(
                len(self._all_band_indices), self._variance)
        # Compute relative errors for count-based observations.
        self._band_vs[self._count_inds] = (
            10.0 ** (self._band_vs[self._count_inds] / 2.5) - 1.0
        ) * self._model_observations[self._count_inds]
        self._o_band_vs = self._band_vs[self._observed]
        # Pick the variance vectors for each kernel axis based on type.
        if self._type == 'full':
            self._band_vs_1 = self._band_vs
            self._band_vs_2 = self._band_vs
        elif self._type == 'oa':
            self._band_vs_1 = self._o_band_vs
            self._band_vs_2 = self._band_vs
        elif self._type == 'ao':
            self._band_vs_1 = self._band_vs
            self._band_vs_2 = self._o_band_vs
        else:
            self._band_vs_1 = self._o_band_vs
            self._band_vs_2 = self._o_band_vs
        if self._codeltatime >= 0 or self._codeltalambda >= 0:
            # Outer product of variances, damped by Gaussian factors in
            # time and/or wavelength (distance arrays built in preprocess).
            kmat = np.outer(self._band_vs_1, self._band_vs_2)
            if self._codeltatime >= 0:
                kmat *= np.exp(self._dt2mat / self._codeltatime ** 2)
            if self._codeltalambda >= 0:
                kmat *= np.exp(self._dl2mat / self._codeltalambda ** 2)
            ret[kskey] = kmat
        else:
            ret['abandvs'] = self._band_vs
            ret['obandvs'] = self._o_band_vs
        return ret

    def receive_requests(self, **requests):
        """Receive requests from other ``Module`` objects."""
        self._average_wavelengths = requests.get('average_wavelengths', [])
        self._variance_bands = requests.get('variance_bands', [])

    def preprocess(self, **kwargs):
        """Construct kernel distance arrays.

        Caches on the observation times: recomputation is skipped when
        ``all_times`` is unchanged and preprocessing already ran.
        """
        new_times = np.array(kwargs.get('all_times', []), dtype=float)
        self._codeltatime = kwargs.get(self.key('codeltatime'), -1)
        self._codeltalambda = kwargs.get(self.key('codeltalambda'), -1)
        # self._preprocessed is read before this class ever sets it --
        # presumably initialized by the Array/Module base class. TODO confirm.
        if np.array_equiv(new_times, self._times) and self._preprocessed:
            return
        self._times = new_times
        self._all_band_indices = kwargs.get('all_band_indices', [])
        self._are_bands = np.array(self._all_band_indices) >= 0
        self._freqs = kwargs.get('all_frequencies', [])
        self._mags = np.array(kwargs.get('magnitudes', []))
        self._fds = np.array(kwargs.get('fluxdensities', []))
        self._cts = np.array(kwargs.get('countrates', []))
        self._u_freqs = kwargs.get('all_u_frequencies', [])
        # Effective wavelength per datum: band average when a band index
        # is present, otherwise converted from the listed frequency.
        self._waves = np.array([
            self._average_wavelengths[bi] if bi >= 0 else
            C_CGS / self._freqs[i] / ANG_CGS for i, bi in
            enumerate(self._all_band_indices)])
        self._observed = np.array(kwargs.get('observed', []), dtype=bool)
        self._observation_types = kwargs.get('observation_types')
        self._n_obs = len(self._observed)
        self._count_inds = self._observation_types != 'magnitude'
        self._o_times = self._times[self._observed]
        self._o_waves = self._waves[self._observed]
        self._o_otypes = self._observation_types[self._observed]
        # Choose the time/wavelength vectors for each kernel axis,
        # mirroring the variance-axis selection in process().
        if self._type == 'full':
            self._times_1 = self._times
            self._times_2 = self._times
            self._waves_1 = self._waves
            self._waves_2 = self._waves
        elif self._type == 'oa':
            self._times_1 = self._o_times
            self._times_2 = self._times
            self._waves_1 = self._o_waves
            self._waves_2 = self._waves
        elif self._type == 'ao':
            self._times_1 = self._times
            self._times_2 = self._o_times
            self._waves_1 = self._waves
            self._waves_2 = self._o_waves
        else:
            self._times_1 = self._o_times
            self._times_2 = self._o_times
            self._waves_1 = self._o_waves
            self._waves_2 = self._o_waves
        # Time deltas (radial distance) for covariance matrix.
        # Stored as -0.5 * dt^2 so process() only divides by the squared
        # correlation length before exponentiating.
        if self._codeltatime >= 0:
            self._dt2mat = self._times_1[:, None] - self._times_2[None, :]
            self._dt2mat **= 2
            self._dt2mat *= -0.5
        # Wavelength deltas (radial distance) for covariance matrix.
        if self._codeltalambda >= 0:
            self._dl2mat = self._waves_1[:, None] - self._waves_2[None, :]
            self._dl2mat **= 2
            self._dl2mat *= -0.5
        self._preprocessed = True
|
"""Definitions for the `Kernel` class."""
from collections import OrderedDict
import numpy as np
from six import string_types
from mosfit.constants import ANG_CGS, C_CGS
from mosfit.modules.arrays.array import Array
# Important: Only define one ``Module`` class per file.
class Kernel(Array):
    """Calculate the kernel for use in computing the likelihood score."""

    # Floor for covariance terms. Not referenced in this class body;
    # presumably used by subclasses or callers -- TODO confirm.
    MIN_COV_TERM = 1.0e-30

    def __init__(self, **kwargs):
        """Initialize module with empty times and disabled kernel widths."""
        super(Kernel, self).__init__(**kwargs)
        self._times = np.array([])
        # Negative widths mean "disabled" until values arrive via kwargs
        # in `preprocess`.
        self._codeltatime = -1
        self._codeltalambda = -1
        # Kernel variant: 'full' (all x all), 'oa' (observed x all),
        # 'ao' (all x observed); any other value -> observed x observed.
        self._type = kwargs.get('type', False)

    def process(self, **kwargs):
        """Process module.

        Returns an ``OrderedDict`` holding either the kernel matrix (under a
        key chosen by the kernel variant) or, when both kernel widths are
        disabled, the raw per-band variance arrays.
        """
        self.preprocess(**kwargs)
        ret = OrderedDict()
        # If we are trying to krig between observations, we need an array with
        # dimensions equal to the number of intermediate observations.
        if self._type == 'full':
            kskey = 'kfmat'
        elif self._type == 'oa':
            kskey = 'koamat'
        elif self._type == 'ao':
            kskey = 'kaomat'
        else:
            kskey = 'kmat'

        # Get band variances
        self._variance = kwargs.get(self.key('variance'), 0.0)

        # Get array of real observations. Each entry is picked from the
        # count-rate, magnitude, or flux-density array according to the
        # observation type; unknown types map to None.
        self._observations = np.array([
            ct if (t == 'countrate' or t == 'magcount') else y if (
                t == 'magnitude') else fd if t == 'fluxdensity' else None
            for y, ct, fd, t in zip(
                self._mags, self._cts, self._fds, self._o_otypes)
        ])

        # Get array of model observations.
        self._model_observations = kwargs.get('model_observations', [])

        # Handle band-specific variances if that option is enabled.
        self._band_v_vars = OrderedDict()
        for key in kwargs:
            if key.startswith('variance-band-'):
                self._band_v_vars[key.split('-')[-1]] = kwargs[key]
        if self._variance_bands:
            self._o_variance_bands = [
                self._variance_bands[i] for i in self._all_band_indices]
            # Entries are either a band-name string or a (weight, (band_a,
            # band_b)) pair to interpolate between two bands.
            # NOTE(review): both interpolation terms look up i[1][0]; the
            # second term presumably should use i[1][1] -- confirm upstream.
            self._band_vs = np.array([
                self._band_v_vars.get(i, self._variance) if
                isinstance(i, string_types)
                else (i[0] * self._band_v_vars.get(i[1][0], self._variance) +
                      (1.0 - i[0]) * self._band_v_vars.get(
                          i[1][0], self._variance))
                for i in self._o_variance_bands])
        else:
            self._band_vs = np.full(
                len(self._all_band_indices), self._variance)

        # Compute relative errors for count-based observations.
        self._band_vs[self._count_inds] = (
            10.0 ** (self._band_vs[self._count_inds] / 2.5) - 1.0
        ) * self._model_observations[self._count_inds]

        self._o_band_vs = self._band_vs[self._observed]

        # Select axis-1/axis-2 variance vectors for the kernel variant.
        if self._type == 'full':
            self._band_vs_1 = self._band_vs
            self._band_vs_2 = self._band_vs
        elif self._type == 'oa':
            self._band_vs_1 = self._o_band_vs
            self._band_vs_2 = self._band_vs
        elif self._type == 'ao':
            self._band_vs_1 = self._band_vs
            self._band_vs_2 = self._o_band_vs
        else:
            self._band_vs_1 = self._o_band_vs
            self._band_vs_2 = self._o_band_vs

        if self._codeltatime >= 0 or self._codeltalambda >= 0:
            # Squared-exponential kernel: outer product of variances, damped
            # by the precomputed time/wavelength separation matrices.
            kmat = np.outer(self._band_vs_1, self._band_vs_2)
            if self._codeltatime >= 0:
                kmat *= np.exp(self._dt2mat / self._codeltatime ** 2)
            if self._codeltalambda >= 0:
                kmat *= np.exp(self._dl2mat / self._codeltalambda ** 2)
            ret[kskey] = kmat
        else:
            # No kernel widths enabled: expose the raw variance arrays.
            ret['abandvs'] = self._band_vs
            ret['obandvs'] = self._o_band_vs

        return ret

    def receive_requests(self, **requests):
        """Receive requests from other ``Module`` objects."""
        self._average_wavelengths = requests.get('average_wavelengths', [])
        self._variance_bands = requests.get('variance_bands', [])

    def preprocess(self, **kwargs):
        """Construct kernel distance arrays.

        Caches observation times/wavelengths and, when a kernel width is
        enabled, the (scaled, negated) squared separation matrices consumed
        by ``process``. Recomputation is skipped when the time array has not
        changed since the last call.
        """
        new_times = np.array(kwargs.get('all_times', []), dtype=float)
        self._codeltatime = kwargs.get(self.key('codeltatime'), -1)
        self._codeltalambda = kwargs.get(self.key('codeltalambda'), -1)
        # NOTE(review): `_preprocessed` is never set in __init__; presumably
        # initialized by a base class -- confirm, otherwise the first call
        # could raise AttributeError when the time arrays compare equal.
        if np.array_equiv(new_times, self._times) and self._preprocessed:
            return
        self._times = new_times
        self._all_band_indices = kwargs.get('all_band_indices', [])
        # Index >= 0 selects a photometric band; negative index means the
        # observation is specified by frequency instead.
        self._are_bands = np.array(self._all_band_indices) >= 0
        self._freqs = kwargs.get('all_frequencies', [])
        self._mags = np.array(kwargs.get('magnitudes', []))
        self._fds = np.array(kwargs.get('fluxdensities', []))
        self._cts = np.array(kwargs.get('countrates', []))
        self._u_freqs = kwargs.get('all_u_frequencies', [])
        # Effective wavelength per observation: the band average when a band
        # index exists, otherwise converted from the frequency (c / nu, in
        # Angstrom units).
        self._waves = np.array([
            self._average_wavelengths[bi] if bi >= 0 else
            C_CGS / self._freqs[i] / ANG_CGS for i, bi in
            enumerate(self._all_band_indices)])
        self._observed = np.array(kwargs.get('observed', []), dtype=bool)
        self._observation_types = kwargs.get('observation_types')
        self._n_obs = len(self._observed)
        # Mask of count-like (non-magnitude) observations. Assumes
        # `observation_types` is a numpy array -- TODO confirm caller.
        self._count_inds = self._observation_types != 'magnitude'
        self._o_times = self._times[self._observed]
        self._o_waves = self._waves[self._observed]
        self._o_otypes = self._observation_types[self._observed]

        # Select axis-1/axis-2 coordinate sets for the kernel variant.
        if self._type == 'full':
            self._times_1 = self._times
            self._times_2 = self._times
            self._waves_1 = self._waves
            self._waves_2 = self._waves
        elif self._type == 'oa':
            self._times_1 = self._o_times
            self._times_2 = self._times
            self._waves_1 = self._o_waves
            self._waves_2 = self._waves
        elif self._type == 'ao':
            self._times_1 = self._times
            self._times_2 = self._o_times
            self._waves_1 = self._waves
            self._waves_2 = self._o_waves
        else:
            self._times_1 = self._o_times
            self._times_2 = self._o_times
            self._waves_1 = self._o_waves
            self._waves_2 = self._o_waves

        # Time deltas (radial distance) for covariance matrix.
        # Stored as -0.5 * dt^2 so process() only divides by the width^2
        # before exponentiating.
        if self._codeltatime >= 0:
            self._dt2mat = self._times_1[:, None] - self._times_2[None, :]
            self._dt2mat **= 2
            self._dt2mat *= -0.5

        # Wavelength deltas (radial distance) for covariance matrix.
        if self._codeltalambda >= 0:
            self._dl2mat = self._waves_1[:, None] - self._waves_2[None, :]
            self._dl2mat **= 2
            self._dl2mat *= -0.5

        self._preprocessed = True
|
en
| 0.771652
|
Definitions for the `Kernel` class. # Important: Only define one ``Module`` class per file. Calculate the kernel for use in computing the likelihood score. Initialize module. Process module. # If we are trying to krig between observations, we need an array with # dimensions equal to the number of intermediate observations. # Get band variances # Get array of real observations. # Get array of model observations. # Handle band-specific variances if that option is enabled. # Compute relative errors for count-based observations. Receive requests from other ``Module`` objects. Construct kernel distance arrays. # Time deltas (radial distance) for covariance matrix. # Wavelength deltas (radial distance) for covariance matrix.
| 2.804156
| 3
|
deepchem/feat/material_featurizers/element_property_fingerprint.py
|
SanjeevaRDodlapati/deepchem
| 3
|
6627010
|
import numpy as np
from deepchem.utils.typing import PymatgenComposition
from deepchem.feat import MaterialCompositionFeaturizer
from typing import Any
class ElementPropertyFingerprint(MaterialCompositionFeaturizer):
    """
    Fingerprint of elemental properties from composition.

    Based on the data source chosen, returns properties and statistics
    (min, max, range, mean, standard deviation, mode) for a compound
    based on elemental stoichiometry. E.g., the average electronegativity
    of atoms in a crystal structure. The chemical fingerprint is a
    vector of these statistics. For a full list of properties and statistics,
    see ``matminer.featurizers.composition.ElementProperty(data_source).feature_labels()``.

    This featurizer requires the optional dependencies pymatgen and
    matminer. It may be useful when only crystal compositions are available
    (and not 3D coordinates).

    See references [1]_ [2]_ [3]_ [4]_ for more details.

    References
    ----------
    .. [1] MagPie data: Ward, L. et al. npj Comput Mater 2, 16028 (2016).
           https://doi.org/10.1038/npjcompumats.2016.28
    .. [2] Deml data: Deml, A. et al. Physical Review B 93, 085142 (2016).
           10.1103/PhysRevB.93.085142
    .. [3] Matminer: Ward, L. et al. Comput. Mater. Sci. 152, 60-69 (2018).
    .. [4] Pymatgen: Ong, S.P. et al. Comput. Mater. Sci. 68, 314-319 (2013).

    Examples
    --------
    >>> import pymatgen as mg
    >>> comp = mg.Composition("Fe2O3")
    >>> featurizer = ElementPropertyFingerprint()
    >>> features = featurizer.featurize([comp])

    Notes
    -----
    This class requires matminer and Pymatgen to be installed.
    `NaN` feature values are automatically converted to 0 by this featurizer.
    """

    def __init__(self, data_source: str = 'matminer'):
        """
        Parameters
        ----------
        data_source: str of "matminer", "magpie" or "deml" (default "matminer")
            Source for element property data.
        """
        self.data_source = data_source
        # Lazily constructed matminer ElementProperty instance; created on
        # first call to _featurize so importing this module does not require
        # matminer to be installed.
        self.ep_featurizer: Any = None

    def _featurize(self, composition: PymatgenComposition) -> np.ndarray:
        """
        Calculate chemical fingerprint from crystal composition.

        Parameters
        ----------
        composition: pymatgen.Composition object
            Composition object.

        Returns
        -------
        feats: np.ndarray
            Vector of properties and statistics derived from chemical
            stoichiometry. NaN values are replaced with 0; failed
            featurizations yield an empty array.

        Raises
        ------
        ImportError
            If matminer is not installed.
        """
        if self.ep_featurizer is None:
            try:
                from matminer.featurizers.composition import ElementProperty
                self.ep_featurizer = ElementProperty.from_preset(self.data_source)
            except ModuleNotFoundError:
                raise ImportError("This class requires matminer to be installed.")
        try:
            feats = self.ep_featurizer.featurize(composition)
        except Exception:
            # Best-effort featurization: any failure yields an empty vector.
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; narrowed to Exception.
            feats = []
        return np.nan_to_num(np.array(feats))
|
import numpy as np
from deepchem.utils.typing import PymatgenComposition
from deepchem.feat import MaterialCompositionFeaturizer
from typing import Any
class ElementPropertyFingerprint(MaterialCompositionFeaturizer):
"""
Fingerprint of elemental properties from composition.
Based on the data source chosen, returns properties and statistics
(min, max, range, mean, standard deviation, mode) for a compound
based on elemental stoichiometry. E.g., the average electronegativity
of atoms in a crystal structure. The chemical fingerprint is a
vector of these statistics. For a full list of properties and statistics,
see ``matminer.featurizers.composition.ElementProperty(data_source).feature_labels()``.
This featurizer requires the optional dependencies pymatgen and
matminer. It may be useful when only crystal compositions are available
(and not 3D coordinates).
See references [1]_ [2]_ [3]_ [4]_ for more details.
References
----------
.. [1] MagPie data: Ward, L. et al. npj Comput Mater 2, 16028 (2016).
https://doi.org/10.1038/npjcompumats.2016.28
.. [2] Deml data: Deml, A. et al. Physical Review B 93, 085142 (2016).
10.1103/PhysRevB.93.085142
.. [3] Matminer: Ward, L. et al. Comput. Mater. Sci. 152, 60-69 (2018).
.. [4] Pymatgen: Ong, S.P. et al. Comput. Mater. Sci. 68, 314-319 (2013).
Examples
--------
>>> import pymatgen as mg
>>> comp = mg.Composition("Fe2O3")
>>> featurizer = ElementPropertyFingerprint()
>>> features = featurizer.featurize([comp])
Notes
-----
This class requires matminer and Pymatgen to be installed.
`NaN` feature values are automatically converted to 0 by this featurizer.
"""
def __init__(self, data_source: str = 'matminer'):
"""
Parameters
----------
data_source: str of "matminer", "magpie" or "deml" (default "matminer")
Source for element property data.
"""
self.data_source = data_source
self.ep_featurizer: Any = None
def _featurize(self, composition: PymatgenComposition) -> np.ndarray:
"""
Calculate chemical fingerprint from crystal composition.
Parameters
----------
composition: pymatgen.Composition object
Composition object.
Returns
-------
feats: np.ndarray
Vector of properties and statistics derived from chemical
stoichiometry. Some values may be NaN.
"""
if self.ep_featurizer is None:
try:
from matminer.featurizers.composition import ElementProperty
self.ep_featurizer = ElementProperty.from_preset(self.data_source)
except ModuleNotFoundError:
raise ImportError("This class requires matminer to be installed.")
try:
feats = self.ep_featurizer.featurize(composition)
except:
feats = []
return np.nan_to_num(np.array(feats))
|
en
| 0.569786
|
Fingerprint of elemental properties from composition. Based on the data source chosen, returns properties and statistics (min, max, range, mean, standard deviation, mode) for a compound based on elemental stoichiometry. E.g., the average electronegativity of atoms in a crystal structure. The chemical fingerprint is a vector of these statistics. For a full list of properties and statistics, see ``matminer.featurizers.composition.ElementProperty(data_source).feature_labels()``. This featurizer requires the optional dependencies pymatgen and matminer. It may be useful when only crystal compositions are available (and not 3D coordinates). See references [1]_ [2]_ [3]_ [4]_ for more details. References ---------- .. [1] MagPie data: Ward, L. et al. npj Comput Mater 2, 16028 (2016). https://doi.org/10.1038/npjcompumats.2016.28 .. [2] Deml data: Deml, A. et al. Physical Review B 93, 085142 (2016). 10.1103/PhysRevB.93.085142 .. [3] Matminer: Ward, L. et al. Comput. Mater. Sci. 152, 60-69 (2018). .. [4] Pymatgen: Ong, S.P. et al. Comput. Mater. Sci. 68, 314-319 (2013). Examples -------- >>> import pymatgen as mg >>> comp = mg.Composition("Fe2O3") >>> featurizer = ElementPropertyFingerprint() >>> features = featurizer.featurize([comp]) Notes ----- This class requires matminer and Pymatgen to be installed. `NaN` feature values are automatically converted to 0 by this featurizer. Parameters ---------- data_source: str of "matminer", "magpie" or "deml" (default "matminer") Source for element property data. Calculate chemical fingerprint from crystal composition. Parameters ---------- composition: pymatgen.Composition object Composition object. Returns ------- feats: np.ndarray Vector of properties and statistics derived from chemical stoichiometry. Some values may be NaN.
| 2.393363
| 2
|
sumologic_collectd_metrics/metrics_config.py
|
joy-highfidelity/sumologic-collectd-plugin
| 0
|
6627011
|
<filename>sumologic_collectd_metrics/metrics_config.py
# -*- coding: utf-8 -*-
from . metrics_util import validate_non_empty, validate_string_type, validate_positive, \
validate_non_negative, validate_field
class ConfigOptions(object):
    """
    Config options

    Names of the options recognized in the plugin's collectd.conf block.
    Each attribute maps a Python identifier to the option key string as it
    appears in the config file.
    """
    types_db = 'TypesDB'
    url = 'URL'
    # Http header options
    dimension_tags = 'Dimensions'
    meta_tags = 'Metadata'
    source_name = 'SourceName'
    host_name = 'SourceHost'
    source_category = 'SourceCategory'
    # Metrics Batching options
    max_batch_size = 'MaxBatchSize'
    max_batch_interval = 'MaxBatchInterval'
    # Http post request frequency option
    http_post_interval = 'HttpPostInterval'
    # Http retry options
    retry_initial_delay = 'RetryInitialDelay'
    retry_max_attempts = 'RetryMaxAttempts'
    retry_max_delay = 'RetryMaxDelay'
    retry_backoff = 'RetryBackOff'
    retry_jitter_min = 'RetryJitterMin'
    retry_jitter_max = 'RetryJitterMax'
    # Memory option
    max_requests_to_buffer = 'MaxRequestsToBuffer'
    # Content encoding option
    content_encoding = 'ContentEncoding'
    # Static option, not configurable yet. Default is application/vnd.sumologic.carbon2
    content_type = 'ContentType'
    shutdown_max_wait = "ShutdownMaxWait"  # seconds
class MetricsConfig:
    """
    Configuration for sumologic collectd plugin.

    Holds the parsed option values (``self.conf``) and the types.db type
    definitions (``self.types``).
    """

    # Content encodings accepted for the HTTP payload.
    _content_encoding_set = frozenset(['deflate', 'gzip', 'none'])

    def __init__(self, collectd):
        """
        Init MetricsConfig with default config.

        :param collectd: collectd module handle, used for logging.
        """
        self.collectd = collectd
        self.conf = self.default_config()
        self.types = {}

        collectd.info('Initialized MetricsConfig with default config %s' % self.conf)

    @staticmethod
    def default_config():
        """Return a fresh dict of default option values."""
        return {
            ConfigOptions.http_post_interval: 0.1,
            ConfigOptions.max_batch_size: 5000,
            ConfigOptions.max_batch_interval: 1,
            ConfigOptions.retry_initial_delay: 0,
            ConfigOptions.retry_max_attempts: 10,
            ConfigOptions.retry_max_delay: 100,
            ConfigOptions.retry_backoff: 2,
            ConfigOptions.retry_jitter_min: 0,
            ConfigOptions.retry_jitter_max: 10,
            ConfigOptions.max_requests_to_buffer: 1000,
            ConfigOptions.content_encoding: 'deflate',
            ConfigOptions.content_type: 'application/vnd.sumologic.carbon2',
            ConfigOptions.shutdown_max_wait: 5
        }

    def parse_config(self, config):
        """
        Parse the python plugin configurations in collectd.conf.

        Validates each option value, then performs cross-option checks.
        Raises on any validation failure or missing required option
        (URL, TypesDB).
        """
        try:
            for child in config.children:
                if child.key == ConfigOptions.types_db:
                    for v in child.values:
                        self._parse_types(v)
                elif child.key == ConfigOptions.url:
                    url = child.values[0]
                    self.conf[child.key] = url
                    validate_non_empty(url, child.key)
                elif child.key in [ConfigOptions.dimension_tags, ConfigOptions.meta_tags]:
                    self._parse_tags(child)
                elif child.key in [ConfigOptions.source_name, ConfigOptions.host_name,
                                   ConfigOptions.source_category]:
                    s = child.values[0]
                    validate_non_empty(s, child.key)
                    validate_string_type(s, child.key, 'Value', 'Key')
                    self.conf[child.key] = s
                elif child.key == ConfigOptions.http_post_interval:
                    f = float(child.values[0])
                    validate_positive(f, child.key)
                    self.conf[child.key] = f
                elif child.key in [ConfigOptions.max_batch_size, ConfigOptions.max_batch_interval,
                                   ConfigOptions.retry_max_attempts, ConfigOptions.retry_max_delay,
                                   ConfigOptions.retry_backoff,
                                   ConfigOptions.max_requests_to_buffer]:
                    i = int(child.values[0])
                    validate_positive(i, child.key)
                    self.conf[child.key] = i
                elif child.key in [ConfigOptions.retry_initial_delay,
                                   ConfigOptions.retry_jitter_min, ConfigOptions.retry_jitter_max]:
                    i = int(child.values[0])
                    validate_non_negative(i, child.key)
                    self.conf[child.key] = i
                elif child.key == ConfigOptions.content_encoding:
                    s = child.values[0]
                    validate_non_empty(s, child.key)
                    validate_string_type(s, child.key, 'Value', 'Key')
                    content_encoding = s.lower()
                    if content_encoding not in self._content_encoding_set:
                        raise Exception('Unknown ContentEncoding %s specified. ContentEncoding '
                                        'must be deflate, gzip, or none' % s)
                    self.conf[child.key] = content_encoding
                else:
                    self.collectd.warning('Unknown configuration %s, ignored.' % child.key)
        except Exception as e:
            self.collectd.error('Failed to parse configurations due to %s' % str(e))
            raise e

        # Cross-option validation after all children are parsed.
        if ConfigOptions.url not in self.conf:
            raise Exception('Specify %s in collectd.conf.' % ConfigOptions.url)

        if not self.types:
            raise Exception('Specify %s in collectd.conf.' % ConfigOptions.types_db)

        http_post_interval = self.conf[ConfigOptions.http_post_interval]
        max_batch_interval = self.conf[ConfigOptions.max_batch_interval]

        if http_post_interval > max_batch_interval:
            raise Exception('Specify HttpPostInterval %f as float between 0 and '
                            'MaxBatchInterval %d' %(http_post_interval, max_batch_interval))

        retry_jitter_min = self.conf[ConfigOptions.retry_jitter_min]
        retry_jitter_max = self.conf[ConfigOptions.retry_jitter_max]

        if retry_jitter_min > retry_jitter_max:
            raise Exception('Specify RetryJitterMin %d to be less or equal to RetryJitterMax %d' %
                            (retry_jitter_min, retry_jitter_max))

        self.collectd.info('Updated MetricsConfig %s with config file %s ' % (self.conf, config))

    # parse types.db file
    def _parse_types(self, db):
        """Parse a collectd types.db file into ``self.types``.

        Each valid line maps a type name to a list of 4-field data-source
        specs ([name, kind, min, max]); malformed data sources are logged
        and skipped.
        """
        try:
            # Context manager ensures the file is closed even when a parse
            # error is raised mid-file (the original leaked the handle).
            with open(db, 'r') as f:
                for line in f:
                    fields = line.split()
                    if len(fields) < 2:
                        continue
                    type_name = fields[0]
                    if type_name[0] == '#':
                        continue
                    v = []
                    for ds in fields[1:]:
                        ds = ds.rstrip(',')
                        ds_fields = ds.split(':')

                        if len(ds_fields) != 4:
                            self.collectd.warning('Cannot parse data source %s on type %s'
                                                  % (ds, type_name))
                            continue
                        v.append(ds_fields)
                    self.types[type_name] = v

            self.collectd.info('Parsed types %s with types_db file %s ' % (self.types, db))
        except Exception as e:
            self.collectd.error('Parse types %s failed with %s' %(db, str(e)))
            raise e

    # parse dimension_tags/meta_tags specified in collectd.conf
    def _parse_tags(self, child):
        """Parse flat (key, value, key, value, ...) option values into pairs."""
        if len(child.values) % 2 != 0:
            raise Exception('Missing tags key/value in options %s.' % str(child.values))

        for v in child.values:
            validate_field(v, child.key, 'Value', 'Key')

        # Materialize as a list: a bare zip object is single-use and would
        # be exhausted after one iteration (and logs as '<zip object ...>').
        self.conf[child.key] = list(zip(*(iter(child.values),) * 2))

        self.collectd.info('Parsed %s tags %s' % (child.key, self.conf[child.key]))
|
<filename>sumologic_collectd_metrics/metrics_config.py
# -*- coding: utf-8 -*-
from . metrics_util import validate_non_empty, validate_string_type, validate_positive, \
validate_non_negative, validate_field
class ConfigOptions(object):
"""
Config options
"""
types_db = 'TypesDB'
url = 'URL'
# Http header options
dimension_tags = 'Dimensions'
meta_tags = 'Metadata'
source_name = 'SourceName'
host_name = 'SourceHost'
source_category = 'SourceCategory'
# Metrics Batching options
max_batch_size = 'MaxBatchSize'
max_batch_interval = 'MaxBatchInterval'
# Http post request frequency option
http_post_interval = 'HttpPostInterval'
# Http retry options
retry_initial_delay = 'RetryInitialDelay'
retry_max_attempts = 'RetryMaxAttempts'
retry_max_delay = 'RetryMaxDelay'
retry_backoff = 'RetryBackOff'
retry_jitter_min = 'RetryJitterMin'
retry_jitter_max = 'RetryJitterMax'
# Memory option
max_requests_to_buffer = 'MaxRequestsToBuffer'
# Content encoding option
content_encoding = 'ContentEncoding'
# Static option, not configurable yet. Default is application/vnd.sumologic.carbon2
content_type = 'ContentType'
shutdown_max_wait = "ShutdownMaxWait" # seconds
class MetricsConfig:
"""
Configuration for sumologic collectd plugin
"""
_content_encoding_set = frozenset(['deflate', 'gzip', 'none'])
def __init__(self, collectd):
"""
Init MetricsConfig with default config
"""
self.collectd = collectd
self.conf = self.default_config()
self.types = {}
collectd.info('Initialized MetricsConfig with default config %s' % self.conf)
@staticmethod
def default_config():
return {
ConfigOptions.http_post_interval: 0.1,
ConfigOptions.max_batch_size: 5000,
ConfigOptions.max_batch_interval: 1,
ConfigOptions.retry_initial_delay: 0,
ConfigOptions.retry_max_attempts: 10,
ConfigOptions.retry_max_delay: 100,
ConfigOptions.retry_backoff: 2,
ConfigOptions.retry_jitter_min: 0,
ConfigOptions.retry_jitter_max: 10,
ConfigOptions.max_requests_to_buffer: 1000,
ConfigOptions.content_encoding: 'deflate',
ConfigOptions.content_type: 'application/vnd.sumologic.carbon2',
ConfigOptions.shutdown_max_wait: 5
}
def parse_config(self, config):
"""
Parse the python plugin configurations in collectd.conf
"""
try:
for child in config.children:
if child.key == ConfigOptions.types_db:
for v in child.values:
self._parse_types(v)
elif child.key == ConfigOptions.url:
url = child.values[0]
self.conf[child.key] = url
validate_non_empty(url, child.key)
elif child.key in [ConfigOptions.dimension_tags, ConfigOptions.meta_tags]:
self._parse_tags(child)
elif child.key in [ConfigOptions.source_name, ConfigOptions.host_name,
ConfigOptions.source_category]:
s = child.values[0]
validate_non_empty(s, child.key)
validate_string_type(s, child.key, 'Value', 'Key')
self.conf[child.key] = s
elif child.key == ConfigOptions.http_post_interval:
f = float(child.values[0])
validate_positive(f, child.key)
self.conf[child.key] = f
elif child.key in [ConfigOptions.max_batch_size, ConfigOptions.max_batch_interval,
ConfigOptions.retry_max_attempts, ConfigOptions.retry_max_delay,
ConfigOptions.retry_backoff,
ConfigOptions.max_requests_to_buffer]:
i = int(child.values[0])
validate_positive(i, child.key)
self.conf[child.key] = i
elif child.key in [ConfigOptions.retry_initial_delay,
ConfigOptions.retry_jitter_min, ConfigOptions.retry_jitter_max]:
i = int(child.values[0])
validate_non_negative(i, child.key)
self.conf[child.key] = i
elif child.key == ConfigOptions.content_encoding:
s = child.values[0]
validate_non_empty(s, child.key)
validate_string_type(s, child.key, 'Value', 'Key')
content_encoding = s.lower()
if content_encoding not in self._content_encoding_set:
raise Exception('Unknown ContentEncoding %s specified. ContentEncoding '
'must be deflate, gzip, or none' % s)
self.conf[child.key] = content_encoding
else:
self.collectd.warning('Unknown configuration %s, ignored.' % child.key)
except Exception as e:
self.collectd.error('Failed to parse configurations due to %s' % str(e))
raise e
if ConfigOptions.url not in self.conf:
raise Exception('Specify %s in collectd.conf.' % ConfigOptions.url)
if not self.types:
raise Exception('Specify %s in collectd.conf.' % ConfigOptions.types_db)
http_post_interval = self.conf[ConfigOptions.http_post_interval]
max_batch_interval = self.conf[ConfigOptions.max_batch_interval]
if http_post_interval > max_batch_interval:
raise Exception('Specify HttpPostInterval %f as float between 0 and '
'MaxBatchInterval %d' %(http_post_interval, max_batch_interval))
retry_jitter_min = self.conf[ConfigOptions.retry_jitter_min]
retry_jitter_max = self.conf[ConfigOptions.retry_jitter_max]
if retry_jitter_min > retry_jitter_max:
raise Exception('Specify RetryJitterMin %d to be less or equal to RetryJitterMax %d' %
(retry_jitter_min, retry_jitter_max))
self.collectd.info('Updated MetricsConfig %s with config file %s ' % (self.conf, config))
# parse types.db file
def _parse_types(self, db):
try:
f = open(db, 'r')
for line in f:
fields = line.split()
if len(fields) < 2:
continue
type_name = fields[0]
if type_name[0] == '#':
continue
v = []
for ds in fields[1:]:
ds = ds.rstrip(',')
ds_fields = ds.split(':')
if len(ds_fields) != 4:
self.collectd.warning('Cannot parse data source %s on type %s'
% (ds, type_name))
continue
v.append(ds_fields)
self.types[type_name] = v
f.close()
self.collectd.info('Parsed types %s with types_db file %s ' % (self.types, db))
except Exception as e:
self.collectd.error('Parse types %s failed with %s' %(db, str(e)))
raise e
# parse dimension_tags/meta_tags specified in collectd.conf
def _parse_tags(self, child):
if len(child.values) % 2 != 0:
raise Exception('Missing tags key/value in options %s.' % str(child.values))
for v in child.values:
validate_field(v, child.key, 'Value', 'Key')
self.conf[child.key] = zip(*(iter(child.values),) * 2)
self.collectd.info('Parsed %s tags %s' % (child.key, self.conf[child.key]))
|
en
| 0.30402
|
# -*- coding: utf-8 -*- Config options # Http header options # Metrics Batching options # Http post request frequency option # Http retry options # Memory option # Content encoding option # Static option, not configurable yet. Default is application/vnd.sumologic.carbon2 # seconds Configuration for sumologic collectd plugin Init MetricsConfig with default config Parse the python plugin configurations in collectd.conf # parse types.db file # parse dimension_tags/meta_tags specified in collectd.conf
| 1.805408
| 2
|
src/python/pants/backend/jvm/tasks/coursier/coursier_subsystem.py
|
viktortnk/pants
| 1
|
6627012
|
<reponame>viktortnk/pants
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from typing import List
import requests
from pants.base.build_environment import get_pants_cachedir
from pants.core.util_rules.external_tool import ExternalTool, ExternalToolError
from pants.engine.platform import Platform
class CoursierSubsystem(ExternalTool):
    """Common configuration items for coursier tasks.

    :API: public
    """

    options_scope = "coursier"
    name = "coursier"
    default_version = "1.1.0.cf365ea27a710d5f09db1f0a6feee129aa1fc417"
    # The artifact is a platform-independent jar, so the sha256/size entry is
    # identical for both platforms.
    default_known_versions = [
        f"1.1.0.cf365ea27a710d5f09db1f0a6feee129aa1fc417|{plat}|24945c529eaa32a16a70256ac357108edc1b51a4dd45b656a1808c0cbf00617e|27573328"
        for plat in ["darwin", "linux"]
    ]
    _default_url = "https://github.com/coursier/coursier/releases/download/pants_release_1.5.x/coursier-cli-{version}.jar"

    class Error(Exception):
        """Indicates an error bootstrapping coursier."""

    @classmethod
    def register_options(cls, register):
        super().register_options(register)
        register(
            "--cache-dir",
            type=str,
            fingerprint=True,
            default=os.path.join(get_pants_cachedir(), "coursier"),
            # The previous help text was copied from a version option and did
            # not describe this option at all.
            help="Location of the coursier cache used for resolved artifacts.",
        )
        register(
            "--repos",
            type=list,
            fingerprint=True,
            help="Maven style repos",
            default=[
                "https://maven-central.storage-download.googleapis.com/maven2",
                "https://repo1.maven.org/maven2",
            ],
        )
        register(
            "--fetch-options",
            type=list,
            fingerprint=True,
            default=[
                # Quiet mode, so coursier does not show resolve progress,
                # but still prints results if --report is specified.
                "-q",
                # Do not use default public maven repo.
                "--no-default",
                # Concurrent workers
                "-n",
                "8",
            ],
            help="Additional options to pass to coursier fetch. See `coursier fetch --help`",
        )
        register(
            "--artifact-types",
            type=list,
            fingerprint=True,
            default=["jar", "bundle", "test-jar", "maven-plugin", "src", "doc"],
            help="Specify the type of artifacts to fetch. See `packaging` at https://maven.apache.org/pom.html#Maven_Coordinates, "
            "except `src` and `doc` being coursier specific terms for sources and javadoc.",
        )
        # TODO(yic): Use a published version of Coursier. https://github.com/pantsbuild/pants/issues/6852
        register(
            "--bootstrap-jar-urls",
            fingerprint=True,
            type=list,
            default=[cls._default_url],
            help="Locations to download a bootstrap version of Coursier from.",
        )

    def generate_url(self, plat: Platform) -> str:
        """Return the first configured bootstrap URL that answers a HEAD request.

        Raises ExternalToolError when no candidate URL responds successfully.
        """
        # We need to pick one URL to return, so we check for the first one that returns a
        # 200 for a HEAD request.
        # TODO: Allow ExternalTool to handle multiple URLs itself? May be overkill.
        # It's not even clear that this subsystem really needs to do so.
        version = self.get_options().version
        urls_to_try: List[str] = [
            url.format(version=version) for url in self.get_options().bootstrap_jar_urls
        ]
        for url in urls_to_try:
            # Bound the request so a hung mirror cannot block bootstrap forever.
            if requests.head(url, timeout=10).ok:
                return url
        raise ExternalToolError()  # Calling code will generate a sensible error message.
|
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from typing import List
import requests
from pants.base.build_environment import get_pants_cachedir
from pants.core.util_rules.external_tool import ExternalTool, ExternalToolError
from pants.engine.platform import Platform
class CoursierSubsystem(ExternalTool):
"""Common configuration items for coursier tasks.
:API: public
"""
options_scope = "coursier"
name = "coursier"
default_version = "1.1.0.cf365ea27a710d5f09db1f0a6feee129aa1fc417"
default_known_versions = [
f"1.1.0.cf365ea27a710d5f09db1f0a6feee129aa1fc417|{plat}|24945c529eaa32a16a70256ac357108edc1b51a4dd45b656a1808c0cbf00617e|27573328"
for plat in ["darwin", "linux"]
]
_default_url = "https://github.com/coursier/coursier/releases/download/pants_release_1.5.x/coursier-cli-{version}.jar"
class Error(Exception):
"""Indicates an error bootstrapping coursier."""
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--cache-dir",
type=str,
fingerprint=True,
default=os.path.join(get_pants_cachedir(), "coursier"),
help="Version paired with --bootstrap-jar-url, in order to invalidate and fetch the new version.",
)
register(
"--repos",
type=list,
fingerprint=True,
help="Maven style repos",
default=[
"https://maven-central.storage-download.googleapis.com/maven2",
"https://repo1.maven.org/maven2",
],
)
register(
"--fetch-options",
type=list,
fingerprint=True,
default=[
# Quiet mode, so coursier does not show resolve progress,
# but still prints results if --report is specified.
"-q",
# Do not use default public maven repo.
"--no-default",
# Concurrent workers
"-n",
"8",
],
help="Additional options to pass to coursier fetch. See `coursier fetch --help`",
)
register(
"--artifact-types",
type=list,
fingerprint=True,
default=["jar", "bundle", "test-jar", "maven-plugin", "src", "doc"],
help="Specify the type of artifacts to fetch. See `packaging` at https://maven.apache.org/pom.html#Maven_Coordinates, "
"except `src` and `doc` being coursier specific terms for sources and javadoc.",
)
# TODO(yic): Use a published version of Coursier. https://github.com/pantsbuild/pants/issues/6852
register(
"--bootstrap-jar-urls",
fingerprint=True,
type=list,
default=[cls._default_url],
help="Locations to download a bootstrap version of Coursier from.",
)
def generate_url(self, plat: Platform) -> str:
# We need to pick one URL to return, so we check for the first one that returns a
# 200 for a HEAD request.
# TODO: Allow ExternalTool to handle multiple URLs itself? May be overkill.
# It's not even clear that this subsystem really needs to do so.
version = self.get_options().version
urls_to_try: List[str] = [
url.format(version=version) for url in self.get_options().bootstrap_jar_urls
]
for url in urls_to_try:
if requests.head(url).ok:
return url
raise ExternalToolError() # Calling code will generate a sensible error message.
|
en
| 0.738118
|
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). Common configuration items for coursier tasks. :API: public Indicates an error bootstrapping coursier. # Quiet mode, so coursier does not show resolve progress, # but still prints results if --report is specified. # Do not use default public maven repo. # Concurrent workers #Maven_Coordinates, " # TODO(yic): Use a published version of Coursier. https://github.com/pantsbuild/pants/issues/6852 # We need to pick one URL to return, so we check for the first one that returns a # 200 for a HEAD request. # TODO: Allow ExternalTool to handle multiple URLs itself? May be overkill. # It's not even clear that this subsystem really needs to do so. # Calling code will generate a sensible error message.
| 2.063622
| 2
|
mssdk/pro/cons.py
|
baierxxl/mssdk
| 0
|
6627013
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2020/12/24 16:28
Desc: API常量文件
"""
TOKEN_F_P = '<PASSWORD>'
TOKEN_ERR_MSG = '请设置 mssdk pro 的 token 凭证码,如果没有权限,请访问 https://qhkch.com/ 注册申请'
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2020/12/24 16:28
Desc: API常量文件
"""
TOKEN_F_P = '<PASSWORD>'
TOKEN_ERR_MSG = '请设置 mssdk pro 的 token 凭证码,如果没有权限,请访问 https://qhkch.com/ 注册申请'
|
en
| 0.296957
|
#!/usr/bin/env python # -*- coding:utf-8 -*- Date: 2020/12/24 16:28 Desc: API常量文件
| 1.276979
| 1
|
Intermediate/19/Project/turtleRace.py
|
Matthew1906/100DaysOfPython
| 1
|
6627014
|
<reponame>Matthew1906/100DaysOfPython<gh_stars>1-10
# Import modules
from turtle import Turtle, Screen
from random import randint
colors = ['red','purple','yellow','green','blue', 'black']
def init_turtle(index, y):
turtle = Turtle()
turtle.shape("turtle")
turtle.color(colors[index])
turtle.penup()
turtle.goto(x=-230,y=y)
return turtle
canvas = Screen()
canvas.setup(width = 500, height = 400)
color_choice = canvas.textinput(title = "Make your bet", prompt="Enter a color: ")
turtles = []
for i in range(6):
turtle = init_turtle(i, 100 -((i+1)*30))
turtles.append(turtle)
running = True
while running:
for turtle in turtles:
distance = randint(1,30)
turtle.forward(distance)
if turtle.xcor()>=220:
running = False
print(f"The {turtle.pencolor().capitalize()} Turtle won the race!")
if turtle.pencolor() == color_choice.lower():
print("You won the game!")
else:
print("You lost!")
break
canvas.exitonclick()
|
# Import modules
from turtle import Turtle, Screen
from random import randint
colors = ['red','purple','yellow','green','blue', 'black']
def init_turtle(index, y):
turtle = Turtle()
turtle.shape("turtle")
turtle.color(colors[index])
turtle.penup()
turtle.goto(x=-230,y=y)
return turtle
canvas = Screen()
canvas.setup(width = 500, height = 400)
color_choice = canvas.textinput(title = "Make your bet", prompt="Enter a color: ")
turtles = []
for i in range(6):
turtle = init_turtle(i, 100 -((i+1)*30))
turtles.append(turtle)
running = True
while running:
for turtle in turtles:
distance = randint(1,30)
turtle.forward(distance)
if turtle.xcor()>=220:
running = False
print(f"The {turtle.pencolor().capitalize()} Turtle won the race!")
if turtle.pencolor() == color_choice.lower():
print("You won the game!")
else:
print("You lost!")
break
canvas.exitonclick()
|
fa
| 0.067518
|
# Import modules
| 4.182977
| 4
|
online_test/settings.py
|
patilnayan92/etonlinetest
| 2
|
6627015
|
<filename>online_test/settings.py
"""
Django settings for online_test project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
from nayan.pipeline.settings import AUTH_PIPELINE
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# The directory where user data can be saved. This directory will be
# world-writable and all user code will be written and saved here by the
# code server with each user having their own sub-directory.
OUTPUT_DIR = os.path.join(BASE_DIR, "nayan_data", "output")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
URL_ROOT = ''
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'nayan',
'taggit',
# 'django.contrib.sites'
'social.apps.django_app.default',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'nayan.middleware.one_session_per_user.OneSessionPerUserMiddleware',
'nayan.middleware.user_time_zone.TimezoneMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#'social.apps.django_app.middleware.SocialAuthExceptionMiddleware',
)
ROOT_URLCONF = 'online_test.urls'
WSGI_APPLICATION = 'online_test.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/exam/login/'
LOGIN_REDIRECT_URL = '/exam/'
#RESET_URL = '/exam/reset/'
#SOCIAL_AUTH_LOGIN_ERROR_URL = '/exam/login/'
MEDIA_URL = "/data/"
MEDIA_ROOT = os.path.join(BASE_DIR, "nayan_data", "data")
# Set this varable to <True> if smtp-server is not allowing to send email.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '<EMAIL>'
EMAIL_HOST_PASSWORD = ''
EMAIL_PORT = 587
# Set EMAIL_BACKEND to 'django.core.mail.backends.smtp.EmailBackend'
# in production
# EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# SENDER_EMAIL, REPLY_EMAIL, PRODUCTION_URL, IS_DEVELOPMENT are used in email
# verification. Set the variables accordingly to avoid errors in production
# This email id will be used as <from address> for sending emails.
# For example <EMAIL> can be used.
SENDER_EMAIL = 'your_email'
# Organisation/Indivudual Name.
SENDER_NAME = 'your_name'
# This email id will be used by users to send their queries
# For example <EMAIL> can be used.
REPLY_EMAIL = 'your_reply_email'
# This url will be used in email verification to create activation link.
# Add your hosted url to this variable.
# For example https://127.0.0.1:8000 or 127.0.0.1:8000
PRODUCTION_URL = 'https://127.0.0.1:8000'
# Set this variable to <False> once the project is in production.
# If this variable is kept <True> in production, email will not be verified.
IS_DEVELOPMENT = True
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': 'nayan/templates',
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
'django.contrib.messages.context_processors.messages',
],
'debug': False,
}
},
]
# SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = 'GOOGLE_KEY_PROVIDED'
# SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'GOOGLE_SECRET_PROVIDED'
# SOCIAL_AUTH_FACEBOOK_KEY = 'FACEBOOK_KEY_PROVIDED'
# SOCIAL_AUTH_FACEBOOK_SECRET = 'FACEBOOK_SECRET_PROVIDED'
AUTHENTICATION_BACKENDS = (
# 'social.backends.google.GoogleOAuth2',
# 'social.backends.facebook.FacebookOAuth2',
'django.contrib.auth.backends.ModelBackend',
# 'allauth.account.auth_backends.AuthenticationBackend',
)
"""SOCIAL_AUTH_PIPELINE = AUTH_PIPELINE
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id, name, email'
}
"""
|
<filename>online_test/settings.py
"""
Django settings for online_test project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
from nayan.pipeline.settings import AUTH_PIPELINE
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# The directory where user data can be saved. This directory will be
# world-writable and all user code will be written and saved here by the
# code server with each user having their own sub-directory.
OUTPUT_DIR = os.path.join(BASE_DIR, "nayan_data", "output")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
URL_ROOT = ''
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'nayan',
'taggit',
# 'django.contrib.sites'
'social.apps.django_app.default',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'nayan.middleware.one_session_per_user.OneSessionPerUserMiddleware',
'nayan.middleware.user_time_zone.TimezoneMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#'social.apps.django_app.middleware.SocialAuthExceptionMiddleware',
)
ROOT_URLCONF = 'online_test.urls'
WSGI_APPLICATION = 'online_test.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/exam/login/'
LOGIN_REDIRECT_URL = '/exam/'
#RESET_URL = '/exam/reset/'
#SOCIAL_AUTH_LOGIN_ERROR_URL = '/exam/login/'
MEDIA_URL = "/data/"
MEDIA_ROOT = os.path.join(BASE_DIR, "nayan_data", "data")
# Set this varable to <True> if smtp-server is not allowing to send email.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '<EMAIL>'
EMAIL_HOST_PASSWORD = ''
EMAIL_PORT = 587
# Set EMAIL_BACKEND to 'django.core.mail.backends.smtp.EmailBackend'
# in production
# EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# SENDER_EMAIL, REPLY_EMAIL, PRODUCTION_URL, IS_DEVELOPMENT are used in email
# verification. Set the variables accordingly to avoid errors in production
# This email id will be used as <from address> for sending emails.
# For example <EMAIL> can be used.
SENDER_EMAIL = 'your_email'
# Organisation/Indivudual Name.
SENDER_NAME = 'your_name'
# This email id will be used by users to send their queries
# For example <EMAIL> can be used.
REPLY_EMAIL = 'your_reply_email'
# This url will be used in email verification to create activation link.
# Add your hosted url to this variable.
# For example https://127.0.0.1:8000 or 127.0.0.1:8000
PRODUCTION_URL = 'https://127.0.0.1:8000'
# Set this variable to <False> once the project is in production.
# If this variable is kept <True> in production, email will not be verified.
IS_DEVELOPMENT = True
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': 'nayan/templates',
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
'django.contrib.messages.context_processors.messages',
],
'debug': False,
}
},
]
# SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = 'GOOGLE_KEY_PROVIDED'
# SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'GOOGLE_SECRET_PROVIDED'
# SOCIAL_AUTH_FACEBOOK_KEY = 'FACEBOOK_KEY_PROVIDED'
# SOCIAL_AUTH_FACEBOOK_SECRET = 'FACEBOOK_SECRET_PROVIDED'
AUTHENTICATION_BACKENDS = (
# 'social.backends.google.GoogleOAuth2',
# 'social.backends.facebook.FacebookOAuth2',
'django.contrib.auth.backends.ModelBackend',
# 'allauth.account.auth_backends.AuthenticationBackend',
)
"""SOCIAL_AUTH_PIPELINE = AUTH_PIPELINE
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id, name, email'
}
"""
|
en
| 0.655548
|
Django settings for online_test project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) # The directory where user data can be saved. This directory will be # world-writable and all user code will be written and saved here by the # code server with each user having their own sub-directory. # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # SECURITY WARNING: don't run with debug turned on in production! # Application definition # 'django.contrib.sites' #'social.apps.django_app.middleware.SocialAuthExceptionMiddleware', # Database # https://docs.djangoproject.com/en/1.6/ref/settings/#databases # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ #RESET_URL = '/exam/reset/' #SOCIAL_AUTH_LOGIN_ERROR_URL = '/exam/login/' # Set this varable to <True> if smtp-server is not allowing to send email. # Set EMAIL_BACKEND to 'django.core.mail.backends.smtp.EmailBackend' # in production # EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' # SENDER_EMAIL, REPLY_EMAIL, PRODUCTION_URL, IS_DEVELOPMENT are used in email # verification. Set the variables accordingly to avoid errors in production # This email id will be used as <from address> for sending emails. # For example <EMAIL> can be used. # Organisation/Indivudual Name. # This email id will be used by users to send their queries # For example <EMAIL> can be used. # This url will be used in email verification to create activation link. # Add your hosted url to this variable. 
# For example https://127.0.0.1:8000 or 127.0.0.1:8000 # Set this variable to <False> once the project is in production. # If this variable is kept <True> in production, email will not be verified. # SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = 'GOOGLE_KEY_PROVIDED' # SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'GOOGLE_SECRET_PROVIDED' # SOCIAL_AUTH_FACEBOOK_KEY = 'FACEBOOK_KEY_PROVIDED' # SOCIAL_AUTH_FACEBOOK_SECRET = 'FACEBOOK_SECRET_PROVIDED' # 'social.backends.google.GoogleOAuth2', # 'social.backends.facebook.FacebookOAuth2', # 'allauth.account.auth_backends.AuthenticationBackend', SOCIAL_AUTH_PIPELINE = AUTH_PIPELINE SOCIAL_AUTH_FACEBOOK_SCOPE = ['email'] SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = { 'fields': 'id, name, email' }
| 1.891263
| 2
|
klab/bio/protein_sequence.py
|
Kortemme-Lab/klab
| 2
|
6627016
|
<gh_stars>1-10
#!/usr/bin/env python2
# encoding: utf-8
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
one_to_three = {
'A':'ALA',
'C':'CYS',
'D':'ASP',
'E':'GLU',
'F':'PHE',
'G':'GLY',
'H':'HIS',
'I':'ILE',
'K':'LYS',
'L':'LEU',
'M':'MET',
'N':'ASN',
'P':'PRO',
'Q':'GLN',
'R':'ARG',
'S':'SER',
'T':'THR',
'V':'VAL',
'W':'TRP',
'Y':'TYR',
}
three_to_one = { v : k for k, v in one_to_three.items() }
|
#!/usr/bin/env python2
# encoding: utf-8
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
one_to_three = {
'A':'ALA',
'C':'CYS',
'D':'ASP',
'E':'GLU',
'F':'PHE',
'G':'GLY',
'H':'HIS',
'I':'ILE',
'K':'LYS',
'L':'LEU',
'M':'MET',
'N':'ASN',
'P':'PRO',
'Q':'GLN',
'R':'ARG',
'S':'SER',
'T':'THR',
'V':'VAL',
'W':'TRP',
'Y':'TYR',
}
three_to_one = { v : k for k, v in one_to_three.items() }
|
en
| 0.759424
|
#!/usr/bin/env python2 # encoding: utf-8 # The MIT License (MIT) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE.
| 1.364174
| 1
|
examples/python/alldifferent_except_0.py
|
prezaei85/or-tools
| 3
|
6627017
|
<reponame>prezaei85/or-tools<filename>examples/python/alldifferent_except_0.py
# Copyright 2010 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All different except 0 Google CP Solver.
Decomposition of global constraint alldifferent_except_0.
From Global constraint catalogue:
http://www.emn.fr/x-info/sdemasse/gccat/Calldifferent_except_0.html
'''
Enforce all variables of the collection VARIABLES to take distinct
values, except those variables that are assigned to 0.
Example
(<5, 0, 1, 9, 0, 3>)
The alldifferent_except_0 constraint holds since all the values
(that are different from 0) 5, 1, 9 and 3 are distinct.
'''
Compare with the following models:
* Comet: http://hakank.org/comet/alldifferent_except_0.co
* ECLiPSe: http://hakank.org/eclipse/alldifferent_except_0.ecl
* Tailor/Essence': http://hakank.org/tailor/alldifferent_except_0.eprime
* Gecode: http://hakank.org/gecode/alldifferent_except_0.cpp
* Gecode/R: http://hakank.org/gecode_r/all_different_except_0.rb
* MiniZinc: http://hakank.org/minizinc/alldifferent_except_0.mzn
* SICStus_ http://hakank.org/sicstus/alldifferent_except_0.pl
* Choco: http://hakank.org/choco/AllDifferentExcept0_test.java
* JaCoP: http://hakank.org/JaCoP/AllDifferentExcept0_test.java
* Zinc: http://hakank.org/minizinc/alldifferent_except_0.zinc
This model was created by <NAME> (<EMAIL>)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
from ortools.constraint_solver import pywrapcp
#
# Decomposition of alldifferent_except_0
# Thanks to Laurent Perron (Google) for
# suggestions of improvements.
#
def alldifferent_except_0(solver, a):
n = len(a)
for i in range(n):
for j in range(i):
solver.Add((a[i] != 0) * (a[j] != 0) <= (a[i] != a[j]))
# more compact version:
def alldifferent_except_0_b(solver, a):
n = len(a)
[solver.Add((a[i] != 0) * (a[j] != 0) <= (a[i] != a[j]))
for i in range(n) for j in range(i)]
def main(unused_argv):
# Create the solver.
solver = pywrapcp.Solver("Alldifferent except 0")
# data
n = 7
# declare variables
x = [solver.IntVar(0, n - 1, "x%i" % i) for i in range(n)]
# Number of zeros.
z = solver.Sum([x[i] == 0 for i in range(n)]).VarWithName("z")
#
# constraints
#
alldifferent_except_0(solver, x)
# we require 2 0's
solver.Add(z == 2)
#
# solution and search
#
solution = solver.Assignment()
solution.Add([x[i] for i in range(n)])
solution.Add(z)
collector = solver.AllSolutionCollector(solution)
solver.Solve(solver.Phase([x[i] for i in range(n)],
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE),
[collector])
num_solutions = collector.SolutionCount()
for s in range(num_solutions):
print("x:", [collector.Value(s, x[i]) for i in range(n)])
print("z:", collector.Value(s, z))
print()
print("num_solutions:", num_solutions)
print("failures:", solver.Failures())
print("branches:", solver.Branches())
print("WallTime:", solver.WallTime())
if __name__ == "__main__":
main("cp sample")
|
# Copyright 2010 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All different except 0 Google CP Solver.
Decomposition of global constraint alldifferent_except_0.
From Global constraint catalogue:
http://www.emn.fr/x-info/sdemasse/gccat/Calldifferent_except_0.html
'''
Enforce all variables of the collection VARIABLES to take distinct
values, except those variables that are assigned to 0.
Example
(<5, 0, 1, 9, 0, 3>)
The alldifferent_except_0 constraint holds since all the values
(that are different from 0) 5, 1, 9 and 3 are distinct.
'''
Compare with the following models:
* Comet: http://hakank.org/comet/alldifferent_except_0.co
* ECLiPSe: http://hakank.org/eclipse/alldifferent_except_0.ecl
* Tailor/Essence': http://hakank.org/tailor/alldifferent_except_0.eprime
* Gecode: http://hakank.org/gecode/alldifferent_except_0.cpp
* Gecode/R: http://hakank.org/gecode_r/all_different_except_0.rb
* MiniZinc: http://hakank.org/minizinc/alldifferent_except_0.mzn
* SICStus_ http://hakank.org/sicstus/alldifferent_except_0.pl
* Choco: http://hakank.org/choco/AllDifferentExcept0_test.java
* JaCoP: http://hakank.org/JaCoP/AllDifferentExcept0_test.java
* Zinc: http://hakank.org/minizinc/alldifferent_except_0.zinc
This model was created by <NAME> (<EMAIL>)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
from ortools.constraint_solver import pywrapcp
#
# Decomposition of alldifferent_except_0
# Thanks to Laurent Perron (Google) for
# suggestions of improvements.
#
def alldifferent_except_0(solver, a):
n = len(a)
for i in range(n):
for j in range(i):
solver.Add((a[i] != 0) * (a[j] != 0) <= (a[i] != a[j]))
# more compact version:
def alldifferent_except_0_b(solver, a):
n = len(a)
[solver.Add((a[i] != 0) * (a[j] != 0) <= (a[i] != a[j]))
for i in range(n) for j in range(i)]
def main(unused_argv):
# Create the solver.
solver = pywrapcp.Solver("Alldifferent except 0")
# data
n = 7
# declare variables
x = [solver.IntVar(0, n - 1, "x%i" % i) for i in range(n)]
# Number of zeros.
z = solver.Sum([x[i] == 0 for i in range(n)]).VarWithName("z")
#
# constraints
#
alldifferent_except_0(solver, x)
# we require 2 0's
solver.Add(z == 2)
#
# solution and search
#
solution = solver.Assignment()
solution.Add([x[i] for i in range(n)])
solution.Add(z)
collector = solver.AllSolutionCollector(solution)
solver.Solve(solver.Phase([x[i] for i in range(n)],
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE),
[collector])
num_solutions = collector.SolutionCount()
for s in range(num_solutions):
print("x:", [collector.Value(s, x[i]) for i in range(n)])
print("z:", collector.Value(s, z))
print()
print("num_solutions:", num_solutions)
print("failures:", solver.Failures())
print("branches:", solver.Branches())
print("WallTime:", solver.WallTime())
if __name__ == "__main__":
main("cp sample")
|
en
| 0.685446
|
# Copyright 2010 <NAME> <EMAIL> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. All different except 0 Google CP Solver. Decomposition of global constraint alldifferent_except_0. From Global constraint catalogue: http://www.emn.fr/x-info/sdemasse/gccat/Calldifferent_except_0.html ''' Enforce all variables of the collection VARIABLES to take distinct values, except those variables that are assigned to 0. Example (<5, 0, 1, 9, 0, 3>) The alldifferent_except_0 constraint holds since all the values (that are different from 0) 5, 1, 9 and 3 are distinct. ''' Compare with the following models: * Comet: http://hakank.org/comet/alldifferent_except_0.co * ECLiPSe: http://hakank.org/eclipse/alldifferent_except_0.ecl * Tailor/Essence': http://hakank.org/tailor/alldifferent_except_0.eprime * Gecode: http://hakank.org/gecode/alldifferent_except_0.cpp * Gecode/R: http://hakank.org/gecode_r/all_different_except_0.rb * MiniZinc: http://hakank.org/minizinc/alldifferent_except_0.mzn * SICStus_ http://hakank.org/sicstus/alldifferent_except_0.pl * Choco: http://hakank.org/choco/AllDifferentExcept0_test.java * JaCoP: http://hakank.org/JaCoP/AllDifferentExcept0_test.java * Zinc: http://hakank.org/minizinc/alldifferent_except_0.zinc This model was created by <NAME> (<EMAIL>) Also see my other Google CP Solver models: http://www.hakank.org/google_or_tools/ # # Decomposition of alldifferent_except_0 # Thanks to Laurent Perron (Google) for # suggestions of improvements. 
# # more compact version: # Create the solver. # data # declare variables # Number of zeros. # # constraints # # we require 2 0's # # solution and search #
| 1.938441
| 2
|
driving_school/motio.py
|
psorus/anogen
| 0
|
6627018
|
<reponame>psorus/anogen
from learnfrom import *
import numpy as np
from tqdm import tqdm
dphi=0.01
dr=0.01
def findbestnext(x0,y0,xm1,ym1,p):
phi=np.arange(np.random.random()*dphi,2*np.pi,dphi)
x=[x0+dr*np.sin(p) for p in phi]
y=[y0+dr*np.cos(p) for p in phi]
val=[lossbyparam(xx,yy,*p) for xx,yy in zip(x,y)]
q=[]
# q=val
drsq=dr**2
for zw,xx,yy in zip(val,x,y):
dra=(xx-xm1)**2+(yy-ym1)**2
if dra<drsq/3:
zw+=100#move in the same direction
# zw-=dra*0.1
q.append(zw)
i=np.argmin(q)
# print("minidex",i,len(x),len(y),len(q),len(phi))
return x[i],y[i],q[i]
def loopfor(x0,y0,xm1,ym1,p,n=10000):
x=[xm1,x0]
y=[ym1,y0]
loss=[-1,-1]
for i in tqdm(range(n)):
ax,ay,al=findbestnext(x[-1],y[-1],x[-2],y[-2],p)
x.append(ax)
y.append(ay)
loss.append(al)
return x,y,loss
|
from learnfrom import *
import numpy as np
from tqdm import tqdm
dphi=0.01
dr=0.01
def findbestnext(x0,y0,xm1,ym1,p):
phi=np.arange(np.random.random()*dphi,2*np.pi,dphi)
x=[x0+dr*np.sin(p) for p in phi]
y=[y0+dr*np.cos(p) for p in phi]
val=[lossbyparam(xx,yy,*p) for xx,yy in zip(x,y)]
q=[]
# q=val
drsq=dr**2
for zw,xx,yy in zip(val,x,y):
dra=(xx-xm1)**2+(yy-ym1)**2
if dra<drsq/3:
zw+=100#move in the same direction
# zw-=dra*0.1
q.append(zw)
i=np.argmin(q)
# print("minidex",i,len(x),len(y),len(q),len(phi))
return x[i],y[i],q[i]
def loopfor(x0,y0,xm1,ym1,p,n=10000):
x=[xm1,x0]
y=[ym1,y0]
loss=[-1,-1]
for i in tqdm(range(n)):
ax,ay,al=findbestnext(x[-1],y[-1],x[-2],y[-2],p)
x.append(ax)
y.append(ay)
loss.append(al)
return x,y,loss
|
en
| 0.210606
|
# q=val #move in the same direction # zw-=dra*0.1 # print("minidex",i,len(x),len(y),len(q),len(phi))
| 2.686715
| 3
|
databricks/koalas/tests/test_dataframe.py
|
akarsh3007/koalas
| 1
|
6627019
|
<reponame>akarsh3007/koalas
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import unittest
import numpy as np
import pandas as pd
from databricks import koalas
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
from databricks.koalas.exceptions import PandasNotImplementedError
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.missing.series import _MissingPandasLikeSeries
from databricks.koalas.series import Series
class DataFrameTest(ReusedSQLTestCase, TestUtils):
@property
def full(self):
return pd.DataFrame({
'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [4, 5, 6, 3, 2, 1, 0, 0, 0],
}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
@property
def df(self):
return koalas.from_pandas(self.full)
def test_Dataframe(self):
d = self.df
full = self.full
expected = pd.Series([2, 3, 4, 5, 6, 7, 8, 9, 10],
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
name='(a + 1)') # TODO: name='a'
self.assert_eq(d['a'] + 1, expected)
self.assert_eq(d.columns, pd.Index(['a', 'b']))
self.assert_eq(d[d['b'] > 2], full[full['b'] > 2])
self.assert_eq(d[['a', 'b']], full[['a', 'b']])
self.assert_eq(d.a, full.a)
# TODO: assert d.b.mean().compute() == full.b.mean()
# TODO: assert np.allclose(d.b.var().compute(), full.b.var())
# TODO: assert np.allclose(d.b.std().compute(), full.b.std())
assert repr(d)
df = pd.DataFrame({
'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [4, 5, 6, 3, 2, 1, 0, 0, 0],
})
ddf = koalas.from_pandas(df)
self.assert_eq(df[['a', 'b']], ddf[['a', 'b']])
self.assertEqual(ddf.a.notnull().alias("x").name, "x")
def test_head_tail(self):
d = self.df
full = self.full
self.assert_eq(d.head(2), full.head(2))
self.assert_eq(d.head(3), full.head(3))
self.assert_eq(d['a'].head(2), full['a'].head(2))
self.assert_eq(d['a'].head(3), full['a'].head(3))
# TODO: self.assert_eq(d.tail(2), full.tail(2))
# TODO: self.assert_eq(d.tail(3), full.tail(3))
# TODO: self.assert_eq(d['a'].tail(2), full['a'].tail(2))
# TODO: self.assert_eq(d['a'].tail(3), full['a'].tail(3))
def test_index_head(self):
d = self.df
full = self.full
self.assert_eq(list(d.index.head(2).toPandas()), list(full.index[:2]))
self.assert_eq(list(d.index.head(3).toPandas()), list(full.index[:3]))
def test_Series(self):
d = self.df
full = self.full
self.assertTrue(isinstance(d.a, Series))
self.assertTrue(isinstance(d.a + 1, Series))
self.assertTrue(isinstance(1 + d.a, Series))
# TODO: self.assert_eq(d + 1, full + 1)
def test_Index(self):
for case in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')),
pd.DataFrame(np.random.randn(10, 5),
index=pd.date_range('2011-01-01', freq='D',
periods=10))]:
ddf = koalas.from_pandas(case)
self.assert_eq(list(ddf.index.toPandas()), list(case.index))
def test_attributes(self):
d = self.df
self.assertIn('a', dir(d))
self.assertNotIn('foo', dir(d))
self.assertRaises(AttributeError, lambda: d.foo)
df = koalas.DataFrame({'a b c': [1, 2, 3]})
self.assertNotIn('a b c', dir(df))
df = koalas.DataFrame({'a': [1, 2], 5: [1, 2]})
self.assertIn('a', dir(df))
self.assertNotIn(5, dir(df))
def test_column_names(self):
d = self.df
self.assert_eq(d.columns, pd.Index(['a', 'b']))
self.assert_eq(d[['b', 'a']].columns, pd.Index(['b', 'a']))
self.assertEqual(d['a'].name, 'a')
self.assertEqual((d['a'] + 1).name, '(a + 1)') # TODO: 'a'
self.assertEqual((d['a'] + d['b']).name, '(a + b)') # TODO: None
def test_index_names(self):
d = self.df
# TODO?: self.assertIsNone(d.index.name)
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x')
df = pd.DataFrame(np.random.randn(10, 5), idx)
ddf = koalas.from_pandas(df)
self.assertEqual(ddf.index.name, 'x')
def test_rename_columns(self):
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
ddf = koalas.from_pandas(df)
ddf.columns = ['x', 'y']
df.columns = ['x', 'y']
self.assert_eq(ddf.columns, pd.Index(['x', 'y']))
self.assert_eq(ddf, df)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
ddf.columns = [1, 2, 3, 4]
# Multi-index columns
df = pd.DataFrame({('A', '0'): [1, 2, 2, 3], ('B', 1): [1, 2, 3, 4]})
ddf = koalas.from_pandas(df)
df.columns = ['x', 'y']
ddf.columns = ['x', 'y']
self.assert_eq(ddf.columns, pd.Index(['x', 'y']))
self.assert_eq(ddf, df)
def test_rename_series(self):
s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
ds = koalas.from_pandas(s)
s.name = 'renamed'
ds.name = 'renamed'
self.assertEqual(ds.name, 'renamed')
self.assert_eq(ds, s)
ind = s.index
dind = ds.index
ind.name = 'renamed'
dind.name = 'renamed'
self.assertEqual(ind.name, 'renamed')
self.assert_eq(list(dind.toPandas()), list(ind))
def test_rename_series_method(self):
# Series name
s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
ds = koalas.from_pandas(s)
self.assert_eq(ds.rename('y'), s.rename('y'))
self.assertEqual(ds.name, 'x') # no mutation
# self.assert_eq(ds.rename(), s.rename())
ds.rename('z', inplace=True)
s.rename('z', inplace=True)
self.assertEqual(ds.name, 'z')
self.assert_eq(ds, s)
# Series index
s = pd.Series(['a', 'b', 'c', 'd', 'e', 'f', 'g'], name='x')
ds = koalas.from_pandas(s)
# TODO: index
# res = ds.rename(lambda x: x ** 2)
# self.assert_eq(res, s.rename(lambda x: x ** 2))
# res = ds.rename(s)
# self.assert_eq(res, s.rename(s))
# res = ds.rename(ds)
# self.assert_eq(res, s.rename(s))
# res = ds.rename(lambda x: x**2, inplace=True)
# self.assertis(res, ds)
# s.rename(lambda x: x**2, inplace=True)
# self.assert_eq(ds, s)
def test_dropna(self):
df = pd.DataFrame({'x': [np.nan, 2, 3, 4, np.nan, 6],
'y': [1, 2, np.nan, 4, np.nan, np.nan],
'z': [1, 2, 3, 4, np.nan, np.nan]},
index=[10, 20, 30, 40, 50, 60])
ddf = koalas.from_pandas(df)
self.assert_eq(ddf.x.dropna(), df.x.dropna())
self.assert_eq(ddf.y.dropna(), df.y.dropna())
self.assert_eq(ddf.z.dropna(), df.z.dropna())
self.assert_eq(ddf.dropna(), df.dropna())
self.assert_eq(ddf.dropna(how='all'), df.dropna(how='all'))
self.assert_eq(ddf.dropna(subset=['x']), df.dropna(subset=['x']))
self.assert_eq(ddf.dropna(subset=['y', 'z']), df.dropna(subset=['y', 'z']))
self.assert_eq(ddf.dropna(subset=['y', 'z'], how='all'),
df.dropna(subset=['y', 'z'], how='all'))
self.assert_eq(ddf.dropna(thresh=2), df.dropna(thresh=2))
self.assert_eq(ddf.dropna(thresh=1, subset=['y', 'z']),
df.dropna(thresh=1, subset=['y', 'z']))
ddf2 = ddf.copy()
x = ddf2.x
x.dropna(inplace=True)
self.assert_eq(x, df.x.dropna())
ddf2.dropna(inplace=True)
self.assert_eq(ddf2, df.dropna())
msg = "dropna currently only works for axis=0 or axis='index'"
with self.assertRaisesRegex(NotImplementedError, msg):
ddf.dropna(axis=1)
with self.assertRaisesRegex(NotImplementedError, msg):
ddf.dropna(axis='column')
with self.assertRaisesRegex(NotImplementedError, msg):
ddf.dropna(axis='foo')
def test_dtype(self):
pdf = pd.DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('i1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('20130101', periods=3)})
kdf = koalas.from_pandas(pdf)
self.assert_eq(kdf, pdf)
self.assertTrue((kdf.dtypes == pdf.dtypes).all())
def test_value_counts(self):
df = pd.DataFrame({'x': [1, 2, 1, 3, 3, np.nan, 1, 4]})
ddf = koalas.from_pandas(df)
exp = df.x.value_counts()
res = ddf.x.value_counts()
self.assertEqual(res.name, exp.name)
self.assertPandasAlmostEqual(res.toPandas(), exp)
self.assertPandasAlmostEqual(ddf.x.value_counts(normalize=True).toPandas(),
df.x.value_counts(normalize=True))
self.assertPandasAlmostEqual(ddf.x.value_counts(ascending=True).toPandas(),
df.x.value_counts(ascending=True))
self.assertPandasAlmostEqual(ddf.x.value_counts(normalize=True, dropna=False).toPandas(),
df.x.value_counts(normalize=True, dropna=False))
self.assertPandasAlmostEqual(ddf.x.value_counts(ascending=True, dropna=False).toPandas(),
df.x.value_counts(ascending=True, dropna=False))
with self.assertRaisesRegex(NotImplementedError,
"value_counts currently does not support bins"):
ddf.x.value_counts(bins=3)
s = df.x
s.name = 'index'
ds = ddf.x
ds.name = 'index'
self.assertPandasAlmostEqual(ds.value_counts().toPandas(), s.value_counts())
def test_isnull(self):
df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = koalas.from_pandas(df)
self.assert_eq(a.x.notnull(), df.x.notnull())
self.assert_eq(a.x.isnull(), df.x.isnull())
self.assert_eq(a.notnull(), df.notnull())
self.assert_eq(a.isnull(), df.isnull())
def test_to_datetime(self):
df = pd.DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5]})
ddf = koalas.from_pandas(df)
self.assert_eq(pd.to_datetime(df), koalas.to_datetime(ddf))
s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 100)
ds = koalas.from_pandas(s)
self.assert_eq(pd.to_datetime(s, infer_datetime_format=True),
koalas.to_datetime(ds, infer_datetime_format=True))
def test_missing(self):
d = self.df
missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction)
for name, _ in missing_functions:
with self.assertRaisesRegex(PandasNotImplementedError,
"DataFrame.*{}.*not implemented".format(name)):
getattr(d, name)()
missing_functions = inspect.getmembers(_MissingPandasLikeSeries, inspect.isfunction)
for name, _ in missing_functions:
with self.assertRaisesRegex(PandasNotImplementedError,
"Series.*{}.*not implemented".format(name)):
getattr(d.a, name)()
def test_to_numpy(self):
df = pd.DataFrame({'a': [4, 2, 3, 4, 8, 6],
'b': [1, 2, 9, 4, 2, 4],
'c': ["one", "three", "six", "seven", "one", "5"]},
index=[10, 20, 30, 40, 50, 60])
ddf = koalas.from_pandas(df)
np.testing.assert_equal(ddf.to_numpy(), df.values)
s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
ddf = koalas.from_pandas(s)
np.testing.assert_equal(ddf.to_numpy(), s.values)
if __name__ == "__main__":
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import unittest
import numpy as np
import pandas as pd
from databricks import koalas
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
from databricks.koalas.exceptions import PandasNotImplementedError
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.missing.series import _MissingPandasLikeSeries
from databricks.koalas.series import Series
class DataFrameTest(ReusedSQLTestCase, TestUtils):
@property
def full(self):
return pd.DataFrame({
'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [4, 5, 6, 3, 2, 1, 0, 0, 0],
}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
@property
def df(self):
return koalas.from_pandas(self.full)
def test_Dataframe(self):
d = self.df
full = self.full
expected = pd.Series([2, 3, 4, 5, 6, 7, 8, 9, 10],
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
name='(a + 1)') # TODO: name='a'
self.assert_eq(d['a'] + 1, expected)
self.assert_eq(d.columns, pd.Index(['a', 'b']))
self.assert_eq(d[d['b'] > 2], full[full['b'] > 2])
self.assert_eq(d[['a', 'b']], full[['a', 'b']])
self.assert_eq(d.a, full.a)
# TODO: assert d.b.mean().compute() == full.b.mean()
# TODO: assert np.allclose(d.b.var().compute(), full.b.var())
# TODO: assert np.allclose(d.b.std().compute(), full.b.std())
assert repr(d)
df = pd.DataFrame({
'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [4, 5, 6, 3, 2, 1, 0, 0, 0],
})
ddf = koalas.from_pandas(df)
self.assert_eq(df[['a', 'b']], ddf[['a', 'b']])
self.assertEqual(ddf.a.notnull().alias("x").name, "x")
def test_head_tail(self):
d = self.df
full = self.full
self.assert_eq(d.head(2), full.head(2))
self.assert_eq(d.head(3), full.head(3))
self.assert_eq(d['a'].head(2), full['a'].head(2))
self.assert_eq(d['a'].head(3), full['a'].head(3))
# TODO: self.assert_eq(d.tail(2), full.tail(2))
# TODO: self.assert_eq(d.tail(3), full.tail(3))
# TODO: self.assert_eq(d['a'].tail(2), full['a'].tail(2))
# TODO: self.assert_eq(d['a'].tail(3), full['a'].tail(3))
def test_index_head(self):
d = self.df
full = self.full
self.assert_eq(list(d.index.head(2).toPandas()), list(full.index[:2]))
self.assert_eq(list(d.index.head(3).toPandas()), list(full.index[:3]))
def test_Series(self):
d = self.df
full = self.full
self.assertTrue(isinstance(d.a, Series))
self.assertTrue(isinstance(d.a + 1, Series))
self.assertTrue(isinstance(1 + d.a, Series))
# TODO: self.assert_eq(d + 1, full + 1)
def test_Index(self):
for case in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')),
pd.DataFrame(np.random.randn(10, 5),
index=pd.date_range('2011-01-01', freq='D',
periods=10))]:
ddf = koalas.from_pandas(case)
self.assert_eq(list(ddf.index.toPandas()), list(case.index))
def test_attributes(self):
d = self.df
self.assertIn('a', dir(d))
self.assertNotIn('foo', dir(d))
self.assertRaises(AttributeError, lambda: d.foo)
df = koalas.DataFrame({'a b c': [1, 2, 3]})
self.assertNotIn('a b c', dir(df))
df = koalas.DataFrame({'a': [1, 2], 5: [1, 2]})
self.assertIn('a', dir(df))
self.assertNotIn(5, dir(df))
def test_column_names(self):
d = self.df
self.assert_eq(d.columns, pd.Index(['a', 'b']))
self.assert_eq(d[['b', 'a']].columns, pd.Index(['b', 'a']))
self.assertEqual(d['a'].name, 'a')
self.assertEqual((d['a'] + 1).name, '(a + 1)') # TODO: 'a'
self.assertEqual((d['a'] + d['b']).name, '(a + b)') # TODO: None
def test_index_names(self):
d = self.df
# TODO?: self.assertIsNone(d.index.name)
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x')
df = pd.DataFrame(np.random.randn(10, 5), idx)
ddf = koalas.from_pandas(df)
self.assertEqual(ddf.index.name, 'x')
def test_rename_columns(self):
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
ddf = koalas.from_pandas(df)
ddf.columns = ['x', 'y']
df.columns = ['x', 'y']
self.assert_eq(ddf.columns, pd.Index(['x', 'y']))
self.assert_eq(ddf, df)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
ddf.columns = [1, 2, 3, 4]
# Multi-index columns
df = pd.DataFrame({('A', '0'): [1, 2, 2, 3], ('B', 1): [1, 2, 3, 4]})
ddf = koalas.from_pandas(df)
df.columns = ['x', 'y']
ddf.columns = ['x', 'y']
self.assert_eq(ddf.columns, pd.Index(['x', 'y']))
self.assert_eq(ddf, df)
def test_rename_series(self):
s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
ds = koalas.from_pandas(s)
s.name = 'renamed'
ds.name = 'renamed'
self.assertEqual(ds.name, 'renamed')
self.assert_eq(ds, s)
ind = s.index
dind = ds.index
ind.name = 'renamed'
dind.name = 'renamed'
self.assertEqual(ind.name, 'renamed')
self.assert_eq(list(dind.toPandas()), list(ind))
def test_rename_series_method(self):
# Series name
s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
ds = koalas.from_pandas(s)
self.assert_eq(ds.rename('y'), s.rename('y'))
self.assertEqual(ds.name, 'x') # no mutation
# self.assert_eq(ds.rename(), s.rename())
ds.rename('z', inplace=True)
s.rename('z', inplace=True)
self.assertEqual(ds.name, 'z')
self.assert_eq(ds, s)
# Series index
s = pd.Series(['a', 'b', 'c', 'd', 'e', 'f', 'g'], name='x')
ds = koalas.from_pandas(s)
# TODO: index
# res = ds.rename(lambda x: x ** 2)
# self.assert_eq(res, s.rename(lambda x: x ** 2))
# res = ds.rename(s)
# self.assert_eq(res, s.rename(s))
# res = ds.rename(ds)
# self.assert_eq(res, s.rename(s))
# res = ds.rename(lambda x: x**2, inplace=True)
# self.assertis(res, ds)
# s.rename(lambda x: x**2, inplace=True)
# self.assert_eq(ds, s)
def test_dropna(self):
df = pd.DataFrame({'x': [np.nan, 2, 3, 4, np.nan, 6],
'y': [1, 2, np.nan, 4, np.nan, np.nan],
'z': [1, 2, 3, 4, np.nan, np.nan]},
index=[10, 20, 30, 40, 50, 60])
ddf = koalas.from_pandas(df)
self.assert_eq(ddf.x.dropna(), df.x.dropna())
self.assert_eq(ddf.y.dropna(), df.y.dropna())
self.assert_eq(ddf.z.dropna(), df.z.dropna())
self.assert_eq(ddf.dropna(), df.dropna())
self.assert_eq(ddf.dropna(how='all'), df.dropna(how='all'))
self.assert_eq(ddf.dropna(subset=['x']), df.dropna(subset=['x']))
self.assert_eq(ddf.dropna(subset=['y', 'z']), df.dropna(subset=['y', 'z']))
self.assert_eq(ddf.dropna(subset=['y', 'z'], how='all'),
df.dropna(subset=['y', 'z'], how='all'))
self.assert_eq(ddf.dropna(thresh=2), df.dropna(thresh=2))
self.assert_eq(ddf.dropna(thresh=1, subset=['y', 'z']),
df.dropna(thresh=1, subset=['y', 'z']))
ddf2 = ddf.copy()
x = ddf2.x
x.dropna(inplace=True)
self.assert_eq(x, df.x.dropna())
ddf2.dropna(inplace=True)
self.assert_eq(ddf2, df.dropna())
msg = "dropna currently only works for axis=0 or axis='index'"
with self.assertRaisesRegex(NotImplementedError, msg):
ddf.dropna(axis=1)
with self.assertRaisesRegex(NotImplementedError, msg):
ddf.dropna(axis='column')
with self.assertRaisesRegex(NotImplementedError, msg):
ddf.dropna(axis='foo')
def test_dtype(self):
pdf = pd.DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('i1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('20130101', periods=3)})
kdf = koalas.from_pandas(pdf)
self.assert_eq(kdf, pdf)
self.assertTrue((kdf.dtypes == pdf.dtypes).all())
def test_value_counts(self):
df = pd.DataFrame({'x': [1, 2, 1, 3, 3, np.nan, 1, 4]})
ddf = koalas.from_pandas(df)
exp = df.x.value_counts()
res = ddf.x.value_counts()
self.assertEqual(res.name, exp.name)
self.assertPandasAlmostEqual(res.toPandas(), exp)
self.assertPandasAlmostEqual(ddf.x.value_counts(normalize=True).toPandas(),
df.x.value_counts(normalize=True))
self.assertPandasAlmostEqual(ddf.x.value_counts(ascending=True).toPandas(),
df.x.value_counts(ascending=True))
self.assertPandasAlmostEqual(ddf.x.value_counts(normalize=True, dropna=False).toPandas(),
df.x.value_counts(normalize=True, dropna=False))
self.assertPandasAlmostEqual(ddf.x.value_counts(ascending=True, dropna=False).toPandas(),
df.x.value_counts(ascending=True, dropna=False))
with self.assertRaisesRegex(NotImplementedError,
"value_counts currently does not support bins"):
ddf.x.value_counts(bins=3)
s = df.x
s.name = 'index'
ds = ddf.x
ds.name = 'index'
self.assertPandasAlmostEqual(ds.value_counts().toPandas(), s.value_counts())
def test_isnull(self):
df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = koalas.from_pandas(df)
self.assert_eq(a.x.notnull(), df.x.notnull())
self.assert_eq(a.x.isnull(), df.x.isnull())
self.assert_eq(a.notnull(), df.notnull())
self.assert_eq(a.isnull(), df.isnull())
def test_to_datetime(self):
df = pd.DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5]})
ddf = koalas.from_pandas(df)
self.assert_eq(pd.to_datetime(df), koalas.to_datetime(ddf))
s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 100)
ds = koalas.from_pandas(s)
self.assert_eq(pd.to_datetime(s, infer_datetime_format=True),
koalas.to_datetime(ds, infer_datetime_format=True))
def test_missing(self):
d = self.df
missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction)
for name, _ in missing_functions:
with self.assertRaisesRegex(PandasNotImplementedError,
"DataFrame.*{}.*not implemented".format(name)):
getattr(d, name)()
missing_functions = inspect.getmembers(_MissingPandasLikeSeries, inspect.isfunction)
for name, _ in missing_functions:
with self.assertRaisesRegex(PandasNotImplementedError,
"Series.*{}.*not implemented".format(name)):
getattr(d.a, name)()
def test_to_numpy(self):
df = pd.DataFrame({'a': [4, 2, 3, 4, 8, 6],
'b': [1, 2, 9, 4, 2, 4],
'c': ["one", "three", "six", "seven", "one", "5"]},
index=[10, 20, 30, 40, 50, 60])
ddf = koalas.from_pandas(df)
np.testing.assert_equal(ddf.to_numpy(), df.values)
s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
ddf = koalas.from_pandas(s)
np.testing.assert_equal(ddf.to_numpy(), s.values)
if __name__ == "__main__":
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
en
| 0.446369
|
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # TODO: name='a' # TODO: assert d.b.mean().compute() == full.b.mean() # TODO: assert np.allclose(d.b.var().compute(), full.b.var()) # TODO: assert np.allclose(d.b.std().compute(), full.b.std()) # TODO: self.assert_eq(d.tail(2), full.tail(2)) # TODO: self.assert_eq(d.tail(3), full.tail(3)) # TODO: self.assert_eq(d['a'].tail(2), full['a'].tail(2)) # TODO: self.assert_eq(d['a'].tail(3), full['a'].tail(3)) # TODO: self.assert_eq(d + 1, full + 1) # TODO: 'a' # TODO: None # TODO?: self.assertIsNone(d.index.name) # Multi-index columns # Series name # no mutation # self.assert_eq(ds.rename(), s.rename()) # Series index # TODO: index # res = ds.rename(lambda x: x ** 2) # self.assert_eq(res, s.rename(lambda x: x ** 2)) # res = ds.rename(s) # self.assert_eq(res, s.rename(s)) # res = ds.rename(ds) # self.assert_eq(res, s.rename(s)) # res = ds.rename(lambda x: x**2, inplace=True) # self.assertis(res, ds) # s.rename(lambda x: x**2, inplace=True) # self.assert_eq(ds, s)
| 2.544967
| 3
|
pytorch_lightning/core/root_module.py
|
ammaraskar/pytorch-lightning
| 0
|
6627020
|
<reponame>ammaraskar/pytorch-lightning
"""
.. warning:: `root_module` module has been renamed to `lightning` since v0.6.0.
The deprecated module name will be removed in v0.8.0.
"""
import warnings
warnings.warn("`root_module` module has been renamed to `lightning` since v0.6.0."
" The deprecated module name will be removed in v0.8.0.", DeprecationWarning)
|
"""
.. warning:: `root_module` module has been renamed to `lightning` since v0.6.0.
The deprecated module name will be removed in v0.8.0.
"""
import warnings
warnings.warn("`root_module` module has been renamed to `lightning` since v0.6.0."
" The deprecated module name will be removed in v0.8.0.", DeprecationWarning)
|
en
| 0.731688
|
.. warning:: `root_module` module has been renamed to `lightning` since v0.6.0. The deprecated module name will be removed in v0.8.0.
| 1.259982
| 1
|
looking_for_group/games/migrations/0014_auto_20181017_1629.py
|
andrlik/looking-for-group
| 0
|
6627021
|
# Generated by Django 2.1.2 on 2018-10-17 20:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('games', '0013_auto_20181017_1307'),
]
operations = [
migrations.AlterModelOptions(
name='gameposting',
options={'ordering': ['status', 'start_time', '-end_date', '-created']},
),
migrations.AddField(
model_name='gameposting',
name='status',
field=models.CharField(choices=[('open', 'Open'), ('started', 'In Progress'), ('replace', 'Seeking replacement player'), ('cancel', 'Cancelled'), ('closed', 'Completed')], db_index=True, default='open', help_text='Current game status', max_length=10),
),
migrations.AlterField(
model_name='gameposting',
name='game_frequency',
field=models.CharField(choices=[('weekly', 'Every week'), ('biweekly', 'Every other week'), ('monthly', 'Every month'), ('na', 'N/A'), ('custom', 'Custom: See description for details')], db_index=True, default='na', help_text='How often will this be played?', max_length=15),
),
]
|
# Generated by Django 2.1.2 on 2018-10-17 20:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('games', '0013_auto_20181017_1307'),
]
operations = [
migrations.AlterModelOptions(
name='gameposting',
options={'ordering': ['status', 'start_time', '-end_date', '-created']},
),
migrations.AddField(
model_name='gameposting',
name='status',
field=models.CharField(choices=[('open', 'Open'), ('started', 'In Progress'), ('replace', 'Seeking replacement player'), ('cancel', 'Cancelled'), ('closed', 'Completed')], db_index=True, default='open', help_text='Current game status', max_length=10),
),
migrations.AlterField(
model_name='gameposting',
name='game_frequency',
field=models.CharField(choices=[('weekly', 'Every week'), ('biweekly', 'Every other week'), ('monthly', 'Every month'), ('na', 'N/A'), ('custom', 'Custom: See description for details')], db_index=True, default='na', help_text='How often will this be played?', max_length=15),
),
]
|
en
| 0.758008
|
# Generated by Django 2.1.2 on 2018-10-17 20:29
| 1.883553
| 2
|
tests/test_teamSeasonRange.py
|
jaebradley/nba_data
| 8
|
6627022
|
from unittest import TestCase
from nba_data.data.season import Season
from nba_data.data.season_range import SeasonRange
from nba_data.data.team import Team
from nba_data.data.team_season_range import TeamSeasonRange
class TestTeamSeasonRange(TestCase):
def test_instantiation(self):
team = Team.atlanta_hawks
start_season = Season.season_2015
end_season = Season.season_2016
season_range = SeasonRange(start=start_season, end=end_season)
self.assertIsNotNone(TeamSeasonRange(team=team, season_range=season_range))
|
from unittest import TestCase
from nba_data.data.season import Season
from nba_data.data.season_range import SeasonRange
from nba_data.data.team import Team
from nba_data.data.team_season_range import TeamSeasonRange
class TestTeamSeasonRange(TestCase):
def test_instantiation(self):
team = Team.atlanta_hawks
start_season = Season.season_2015
end_season = Season.season_2016
season_range = SeasonRange(start=start_season, end=end_season)
self.assertIsNotNone(TeamSeasonRange(team=team, season_range=season_range))
|
none
| 1
| 3.065618
| 3
|
|
hatbot.py
|
mikro6/hatbot
| 6
|
6627023
|
# hatbot - a very basic Owncast chat bot
# Public Domain, written 2021 by hatniX
# https://github.com/hatniX/hatbot
#
# Requirements:
# Owncast - https://owncast.online
# Python3 - https://www.python.org/
# Flask - https://flask.palletsprojects.com/
#
# Setup Owncast webhook url: http://localhost:5000/webhook
# Setup Flask using python3-pip: python3 -m pip install Flask
# Run this script: FLASK_APP=hatbot.py python3 -m flask run
import os, random, json, requests
from requests.structures import CaseInsensitiveDict
from flask import Flask, request, Response, jsonify
# the version number of this bot
bot_version = "0.0.7.1"
# reading the config file
with open("config.json") as json_config:
data_config = json.load(json_config)
# reading the alias list
with open("alias.json") as json_alias:
data_alias = json.load(json_alias)
# reading the command list
with open("commands.json") as json_commands:
data_commands = json.load(json_commands)
# reading 8-ball's responses array
with open("ball.json") as json_ball:
ball_responses = json.load(json_ball)
# init gun roulette
gunEmpty = False
gunTries = 0
# the url of the Owncast API for bot posts
owncast_url = data_config["owncast_server"] + "/api/integrations/chat/send"
# prepare the header for the bot posts
headers = CaseInsensitiveDict()
headers["Content-Type"] = "application/json"
headers["Authorization"] = "Bearer " + data_config["access_token"]
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def respond():
# use the global variables for the gun roulette
global gunEmpty
global gunTries
# in case of a CHAT event... (no other events are handled at the moment)
if request.json["type"] == "CHAT":
print("\n" + request.json["eventData"]["timestamp"][0:10] + " " + request.json["eventData"]["timestamp"][11:19] + " - " + request.json["eventData"]["user"]["displayName"] + " - " + request.json["eventData"]["body"]);
isComm = request.json["eventData"]["body"].partition(' ')[0].lower()
# check for aliases
if (data_alias.get(isComm) != None):
isComm = data_alias[isComm]
if (data_commands.get(isComm) != None):
answer = data_commands[isComm]
# extract the parameter (everything that was provided after the actual command)
parameter = ""
if (isComm != request.json["eventData"]["body"].lower()):
para = request.json["eventData"]["body"].split(" ", 1)
if (len(para) > 1):
parameter = para[1]
# remove the @ at the beginning of a parameter (may have been used for autocomplete names)
if (parameter[0] == "@"):
parameter = parameter[1:]
# replace variables in the command responses:
# {sender} - the sender's user name
# {tohost} - a given parameter or the host's user name (as set in config.json)
# {touser} - a given parameter or the sender's user name
# {parameter} - a given parameter (required)
# {random} - a random number between 1 and 100
# {cmdlist} - the list of all available commands
# {aliaslist} - the list of all available commands
# {botver} - the version number of this bot
answer = answer.replace("{sender}", request.json["eventData"]["user"]["displayName"])
if ("{tohost}" in answer):
if (parameter != ""):
answer = answer.replace("{tohost}", parameter)
else:
answer = answer.replace("{tohost}", data_config["host_name"])
if ("{touser}" in answer):
if (parameter != ""):
answer = answer.replace("{touser}", parameter)
else:
answer = answer.replace("{touser}", request.json["eventData"]["user"]["displayName"])
if ("{parameter}" in answer):
if (parameter != ""):
answer = answer.replace("{parameter}", parameter)
else:
answer = "This command needs something to work with. Try: **" + isComm + " WhatEverYouLike**"
answer = answer.replace("{random}", str(random.randrange(1, 100, 1)))
if ("{cmdlist}" in answer):
cmds = ""
for cmd in data_commands.keys():
cmds = cmds + cmd +" "
answer = answer.replace("{cmdlist}", cmds)
if ("{aliaslist}" in answer):
cmds = ""
for cmd in data_alias.keys():
cmds = cmds + cmd +" "
answer = answer.replace("{aliaslist}", cmds)
answer = answer.replace("{botver}", bot_version)
# /me workaround
if (isComm == "/me"):
if (parameter != ""):
answer = "**" + request.json["eventData"]["user"]["displayName"] + "** " + parameter
else:
answer = "**" + request.json["eventData"]["user"]["displayName"] + "** gesticulates energetically"
# 8-ball routine
if (isComm == "!8ball"):
if (parameter != ""):
answer += ball_responses[random.randint(0, len(ball_responses) - 1)]
else:
answer += "Ask me a question that can be answered with yes or no. Try: **" + isComm + " WhatEverNeedsAnAnswer**"
# BEGIN gun roulette aka. russian roulette
if (isComm == "!roulette"):
if gunEmpty:
answer = "Gun is empty..you can reload with `!reloadgun`"
else:
if (random.randint(1, 6 - gunTries) == 1): # a random 1 means we found the bullet
answer += " **BANG!** ... " + request.json["eventData"]["user"]["displayName"] + " lies dead on the chat floor :("
gunEmpty = True
else:
answer += " Click! ... " + request.json["eventData"]["user"]["displayName"] + " is a lucky survivor :)"
gunTries += 1
if (isComm == "!checkgun"):
if gunEmpty:
answer = "Gun is empty..you can reload it with `!reloadgun`"
else:
answer = "Gun still has a live round and trigger has been pulled " + str(gunTries) + " times..do you feel lucky?"
if (isComm == "!reloadgun"):
gunEmpty = False
gunTries = 0
# END gun roulette aka. russian roulette
# building the response's body and sending it
data = '{"body": "' + answer + '"}'
resp = requests.post(owncast_url, headers=headers, data=data.encode('utf-8'))
if resp.status_code != 200:
print("Can't respond, error code: " + str(resp.status_code))
else:
print("RESPONSE: " + answer)
return Response(status=200)
|
# hatbot - a very basic Owncast chat bot
# Public Domain, written 2021 by hatniX
# https://github.com/hatniX/hatbot
#
# Requirements:
# Owncast - https://owncast.online
# Python3 - https://www.python.org/
# Flask - https://flask.palletsprojects.com/
#
# Setup Owncast webhook url: http://localhost:5000/webhook
# Setup Flask using python3-pip: python3 -m pip install Flask
# Run this script: FLASK_APP=hatbot.py python3 -m flask run
import os, random, json, requests
from requests.structures import CaseInsensitiveDict
from flask import Flask, request, Response, jsonify
# the version number of this bot
bot_version = "0.0.7.1"
# reading the config file (owncast_server, access_token, host_name)
with open("config.json") as json_config:
    data_config = json.load(json_config)
# reading the alias list (maps alias -> canonical command)
with open("alias.json") as json_alias:
    data_alias = json.load(json_alias)
# reading the command list (maps command -> answer template)
with open("commands.json") as json_commands:
    data_commands = json.load(json_commands)
# reading 8-ball's responses array
with open("ball.json") as json_ball:
    ball_responses = json.load(json_ball)
# init gun roulette state (module-level: shared across webhook requests)
gunEmpty = False
gunTries = 0
# the url of the Owncast API for bot posts
owncast_url = data_config["owncast_server"] + "/api/integrations/chat/send"
# prepare the header for the bot posts
headers = CaseInsensitiveDict()
headers["Content-Type"] = "application/json"
headers["Authorization"] = "Bearer " + data_config["access_token"]
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def respond():
    """Owncast webhook endpoint.

    Handles CHAT events only: resolves aliases, looks the first word of the
    message up in commands.json, expands the template variables, runs the
    built-in commands (/me, !8ball, gun roulette) and posts the answer back
    through the Owncast integrations API.  Always returns HTTP 200 so
    Owncast keeps the webhook enabled.
    """
    # use the global variables for the gun roulette
    global gunEmpty
    global gunTries
    # in case of a CHAT event... (no other events are handled at the moment)
    if request.json["type"] == "CHAT":
        print("\n" + request.json["eventData"]["timestamp"][0:10] + " " + request.json["eventData"]["timestamp"][11:19] + " - " + request.json["eventData"]["user"]["displayName"] + " - " + request.json["eventData"]["body"])
        isComm = request.json["eventData"]["body"].partition(' ')[0].lower()
        # check for aliases
        if (data_alias.get(isComm) is not None):
            isComm = data_alias[isComm]
        if (data_commands.get(isComm) is not None):
            answer = data_commands[isComm]
            # extract the parameter (everything that was provided after the actual command)
            parameter = ""
            if (isComm != request.json["eventData"]["body"].lower()):
                para = request.json["eventData"]["body"].split(" ", 1)
                if (len(para) > 1):
                    parameter = para[1]
            # remove the @ at the beginning of a parameter (may have been used for autocomplete names)
            # FIX: guard against an empty parameter (e.g. "!cmd " with a trailing
            # space), which previously raised IndexError on parameter[0]
            if (parameter != "" and parameter[0] == "@"):
                parameter = parameter[1:]
            # replace variables in the command responses:
            # {sender} - the sender's user name
            # {tohost} - a given parameter or the host's user name (as set in config.json)
            # {touser} - a given parameter or the sender's user name
            # {parameter} - a given parameter (required)
            # {random} - a random number between 1 and 100
            # {cmdlist} - the list of all available commands
            # {aliaslist} - the list of all available aliases
            # {botver} - the version number of this bot
            answer = answer.replace("{sender}", request.json["eventData"]["user"]["displayName"])
            if ("{tohost}" in answer):
                if (parameter != ""):
                    answer = answer.replace("{tohost}", parameter)
                else:
                    answer = answer.replace("{tohost}", data_config["host_name"])
            if ("{touser}" in answer):
                if (parameter != ""):
                    answer = answer.replace("{touser}", parameter)
                else:
                    answer = answer.replace("{touser}", request.json["eventData"]["user"]["displayName"])
            if ("{parameter}" in answer):
                if (parameter != ""):
                    answer = answer.replace("{parameter}", parameter)
                else:
                    answer = "This command needs something to work with. Try: **" + isComm + " WhatEverYouLike**"
            answer = answer.replace("{random}", str(random.randrange(1, 100, 1)))
            if ("{cmdlist}" in answer):
                cmds = ""
                for cmd in data_commands.keys():
                    cmds = cmds + cmd + " "
                answer = answer.replace("{cmdlist}", cmds)
            if ("{aliaslist}" in answer):
                cmds = ""
                for cmd in data_alias.keys():
                    cmds = cmds + cmd + " "
                answer = answer.replace("{aliaslist}", cmds)
            answer = answer.replace("{botver}", bot_version)
            # /me workaround
            if (isComm == "/me"):
                if (parameter != ""):
                    answer = "**" + request.json["eventData"]["user"]["displayName"] + "** " + parameter
                else:
                    answer = "**" + request.json["eventData"]["user"]["displayName"] + "** gesticulates energetically"
            # 8-ball routine
            if (isComm == "!8ball"):
                if (parameter != ""):
                    answer += ball_responses[random.randint(0, len(ball_responses) - 1)]
                else:
                    answer += "Ask me a question that can be answered with yes or no. Try: **" + isComm + " WhatEverNeedsAnAnswer**"
            # BEGIN gun roulette aka. russian roulette
            if (isComm == "!roulette"):
                if gunEmpty:
                    answer = "Gun is empty..you can reload with `!reloadgun`"
                else:
                    if (random.randint(1, 6 - gunTries) == 1): # a random 1 means we found the bullet
                        answer += " **BANG!** ... " + request.json["eventData"]["user"]["displayName"] + " lies dead on the chat floor :("
                        gunEmpty = True
                    else:
                        answer += " Click! ... " + request.json["eventData"]["user"]["displayName"] + " is a lucky survivor :)"
                        gunTries += 1
            if (isComm == "!checkgun"):
                if gunEmpty:
                    answer = "Gun is empty..you can reload it with `!reloadgun`"
                else:
                    answer = "Gun still has a live round and trigger has been pulled " + str(gunTries) + " times..do you feel lucky?"
            if (isComm == "!reloadgun"):
                gunEmpty = False
                gunTries = 0
            # END gun roulette aka. russian roulette
            # building the response's body and sending it
            # FIX: json.dumps escapes quotes/backslashes in the answer, so chat
            # input can no longer produce a malformed JSON payload (the old
            # string concatenation could)
            data = json.dumps({"body": answer}, ensure_ascii=False)
            resp = requests.post(owncast_url, headers=headers, data=data.encode('utf-8'))
            if resp.status_code != 200:
                print("Can't respond, error code: " + str(resp.status_code))
            else:
                print("RESPONSE: " + answer)
    return Response(status=200)
|
en
| 0.612625
|
# hatbot - a very basic Owncast chat bot # Public Domain, written 2021 by hatniX # https://github.com/hatniX/hatbot # # Requirements: # Owncast - https://owncast.online # Python3 - https://www.python.org/ # Flask - https://flask.palletsprojects.com/ # # Setup Owncast webhook url: http://localhost:5000/webhook # Setup Flask using python3-pip: python3 -m pip install Flask # Run this script: FLASK_APP=hatbot.py python3 -m flask run # the version number of this bot # reading the config file # reading the alias list # reading the command list # reading 8-ball's responses array # init gun roulette # the url of the Owncast API for bot posts # prepare the header for the bot posts # use the global variables for the gun roulette # in case of a CHAT event... (no other events are handled at the moment) # check for aliases # extract the parameter (everything that was provided after the actual command) # remove the @ at the beginning of a parameter (may have been used for autocomplete names) # replace variables in the command responses: # {sender} - the sender's user name # {tohost} - a given parameter or the host's user name (as set in config.json) # {touser} - a given parameter or the sender's user name # {parameter} - a given parameter (required) # {random} - a random number between 1 and 100 # {cmdlist} - the list of all available commands # {aliaslist} - the list of all available commands # {botver} - the version number of this bot # /me workaround # 8-ball routine # BEGIN gun roulette aka. russian roulette # a random 1 means we found the bullet # END gun roulette aka. russian roulette # building the response's body and sending it
| 2.356518
| 2
|
migration/versions/016_UserArtwork_columns_need_not_be_nullable.py
|
eevee/floof
| 2
|
6627024
|
from sqlalchemy import *
import sqlalchemy.exc
from migrate import *
from sqlalchemy.ext.declarative import declarative_base
TableBase = declarative_base()
from migrate.changeset import schema # monkeypatches columns
# Stubs for old tables
class User(TableBase):
__tablename__ = 'users'
id = Column(Integer, primary_key=True, nullable=False)
class Artwork(TableBase):
__tablename__ = 'Artwork'
id = Column(Integer, primary_key=True, nullable=False)
# Old tables
user_artwork_types = (u'by', u'for', u'of')
class UserArtwork(TableBase):
__tablename__ = 'user_artwork'
user_id = Column(Integer, ForeignKey('users.id'), primary_key=True, nullable=True)
artwork_id = Column(Integer, ForeignKey('artwork.id'), primary_key=True, nullable=True)
relationship_type = Column(Enum(*user_artwork_types, name='user_artwork_relationship_type'), primary_key=True, nullable=False)
def upgrade(migrate_engine):
TableBase.metadata.bind = migrate_engine
UserArtwork.__table__.c.user_id.alter(nullable=False)
UserArtwork.__table__.c.artwork_id.alter(nullable=False)
UserArtwork.__table__.c.relationship_type.alter(nullable=False)
def downgrade(migrate_engine):
TableBase.metadata.bind = migrate_engine
# Primary keys can't actually be NULLable in pg, so this will raise
try:
UserArtwork.__table__.c.user_id.alter(nullable=True)
UserArtwork.__table__.c.artwork_id.alter(nullable=True)
UserArtwork.__table__.c.relationship_type.alter(nullable=True)
except sqlalchemy.exc.ProgrammingError:
pass
|
from sqlalchemy import *
import sqlalchemy.exc
from migrate import *
from sqlalchemy.ext.declarative import declarative_base
TableBase = declarative_base()
from migrate.changeset import schema # monkeypatches columns
# Stubs for old tables (only the columns this migration needs)
class User(TableBase):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True, nullable=False)
class Artwork(TableBase):
    # NOTE(review): tablename is capitalized here but the FK below targets
    # 'artwork.id' (lowercase) -- confirm which name the DB actually uses.
    __tablename__ = 'Artwork'
    id = Column(Integer, primary_key=True, nullable=False)
# Old tables
user_artwork_types = (u'by', u'for', u'of')
class UserArtwork(TableBase):
    __tablename__ = 'user_artwork'
    # composite primary key; the nullable flags are what this migration toggles
    user_id = Column(Integer, ForeignKey('users.id'), primary_key=True, nullable=True)
    artwork_id = Column(Integer, ForeignKey('artwork.id'), primary_key=True, nullable=True)
    relationship_type = Column(Enum(*user_artwork_types, name='user_artwork_relationship_type'), primary_key=True, nullable=False)
def upgrade(migrate_engine):
    """Make all three primary-key columns of user_artwork NOT NULL."""
    TableBase.metadata.bind = migrate_engine
    for column in (UserArtwork.__table__.c.user_id,
                   UserArtwork.__table__.c.artwork_id,
                   UserArtwork.__table__.c.relationship_type):
        column.alter(nullable=False)
def downgrade(migrate_engine):
    """Best-effort attempt to make the columns NULLable again."""
    TableBase.metadata.bind = migrate_engine
    # Primary keys can't actually be NULLable in pg, so this will raise
    try:
        for column in (UserArtwork.__table__.c.user_id,
                       UserArtwork.__table__.c.artwork_id,
                       UserArtwork.__table__.c.relationship_type):
            column.alter(nullable=True)
    except sqlalchemy.exc.ProgrammingError:
        pass
|
en
| 0.851335
|
# monkeypatches columns # Stubs for old tables # Old tables # Primary keys can't actually be NULLable in pg, so this will raise
| 2.13107
| 2
|
main.py
|
rcourivaud/query_categorizer
| 0
|
6627025
|
<reponame>rcourivaud/query_categorizer
from query_categorizer.query_categorizer import QueryCategorizer
qc = QueryCategorizer()
print(qc.predict_query("rone"))
|
from query_categorizer.query_categorizer import QueryCategorizer
# smoke test: categorize one sample query and print the prediction
qc = QueryCategorizer()
print(qc.predict_query("rone"))
|
none
| 1
| 1.813373
| 2
|
|
scrabbler.py
|
novakeith/scrabbler
| 0
|
6627026
|
<reponame>novakeith/scrabbler<filename>scrabbler.py
# coding: UTF-8
# Scrabbler by KNOVA -- v1.3
# submitted as part of the reddit.com/r/dailyprogrammer challenge #294
# see here: https://www.reddit.com/r/dailyprogrammer/comments/5go843/20161205_challenge_294_easy_rack_management_1/
import timeit
# scrabble letter values
ltrVal = {'?':0, 'a':1, 'b':3, 'c':3, 'd':2, 'e':1, 'f':4, 'g':2, 'h':4, 'i':1, 'j':8, 'k':5, 'l':1, 'm':3, 'n':1, 'o':1, 'p':3, 'q':10, 'r':1, 's':1, 't':1, 'u':1, 'v':4, 'w':4, 'x':8, 'y':4, 'z':10}
# returns a dictionary of the letters in a given word and the frequency of that letter
def countletters(words):
newDict = {'?':0}
for x in words:
if x in newDict:
newDict[x] = newDict[x] + 1
else:
newDict[x] = 1
return newDict
# get the total scrabble value of a given word
def countwordval(word):
pointval = 0
for x in word:
pointval += ltrVal[x]
return pointval
# compare to see how many wildcards are needed to play A, given set of B letters
def wordcompare(letters, dicword):
# cError is the distance between the two word dicts; devalue is the point loss due to that.
cError = 0
devalue = 0
for z in dicword:
if z in letters:
if letters[z] >= dicword[z]:
cError += 0
else:
cError = cError + (dicword[z] - letters[z])
devalue += ltrVal[z] * (dicword[z] - letters[z])
else:
cError += dicword[z]
devalue += ltrVal[z] * dicword[z]
return {'distance':cError, 'devalue':devalue}
def findbestword(letters, userword):
bestscore = 0
bestwords = {}
f = open('enable1.txt', 'r')
for line in f:
xline = line.lower().strip("\r\n")
linedict = countletters(xline)
compx = wordcompare(letters, linedict)
if compx['distance'] - letters['?'] <= 0 and countwordval(xline) - compx['devalue'] >= bestscore:
bestscore = countwordval(xline)
bestwords[xline] = countwordval(xline)
f.close()
#pick top word(s) with available given letters
betterword = {}
for words in bestwords:
if countwordval(words) >= bestscore:
betterword[words] = countwordval(words)
return betterword
def main():
print "\n**Scrabble Finder v1.0\n" \
"**Enter the letters you have and the word you want to play.\n" \
"**This will tell you if you can play the word or not.\n" \
"**Use a ? (question mark) for wildcard/blank tiles.\n"
uLetters = raw_input("What letters do you have:>")
uWord = raw_input("What word do you want to create?>")
# count up letters in the letter list - using our shiny new function 'countletters'.
wDict = countletters(uLetters.lower())
xDict = countletters(uWord.lower())
# lastly, compare the number of letters in the desired word to the count of the letter list
# cError is the number of letters not matching; if the number of wildcards is equal or exceeds that number,
# the word is valid to be played
comp = wordcompare(wDict,xDict)
wildcards = comp['distance'] - wDict['?']
best = findbestword(wDict, uWord)
# let the user know the good or bad news
if wildcards <= 0:
print "\nYou can play that word! Point Value: " + str(countwordval(uWord) - comp['devalue'])
print "Given your letters, an even better word would be: " + str(best)
else:
print "\nSorry, that word is not possible with your current letters."
print "Number of wildcards available: " + str(wDict['?'])
print "Number of additional wildcards needed: " + str(wildcards)
print "The best word(s) you can play: " + str(best)
main()
print "\nExecuted in " + str(timeit.timeit())[:6] + " seconds\n"
|
# coding: UTF-8
# Scrabbler by KNOVA -- v1.3
# submitted as part of the reddit.com/r/dailyprogrammer challenge #294
# see here: https://www.reddit.com/r/dailyprogrammer/comments/5go843/20161205_challenge_294_easy_rack_management_1/
import timeit
# scrabble letter values
# scrabble letter values ('?' is the blank tile, worth nothing)
ltrVal = {
    '?': 0,
    'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4,
    'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3,
    'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8,
    'y': 4, 'z': 10,
}
# Build a letter -> frequency map for the given word/rack.
# The '?' key is always present so callers can read the blank count directly.
def countletters(words):
    freq = {'?': 0}
    for letter in words:
        freq[letter] = freq.get(letter, 0) + 1
    return freq
# Total scrabble point value of a word (sum of per-letter values).
def countwordval(word):
    return sum(ltrVal[letter] for letter in word)
# compare to see how many wildcards are needed to play A, given set of B letters
# How far is the rack `letters` from covering the word `dicword`?
# Returns {'distance': letters missing, 'devalue': points lost to those letters}.
def wordcompare(letters, dicword):
    missing = 0
    lost_points = 0
    for ch, needed in dicword.items():
        have = letters.get(ch, 0)
        shortfall = needed - have
        if shortfall > 0:
            missing += shortfall
            lost_points += ltrVal[ch] * shortfall
    return {'distance': missing, 'devalue': lost_points}
def findbestword(letters, userword):
    # Scan the whole enable1 word list and collect the highest-scoring words
    # that are playable with the given rack (blanks cover missing letters).
    # NOTE(review): `userword` is accepted but never used -- confirm intent.
    bestscore = 0
    bestwords = {}
    f = open('enable1.txt', 'r')
    for line in f:
        xline = line.lower().strip("\r\n")
        linedict = countletters(xline)
        compx = wordcompare(letters, linedict)
        # playable iff the letter shortfall is covered by the available blanks
        if compx['distance'] - letters['?'] <= 0 and countwordval(xline) - compx['devalue'] >= bestscore:
            # NOTE(review): the comparison subtracts the blank devalue but
            # bestscore is stored without subtracting it -- looks inconsistent,
            # confirm intended scoring.
            bestscore = countwordval(xline)
            bestwords[xline] = countwordval(xline)
    f.close()
    #pick top word(s) with available given letters
    betterword = {}
    for words in bestwords:
        if countwordval(words) >= bestscore:
            betterword[words] = countwordval(words)
    return betterword
def main():
    # Interactive driver (Python 2: `print` statements and raw_input): ask for
    # the rack and the desired word, then report whether the word is playable
    # and what the best alternatives are.
    print "\n**Scrabble Finder v1.0\n" \
          "**Enter the letters you have and the word you want to play.\n" \
          "**This will tell you if you can play the word or not.\n" \
          "**Use a ? (question mark) for wildcard/blank tiles.\n"
    uLetters = raw_input("What letters do you have:>")
    uWord = raw_input("What word do you want to create?>")
    # count up letters in the letter list - using our shiny new function 'countletters'.
    wDict = countletters(uLetters.lower())
    xDict = countletters(uWord.lower())
    # lastly, compare the number of letters in the desired word to the count of the letter list
    # cError is the number of letters not matching; if the number of wildcards is equal or exceeds that number,
    # the word is valid to be played
    comp = wordcompare(wDict,xDict)
    wildcards = comp['distance'] - wDict['?']
    best = findbestword(wDict, uWord)
    # let the user know the good or bad news
    if wildcards <= 0:
        print "\nYou can play that word! Point Value: " + str(countwordval(uWord) - comp['devalue'])
        print "Given your letters, an even better word would be: " + str(best)
    else:
        print "\nSorry, that word is not possible with your current letters."
        print "Number of wildcards available: " + str(wDict['?'])
        print "Number of additional wildcards needed: " + str(wildcards)
        print "The best word(s) you can play: " + str(best)
# script entry point
# NOTE(review): timeit.timeit() with no arguments times the default "pass"
# statement, not this program's run -- confirm the intent of the line below.
main()
print "\nExecuted in " + str(timeit.timeit())[:6] + " seconds\n"
|
en
| 0.831696
|
# coding: UTF-8 # Scrabbler by KNOVA -- v1.3 # submitted as part of the reddit.com/r/dailyprogrammer challenge #294 # see here: https://www.reddit.com/r/dailyprogrammer/comments/5go843/20161205_challenge_294_easy_rack_management_1/ # scrabble letter values # returns a dictionary of the letters in a given word and the frequency of that letter # get the total scrabble value of a given word # compare to see how many wildcards are needed to play A, given set of B letters # cError is the distance between the two word dicts; devalue is the point loss due to that. #pick top word(s) with available given letters # count up letters in the letter list - using our shiny new function 'countletters'. # lastly, compare the number of letters in the desired word to the count of the letter list # cError is the number of letters not matching; if the number of wildcards is equal or exceeds that number, # the word is valid to be played # let the user know the good or bad news
| 3.591404
| 4
|
centimani/__init__.py
|
NoZip/centimani
| 1
|
6627027
|
# Public names re-exported by ``from centimani import *``.
# FIX: the original value ("log") is just the string "log" (no trailing
# comma), which makes ``import *`` try to import the names 'l', 'o', 'g'.
__all__ = ("log",)
__version__ = "0.4"
|
# Public names re-exported by ``from centimani import *``.
# FIX: the original value ("log") is just the string "log" (no trailing
# comma), which makes ``import *`` try to import the names 'l', 'o', 'g'.
__all__ = ("log",)
__version__ = "0.4"
|
none
| 1
| 1.003848
| 1
|
|
1stRound/Hard/42-Trapping Rain Water/TwoPointers.py
|
ericchen12377/Leetcode-Algorithm-Python
| 2
|
6627028
|
<gh_stars>1-10
class Solution(object):
    def trap(self, height):
        """
        :type height: List[int]
        :rtype: int
        """
        # Two-pointer sweep: advance the pointer standing on the lower wall,
        # tracking the max wall seen so far from each side; any bar below its
        # side's max traps (max - bar) units of water.  O(n) time, O(1) space.
        Total = 0
        left, right = 0, len(height) - 1
        maxL = 0  # highest wall seen from the left
        maxR = 0  # highest wall seen from the right
        while left < right:
            if height[left] <= height[right]:
                if height[left] > maxL:
                    maxL = height[left]
                else:
                    Total += maxL - height[left]
                left += 1
            else:
                if height[right] > maxR:
                    maxR = height[right]
                else:
                    Total += maxR - height[right]
                right -= 1
        return Total
|
class Solution(object):
    def trap(self, height):
        """Return the units of rain water trapped between the bars.

        Two-pointer sweep: always move the pointer on the lower wall inward,
        keeping the tallest wall seen from each side; a bar lower than its
        side's wall traps (wall - bar) units.  O(n) time, O(1) space.
        """
        water = 0
        lo, hi = 0, len(height) - 1
        best_left = best_right = 0
        while lo < hi:
            if height[lo] <= height[hi]:
                if height[lo] > best_left:
                    best_left = height[lo]
                else:
                    water += best_left - height[lo]
                lo += 1
            else:
                if height[hi] > best_right:
                    best_right = height[hi]
                else:
                    water += best_right - height[hi]
                hi -= 1
        return water
|
en
| 0.365502
|
:type height: List[int] :rtype: int
| 3.323012
| 3
|
students/K33401/Fomenko_Ivan/Lr1/task_1/server.py
|
aytakr/ITMO_ICT_WebDevelopment_2021-2022
| 7
|
6627029
|
<filename>students/K33401/Fomenko_Ivan/Lr1/task_1/server.py
import socket
# Minimal one-shot TCP server for the lab exercise:
# accept a single client, print its message, greet it, then quit.
host = "localhost"
port = 14900
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((host, port))
sock.listen(10)  # backlog of pending connections
clientsocket, address = sock.accept()  # blocks until a client connects
data = clientsocket.recv(16384)
udata = data.decode("utf-8")
print(udata)
clientsocket.send(b"Hello, client! \n")
# NOTE(review): only the listening socket is closed; the accepted client
# socket is left open -- confirm whether that is intentional for the lab.
sock.close()
|
<filename>students/K33401/Fomenko_Ivan/Lr1/task_1/server.py
import socket
host = "localhost"
port = 14900
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((host, port))
sock.listen(10)
clientsocket, address = sock.accept()
data = clientsocket.recv(16384)
udata = data.decode("utf-8")
print(udata)
clientsocket.send(b"Hello, client! \n")
sock.close()
|
none
| 1
| 2.939884
| 3
|
|
python-icmperror/icmperror/icmperror/icmperror.py
|
igarny/pscheduler
| 47
|
6627030
|
<filename>python-icmperror/icmperror/icmperror/icmperror.py
"""
Functions for translating ICMP error codes to enumerated values.
"""
import json
import sys
icmp_errors = {
    # Strings produced by traceroute, same as ICMP error code counterparts
    'H': 'host-unreachable', 'N': 'net-unreachable',
    'P': 'protocol-unreachable', 'S': 'source-route-failed',
    'F': 'fragmentation-needed-and-df-set',
    'X': 'communication-administratively-prohibited',
    'V': 'host-precedence-violation', 'C': 'precedence-cutoff-in-effect',
    # ICMP Type 3 Error Codes, from RFC 792
    '0': 'net-unreachable', '1': 'host-unreachable',
    '2': 'protocol-unreachable', '3': 'port-unreachable',
    '4': 'fragmentation-needed-and-df-set', '5': 'source-route-failed',
    # ICMP Type 3 Error Codes, from RFC 1122
    '6': 'destination-network-unknown', '7': 'destination-host-unknown',
    '8': 'source-host-isolated',
    '9': 'destination-network-administratively-prohibited',
    '10': 'destination-host-administratively-prohibited',
    '11': 'network-unreachable-for-type-of-service',
    '12': 'icmp-destination-host-unreachable-tos',
    # ICMP Type 3 Error Codes, from RFC 1122
    '13': 'communication-administratively-prohibited',
    '14': 'host-precedence-violation',
    '15': 'precedence-cutoff-in-effect',
}


def translate(code):
    """Map an ICMP unreachable code (or traceroute letter) to its enumerated name.

    ``code`` may be a number or a single letter, optionally preceded by an
    exclamation point (traceroute's annotation style).  For example, 5 or
    !5 translates to source-route-failed.  Raises ValueError if the code
    is not valid.
    """
    text = str(code)
    # traceroute prints codes as e.g. "!5"; strip one leading bang
    if text.startswith('!'):
        text = text[1:]
    try:
        return icmp_errors[text]
    except KeyError:
        raise ValueError("Code is not valid")
|
<filename>python-icmperror/icmperror/icmperror/icmperror.py
"""
Functions for translating ICMP error codes to enumerated values.
"""
import json
import sys
icmp_errors = {
# Strings produced by traceroute, same as ICMP error code counterparts
'H': 'host-unreachable',
'N': 'net-unreachable',
'P': 'protocol-unreachable',
'S': 'source-route-failed',
'F': 'fragmentation-needed-and-df-set',
'X': 'communication-administratively-prohibited',
'V': 'host-precedence-violation',
'C': 'precedence-cutoff-in-effect',
# ICMP Type 3 Error Codes, from RFC 792
'0': 'net-unreachable',
'1': 'host-unreachable',
'2': 'protocol-unreachable',
'3': 'port-unreachable',
'4': 'fragmentation-needed-and-df-set',
'5': 'source-route-failed',
# ICMP Type 3 Error Codes, from RFC 1122
'6': 'destination-network-unknown',
'7': 'destination-host-unknown',
'8': 'source-host-isolated',
'9': 'destination-network-administratively-prohibited',
'10': 'destination-host-administratively-prohibited',
'11': 'network-unreachable-for-type-of-service',
'12': 'icmp-destination-host-unreachable-tos',
# ICMP Type 3 Error Codes, from RFC 1122
'13': 'communication-administratively-prohibited',
'14': 'host-precedence-violation',
'15': 'precedence-cutoff-in-effect'
}
def translate(code):
"""
Translate a code which is either a number or single letter,
optionally preceded by an exclamation point into a string with a
standardized enumeration of the error.
For example, 5 or !5 translates to source-route-failed.
A ValueError is thrown if the code is not valid.
"""
code = str(code)
if len(code) and code[0] == '!':
code = code[1:]
try:
return icmp_errors[code]
except KeyError:
raise ValueError("Code is not valid")
|
en
| 0.814656
|
Functions for translating ICMP error codes to enumerated values. # Strings produced by traceroute, same as ICMP error code counterparts # ICMP Type 3 Error Codes, from RFC 792 # ICMP Type 3 Error Codes, from RFC 1122 # ICMP Type 3 Error Codes, from RFC 1122 Translate a code which is either a number or single letter, optionally preceded by an exclamation point into a string with a standardized enumeration of the error. For example, 5 or !5 translates to source-route-failed. A ValueError is thrown if the code is not valid.
| 3.286378
| 3
|
terminalAI/gamelib/algocore.py
|
umerhasan17/mlds
| 1
|
6627031
|
import json
from .game_state import GameState
from .util import get_command, debug_write, BANNER_TEXT, send_command
class AlgoCore(object):
    """This class handles communication with the game itself. Your strategy should subclass it.
    Attributes:
        * config (JSON): json object containing information about the game
    """
    def __init__(self):
        # populated by on_game_start() once the engine sends the config blob
        self.config = None
    def on_game_start(self, config):
        """
        Override this to perform initial setup at the start of the game, based
        on the config, a json file which contains information about the game.
        """
        self.config = config
    def on_turn(self, game_state):
        """
        This step function is called every turn and is passed a string containing
        the current game state, which can be used to initialize a new GameMap
        """
        self.submit_default_turn()
    def submit_default_turn(self):
        # an empty build line plus an empty deploy line = a "do nothing" turn
        send_command("")
        send_command("")
    def start(self):
        """
        Start the parsing loop.
        Python will hang on the readline() statement so actually this program will run forever unless manually stopped or
        it receives the "End" turn message from the game.
        """
        debug_write(BANNER_TEXT)
        while True:
            # Note: Python blocks and hangs on stdin. Can cause issues if connections aren't setup properly and may need to
            # manually kill this Python program.
            game_state_string = get_command()
            if "replaySave" in game_state_string:
                """
                This means this must be the config file. So, load in the config file as a json and add it to your AlgoStrategy class.
                """
                parsed_config = json.loads(game_state_string)
                self.on_game_start(parsed_config)
            elif "turnInfo" in game_state_string:
                state = json.loads(game_state_string)
                # turnInfo[0] encodes the message type:
                # 0 = take a turn, 1 = action-phase frame (ignored), 2 = game over
                stateType = int(state.get("turnInfo")[0])
                if stateType == 0:
                    """
                    This is the game turn game state message. Algo must now print to stdout 2 lines, one for build phase one for
                    deploy phase. Printing is handled by the provided functions.
                    """
                    self.on_turn(game_state_string)
                elif stateType == 1:
                    """
                    If stateType == 1, this game_state_string string represents the results of an action phase
                    """
                    continue
                elif stateType == 2:
                    """
                    This is the end game message. This means the game is over so break and finish the program.
                    """
                    debug_write("Got end state quitting bot.")
                    break
                else:
                    """
                    Something is wrong? Recieved an incorrect or imporperly formatted string.
                    """
                    debug_write("Got unexpected string with turnInfo: {}".format(game_state_string))
            else:
                """
                Something is wrong? Recieved an incorrect or imporperly formatted string.
                """
                debug_write("Got unexpected string : {}".format(game_state_string))
|
import json
from .game_state import GameState
from .util import get_command, debug_write, BANNER_TEXT, send_command
class AlgoCore(object):
"""This class handles communication with the game itself. Your strategy should subclass it.
Attributes:
* config (JSON): json object containing information about the game
"""
def __init__(self):
self.config = None
def on_game_start(self, config):
"""
Override this to perform initial setup at the start of the game, based
on the config, a json file which contains information about the game.
"""
self.config = config
def on_turn(self, game_state):
"""
This step function is called every turn and is passed a string containing
the current game state, which can be used to initialize a new GameMap
"""
self.submit_default_turn()
def submit_default_turn(self):
send_command("")
send_command("")
def start(self):
"""
Start the parsing loop.
Python will hang on the readline() statement so actually this program will run forever unless manually stopped or
it receives the "End" turn message from the game.
"""
debug_write(BANNER_TEXT)
while True:
# Note: Python blocks and hangs on stdin. Can cause issues if connections aren't setup properly and may need to
# manually kill this Python program.
game_state_string = get_command()
if "replaySave" in game_state_string:
"""
This means this must be the config file. So, load in the config file as a json and add it to your AlgoStrategy class.
"""
parsed_config = json.loads(game_state_string)
self.on_game_start(parsed_config)
elif "turnInfo" in game_state_string:
state = json.loads(game_state_string)
stateType = int(state.get("turnInfo")[0])
if stateType == 0:
"""
This is the game turn game state message. Algo must now print to stdout 2 lines, one for build phase one for
deploy phase. Printing is handled by the provided functions.
"""
self.on_turn(game_state_string)
elif stateType == 1:
"""
If stateType == 1, this game_state_string string represents the results of an action phase
"""
continue
elif stateType == 2:
"""
This is the end game message. This means the game is over so break and finish the program.
"""
debug_write("Got end state quitting bot.")
break
else:
"""
Something is wrong? Recieved an incorrect or imporperly formatted string.
"""
debug_write("Got unexpected string with turnInfo: {}".format(game_state_string))
else:
"""
Something is wrong? Recieved an incorrect or imporperly formatted string.
"""
debug_write("Got unexpected string : {}".format(game_state_string))
|
en
| 0.872103
|
This class handles communication with the game itself. Your strategy should subclass it. Attributes: * config (JSON): json object containing information about the game Override this to perform initial setup at the start of the game, based on the config, a json file which contains information about the game. This step function is called every turn and is passed a string containing the current game state, which can be used to initialize a new GameMap Start the parsing loop. Python will hang on the readline() statement so actually this program will run forever unless manually stopped or it receives the "End" turn message from the game. # Note: Python blocks and hangs on stdin. Can cause issues if connections aren't setup properly and may need to # manually kill this Python program. This means this must be the config file. So, load in the config file as a json and add it to your AlgoStrategy class. This is the game turn game state message. Algo must now print to stdout 2 lines, one for build phase one for deploy phase. Printing is handled by the provided functions. If stateType == 1, this game_state_string string represents the results of an action phase This is the end game message. This means the game is over so break and finish the program. Something is wrong? Recieved an incorrect or imporperly formatted string. Something is wrong? Recieved an incorrect or imporperly formatted string.
| 3.405058
| 3
|
API/waypi/routes/__init__.py
|
OdegaOmie/WayApp
| 0
|
6627032
|
# Route modules re-exported by ``from waypi.routes import *``.
__all__ = ["users", "test_routes"]
|
__all__ = ["users", "test_routes"]
|
none
| 1
| 1.042558
| 1
|
|
archives/oldtwitch2youtubeold.py
|
Hagnor/twitch2youtube
| 0
|
6627033
|
import os
def parseOK(path):
    """Scan the description file and build one list per lien/mode block.

    Mechanics (per line): 'lien=' opens a block and stores the link;
    'nom=' (only valid inside an open block) stores the name and inserts the
    running lien counter at index 0; 'mode=' closes the block, appending the
    number of lines between 'lien' and 'mode' minus 2, then the mode string.
    Exits the process on malformed input.
    NOTE(review): the exact expected line order in time.txt is not obvious
    from this function alone -- verify against a sample file, especially the
    index the caller reads as the timestamp count (firstparse[i][2]).
    """
    tab_init=[]
    tab_all=[]
    nline=0
    nblien=0
    block=0  # line number of the last 'lien=' seen; 0 = outside a block
    with open (path, 'rt') as infile:
        for line in infile:
            nline=nline+1
            if 'lien' in line:
                if block!=0:
                    # a second 'lien' without a closing 'mode' in between
                    print("Input file error, lien")
                    exit()
                temp=(line.split('='))[1].rstrip()
                tab_init.append(temp)
                block=nline
                nblien=nblien+1
            if 'mode' in line:
                if block==0:
                    print("Input file error, mode")
                    exit()
                temp=(line.split('='))[1].rstrip()
                temp1=nline-(block)
                # count of intermediate lines between 'lien' and this 'mode'
                tab_init.append(temp1-2)
                tab_init.append(temp)
                tab_all.append(tab_init)
                tab_init=[]
                block=0
            if 'nom' in line:
                if block==0:
                    print("Input file error, nom")
                    exit()
                temp=(line.split('='))[1].rstrip()
                tab_init.append(temp)
                tab_init.insert(0,nblien)
    return tab_all
def parsetime(time):
tabtemp=[]
tabok=[]
if '-' in time:
tabtemp=(time.split('-'))
for i in range (2):
if "." in tabtemp[i]:
tabtemp2=(tabtemp[i].split('.'))
if len(tabtemp2)==2 or len(tabtemp2)==3:
for j in range (len(tabtemp2)):
if len(tabtemp2[j])==1:
tabtemp2[j]='0'+tabtemp2[j]
if len(tabtemp2)==2:
tabok.append('00:'+tabtemp2[0]+':'+tabtemp2[1])
if len(tabtemp2)==3:
tabok.append(tabtemp2[0]+':'+tabtemp2[1]+':'+tabtemp2[2])
else:
print("Input file error, plus de 2 .")
exit()
else:
a=tabtemp[i]
if len(a)==1:
a='0'+a
tabok.append('00:'+a+':00')
retour=tabok[0]+'-'+tabok[1]
return retour
else:
print("Input file error, pas de -")
exit()
link="time.txt"
linkvideo=""
list_fic=[]
pos_nom=[]
firstparse=parseOK(link)
with open (link) as f:
temp_fic=list(f)
for element in temp_fic:
list_fic.append(element.strip())
#print(firstparse)
#print(list_fic)
for x in range(len(list_fic)):
temp=list_fic[x]
if 'nom' in temp:
pos_nom.append(x)
#print(pos_nom)
if len(pos_nom)!=len(firstparse):
print("Input file error")
exit
for i in range (len(firstparse)):
posdepart=pos_nom[i]+1
nombretime=firstparse[i][2]
for j in range (posdepart, posdepart+nombretime):
firstparse[i].append(parsetime(list_fic[j]))
print(firstparse)
|
import os
def parseOK(path):
tab_init=[]
tab_all=[]
nline=0
nblien=0
block=0
with open (path, 'rt') as infile:
for line in infile:
nline=nline+1
if 'lien' in line:
if block!=0:
print("Input file error, lien")
exit()
temp=(line.split('='))[1].rstrip()
tab_init.append(temp)
block=nline
nblien=nblien+1
if 'mode' in line:
if block==0:
print("Input file error, mode")
exit()
temp=(line.split('='))[1].rstrip()
temp1=nline-(block)
tab_init.append(temp1-2)
tab_init.append(temp)
tab_all.append(tab_init)
tab_init=[]
block=0
if 'nom' in line:
if block==0:
print("Input file error, nom")
exit()
temp=(line.split('='))[1].rstrip()
tab_init.append(temp)
tab_init.insert(0,nblien)
return tab_all
def parsetime(time):
tabtemp=[]
tabok=[]
if '-' in time:
tabtemp=(time.split('-'))
for i in range (2):
if "." in tabtemp[i]:
tabtemp2=(tabtemp[i].split('.'))
if len(tabtemp2)==2 or len(tabtemp2)==3:
for j in range (len(tabtemp2)):
if len(tabtemp2[j])==1:
tabtemp2[j]='0'+tabtemp2[j]
if len(tabtemp2)==2:
tabok.append('00:'+tabtemp2[0]+':'+tabtemp2[1])
if len(tabtemp2)==3:
tabok.append(tabtemp2[0]+':'+tabtemp2[1]+':'+tabtemp2[2])
else:
print("Input file error, plus de 2 .")
exit()
else:
a=tabtemp[i]
if len(a)==1:
a='0'+a
tabok.append('00:'+a+':00')
retour=tabok[0]+'-'+tabok[1]
return retour
else:
print("Input file error, pas de -")
exit()
link="time.txt"
linkvideo=""
list_fic=[]
pos_nom=[]
firstparse=parseOK(link)
with open (link) as f:
temp_fic=list(f)
for element in temp_fic:
list_fic.append(element.strip())
#print(firstparse)
#print(list_fic)
for x in range(len(list_fic)):
temp=list_fic[x]
if 'nom' in temp:
pos_nom.append(x)
#print(pos_nom)
if len(pos_nom)!=len(firstparse):
print("Input file error")
exit
for i in range (len(firstparse)):
posdepart=pos_nom[i]+1
nombretime=firstparse[i][2]
for j in range (posdepart, posdepart+nombretime):
firstparse[i].append(parsetime(list_fic[j]))
print(firstparse)
|
ru
| 0.180727
|
#print(firstparse) #print(list_fic) #print(pos_nom)
| 2.820347
| 3
|
1030.py
|
TheLurkingCat/TIOJ
| 0
|
6627034
|
<filename>1030.py
n = int(input())
while n:
k = sorted([int(x) for x in input().split()])
floor = 1 + sum(k)
floor += k[-1]
print(floor)
n = int(input())
|
<filename>1030.py
n = int(input())
while n:
k = sorted([int(x) for x in input().split()])
floor = 1 + sum(k)
floor += k[-1]
print(floor)
n = int(input())
|
none
| 1
| 3.099143
| 3
|
|
wukong/tools/python/djadump.py
|
BigstarPie/WuKongProject
| 0
|
6627035
|
<filename>wukong/tools/python/djadump.py
# <property name="djarchive_type_lib_infusion" value="0"/>
# <property name="djarchive_type_app_infusion" value="1"/>
# <property name="djarchive_type_wkpf_link_table" value="2"/>
# <property name="djarchive_type_wkpf_component_map" value="3"/>
# <property name="djarchive_type_wkpf_initvalues" value="4"/>
import sys
def filetype2string(type):
return [
'library infusion',
'application infusion',
'wkpf link table',
'wkpf component map',
'wkpf initvalues'
][type]
def parseLinkTable(filedata):
number_of_links = filedata[0]+256*filedata[1]
print "\t%s: \t\t\t%d links" % (str(filedata[0:2]), number_of_links)
for i in range(number_of_links):
link = filedata[2+i*6:2+i*6+6]
fromcomponent = link[0]+link[1]*256
fromport = link[2]
tocomponent = link[3]+link[4]*256
toport = link[5]
print "\t%s: \t\tlink from (%d,%d) to (%d,%d)" % (str(link),
fromcomponent,
fromport,
tocomponent,
toport)
def parseComponentMap(filedata):
number_of_components = filedata[0]+256*filedata[1]
print "\t%s: \t\t\t%d components" % (str(filedata[0:2]), number_of_components)
offsettable = filedata[2:2+(number_of_components)*2]
print "\t\t\t\t\toffset table:%s" % (str(offsettable))
for i in range(number_of_components):
offset = offsettable[2*i]+offsettable[2*i+1]*256
print "\t%s: \t\t\t\tcomponent %d at offset %d" % (str(offsettable[2*i:2*i+2]), i, offset)
componenttable = filedata[2+(number_of_components)*2:]
pos = 0
print "\t\t\t\t\tcomponents:"
for i in range(number_of_components):
number_of_endpoints = componenttable[pos]
wuclass = componenttable[pos+1]+componenttable[pos+2]*256
print "\t%s: \t\t\t\tcomponent %d, wuclass %d, %d endpoint(s):" % (str(componenttable[pos:pos+3]), i, wuclass, number_of_endpoints)
pos += 3
for j in range(number_of_endpoints):
node = componenttable[pos]+componenttable[pos+1]*256
port = componenttable[pos+2]
print "\t%s: \t\t\t\t\tnode %d, port %d" % (str(componenttable[pos:pos+3]), node, port)
pos += 3
def parseInitvalues(filedata):
number_of_initvalues = filedata[0]+256*filedata[1]
print "\t%s: \t\t\t%d initvalues" % (str(filedata[0:2]), number_of_initvalues)
initvalues = filedata[2:]
pos = 0
for i in range(number_of_initvalues):
component_id = initvalues[pos]+initvalues[pos+1]*256
property_number = initvalues[pos+2]
value_size = initvalues[pos+3]
print "\t%s: \t\t\tcomponent %d, property %d, size %d" % (str(initvalues[pos:pos+4]),
component_id,
property_number,
value_size)
value = initvalues[pos+4:pos+4+value_size]
if value_size == 1:
valuestr = str(value[0])
elif value_size == 2:
valuestr = str(value[0]+value[1]*256)
else:
valuestr = str(value)
print "\t%s: \t\t\t\tvalue %s" % (str(value), valuestr)
pos += 4+value_size
filename = sys.argv[1]
with open(filename, "rb") as f:
while True:
filelength = ord(f.read(1)) + ord(f.read(1))*256
if filelength == 0:
break
filetype = ord(f.read(1))
print "FILE length %d, type '%s'" % (filelength, filetype2string(filetype))
filedata = [ord(x) for x in f.read(filelength)]
if filetype == 0:
print "\t Java archive"
elif filetype == 1:
print "\t Java archive"
elif filetype == 2:
parseLinkTable(filedata)
elif filetype == 3:
parseComponentMap(filedata)
elif filetype == 4:
parseInitvalues(filedata)
print ""
# file should be empty here
remaining = f.read()
if len(remaining) == 0:
print "END OF ARCHIVE"
else:
print "UNEXPECTED DATA AFTER END OF ARCHIVE:"
print " ".join([str(x) for x in remaining])
|
<filename>wukong/tools/python/djadump.py
# <property name="djarchive_type_lib_infusion" value="0"/>
# <property name="djarchive_type_app_infusion" value="1"/>
# <property name="djarchive_type_wkpf_link_table" value="2"/>
# <property name="djarchive_type_wkpf_component_map" value="3"/>
# <property name="djarchive_type_wkpf_initvalues" value="4"/>
import sys
def filetype2string(type):
return [
'library infusion',
'application infusion',
'wkpf link table',
'wkpf component map',
'wkpf initvalues'
][type]
def parseLinkTable(filedata):
number_of_links = filedata[0]+256*filedata[1]
print "\t%s: \t\t\t%d links" % (str(filedata[0:2]), number_of_links)
for i in range(number_of_links):
link = filedata[2+i*6:2+i*6+6]
fromcomponent = link[0]+link[1]*256
fromport = link[2]
tocomponent = link[3]+link[4]*256
toport = link[5]
print "\t%s: \t\tlink from (%d,%d) to (%d,%d)" % (str(link),
fromcomponent,
fromport,
tocomponent,
toport)
def parseComponentMap(filedata):
number_of_components = filedata[0]+256*filedata[1]
print "\t%s: \t\t\t%d components" % (str(filedata[0:2]), number_of_components)
offsettable = filedata[2:2+(number_of_components)*2]
print "\t\t\t\t\toffset table:%s" % (str(offsettable))
for i in range(number_of_components):
offset = offsettable[2*i]+offsettable[2*i+1]*256
print "\t%s: \t\t\t\tcomponent %d at offset %d" % (str(offsettable[2*i:2*i+2]), i, offset)
componenttable = filedata[2+(number_of_components)*2:]
pos = 0
print "\t\t\t\t\tcomponents:"
for i in range(number_of_components):
number_of_endpoints = componenttable[pos]
wuclass = componenttable[pos+1]+componenttable[pos+2]*256
print "\t%s: \t\t\t\tcomponent %d, wuclass %d, %d endpoint(s):" % (str(componenttable[pos:pos+3]), i, wuclass, number_of_endpoints)
pos += 3
for j in range(number_of_endpoints):
node = componenttable[pos]+componenttable[pos+1]*256
port = componenttable[pos+2]
print "\t%s: \t\t\t\t\tnode %d, port %d" % (str(componenttable[pos:pos+3]), node, port)
pos += 3
def parseInitvalues(filedata):
number_of_initvalues = filedata[0]+256*filedata[1]
print "\t%s: \t\t\t%d initvalues" % (str(filedata[0:2]), number_of_initvalues)
initvalues = filedata[2:]
pos = 0
for i in range(number_of_initvalues):
component_id = initvalues[pos]+initvalues[pos+1]*256
property_number = initvalues[pos+2]
value_size = initvalues[pos+3]
print "\t%s: \t\t\tcomponent %d, property %d, size %d" % (str(initvalues[pos:pos+4]),
component_id,
property_number,
value_size)
value = initvalues[pos+4:pos+4+value_size]
if value_size == 1:
valuestr = str(value[0])
elif value_size == 2:
valuestr = str(value[0]+value[1]*256)
else:
valuestr = str(value)
print "\t%s: \t\t\t\tvalue %s" % (str(value), valuestr)
pos += 4+value_size
filename = sys.argv[1]
with open(filename, "rb") as f:
while True:
filelength = ord(f.read(1)) + ord(f.read(1))*256
if filelength == 0:
break
filetype = ord(f.read(1))
print "FILE length %d, type '%s'" % (filelength, filetype2string(filetype))
filedata = [ord(x) for x in f.read(filelength)]
if filetype == 0:
print "\t Java archive"
elif filetype == 1:
print "\t Java archive"
elif filetype == 2:
parseLinkTable(filedata)
elif filetype == 3:
parseComponentMap(filedata)
elif filetype == 4:
parseInitvalues(filedata)
print ""
# file should be empty here
remaining = f.read()
if len(remaining) == 0:
print "END OF ARCHIVE"
else:
print "UNEXPECTED DATA AFTER END OF ARCHIVE:"
print " ".join([str(x) for x in remaining])
|
en
| 0.139171
|
# <property name="djarchive_type_lib_infusion" value="0"/> # <property name="djarchive_type_app_infusion" value="1"/> # <property name="djarchive_type_wkpf_link_table" value="2"/> # <property name="djarchive_type_wkpf_component_map" value="3"/> # <property name="djarchive_type_wkpf_initvalues" value="4"/> # file should be empty here
| 2.432236
| 2
|
kernel_hmc/tools/file.py
|
karlnapf/kernel_hmc
| 27
|
6627036
|
import hashlib
from kernel_hmc.tools.log import logger
def sha1sum(fname, blocksize=65536):
"""
Computes sha1sum of the given file. Same as the unix command line hash.
Returns: string with the hex-formatted sha1sum hash
"""
hasher = hashlib.sha1()
with open(fname, 'rb') as afile:
logger.debug("Hasing %s" % fname)
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.hexdigest()
|
import hashlib
from kernel_hmc.tools.log import logger
def sha1sum(fname, blocksize=65536):
"""
Computes sha1sum of the given file. Same as the unix command line hash.
Returns: string with the hex-formatted sha1sum hash
"""
hasher = hashlib.sha1()
with open(fname, 'rb') as afile:
logger.debug("Hasing %s" % fname)
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.hexdigest()
|
en
| 0.781766
|
Computes sha1sum of the given file. Same as the unix command line hash. Returns: string with the hex-formatted sha1sum hash
| 3.237165
| 3
|
backend/work/migrations/0007_workitem_worktag.py
|
ecto0310/groupware
| 3
|
6627037
|
<reponame>ecto0310/groupware
# Generated by Django 3.0.3 on 2021-09-21 09:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tool', '0007_auto_20200521_0031'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('storage', '0001_initial'),
('work', '0006_auto_20200521_0031'),
]
operations = [
migrations.CreateModel(
name='WorkTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('intro', models.TextField()),
],
),
migrations.CreateModel(
name='WorkItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('intro', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('files', models.ManyToManyField(to='storage.FileObject')),
('tags', models.ManyToManyField(blank=True, to='work.WorkTag')),
('tools', models.ManyToManyField(blank=True, to='tool.Tool')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
|
# Generated by Django 3.0.3 on 2021-09-21 09:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tool', '0007_auto_20200521_0031'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('storage', '0001_initial'),
('work', '0006_auto_20200521_0031'),
]
operations = [
migrations.CreateModel(
name='WorkTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('intro', models.TextField()),
],
),
migrations.CreateModel(
name='WorkItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('intro', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('files', models.ManyToManyField(to='storage.FileObject')),
('tags', models.ManyToManyField(blank=True, to='work.WorkTag')),
('tools', models.ManyToManyField(blank=True, to='tool.Tool')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
|
en
| 0.796275
|
# Generated by Django 3.0.3 on 2021-09-21 09:11
| 1.714786
| 2
|
seisflows/system/parallel.py
|
umairbinwaheed/seisflows
| 2
|
6627038
|
import os
from os.path import abspath, join
from subprocess import Popen
from time import sleep
import numpy as np
from seisflows.tools import unix
from seisflows.tools.code import saveobj
from seisflows.tools.config import SeisflowsParameters, SeisflowsPaths, \
ParameterError, findpath, loadclass
PAR = SeisflowsParameters()
PATH = SeisflowsPaths()
class parallel(loadclass('system', 'serial')):
""" An interface through which to submit workflows, run tasks in serial or
parallel, and perform other system functions.
By hiding environment details behind a python interface layer, these
classes provide a consistent command set across different computing
environments.
For more informations, see
http://seisflows.readthedocs.org/en/latest/manual/manual.html#system-interfaces
"""
def check(self):
""" Checks parameters and paths
"""
super(parallel, self).check()
if 'NPROCMAX' not in PAR:
raise Exception
def run(self, classname, funcname, hosts='all', **kwargs):
""" Runs tasks in serial or parallel on specified hosts
"""
unix.mkdir(PATH.SYSTEM)
self.checkpoint()
self.save_kwargs(classname, funcname, kwargs)
if hosts == 'all':
running_tasks = dict()
queued_tasks = range(PAR.NTASK)
while 1:
# check running tasks
for i, p in running_tasks.items():
if p.poll() != None:
running_tasks.pop(i)
# launch new tasks
while len(running_tasks) < PAR.NPROCMAX and queued_tasks:
i = queued_tasks.pop(0)
p = self._launch(classname, funcname, itask=i)
running_tasks[i] = p
if running_tasks:
sleep(1)
continue
if not queued_tasks:
break
print ''
elif hosts == 'head':
self.setnode(0)
func = getattr(__import__(classname), funcname)
func(**kwargs)
else:
task(**kwargs)
### private methods
def _launch(self, classname, funcname, itask=0):
self.progress(itask)
env = os.environ.copy().items()
env += [['SEISFLOWS_TASKID', str(itask)]]
p = Popen(
findpath('system') +'/'+ 'slurm/wrapper_srun '
+ PATH.OUTPUT + ' '
+ classname + ' '
+ funcname,
shell=True,
env=dict(env))
return p
def save_kwargs(self, classname, funcname, kwargs):
kwargspath = join(PATH.OUTPUT, 'SeisflowsObjects', classname+'_kwargs')
kwargsfile = join(kwargspath, funcname+'.p')
unix.mkdir(kwargspath)
saveobj(kwargsfile, kwargs)
|
import os
from os.path import abspath, join
from subprocess import Popen
from time import sleep
import numpy as np
from seisflows.tools import unix
from seisflows.tools.code import saveobj
from seisflows.tools.config import SeisflowsParameters, SeisflowsPaths, \
ParameterError, findpath, loadclass
PAR = SeisflowsParameters()
PATH = SeisflowsPaths()
class parallel(loadclass('system', 'serial')):
""" An interface through which to submit workflows, run tasks in serial or
parallel, and perform other system functions.
By hiding environment details behind a python interface layer, these
classes provide a consistent command set across different computing
environments.
For more informations, see
http://seisflows.readthedocs.org/en/latest/manual/manual.html#system-interfaces
"""
def check(self):
""" Checks parameters and paths
"""
super(parallel, self).check()
if 'NPROCMAX' not in PAR:
raise Exception
def run(self, classname, funcname, hosts='all', **kwargs):
""" Runs tasks in serial or parallel on specified hosts
"""
unix.mkdir(PATH.SYSTEM)
self.checkpoint()
self.save_kwargs(classname, funcname, kwargs)
if hosts == 'all':
running_tasks = dict()
queued_tasks = range(PAR.NTASK)
while 1:
# check running tasks
for i, p in running_tasks.items():
if p.poll() != None:
running_tasks.pop(i)
# launch new tasks
while len(running_tasks) < PAR.NPROCMAX and queued_tasks:
i = queued_tasks.pop(0)
p = self._launch(classname, funcname, itask=i)
running_tasks[i] = p
if running_tasks:
sleep(1)
continue
if not queued_tasks:
break
print ''
elif hosts == 'head':
self.setnode(0)
func = getattr(__import__(classname), funcname)
func(**kwargs)
else:
task(**kwargs)
### private methods
def _launch(self, classname, funcname, itask=0):
self.progress(itask)
env = os.environ.copy().items()
env += [['SEISFLOWS_TASKID', str(itask)]]
p = Popen(
findpath('system') +'/'+ 'slurm/wrapper_srun '
+ PATH.OUTPUT + ' '
+ classname + ' '
+ funcname,
shell=True,
env=dict(env))
return p
def save_kwargs(self, classname, funcname, kwargs):
kwargspath = join(PATH.OUTPUT, 'SeisflowsObjects', classname+'_kwargs')
kwargsfile = join(kwargspath, funcname+'.p')
unix.mkdir(kwargspath)
saveobj(kwargsfile, kwargs)
|
en
| 0.788776
|
An interface through which to submit workflows, run tasks in serial or parallel, and perform other system functions. By hiding environment details behind a python interface layer, these classes provide a consistent command set across different computing environments. For more informations, see http://seisflows.readthedocs.org/en/latest/manual/manual.html#system-interfaces Checks parameters and paths Runs tasks in serial or parallel on specified hosts # check running tasks # launch new tasks ### private methods
| 2.36676
| 2
|
src/utility/constant.py
|
William9923/IF4072-SentimentClassification
| 0
|
6627039
|
<filename>src/utility/constant.py
# --- [Constant running process] ---
SEED = 123
CONFIG_CLS = "config"
OPTION_CLS = "option"
# --- [Preprocessor Component] ---
LOWERCASE_COMPONENT = "lower"
MASK_URL_COMPONENT = "mask.url"
REMOVE_HTML_TAG_COMPONENT = "remove.html.tag"
MASK_EMOJI_COMPONENT = "mask.emoji"
REMOVE_PUNCT_COMPONENT = "remove.punct"
NORMALIZATION_COMPONENT = "normalization"
EMOJI_MASK = "emoji"
# --- [Classification Related] ---
TARGET = "sentiment"
PRETRAINED_BERT_EMBEDDING_DIM = 512
PRETRAINED_BERT_MODEL_NAME = "distilbert-base-uncased"
# --- [Experiment Option Related] ---
COUNT_FE_OPTION = "count"
TFIDF_FE_OPTION = "tfidf"
FASTTEXT_FE_OPTION = "fasttext"
BERT_FE_OPTION = "bert"
ROBERTA_FE_OPTION = "roberta"
NB_CLF_OPTION = "nb"
LGBM_CLF_OPTION = "lgbm"
LSTM_CLF_OPTION = "lstm"
BERT_CLF_OPTION = "bert"
ROBERTA_CLF_OPTION = "roberta"
|
<filename>src/utility/constant.py
# --- [Constant running process] ---
SEED = 123
CONFIG_CLS = "config"
OPTION_CLS = "option"
# --- [Preprocessor Component] ---
LOWERCASE_COMPONENT = "lower"
MASK_URL_COMPONENT = "mask.url"
REMOVE_HTML_TAG_COMPONENT = "remove.html.tag"
MASK_EMOJI_COMPONENT = "mask.emoji"
REMOVE_PUNCT_COMPONENT = "remove.punct"
NORMALIZATION_COMPONENT = "normalization"
EMOJI_MASK = "emoji"
# --- [Classification Related] ---
TARGET = "sentiment"
PRETRAINED_BERT_EMBEDDING_DIM = 512
PRETRAINED_BERT_MODEL_NAME = "distilbert-base-uncased"
# --- [Experiment Option Related] ---
COUNT_FE_OPTION = "count"
TFIDF_FE_OPTION = "tfidf"
FASTTEXT_FE_OPTION = "fasttext"
BERT_FE_OPTION = "bert"
ROBERTA_FE_OPTION = "roberta"
NB_CLF_OPTION = "nb"
LGBM_CLF_OPTION = "lgbm"
LSTM_CLF_OPTION = "lstm"
BERT_CLF_OPTION = "bert"
ROBERTA_CLF_OPTION = "roberta"
|
en
| 0.897017
|
# --- [Constant running process] --- # --- [Preprocessor Component] --- # --- [Classification Related] --- # --- [Experiment Option Related] ---
| 1.658358
| 2
|
backend/alembic/versions/20200930112537_cf4962b43209_add_pipeline_table.py
|
BodenmillerGroup/histocat-web
| 4
|
6627040
|
<filename>backend/alembic/versions/20200930112537_cf4962b43209_add_pipeline_table.py<gh_stars>1-10
"""Add pipeline table
Revision ID: cf4962b43209
Revises: <PASSWORD>
Create Date: 2020-09-30 11:25:37.650804
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'aba61a88be54'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'pipeline',
sa.Column('id', sa.Integer(), primary_key=True, index=True),
sa.Column('project_id', sa.Integer(), sa.ForeignKey("project.id", ondelete="CASCADE"), index=True, nullable=False),
sa.Column('name', sa.String()),
sa.Column('description', sa.String()),
sa.Column('steps', JSONB()),
sa.Column('created_at', sa.DateTime(), default=sa.sql.func.now(), nullable=False),
)
def downgrade():
op.drop_table('pipeline')
|
<filename>backend/alembic/versions/20200930112537_cf4962b43209_add_pipeline_table.py<gh_stars>1-10
"""Add pipeline table
Revision ID: cf4962b43209
Revises: <PASSWORD>
Create Date: 2020-09-30 11:25:37.650804
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'aba61a88be54'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'pipeline',
sa.Column('id', sa.Integer(), primary_key=True, index=True),
sa.Column('project_id', sa.Integer(), sa.ForeignKey("project.id", ondelete="CASCADE"), index=True, nullable=False),
sa.Column('name', sa.String()),
sa.Column('description', sa.String()),
sa.Column('steps', JSONB()),
sa.Column('created_at', sa.DateTime(), default=sa.sql.func.now(), nullable=False),
)
def downgrade():
op.drop_table('pipeline')
|
en
| 0.457857
|
Add pipeline table Revision ID: cf4962b43209 Revises: <PASSWORD> Create Date: 2020-09-30 11:25:37.650804 # revision identifiers, used by Alembic.
| 1.52097
| 2
|
last_numeral.py
|
nikitadragaa/---
| 1
|
6627041
|
<filename>last_numeral.py
a=str(input())
print(a[-1])
|
<filename>last_numeral.py
a=str(input())
print(a[-1])
|
none
| 1
| 2.787212
| 3
|
|
pypy/module/binascii/interp_hexlify.py
|
nanjekyejoannah/pypy
| 381
|
6627042
|
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.rarithmetic import ovfcheck
# ____________________________________________________________
def _value2char(value):
if value < 10:
return chr(ord('0') + value)
else:
return chr((ord('a')-10) + value)
_value2char._always_inline_ = True
@unwrap_spec(data='bufferstr')
def hexlify(space, data):
'''Hexadecimal representation of binary data.
This function is also available as "hexlify()".'''
try:
newlength = ovfcheck(len(data) * 2)
except OverflowError:
raise OperationError(space.w_MemoryError, space.w_None)
res = StringBuilder(newlength)
for c in data:
res.append(_value2char(ord(c) >> 4))
res.append(_value2char(ord(c) & 0xf))
return space.newbytes(res.build())
# ____________________________________________________________
def _char2value(space, c):
if c <= '9':
if c >= '0':
return ord(c) - ord('0')
elif c <= 'F':
if c >= 'A':
return ord(c) - (ord('A')-10)
elif c <= 'f':
if c >= 'a':
return ord(c) - (ord('a')-10)
raise oefmt(space.w_TypeError, "Non-hexadecimal digit found")
_char2value._always_inline_ = True
@unwrap_spec(hexstr='bufferstr')
def unhexlify(space, hexstr):
'''Binary data of hexadecimal representation.
hexstr must contain an even number of hex digits (upper or lower case).
This function is also available as "unhexlify()".'''
if len(hexstr) & 1:
raise oefmt(space.w_TypeError, "Odd-length string")
res = StringBuilder(len(hexstr) >> 1)
for i in range(0, len(hexstr), 2):
a = _char2value(space, hexstr[i])
b = _char2value(space, hexstr[i+1])
res.append(chr((a << 4) | b))
return space.newbytes(res.build())
|
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import unwrap_spec
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.rarithmetic import ovfcheck
# ____________________________________________________________
def _value2char(value):
if value < 10:
return chr(ord('0') + value)
else:
return chr((ord('a')-10) + value)
_value2char._always_inline_ = True
@unwrap_spec(data='bufferstr')
def hexlify(space, data):
'''Hexadecimal representation of binary data.
This function is also available as "hexlify()".'''
try:
newlength = ovfcheck(len(data) * 2)
except OverflowError:
raise OperationError(space.w_MemoryError, space.w_None)
res = StringBuilder(newlength)
for c in data:
res.append(_value2char(ord(c) >> 4))
res.append(_value2char(ord(c) & 0xf))
return space.newbytes(res.build())
# ____________________________________________________________
def _char2value(space, c):
if c <= '9':
if c >= '0':
return ord(c) - ord('0')
elif c <= 'F':
if c >= 'A':
return ord(c) - (ord('A')-10)
elif c <= 'f':
if c >= 'a':
return ord(c) - (ord('a')-10)
raise oefmt(space.w_TypeError, "Non-hexadecimal digit found")
_char2value._always_inline_ = True
@unwrap_spec(hexstr='bufferstr')
def unhexlify(space, hexstr):
'''Binary data of hexadecimal representation.
hexstr must contain an even number of hex digits (upper or lower case).
This function is also available as "unhexlify()".'''
if len(hexstr) & 1:
raise oefmt(space.w_TypeError, "Odd-length string")
res = StringBuilder(len(hexstr) >> 1)
for i in range(0, len(hexstr), 2):
a = _char2value(space, hexstr[i])
b = _char2value(space, hexstr[i+1])
res.append(chr((a << 4) | b))
return space.newbytes(res.build())
|
en
| 0.73251
|
# ____________________________________________________________ Hexadecimal representation of binary data. This function is also available as "hexlify()". # ____________________________________________________________ Binary data of hexadecimal representation. hexstr must contain an even number of hex digits (upper or lower case). This function is also available as "unhexlify()".
| 2.286622
| 2
|
src/pyig/commandline/execution_deprec.py
|
jwillis0720/PyIg
| 0
|
6627043
|
#!/usr/bin/env python
import subprocess as sp
import multiprocessing as mp
import glob
import os
import gzip
import datetime
from shutil import copytree
#Non Standard Library
from pyig.backend import split_fasta
from pyig.backend import output_parser
from pyig.commandline import arg_parse
def run_mp_and_delete(manager):
'''main method to run igblast through multiprocessing protocol,
takes in a list of dictionaires each with a seperate set of arguments'''
# bools
_zip_bool = manager['zip_bool']
_json_bool = manager['json_bool']
_concat_bool = manager['concat_bool']
_output_options = manager['output_options']
if _json_bool:
_output_type = "json"
else:
_output_type = "csv"
# file name outputs, these will all be temp files to be parsed later
_file = manager['split_file']
_blast_out = _file.split('.')[0] + ".blast_out"
_output_file = _file.split('.')[0] + "." + _output_type
# temporary path
_temporary_path = manager['tmp_path']
# check on internal data
_internal_data = manager['internal_data']
_current_directory = os.getcwd()
_species = manager['species']
if not os.path.exists(os.path.join(_current_directory, os.path.basename(_internal_data))):
print "Copying internal data to current directory"
copytree(_internal_data, os.getcwd())
# set up the command line
_blast_options = manager['blast_options']
# add executable
_cline = [manager['executable']]
# add all blast options
for argument in _blast_options:
arg = _blast_options[argument]
current_argument = [argument, arg]
_cline += current_argument
# change them to strings
_cline = [str(i) for i in _cline]
# add query and output to command line
_cline += ["-query", _file]
_cline += ["-out", _blast_out]
# run command line
print "Running BLAST on processor {0} for split file {1}".format(manager['proc_number'], _file)
sub = sp.Popen(_cline, stdout=sp.PIPE, stderr=sp.PIPE)
# If we have stderr and stdout, lets print it
stderr, stdout = sub.communicate()
if stderr or stdout:
print stderr, stdout
# Now parse the output
print "Parsing BLAST output to {0} on Processor {1}".format(_output_type, manager['proc_number'])
op = output_parser.igblast_output(_blast_out, _file,
_temporary_path, _output_options, species=_species, gui=False, zip_bool=_zip_bool, germ_properties=manager['germ_properties'])
op.parse_blast_file_to_type(_output_file, _output_type)
print "Done parsing {0} type".format(_output_type)
if _concat_bool:
print "Removing {0} and {1}".format(_file, _blast_out)
os.remove(_file)
os.remove(_blast_out)
def concat(_manager_dict):
out_file = _manager_dict['output_prefix']
zip_bool = _manager_dict['zip_bool']
json_bool = _manager_dict['json_bool']
concat_bool = _manager_dict['concat_bool']
# join the tmp file path with the query name to get all files that should be concatenated
file_names = os.path.join(_manager_dict['tmp_path'],
os.path.basename(_manager_dict['non_split']).split('.')[0])
if zip_bool and json_bool:
zipped_and_json = glob.glob(file_names + "*.json.gz")
with gzip.open(out_file + ".json.gz", 'wb') as gf:
for file in zipped_and_json:
f_in = gzip.open(file, 'rb')
gf.writelines(f_in)
f_in.close()
if concat_bool:
os.remove(file)
elif json_bool and not zip_bool:
just_json = glob.glob(file_names + "*.json")
with open(out_file + ".json", 'w') as gf:
for file in just_json:
f_in = open(file, 'r')
gf.writelines(f_in)
f_in.close()
if concat_bool:
os.remove(file)
elif zip_bool:
csv_zip = glob.glob(file_names + "*.csv.gz")
with gzip.open(out_file + ".csv.gz", 'wb') as gf:
for line in gzip.open(csv_zip[0], 'rb'):
gf.write(line)
for files in csv_zip[1:]:
f = gzip.open(files, 'rb')
f.next()
for line in f:
gf.write(line)
f.close()
if concat_bool:
for file in csv_zip:
os.remove(file)
else:
just_csv = glob.glob(file_names + "*.csv")
with open(out_file + ".csv", 'w') as gf:
for line in open(just_csv[0]):
gf.write(line)
for files in just_csv[1:]:
f = open(files)
f.next()
for line in f:
gf.write(line)
f.close()
if concat_bool:
for file in just_csv:
os.remove(file)
def execute(argument_class):
'''A function that takes in and executes options from the gui widgets'''
# variables
ts = datetime.time()
#fomatted_time = datetime.datetime.fromtimestamp(float(ts)).strftime('%Y-%m-%d %H:%M:%S')
print "Process Started {0}".format(ts)
# query, path and internal database
query_name = argument_class.get_query()
tmp_path = argument_class.get_tmp_dir()
processors = argument_class.get_procs()
internal_data = argument_class.get_internal_directory()
# output options
zip_bool = argument_class.get_zip_bool()
json_bool = argument_class.get_json_bool()
concat_bool = argument_class.get_concat_bool()
output_prefix = argument_class.get_output_prefix()
output_options = argument_class.get_output_options()
# blast options - all the options specific to blast executable
executable = argument_class.get_command()
blast_options = argument_class.get_blast_options()
# species
species = argument_class.get_organism()
#germ_properties_files
germ_properties = argument_class.get_germ_file()
# split fasta file up
print "Splitting up file {0} into {1}".format(os.path.abspath(query_name), tmp_path)
split_fasta.split_fasta(processors, tmp_path, query_name, suffix=".tmp_fasta")
glob_path = os.path.join(tmp_path, os.path.basename(query_name).split('.')[0] + "*.tmp_fasta")
# now grab all the temporary files in the temporary directory
split_up_starting_files = glob.glob(glob_path)
# manager_dict and list, holds all our values so we can pass them to varias processors
_manager_list = []
_manager_dict = {}
for i, _file in enumerate(split_up_starting_files, start=1): # the full file name
_manager_dict['executable'] = executable
_manager_dict['non_split'] = query_name
_manager_dict['split_file'] = _file
_manager_dict['zip_bool'] = zip_bool
_manager_dict['json_bool'] = json_bool
_manager_dict['concat_bool'] = concat_bool
_manager_dict['output_prefix'] = output_prefix
_manager_dict['tmp_path'] = tmp_path
_manager_dict['internal_data'] = internal_data
_manager_dict['blast_options'] = blast_options
_manager_dict['output_options'] = output_options
_manager_dict['species'] = species
_manager_dict['proc_number'] = i
_manager_dict['germ_properties'] = germ_properties
_manager_list.append(_manager_dict)
_manager_dict = {}
# run_protocol
for i in _manager_list:
run_mp_and_delete(i)
#pool = mp.Pool(processes=processors)
#pool.map(run_mp_and_delete, _manager_list)
concat(_manager_list[0])
print "Process is done"
print "Took {0}".format(datetime.time() - ts)
os.removedirs(_manager_dict['tmp_path'])
def main():
execute(arg_parse.argument_parser())
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import subprocess as sp
import multiprocessing as mp
import glob
import os
import gzip
import datetime
from shutil import copytree
#Non Standard Library
from pyig.backend import split_fasta
from pyig.backend import output_parser
from pyig.commandline import arg_parse
def run_mp_and_delete(manager):
'''main method to run igblast through multiprocessing protocol,
takes in a list of dictionaires each with a seperate set of arguments'''
# bools
_zip_bool = manager['zip_bool']
_json_bool = manager['json_bool']
_concat_bool = manager['concat_bool']
_output_options = manager['output_options']
if _json_bool:
_output_type = "json"
else:
_output_type = "csv"
# file name outputs, these will all be temp files to be parsed later
_file = manager['split_file']
_blast_out = _file.split('.')[0] + ".blast_out"
_output_file = _file.split('.')[0] + "." + _output_type
# temporary path
_temporary_path = manager['tmp_path']
# check on internal data
_internal_data = manager['internal_data']
_current_directory = os.getcwd()
_species = manager['species']
if not os.path.exists(os.path.join(_current_directory, os.path.basename(_internal_data))):
print "Copying internal data to current directory"
copytree(_internal_data, os.getcwd())
# set up the command line
_blast_options = manager['blast_options']
# add executable
_cline = [manager['executable']]
# add all blast options
for argument in _blast_options:
arg = _blast_options[argument]
current_argument = [argument, arg]
_cline += current_argument
# change them to strings
_cline = [str(i) for i in _cline]
# add query and output to command line
_cline += ["-query", _file]
_cline += ["-out", _blast_out]
# run command line
print "Running BLAST on processor {0} for split file {1}".format(manager['proc_number'], _file)
sub = sp.Popen(_cline, stdout=sp.PIPE, stderr=sp.PIPE)
# If we have stderr and stdout, lets print it
stderr, stdout = sub.communicate()
if stderr or stdout:
print stderr, stdout
# Now parse the output
print "Parsing BLAST output to {0} on Processor {1}".format(_output_type, manager['proc_number'])
op = output_parser.igblast_output(_blast_out, _file,
_temporary_path, _output_options, species=_species, gui=False, zip_bool=_zip_bool, germ_properties=manager['germ_properties'])
op.parse_blast_file_to_type(_output_file, _output_type)
print "Done parsing {0} type".format(_output_type)
if _concat_bool:
print "Removing {0} and {1}".format(_file, _blast_out)
os.remove(_file)
os.remove(_blast_out)
def concat(_manager_dict):
out_file = _manager_dict['output_prefix']
zip_bool = _manager_dict['zip_bool']
json_bool = _manager_dict['json_bool']
concat_bool = _manager_dict['concat_bool']
# join the tmp file path with the query name to get all files that should be concatenated
file_names = os.path.join(_manager_dict['tmp_path'],
os.path.basename(_manager_dict['non_split']).split('.')[0])
if zip_bool and json_bool:
zipped_and_json = glob.glob(file_names + "*.json.gz")
with gzip.open(out_file + ".json.gz", 'wb') as gf:
for file in zipped_and_json:
f_in = gzip.open(file, 'rb')
gf.writelines(f_in)
f_in.close()
if concat_bool:
os.remove(file)
elif json_bool and not zip_bool:
just_json = glob.glob(file_names + "*.json")
with open(out_file + ".json", 'w') as gf:
for file in just_json:
f_in = open(file, 'r')
gf.writelines(f_in)
f_in.close()
if concat_bool:
os.remove(file)
elif zip_bool:
csv_zip = glob.glob(file_names + "*.csv.gz")
with gzip.open(out_file + ".csv.gz", 'wb') as gf:
for line in gzip.open(csv_zip[0], 'rb'):
gf.write(line)
for files in csv_zip[1:]:
f = gzip.open(files, 'rb')
f.next()
for line in f:
gf.write(line)
f.close()
if concat_bool:
for file in csv_zip:
os.remove(file)
else:
just_csv = glob.glob(file_names + "*.csv")
with open(out_file + ".csv", 'w') as gf:
for line in open(just_csv[0]):
gf.write(line)
for files in just_csv[1:]:
f = open(files)
f.next()
for line in f:
gf.write(line)
f.close()
if concat_bool:
for file in just_csv:
os.remove(file)
def execute(argument_class):
'''A function that takes in and executes options from the gui widgets'''
# variables
ts = datetime.time()
#fomatted_time = datetime.datetime.fromtimestamp(float(ts)).strftime('%Y-%m-%d %H:%M:%S')
print "Process Started {0}".format(ts)
# query, path and internal database
query_name = argument_class.get_query()
tmp_path = argument_class.get_tmp_dir()
processors = argument_class.get_procs()
internal_data = argument_class.get_internal_directory()
# output options
zip_bool = argument_class.get_zip_bool()
json_bool = argument_class.get_json_bool()
concat_bool = argument_class.get_concat_bool()
output_prefix = argument_class.get_output_prefix()
output_options = argument_class.get_output_options()
# blast options - all the options specific to blast executable
executable = argument_class.get_command()
blast_options = argument_class.get_blast_options()
# species
species = argument_class.get_organism()
#germ_properties_files
germ_properties = argument_class.get_germ_file()
# split fasta file up
print "Splitting up file {0} into {1}".format(os.path.abspath(query_name), tmp_path)
split_fasta.split_fasta(processors, tmp_path, query_name, suffix=".tmp_fasta")
glob_path = os.path.join(tmp_path, os.path.basename(query_name).split('.')[0] + "*.tmp_fasta")
# now grab all the temporary files in the temporary directory
split_up_starting_files = glob.glob(glob_path)
# manager_dict and list, holds all our values so we can pass them to varias processors
_manager_list = []
_manager_dict = {}
for i, _file in enumerate(split_up_starting_files, start=1): # the full file name
_manager_dict['executable'] = executable
_manager_dict['non_split'] = query_name
_manager_dict['split_file'] = _file
_manager_dict['zip_bool'] = zip_bool
_manager_dict['json_bool'] = json_bool
_manager_dict['concat_bool'] = concat_bool
_manager_dict['output_prefix'] = output_prefix
_manager_dict['tmp_path'] = tmp_path
_manager_dict['internal_data'] = internal_data
_manager_dict['blast_options'] = blast_options
_manager_dict['output_options'] = output_options
_manager_dict['species'] = species
_manager_dict['proc_number'] = i
_manager_dict['germ_properties'] = germ_properties
_manager_list.append(_manager_dict)
_manager_dict = {}
# run_protocol
for i in _manager_list:
run_mp_and_delete(i)
#pool = mp.Pool(processes=processors)
#pool.map(run_mp_and_delete, _manager_list)
concat(_manager_list[0])
print "Process is done"
print "Took {0}".format(datetime.time() - ts)
os.removedirs(_manager_dict['tmp_path'])
def main():
execute(arg_parse.argument_parser())
if __name__ == '__main__':
main()
|
en
| 0.693238
|
#!/usr/bin/env python #Non Standard Library main method to run igblast through multiprocessing protocol, takes in a list of dictionaires each with a seperate set of arguments # bools # file name outputs, these will all be temp files to be parsed later # temporary path # check on internal data # set up the command line # add executable # add all blast options # change them to strings # add query and output to command line # run command line # If we have stderr and stdout, lets print it # Now parse the output # join the tmp file path with the query name to get all files that should be concatenated A function that takes in and executes options from the gui widgets # variables #fomatted_time = datetime.datetime.fromtimestamp(float(ts)).strftime('%Y-%m-%d %H:%M:%S') # query, path and internal database # output options # blast options - all the options specific to blast executable # species #germ_properties_files # split fasta file up # now grab all the temporary files in the temporary directory # manager_dict and list, holds all our values so we can pass them to varias processors # the full file name # run_protocol #pool = mp.Pool(processes=processors) #pool.map(run_mp_and_delete, _manager_list)
| 2.444128
| 2
|
app.py
|
ChristianJohn0/swift-post-api
| 0
|
6627044
|
import flask
import easypost
app = flask.Flask(__name__)
easypost.api_key = "<KEY>"
@app.route("/", methods=['GET'])
def home():
return '''<h1>Swift Post API</h1>
<hr>
<p>This is the Application Programming Interface for the iOS application (SwiftPost) which is an application for tracking packages from multiple delivery services such as Canada Post, FedEX, UPS, Purolator, DHL, etc.</p>
<p>This API makes use of the <a href="https://www.easypost.com/docs/api">easypost</a> which suppliers Shipping API, Tracking API, and Address Verification API for USPS, FedEx, UPS, DHL, and many more. This API is in its development stage.</p>'''
@app.route('/dev/v1/track', methods=['GET'])
def retrieveTrackingData():
tracking_code = flask.request.args.get('tracking_code')
carrier = flask.request.args.get('carrier')
tracking_data = easypost.Tracker.create(tracking_code = tracking_code, carrier = carrier)
temp = [tracking_data.to_dict()]
return flask.jsonify(temp)
|
import flask
import easypost
app = flask.Flask(__name__)
easypost.api_key = "<KEY>"
@app.route("/", methods=['GET'])
def home():
return '''<h1>Swift Post API</h1>
<hr>
<p>This is the Application Programming Interface for the iOS application (SwiftPost) which is an application for tracking packages from multiple delivery services such as Canada Post, FedEX, UPS, Purolator, DHL, etc.</p>
<p>This API makes use of the <a href="https://www.easypost.com/docs/api">easypost</a> which suppliers Shipping API, Tracking API, and Address Verification API for USPS, FedEx, UPS, DHL, and many more. This API is in its development stage.</p>'''
@app.route('/dev/v1/track', methods=['GET'])
def retrieveTrackingData():
tracking_code = flask.request.args.get('tracking_code')
carrier = flask.request.args.get('carrier')
tracking_data = easypost.Tracker.create(tracking_code = tracking_code, carrier = carrier)
temp = [tracking_data.to_dict()]
return flask.jsonify(temp)
|
en
| 0.863776
|
<h1>Swift Post API</h1> <hr> <p>This is the Application Programming Interface for the iOS application (SwiftPost) which is an application for tracking packages from multiple delivery services such as Canada Post, FedEX, UPS, Purolator, DHL, etc.</p> <p>This API makes use of the <a href="https://www.easypost.com/docs/api">easypost</a> which suppliers Shipping API, Tracking API, and Address Verification API for USPS, FedEx, UPS, DHL, and many more. This API is in its development stage.</p>
| 2.819191
| 3
|
preprocess/preprocess_pipeline.py
|
weishengtoh/machinelearning_assignment
| 0
|
6627045
|
<reponame>weishengtoh/machinelearning_assignment
'''
Define the pipeline component that is used to clean the raw data used for training
and evaluation.
This pipeline component uses the raw data artifact generated by the dataloader_pipeline
component. The raw data artifact student-mat.csv is retrieved from Weights & Biases.
This pipeline component generates the cleaned data artifact as student-maths-clean.csv
which is stored locally in data/clean and in Weights & Biases
The artifact generated from this pipeline component is to be used by the
test_data_pipeline component.
'''
import logging
import os
import hydra
import pandas as pd
import wandb
from omegaconf import DictConfig, OmegaConf
from sklearn.preprocessing import label_binarize
logging.basicConfig(level=logging.INFO, format="%(asctime)-15s %(message)s")
logger = logging.getLogger()
@hydra.main(config_path=os.path.join(os.pardir, 'configs'),
config_name='preprocess_config')
def start_pipeline(preprocess_config: DictConfig):
preprocess_config = OmegaConf.to_container(preprocess_config)
params = preprocess_config['parameters']
# Downloads the raw data artifact from W&B, and read it into a pandas Dataframe
logger.info('Download artifact from W&B')
run = wandb.init(project=preprocess_config['main']['project_name'],
group=preprocess_config['main']['experiment_name'],
job_type=preprocess_config['main']['job_type'])
artifact = run.use_artifact(params['input_artifact'])
artifact_dir = artifact.file()
df = pd.read_csv(artifact_dir)
# Bin the labels for binary classification
y = pd.cut(df['G3'], bins=2, labels=['fail', 'pass'], ordered=False)
df['G3'] = label_binarize(y, classes=['fail', 'pass']).ravel()
# Rename the columns to increase comprehensibility
df = rename_columns(df)
# Drop all duplicated values
df.drop_duplicates(inplace=True)
# Define the paths required
cwd_path = hydra.utils.get_original_cwd(
) # This is required to access the cwd when using Hydra
root_path = os.path.join(cwd_path, os.pardir)
output_folder = os.path.join(*[root_path, 'data', params['output_folder']])
output_file = os.path.join(output_folder, params['artifact_name'])
# Save the cleaned data in a local directory and upload to W&B
os.makedirs(output_folder, exist_ok=True)
df.to_csv(output_file, index=False)
logger.info('Create and upload a W&B Artifact')
artifact = wandb.Artifact(name=params['artifact_name'],
type=params['artifact_type'],
description=params['artifact_descript'])
artifact.add_file(output_file)
run.log_artifact(artifact)
# Finish the wandb run
wandb.finish()
def rename_columns(df: pd.DataFrame) -> pd.DataFrame:
# Identify the original columns names
nominal_var = [
'school', 'sex', 'address', 'famsize', 'Pstatus', 'Mjob', 'Fjob',
'reason', 'guardian', 'schoolsup', 'famsup', 'paid', 'activities',
'nursery', 'higher', 'internet', 'romantic'
]
numerical_var = ['age', 'absences']
ordinal_var = [
'Medu', 'Fedu', 'traveltime', 'studytime', 'failures', 'famrel',
'freetime', 'goout', 'Dalc', 'Walc', 'health'
]
target_var = ['G1', 'G2', 'G3']
# Define the new column names
nominal_var_renamed = [
'school', 'sex', 'address', 'fam_size', 'parents_cohabit', 'mother_job',
'father_job', 'reason_for_school', 'guardian', 'extra_edu_support',
'fam_edu_support', 'extra_paid_classes', 'extra_curri', 'att_nursery',
'wants_higheredu', 'internet_access', 'in_romantic'
]
numerical_var_renamed = ['age', 'num_school_absences']
ordinal_var_renamed = [
'mother_edu', 'father_edu', 'travel_time', 'weekly_studytime',
'past_classfailures', 'famrelation_quality', 'afterschool_time',
'friend_time', 'alc_consump_workday', 'alc_consump_weekend',
'health_status'
]
target_var_renamed = ['first_grade', 'second_grade', 'third_grade']
nominal_var_dict = dict(zip(nominal_var, nominal_var_renamed))
numerical_ver_dict = dict(zip(numerical_var, numerical_var_renamed))
ordinal_var_dict = dict(zip(ordinal_var, ordinal_var_renamed))
target_var_dict = dict(zip(target_var, target_var_renamed))
renamed_dict = {
**nominal_var_dict,
**numerical_ver_dict,
**ordinal_var_dict,
**target_var_dict
}
return df.rename(columns=renamed_dict)
if __name__ == "__main__":
start_pipeline()
|
'''
Define the pipeline component that is used to clean the raw data used for training
and evaluation.
This pipeline component uses the raw data artifact generated by the dataloader_pipeline
component. The raw data artifact student-mat.csv is retrieved from Weights & Biases.
This pipeline component generates the cleaned data artifact as student-maths-clean.csv
which is stored locally in data/clean and in Weights & Biases
The artifact generated from this pipeline component is to be used by the
test_data_pipeline component.
'''
import logging
import os
import hydra
import pandas as pd
import wandb
from omegaconf import DictConfig, OmegaConf
from sklearn.preprocessing import label_binarize
logging.basicConfig(level=logging.INFO, format="%(asctime)-15s %(message)s")
logger = logging.getLogger()
@hydra.main(config_path=os.path.join(os.pardir, 'configs'),
config_name='preprocess_config')
def start_pipeline(preprocess_config: DictConfig):
preprocess_config = OmegaConf.to_container(preprocess_config)
params = preprocess_config['parameters']
# Downloads the raw data artifact from W&B, and read it into a pandas Dataframe
logger.info('Download artifact from W&B')
run = wandb.init(project=preprocess_config['main']['project_name'],
group=preprocess_config['main']['experiment_name'],
job_type=preprocess_config['main']['job_type'])
artifact = run.use_artifact(params['input_artifact'])
artifact_dir = artifact.file()
df = pd.read_csv(artifact_dir)
# Bin the labels for binary classification
y = pd.cut(df['G3'], bins=2, labels=['fail', 'pass'], ordered=False)
df['G3'] = label_binarize(y, classes=['fail', 'pass']).ravel()
# Rename the columns to increase comprehensibility
df = rename_columns(df)
# Drop all duplicated values
df.drop_duplicates(inplace=True)
# Define the paths required
cwd_path = hydra.utils.get_original_cwd(
) # This is required to access the cwd when using Hydra
root_path = os.path.join(cwd_path, os.pardir)
output_folder = os.path.join(*[root_path, 'data', params['output_folder']])
output_file = os.path.join(output_folder, params['artifact_name'])
# Save the cleaned data in a local directory and upload to W&B
os.makedirs(output_folder, exist_ok=True)
df.to_csv(output_file, index=False)
logger.info('Create and upload a W&B Artifact')
artifact = wandb.Artifact(name=params['artifact_name'],
type=params['artifact_type'],
description=params['artifact_descript'])
artifact.add_file(output_file)
run.log_artifact(artifact)
# Finish the wandb run
wandb.finish()
def rename_columns(df: pd.DataFrame) -> pd.DataFrame:
# Identify the original columns names
nominal_var = [
'school', 'sex', 'address', 'famsize', 'Pstatus', 'Mjob', 'Fjob',
'reason', 'guardian', 'schoolsup', 'famsup', 'paid', 'activities',
'nursery', 'higher', 'internet', 'romantic'
]
numerical_var = ['age', 'absences']
ordinal_var = [
'Medu', 'Fedu', 'traveltime', 'studytime', 'failures', 'famrel',
'freetime', 'goout', 'Dalc', 'Walc', 'health'
]
target_var = ['G1', 'G2', 'G3']
# Define the new column names
nominal_var_renamed = [
'school', 'sex', 'address', 'fam_size', 'parents_cohabit', 'mother_job',
'father_job', 'reason_for_school', 'guardian', 'extra_edu_support',
'fam_edu_support', 'extra_paid_classes', 'extra_curri', 'att_nursery',
'wants_higheredu', 'internet_access', 'in_romantic'
]
numerical_var_renamed = ['age', 'num_school_absences']
ordinal_var_renamed = [
'mother_edu', 'father_edu', 'travel_time', 'weekly_studytime',
'past_classfailures', 'famrelation_quality', 'afterschool_time',
'friend_time', 'alc_consump_workday', 'alc_consump_weekend',
'health_status'
]
target_var_renamed = ['first_grade', 'second_grade', 'third_grade']
nominal_var_dict = dict(zip(nominal_var, nominal_var_renamed))
numerical_ver_dict = dict(zip(numerical_var, numerical_var_renamed))
ordinal_var_dict = dict(zip(ordinal_var, ordinal_var_renamed))
target_var_dict = dict(zip(target_var, target_var_renamed))
renamed_dict = {
**nominal_var_dict,
**numerical_ver_dict,
**ordinal_var_dict,
**target_var_dict
}
return df.rename(columns=renamed_dict)
if __name__ == "__main__":
start_pipeline()
|
en
| 0.813138
|
Define the pipeline component that is used to clean the raw data used for training and evaluation. This pipeline component uses the raw data artifact generated by the dataloader_pipeline component. The raw data artifact student-mat.csv is retrieved from Weights & Biases. This pipeline component generates the cleaned data artifact as student-maths-clean.csv which is stored locally in data/clean and in Weights & Biases The artifact generated from this pipeline component is to be used by the test_data_pipeline component. # Downloads the raw data artifact from W&B, and read it into a pandas Dataframe # Bin the labels for binary classification # Rename the columns to increase comprehensibility # Drop all duplicated values # Define the paths required # This is required to access the cwd when using Hydra # Save the cleaned data in a local directory and upload to W&B # Finish the wandb run # Identify the original columns names # Define the new column names
| 2.709266
| 3
|
moya/tags/soup.py
|
moyaproject/moya
| 129
|
6627046
|
from __future__ import unicode_literals
from __future__ import print_function
from ..elements.elementbase import Attribute
from ..tags.context import DataSetter
from ..compat import text_type
from ..html import slugify
from .. import namespaces
from lxml.cssselect import CSSSelector
from lxml.html import tostring, fromstring, fragment_fromstring
import json
class HTMLTag(object):
"""Represents an HTML tag."""
def __init__(self, el):
self._el = el
self.name = el.tag
self.attribs = dict(el.items())
self.text = el.text
def __repr__(self):
return tostring(self._el).decode("utf-8").strip()
class Strain(DataSetter):
"""
Manipulate HTML with CSS selectors.
The [c]select[/c] attribute should be a CSS selector which will filter tags from the [c]src[/c] string. The other attributes define what should happen to the matches tags.
The following example defines a [tag]filter[/tag] which uses [tag]{soup}strain[/tag] to add [c]class="lead"[/c] to the first paragraph of HTML:
[code xml]
<filter name="leadp" value="html">
<doc>Add class="lead" to first paragraph</doc>
<soup:strain src="html" select="p" max="1" let:class="'lead'" dst="leadp"/>
<return value="html:leadp"/>
</filter>
[/code]
"""
xmlns = namespaces.soup
class Help:
synopsis = """modify HTML with CSS selectors"""
select = Attribute("CSS selector", type="text", default="*")
src = Attribute("HTML document or fragment", type="expression", required=True)
append = Attribute(
"markup to append", type="expression", required=False, default=None
)
prepend = Attribute(
"markup to prepend", type="expression", required=False, default=None
)
replace = Attribute(
"markup to replace", type="expression", required=False, default=None
)
remove = Attribute("Remove matched element?", type="boolean", required=False)
filter = Attribute(
"Filter by attributes", type="function", required=False, default=None
)
_max = Attribute(
"Maximum number of tags to match", type="integer", required=False, default=None
)
def logic(self, context):
select, html = self.get_parameters(context, "select", "src")
if not html.strip():
self.set_context(context, self.dst(context), "")
return
let_map = self.get_let_map(context)
if not html:
self.set_context(context, self.dst(context), "")
return
try:
selector = CSSSelector(select)
except Exception as e:
self.throw("soup.bad-selector", text_type(e))
html_root = fragment_fromstring(html, create_parent=True)
(append, replace, prepend, remove, _max) = self.get_parameters(
context, "append", "replace", "prepend", "remove", "max"
)
if self.has_parameter("filter"):
filter_func = self.filter(context).get_scope_callable(context)
else:
filter_func = None
count = 0
for el in selector(html_root):
if filter_func is not None:
if not filter_func(el.attrib):
continue
if _max is not None and count >= _max:
break
count += 1
if let_map:
attrib = el.attrib
for k, v in let_map.items():
if v is None:
del attrib[k]
else:
attrib[k] = text_type(v)
if append is not None:
el.append(fragment_fromstring(append))
if replace is not None:
el.getparent().replace(el, fragment_fromstring(replace))
if prepend is not None:
el.insert(0, fragment_fromstring(prepend))
if remove:
el.getparent().remove(el)
result_markup = "".join(
tostring(child).decode("utf-8") for child in html_root.getchildren()
)
self.set_context(context, self.dst(context), result_markup)
class Extract(DataSetter):
"""
Extract tags from HTML with CSS selectors
"""
xmlns = namespaces.soup
class Help:
synopsis = "extract tags from HTML"
select = Attribute("CSS selector", type="text", default="*")
src = Attribute("HTML document or fragment", type="expression", required=True)
filter = Attribute(
"Filter by attributes", type="function", required=False, default=None
)
_max = Attribute(
"Maximum number of tags to match", type="integer", required=False, default=None
)
def logic(self, context):
(select, html, filter, _max) = self.get_parameters(
context, "select", "src", "filter", "max"
)
if not html.strip():
self.set_result(context, [])
return
try:
selector = CSSSelector(select)
except Exception as e:
self.throw("soup.bad-selector", text_type(e))
html_root = fromstring(html)
if self.has_parameter("filter"):
filter_func = self.filter(context).get_scope_callable(context)
else:
filter_func = None
elements = []
count = 0
for el in selector(html_root):
if filter_func is not None:
if not filter_func(el.attrib):
continue
if _max is not None and count >= _max:
break
count += 1
elements.append(el)
self.set_result(context, elements)
def set_result(self, context, elements):
result_markup = "".join(tostring(el).decode("utf-8") for el in elements)
self.set_context(context, self.dst(context), result_markup)
class ExtractList(Extract):
"""
Extract a list of markup fragments from HTML
"""
xmlns = namespaces.soup
class Help:
synopsis = "extract a list of markup fragments from HTML"
def set_result(self, context, elements):
result = [tostring(el).decode("utf-8") for el in elements]
self.set_context(context, self.dst(context), result)
class ExtractAttrs(Extract):
"""
Extract attributes from HTML tags
"""
xmlns = namespaces.soup
class Help:
synopsis = "extract attributes from HTML tags"
def set_result(self, context, elements):
result = [el.attrib for el in elements]
self.set_context(context, self.dst(context), result)
class ExtractTags(Extract):
"""
Extract tag objects from HTML.
"""
xmlns = namespaces.soup
class Help:
synopsis = "extract elements from HTML tags"
def set_result(self, context, elements):
result = [HTMLTag(el) for el in elements]
self.set_context(context, self.dst(context), result)
class ExtractToc(DataSetter):
"""Extract nested headings from HTML fragment."""
xmlns = namespaces.soup
src = Attribute("HTML document or fragment", type="expression", required=True)
def get_value(self, context):
html = self.src(context)
html_root = fragment_fromstring(html, create_parent=True)
selector = CSSSelector("h1,h2,h3,h4,h5,h6,h7")
root = [{"level": 0, "children": []}]
for h in selector(html_root):
if not h.text:
continue
level = int(h.tag.decode("utf-8")[1:])
title = h.text
if not isinstance(title, text_type):
title = title.decode("utf-8")
depth = root
while depth and level > depth[-1]["level"]:
depth = depth[-1]["children"]
depth.append({"level": level, "title": title, "children": []})
return root[0]["children"]
class AddIdToHeadings(DataSetter):
"""
Adds automatically generated id attributes to headings.
"""
xmlns = namespaces.soup
src = Attribute(
"HTML document or fragment", type="expression", missing=False, required=True
)
prefix = Attribute("Prefix to add to id", type="text", default="")
def get_value(self, context):
html = self.src(context)
prefix = self.prefix(context)
html_root = fragment_fromstring(html, create_parent=True)
selector = CSSSelector("h1,h2,h3,h4,h5,h6,h7")
for heading in selector(html_root):
heading.attrib["id"] = "{}{}".format(
prefix, slugify(heading.text.decode("utf-8"))
)
result_markup = "".join(
tostring(child).decode("utf-8") for child in html_root.getchildren()
)
return result_markup
class ExtractData(Extract):
"""
Extract HTML5 data- attributes
"""
xmlns = namespaces.soup
raw = Attribute(
"return raw data (without attempting JSON decode)?",
type="boolean",
default=False,
)
class Help:
synopsis = "extract HTML5 data attributes from HTML"
def set_result(self, context, elements):
all_data = []
raw = self.raw(context)
def make_data(v):
try:
data = json.loads(v)
except:
data = v
return data
for el in elements:
if raw:
data = {
k.partition("-")[-1]: v
for k, v in el.attrib.items()
if k.startswith("data-")
}
else:
data = {
k.partition("-")[-1]: make_data(v)
for k, v in el.attrib.items()
if k.startswith("data-")
}
all_data.append(data)
self.set_context(context, self.dst(context), all_data)
|
from __future__ import unicode_literals
from __future__ import print_function
from ..elements.elementbase import Attribute
from ..tags.context import DataSetter
from ..compat import text_type
from ..html import slugify
from .. import namespaces
from lxml.cssselect import CSSSelector
from lxml.html import tostring, fromstring, fragment_fromstring
import json
class HTMLTag(object):
"""Represents an HTML tag."""
def __init__(self, el):
self._el = el
self.name = el.tag
self.attribs = dict(el.items())
self.text = el.text
def __repr__(self):
return tostring(self._el).decode("utf-8").strip()
class Strain(DataSetter):
    """
    Manipulate HTML with CSS selectors.

    The [c]select[/c] attribute should be a CSS selector which will filter tags from the [c]src[/c] string. The other attributes define what should happen to the matches tags.

    The following example defines a [tag]filter[/tag] which uses [tag]{soup}strain[/tag] to add [c]class="lead"[/c] to the first paragraph of HTML:

    [code xml]
    <filter name="leadp" value="html">
        <doc>Add class="lead" to first paragraph</doc>
        <soup:strain src="html" select="p" max="1" let:class="'lead'" dst="leadp"/>
        <return value="html:leadp"/>
    </filter>
    [/code]

    """

    xmlns = namespaces.soup

    class Help:
        synopsis = """modify HTML with CSS selectors"""

    select = Attribute("CSS selector", type="text", default="*")
    src = Attribute("HTML document or fragment", type="expression", required=True)
    append = Attribute(
        "markup to append", type="expression", required=False, default=None
    )
    prepend = Attribute(
        "markup to prepend", type="expression", required=False, default=None
    )
    replace = Attribute(
        "markup to replace", type="expression", required=False, default=None
    )
    remove = Attribute("Remove matched element?", type="boolean", required=False)
    filter = Attribute(
        "Filter by attributes", type="function", required=False, default=None
    )
    _max = Attribute(
        "Maximum number of tags to match", type="integer", required=False, default=None
    )

    def logic(self, context):
        select, html = self.get_parameters(context, "select", "src")
        # An empty / whitespace-only document has nothing to strain.
        # (A second `if not html:` check used to follow; it was unreachable
        # because `not html.strip()` already covers the empty string.)
        if not html.strip():
            self.set_context(context, self.dst(context), "")
            return
        let_map = self.get_let_map(context)
        try:
            selector = CSSSelector(select)
        except Exception as e:
            self.throw("soup.bad-selector", text_type(e))
        # Wrap the fragment so multiple top-level siblings share one root.
        html_root = fragment_fromstring(html, create_parent=True)
        (append, replace, prepend, remove, _max) = self.get_parameters(
            context, "append", "replace", "prepend", "remove", "max"
        )
        if self.has_parameter("filter"):
            filter_func = self.filter(context).get_scope_callable(context)
        else:
            filter_func = None
        count = 0
        for el in selector(html_root):
            if filter_func is not None:
                if not filter_func(el.attrib):
                    continue
            if _max is not None and count >= _max:
                break
            count += 1
            if let_map:
                attrib = el.attrib
                for k, v in let_map.items():
                    if v is None:
                        # None removes the attribute; tolerate attributes
                        # that are already absent (plain `del` raised KeyError).
                        attrib.pop(k, None)
                    else:
                        attrib[k] = text_type(v)
            if append is not None:
                el.append(fragment_fromstring(append))
            if replace is not None:
                el.getparent().replace(el, fragment_fromstring(replace))
            if prepend is not None:
                el.insert(0, fragment_fromstring(prepend))
            if remove:
                el.getparent().remove(el)
        # Re-serialize the children of the synthetic root.
        result_markup = "".join(
            tostring(child).decode("utf-8") for child in html_root.getchildren()
        )
        self.set_context(context, self.dst(context), result_markup)
class Extract(DataSetter):
    """
    Extract tags from HTML with CSS selectors

    """

    xmlns = namespaces.soup

    class Help:
        synopsis = "extract tags from HTML"

    select = Attribute("CSS selector", type="text", default="*")
    src = Attribute("HTML document or fragment", type="expression", required=True)
    filter = Attribute(
        "Filter by attributes", type="function", required=False, default=None
    )
    _max = Attribute(
        "Maximum number of tags to match", type="integer", required=False, default=None
    )

    def logic(self, context):
        (select, html, filter, _max) = self.get_parameters(
            context, "select", "src", "filter", "max"
        )
        # Nothing to extract from an empty / whitespace-only document.
        if not html.strip():
            self.set_result(context, [])
            return
        try:
            selector = CSSSelector(select)
        except Exception as e:
            self.throw("soup.bad-selector", text_type(e))
        html_root = fromstring(html)
        filter_func = None
        if self.has_parameter("filter"):
            filter_func = self.filter(context).get_scope_callable(context)
        matched = []
        for el in selector(html_root):
            if filter_func is not None and not filter_func(el.attrib):
                continue
            # Stop once the optional match limit is reached.
            if _max is not None and len(matched) >= _max:
                break
            matched.append(el)
        self.set_result(context, matched)

    def set_result(self, context, elements):
        """Serialize the matched elements and store them in the context."""
        markup = "".join(tostring(el).decode("utf-8") for el in elements)
        self.set_context(context, self.dst(context), markup)
class ExtractList(Extract):
    """
    Extract a list of markup fragments from HTML

    """

    xmlns = namespaces.soup

    class Help:
        synopsis = "extract a list of markup fragments from HTML"

    def set_result(self, context, elements):
        """Store one serialized markup string per matched element."""
        fragments = [tostring(el).decode("utf-8") for el in elements]
        self.set_context(context, self.dst(context), fragments)
class ExtractAttrs(Extract):
    """
    Extract attributes from HTML tags

    """

    xmlns = namespaces.soup

    class Help:
        synopsis = "extract attributes from HTML tags"

    def set_result(self, context, elements):
        """Store the attribute mapping of each matched element."""
        attribs = [el.attrib for el in elements]
        self.set_context(context, self.dst(context), attribs)
class ExtractTags(Extract):
    """
    Extract tag objects from HTML.

    """

    xmlns = namespaces.soup

    class Help:
        synopsis = "extract elements from HTML tags"

    def set_result(self, context, elements):
        """Wrap each matched element in an HTMLTag and store the list."""
        tags = [HTMLTag(el) for el in elements]
        self.set_context(context, self.dst(context), tags)
class ExtractToc(DataSetter):
    """Extract nested headings from HTML fragment.

    Returns a list of ``{"level", "title", "children"}`` dicts nested by
    heading level (an h3 under the preceding h2, etc.).
    """

    xmlns = namespaces.soup

    src = Attribute("HTML document or fragment", type="expression", required=True)

    def get_value(self, context):
        html = self.src(context)
        html_root = fragment_fromstring(html, create_parent=True)
        selector = CSSSelector("h1,h2,h3,h4,h5,h6,h7")
        # Sentinel root at level 0 so the nesting walk always has a parent.
        root = [{"level": 0, "children": []}]
        for h in selector(html_root):
            # Skip headings with no direct text content.
            if not h.text:
                continue
            tag = h.tag
            # Fix: tags are native strings on Python 3, so only decode byte
            # strings (the title three lines below was already guarded this
            # way; the tag was unconditionally decoded and broke on py3).
            if not isinstance(tag, text_type):
                tag = tag.decode("utf-8")
            # "h3" -> 3
            level = int(tag[1:])
            title = h.text
            if not isinstance(title, text_type):
                title = title.decode("utf-8")
            # Descend to the deepest entry whose level is below this one.
            depth = root
            while depth and level > depth[-1]["level"]:
                depth = depth[-1]["children"]
            depth.append({"level": level, "title": title, "children": []})
        return root[0]["children"]
class AddIdToHeadings(DataSetter):
    """
    Adds automatically generated id attributes to headings.

    """

    xmlns = namespaces.soup

    src = Attribute(
        "HTML document or fragment", type="expression", missing=False, required=True
    )
    prefix = Attribute("Prefix to add to id", type="text", default="")

    def get_value(self, context):
        html = self.src(context)
        prefix = self.prefix(context)
        html_root = fragment_fromstring(html, create_parent=True)
        selector = CSSSelector("h1,h2,h3,h4,h5,h6,h7")
        for heading in selector(html_root):
            text = heading.text
            # Fix: a heading with no direct text previously crashed on
            # None.decode; skip it instead.
            if not text:
                continue
            # Fix: only decode byte strings -- on Python 3 lxml returns
            # native str, which has no .decode (same guard ExtractToc uses).
            if not isinstance(text, text_type):
                text = text.decode("utf-8")
            heading.attrib["id"] = "{}{}".format(prefix, slugify(text))
        result_markup = "".join(
            tostring(child).decode("utf-8") for child in html_root.getchildren()
        )
        return result_markup
class ExtractData(Extract):
    """
    Extract HTML5 data- attributes

    """

    xmlns = namespaces.soup

    raw = Attribute(
        "return raw data (without attempting JSON decode)?",
        type="boolean",
        default=False,
    )

    class Help:
        synopsis = "extract HTML5 data attributes from HTML"

    def set_result(self, context, elements):
        """Store one dict of ``data-*`` attributes per matched element.

        Keys are the attribute names with the ``data-`` prefix removed.
        Values are JSON-decoded unless ``raw`` is set; values that are not
        valid JSON are kept as the original string.
        """
        raw = self.raw(context)

        def make_data(v):
            # Fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only JSON decode failures
            # should fall back to the raw string.
            try:
                return json.loads(v)
            except ValueError:
                return v

        # Raw mode keeps values untouched; otherwise JSON-decode each one.
        # (Previously the whole comprehension was duplicated per branch.)
        convert = (lambda v: v) if raw else make_data
        all_data = [
            {
                k.partition("-")[-1]: convert(v)
                for k, v in el.attrib.items()
                if k.startswith("data-")
            }
            for el in elements
        ]
        self.set_context(context, self.dst(context), all_data)
|
en
| 0.586274
|
Represents an HTML tag. Manipulate HTML with CSS selectors. The [c]select[/c] attribute should be a CSS selector which will filter tags from the [c]src[/c] string. The other attributes define what should happen to the matches tags. The following example defines a [tag]filter[/tag] which uses [tag]{soup}strain[/tag] to add [c]class="lead"[/c] to the first paragraph of HTML: [code xml] <filter name="leadp" value="html"> <doc>Add class="lead" to first paragraph</doc> <soup:strain src="html" select="p" max="1" let:class="'lead'" dst="leadp"/> <return value="html:leadp"/> </filter> [/code] modify HTML with CSS selectors Extract tags from HTML with CSS selectors Extract a list of markup fragments from HTML Extract attributes from HTML tags Extract tag objects from HTML. Extract nested headings from HTML fragment. Adds automatically generated id attributes to headings. Extract HTML5 data- attributes
| 3.013416
| 3
|
tests/integration/test_charm.py
|
canonical/mysql-router-k8s-operator
| 0
|
6627047
|
#!/usr/bin/env python3
# Copyright 2022 Canonical Ltd.
# See LICENSE file for licensing details.
import logging
import socket
from pathlib import Path
import pytest
import yaml
from pytest_operator.plugin import OpsTest
# Module-level fixtures shared by all integration tests in this file.
logger = logging.getLogger(__name__)
# Charm metadata is read relative to the CWD; tests must run from the repo root.
METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
APP_NAME = METADATA["name"]
# Port probed by the connectivity test.
# NOTE(review): MySQL Router conventionally listens on 6446/3306; confirm
# that 3600 is the intended port and not a transposition of 3306.
DEFAULT_PORT = 3600
@pytest.mark.abort_on_fail
async def test_build_and_deploy(ops_test: OpsTest):
    """Build the charm-under-test and deploy it together with related charms.

    Assert on the unit status before any relations/configurations take place.
    """
    # Build the charm from the local source tree.
    built_charm = await ops_test.build_charm(".")
    oci_resources = {
        "mysql-router-image": METADATA["resources"]["mysql-router-image"]["upstream-source"]
    }
    await ops_test.model.deploy(
        built_charm,
        resources=oci_resources,
        application_name=APP_NAME,
    )
    # With no relations yet, the charm is expected to settle in "waiting".
    await ops_test.model.wait_for_idle(
        apps=[APP_NAME],
        status="waiting",
        raise_on_blocked=True,
        timeout=1000,
    )
    first_unit = ops_test.model.applications[APP_NAME].units[0]
    assert first_unit.workload_status == "waiting"
@pytest.mark.abort_on_fail
async def test_application_is_up(ops_test: OpsTest):
    """Test if the application is up (TCP port reachable on the unit)."""
    status = await ops_test.model.get_status()  # noqa: F821
    address = status["applications"][APP_NAME]["units"][f"{APP_NAME}/0"]["address"]
    target = (address, DEFAULT_PORT)
    logger.info("Querying app open port at %s:%s", address, DEFAULT_PORT)
    # Fix: use the socket as a context manager so it is closed even when
    # connect_ex raises (the previous explicit close() leaked on error).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as test_socket:
        # connect_ex returns 0 on success, an errno otherwise.
        port_status = test_socket.connect_ex(target)
    assert port_status == 0
|
#!/usr/bin/env python3
# Copyright 2022 Canonical Ltd.
# See LICENSE file for licensing details.
import logging
import socket
from pathlib import Path
import pytest
import yaml
from pytest_operator.plugin import OpsTest
logger = logging.getLogger(__name__)
METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
APP_NAME = METADATA["name"]
DEFAULT_PORT = 3600
@pytest.mark.abort_on_fail
async def test_build_and_deploy(ops_test: OpsTest):
"""Build the charm-under-test and deploy it together with related charms.
Assert on the unit status before any relations/configurations take place.
"""
# build and deploy charm from local source folder
charm = await ops_test.build_charm(".")
resources = {
"mysql-router-image": METADATA["resources"]["mysql-router-image"]["upstream-source"]
}
await ops_test.model.deploy(
charm,
resources=resources,
application_name=APP_NAME,
)
await ops_test.model.wait_for_idle(
apps=[APP_NAME],
status="waiting",
raise_on_blocked=True,
timeout=1000,
)
assert ops_test.model.applications[APP_NAME].units[0].workload_status == "waiting"
@pytest.mark.abort_on_fail
async def test_application_is_up(ops_test: OpsTest):
"""Test if the application is up."""
status = await ops_test.model.get_status() # noqa: F821
address = status["applications"][APP_NAME]["units"][f"{APP_NAME}/0"]["address"]
test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
target = (address, DEFAULT_PORT)
logger.info("Querying app open port at %s:%s", address, DEFAULT_PORT)
port_status = test_socket.connect_ex(target)
test_socket.close()
assert port_status == 0
|
en
| 0.772321
|
#!/usr/bin/env python3 # Copyright 2022 Canonical Ltd. # See LICENSE file for licensing details. Build the charm-under-test and deploy it together with related charms. Assert on the unit status before any relations/configurations take place. # build and deploy charm from local source folder Test if the application is up. # noqa: F821
| 1.867218
| 2
|
features/steps/login.py
|
nmfzone/django-modern-boilerplate
| 2
|
6627048
|
from behave import *
import crayons
@given('a web browser is on the Login page')
@when('a web browser is on the Login page')
def step_impl(context):
    """Navigate the behave-managed browser to the site's login page."""
    relative = '/login'.lstrip('/')
    context.browser.visit(context.base_url + '/' + relative)
def console(text, bold=False):
    """Print *text* to the terminal in yellow, optionally bold."""
    styled = crayons.yellow(text, bold=bold)
    print(styled)
|
from behave import *
import crayons
@given('a web browser is on the Login page')
@when('a web browser is on the Login page')
def step_impl(context):
path = '/login'.lstrip('/')
context.browser.visit(context.base_url + '/' + path)
def console(text, bold=False):
print(crayons.yellow(text, bold=bold))
|
none
| 1
| 2.401097
| 2
|
|
regtests/c++/stack_mode.py
|
secureosv/pythia
| 17
|
6627049
|
<gh_stars>10-100
# NOTE(review): this is Pythia (a typed, Python-like dialect that transpiles
# to C++), not standard Python -- `let`, fixed-size array types like [10]int,
# `with stack:` and inline C++ templates are dialect constructs. Code is kept
# byte-identical; only comments are added.
'''
stack memory mode
'''
# Everything declared in this block is stack-allocated (dialect construct).
with stack:
let garr : [10]int
grange = range(3)
class Bar:
def __init__(self, value:string):
self.value = value
class Foo:
def __init__(self, ok:bool):
self.ok = ok
let self.arr1: [4]Bar
class Sub(Foo):
def __init__(self, arr2:[4]Bar):
#self.arr2[...] = arr2 ## should work but fails
#self.arr2[...] = addr(arr2[0]) ## should also but work fails
## workaround, copy data
self.arr2[:] = arr2
let Foos : [10]Foo
def test_foos_vec( arr:[]Foo ):
assert len(arr)==10
for item in arr:
assert item is None
## in stack mode classes `act-like None`
assert item.ok is False
def test_foos_fixedarr( arr:[10]Foo):
assert len(arr)==10
for item in arr:
assert item.ok is False
def stack_test():
let bars : [4]Bar
bars[0] = Bar('hello')
bars[1] = Bar('world')
sub = Sub(bars)
print bars[0].value
print sub.arr2[0].value
test_foos_fixedarr(Foos)
# Inline C++ template: builds a std::vector spanning the fixed array.
with stdvec as 'std::vector<Foo>(std::begin(%s), std::end(%s))':
vec = stdvec(Foos, Foos)
test_foos_vec(addr(vec))
let arr : [5]int
for i in garr:
print i
assert i==0
print 'global array iter ok'
for i in arr:
print i
assert i==0
print 'local array iter ok'
j = 0
for i in grange:
print i
assert i==j
j+=1
print 'loop over global range ok'
for f in Foos:
assert f is None
print 'foos initalized to None ok'
comp = [ Bar('hello') for i in range(10) ]
assert len(comp)==10
comp.append( Bar('world') )
assert len(comp)==11
s = []Bar()
s.append( Bar('xxx') )
assert len(s)==1
# Entry point: re-checks range iteration, then runs the stack tests.
def main():
j = 0
for i in grange:
print i
assert i==j
j+=1
stack_test()
|
'''
stack memory mode
'''
with stack:
let garr : [10]int
grange = range(3)
class Bar:
def __init__(self, value:string):
self.value = value
class Foo:
def __init__(self, ok:bool):
self.ok = ok
let self.arr1: [4]Bar
class Sub(Foo):
def __init__(self, arr2:[4]Bar):
#self.arr2[...] = arr2 ## should work but fails
#self.arr2[...] = addr(arr2[0]) ## should also but work fails
## workaround, copy data
self.arr2[:] = arr2
let Foos : [10]Foo
def test_foos_vec( arr:[]Foo ):
assert len(arr)==10
for item in arr:
assert item is None
## in stack mode classes `act-like None`
assert item.ok is False
def test_foos_fixedarr( arr:[10]Foo):
assert len(arr)==10
for item in arr:
assert item.ok is False
def stack_test():
let bars : [4]Bar
bars[0] = Bar('hello')
bars[1] = Bar('world')
sub = Sub(bars)
print bars[0].value
print sub.arr2[0].value
test_foos_fixedarr(Foos)
with stdvec as 'std::vector<Foo>(std::begin(%s), std::end(%s))':
vec = stdvec(Foos, Foos)
test_foos_vec(addr(vec))
let arr : [5]int
for i in garr:
print i
assert i==0
print 'global array iter ok'
for i in arr:
print i
assert i==0
print 'local array iter ok'
j = 0
for i in grange:
print i
assert i==j
j+=1
print 'loop over global range ok'
for f in Foos:
assert f is None
print 'foos initalized to None ok'
comp = [ Bar('hello') for i in range(10) ]
assert len(comp)==10
comp.append( Bar('world') )
assert len(comp)==11
s = []Bar()
s.append( Bar('xxx') )
assert len(s)==1
def main():
j = 0
for i in grange:
print i
assert i==j
j+=1
stack_test()
|
en
| 0.828064
|
stack memory mode #self.arr2[...] = arr2 ## should work but fails #self.arr2[...] = addr(arr2[0]) ## should also but work fails ## workaround, copy data ## in stack mode classes `act-like None`
| 3.058431
| 3
|
extractOptimizedCoords.py
|
roverman/gaussian_log_file_converter
| 6
|
6627050
|
import sys
import re
# Python 2 script (print statements). Validates the command line: an input
# Gaussian .log file plus the desired output format.
# NOTE(review): the usage string spells "pyton" (typo for "python") -- left
# unchanged here because it is runtime output text, not a comment.
if len(sys.argv) < 3 :
print "Usage: pyton extractOptimizedCoords.py input.log xyz|gjf"
print "The output file name will be input[_optimized]_out.xyz|gjf"
print "If optimization failed, the coordinates for the lowest energy structure will be used."
exit()
finput = sys.argv[1]
fformat = sys.argv[2]
# Only .xyz and .gjf outputs are supported.
if fformat not in ["xyz", "gjf"] :
print "The output file format has to be either xyz or gjf"
exit()
def getEnergy(structure) :
    """Return the SCF energy parsed from a Gaussian output section.

    Scans *structure* line by line for the " SCF Done:" marker and parses
    the number following the first "=".  Returns 1000.0 (an impossibly
    high sentinel energy) when no SCF line is present.
    """
    for row in structure.split("\n"):
        if row.startswith(" SCF Done:"):
            after_equals = row.split("=")[1].strip()
            return float(re.split(" +", after_equals)[0])
    return 1000.0
# Header text: everything before the first "GradGrad" delimiter
# (contains charge/multiplicity, route section, etc.).
infoBlock = ""
optimized = False
optimized_structure = ""
# State machine over the log: " GradGrad" rows delimit alternating
# structure sections and optimizer-status sections.
with open(finput, "r") as fin :
isStructure = True
isInfo = True
structures = []
currentStructure = ""
for line in fin :
if line.startswith(" GradGrad") :
if isInfo :
isInfo = False
# Close out the structure accumulated so far, keyed by its energy.
if currentStructure != "" :
structures.append((getEnergy(currentStructure), currentStructure))
currentStructure = ""
isStructure = not isStructure
elif isInfo :
infoBlock += line
elif isStructure :
currentStructure += line
else :
# Optimizer-status section: "Optimized" marks convergence.
if line.find("Optimized") != -1 :
optimized = True
if optimized :
# Converged: the last accumulated structure is the optimized one.
optimized_structure = currentStructure
else :
# Not converged: fall back to the lowest-energy structure seen.
if currentStructure != "" :
structures.append((getEnergy(currentStructure), currentStructure))
structures = sorted(structures, key=lambda item : item[0])
optimized_structure = structures[0][1]
def findInList(dataList, target) :
    """Return the index of the first entry containing *target*, or -1."""
    for index, entry in enumerate(dataList):
        if entry.find(target) != -1:
            return index
    return -1
def getCoordinates(dataList) :
    """Slice out the atom table following a "Standard orientation" header.

    The table starts 5 lines after the header (skipping the column-heading
    and rule lines) and ends at the next "-----" separator row.
    """
    start = findInList(dataList, "Standard orientation")
    table = dataList[start + 5:]
    end = findInList(table, "-----")
    return table[:end]
def getChargeAndMultiplicity(infoBlock) :
    """Parse ``(charge, multiplicity)`` from the Gaussian header block.

    Looks for the " Charge = " line; returns (-1, -1) when absent.
    """
    for row in infoBlock.split("\n"):
        if row.startswith(" Charge = "):
            fields = re.split(" +", row.strip())
            return (int(fields[2]), int(fields[5]))
    return (-1, -1)
# Map of atomic number (as a string, matching the Gaussian output column)
# to element symbol, Z = 1..118.
code = {"1" : "H", "2" : "He", "3" : "Li", "4" : "Be", "5" : "B", \
"6" : "C", "7" : "N", "8" : "O", "9" : "F", "10" : "Ne", \
"11" : "Na" , "12" : "Mg" , "13" : "Al" , "14" : "Si" , "15" : "P", \
"16" : "S" , "17" : "Cl" , "18" : "Ar" , "19" : "K" , "20" : "Ca", \
"21" : "Sc" , "22" : "Ti" , "23" : "V" , "24" : "Cr" , "25" : "Mn", \
"26" : "Fe" , "27" : "Co" , "28" : "Ni" , "29" : "Cu" , "30" : "Zn", \
"31" : "Ga" , "32" : "Ge" , "33" : "As" , "34" : "Se" , "35" : "Br", \
"36" : "Kr" , "37" : "Rb" , "38" : "Sr" , "39" : "Y" , "40" : "Zr", \
"41" : "Nb" , "42" : "Mo" , "43" : "Tc" , "44" : "Ru" , "45" : "Rh", \
"46" : "Pd" , "47" : "Ag" , "48" : "Cd" , "49" : "In" , "50" : "Sn", \
"51" : "Sb" , "52" : "Te" , "53" : "I" , "54" : "Xe" , "55" : "Cs", \
"56" : "Ba" , "57" : "La" , "58" : "Ce" , "59" : "Pr" , "60" : "Nd", \
"61" : "Pm" , "62" : "Sm" , "63" : "Eu" , "64" : "Gd" , "65" : "Tb", \
"66" : "Dy" , "67" : "Ho" , "68" : "Er" , "69" : "Tm" , "70" : "Yb", \
"71" : "Lu" , "72" : "Hf" , "73" : "Ta" , "74" : "W" , "75" : "Re", \
"76" : "Os" , "77" : "Ir" , "78" : "Pt" , "79" : "Au" , "80" : "Hg", \
"81" : "Tl" , "82" : "Pb" , "83" : "Bi" , "84" : "Po" , "85" : "At", \
"86" : "Rn" , "87" : "Fr" , "88" : "Ra" , "89" : "Ac" , "90" : "Th", \
"91" : "Pa" , "92" : "U" , "93" : "Np" , "94" : "Pu" , "95" : "Am", \
"96" : "Cm" , "97" : "Bk" , "98" : "Cf" , "99" : "Es" ,"100" : "Fm", \
"101": "Md" ,"102" : "No" ,"103" : "Lr" ,"104" : "Rf" ,"105" : "Db", \
"106": "Sg" ,"107" : "Bh" ,"108" : "Hs" ,"109" : "Mt" ,"110" : "Ds", \
"111": "Rg" ,"112" : "Uub","113" : "Uut","114" : "Uuq","115" : "Uup", \
"116": "Uuh","117" : "Uus","118" : "Uuo"}
# Derive the output stem from the input file name.
# Fix: `finput.strip(".log")` stripped any of the characters {.,l,o,g} from
# BOTH ends (e.g. "glyoxal.log" -> "yoxa"); remove the suffix explicitly.
if finput.endswith(".log"):
    prefix = finput[: -len(".log")]
else:
    prefix = finput
if optimized:
    chk = prefix + "_optimized_out.chk"
    foutput = prefix + "_optimized_out." + fformat
else:
    chk = prefix + "_out.chk"
    foutput = prefix + "_out." + fformat
with open(foutput, "w") as fout:
    dataList = optimized_structure.split("\n")
    atoms = getCoordinates(dataList)
    # print format specific headers
    if fformat == "xyz":
        # xyz: atom count, then a blank comment line.
        fout.write(str(len(atoms)) + "\n\n")
    else:
        # gjf: link0/route header, title, then charge and multiplicity.
        fout.write("%mem=\n%nprocshared=\n%chk=" + chk + "\n# \n\nComplex " + prefix + "\n\n")
        charge, multiplicity = getChargeAndMultiplicity(infoBlock)
        fout.write(str(charge) + " " + str(multiplicity) + "\n")
    # Each Gaussian table row: center#, atomic#, type, X, Y, Z.
    for atom in atoms:
        arr = atom.split()
        symbol = code.get(arr[1], 'X')
        fout.write(" %s %16.7f %16.7f %16.7f\n" % (symbol, float(arr[3]), float(arr[4]), float(arr[5])))
    if fformat == "gjf":
        # Gaussian input files must end with a blank line.
        fout.write("\n")
|
import sys
import re
if len(sys.argv) < 3 :
print "Usage: pyton extractOptimizedCoords.py input.log xyz|gjf"
print "The output file name will be input[_optimized]_out.xyz|gjf"
print "If optimization failed, the coordinates for the lowest energy structure will be used."
exit()
finput = sys.argv[1]
fformat = sys.argv[2]
if fformat not in ["xyz", "gjf"] :
print "The output file format has to be either xyz or gjf"
exit()
def getEnergy(structure) :
for line in structure.split("\n") :
if line.startswith(" SCF Done:") :
arr = line.split("=")
return float(re.split(" +", arr[1].strip())[0])
return 1000.0
infoBlock = ""
optimized = False
optimized_structure = ""
with open(finput, "r") as fin :
isStructure = True
isInfo = True
structures = []
currentStructure = ""
for line in fin :
if line.startswith(" GradGrad") :
if isInfo :
isInfo = False
if currentStructure != "" :
structures.append((getEnergy(currentStructure), currentStructure))
currentStructure = ""
isStructure = not isStructure
elif isInfo :
infoBlock += line
elif isStructure :
currentStructure += line
else :
if line.find("Optimized") != -1 :
optimized = True
if optimized :
optimized_structure = currentStructure
else :
if currentStructure != "" :
structures.append((getEnergy(currentStructure), currentStructure))
structures = sorted(structures, key=lambda item : item[0])
optimized_structure = structures[0][1]
def findInList(dataList, target) :
for i in range(0, len(dataList)) :
if dataList[i].find(target) != -1 :
return i
return -1
def getCoordinates(dataList) :
start = findInList(dataList, "Standard orientation")
dataList = dataList[start + 5 : ]
dataList = dataList[: findInList(dataList, "-----")]
return dataList
def getChargeAndMultiplicity(infoBlock) :
lines = infoBlock.split("\n")
for line in lines :
if line.startswith(" Charge = ") :
arr = re.split(" +", line.strip())
return (int(arr[2]), int(arr[5]))
return (-1, -1)
code = {"1" : "H", "2" : "He", "3" : "Li", "4" : "Be", "5" : "B", \
"6" : "C", "7" : "N", "8" : "O", "9" : "F", "10" : "Ne", \
"11" : "Na" , "12" : "Mg" , "13" : "Al" , "14" : "Si" , "15" : "P", \
"16" : "S" , "17" : "Cl" , "18" : "Ar" , "19" : "K" , "20" : "Ca", \
"21" : "Sc" , "22" : "Ti" , "23" : "V" , "24" : "Cr" , "25" : "Mn", \
"26" : "Fe" , "27" : "Co" , "28" : "Ni" , "29" : "Cu" , "30" : "Zn", \
"31" : "Ga" , "32" : "Ge" , "33" : "As" , "34" : "Se" , "35" : "Br", \
"36" : "Kr" , "37" : "Rb" , "38" : "Sr" , "39" : "Y" , "40" : "Zr", \
"41" : "Nb" , "42" : "Mo" , "43" : "Tc" , "44" : "Ru" , "45" : "Rh", \
"46" : "Pd" , "47" : "Ag" , "48" : "Cd" , "49" : "In" , "50" : "Sn", \
"51" : "Sb" , "52" : "Te" , "53" : "I" , "54" : "Xe" , "55" : "Cs", \
"56" : "Ba" , "57" : "La" , "58" : "Ce" , "59" : "Pr" , "60" : "Nd", \
"61" : "Pm" , "62" : "Sm" , "63" : "Eu" , "64" : "Gd" , "65" : "Tb", \
"66" : "Dy" , "67" : "Ho" , "68" : "Er" , "69" : "Tm" , "70" : "Yb", \
"71" : "Lu" , "72" : "Hf" , "73" : "Ta" , "74" : "W" , "75" : "Re", \
"76" : "Os" , "77" : "Ir" , "78" : "Pt" , "79" : "Au" , "80" : "Hg", \
"81" : "Tl" , "82" : "Pb" , "83" : "Bi" , "84" : "Po" , "85" : "At", \
"86" : "Rn" , "87" : "Fr" , "88" : "Ra" , "89" : "Ac" , "90" : "Th", \
"91" : "Pa" , "92" : "U" , "93" : "Np" , "94" : "Pu" , "95" : "Am", \
"96" : "Cm" , "97" : "Bk" , "98" : "Cf" , "99" : "Es" ,"100" : "Fm", \
"101": "Md" ,"102" : "No" ,"103" : "Lr" ,"104" : "Rf" ,"105" : "Db", \
"106": "Sg" ,"107" : "Bh" ,"108" : "Hs" ,"109" : "Mt" ,"110" : "Ds", \
"111": "Rg" ,"112" : "Uub","113" : "Uut","114" : "Uuq","115" : "Uup", \
"116": "Uuh","117" : "Uus","118" : "Uuo"}
prefix = finput.strip(".log")
foutput= ""
chk = ""
if optimized :
chk = prefix + "_optimized_out.chk"
foutput = prefix + "_optimized_out." + fformat
else :
chk = prefix + "_out.chk"
foutput = prefix + "_out." + fformat
with open(foutput, "w") as fout :
dataList = optimized_structure.split("\n")
atoms = getCoordinates(dataList)
# print format specific headers
if fformat == "xyz" :
fout.write(str(len(atoms)) + "\n\n")
else :
fout.write("%mem=\n%nprocshared=\n%chk=" + chk + "\n# \n\nComplex " + prefix + "\n\n")
charge, multiplicity = getChargeAndMultiplicity(infoBlock)
fout.write(str(charge) + " " + str(multiplicity) + "\n")
for atom in atoms :
arr = atom.split()
symbol = code.get(arr[1], 'X')
fout.write(" %s %16.7f %16.7f %16.7f\n" % (symbol,float(arr[3]),float(arr[4]),float(arr[5])))
if fformat == "gjf" :
fout.write("\n")
|
en
| 0.302187
|
# print format specific headers # \n\nComplex " + prefix + "\n\n")
| 2.821995
| 3
|