hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3795dc060d6509c41d3fbbb3beb2a1846b4540 | 4,040 | py | Python | sdk/python/pulumi_azure_nextgen/devices/latest/list_iot_dps_resource_keys_for_key_name.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/devices/latest/list_iot_dps_resource_keys_for_key_name.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/devices/latest/list_iot_dps_resource_keys_for_key_name.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListIotDpsResourceKeysForKeyNameResult',
'AwaitableListIotDpsResourceKeysForKeyNameResult',
'list_iot_dps_resource_keys_for_key_name',
]
@pulumi.output_type
class ListIotDpsResourceKeysForKeyNameResult:
    """
    Description of the shared access key.

    Output type returned by the listIotDpsResourceKeysForKeyName invoke;
    holds one shared-access-policy entry of an IoT DPS instance.
    """
    # NOTE: Pulumi codegen names the instance parameter `__self__`, and the
    # isinstance guards are intentionally skipped for falsy values (None, '').
    def __init__(__self__, key_name=None, primary_key=None, rights=None, secondary_key=None):
        if key_name and not isinstance(key_name, str):
            raise TypeError("Expected argument 'key_name' to be a str")
        pulumi.set(__self__, "key_name", key_name)
        if primary_key and not isinstance(primary_key, str):
            raise TypeError("Expected argument 'primary_key' to be a str")
        pulumi.set(__self__, "primary_key", primary_key)
        if rights and not isinstance(rights, str):
            raise TypeError("Expected argument 'rights' to be a str")
        pulumi.set(__self__, "rights", rights)
        if secondary_key and not isinstance(secondary_key, str):
            raise TypeError("Expected argument 'secondary_key' to be a str")
        pulumi.set(__self__, "secondary_key", secondary_key)

    @property
    @pulumi.getter(name="keyName")
    def key_name(self) -> str:
        """
        Name of the key.
        """
        return pulumi.get(self, "key_name")

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> Optional[str]:
        """
        Primary SAS key value.
        """
        return pulumi.get(self, "primary_key")

    @property
    @pulumi.getter
    def rights(self) -> str:
        """
        Rights that this key has.
        """
        return pulumi.get(self, "rights")

    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> Optional[str]:
        """
        Secondary SAS key value.
        """
        return pulumi.get(self, "secondary_key")
class AwaitableListIotDpsResourceKeysForKeyNameResult(ListIotDpsResourceKeysForKeyNameResult):
    """Awaitable wrapper so the (already resolved) result can be `await`-ed."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this method a generator, which is what
        # the await protocol requires; it immediately returns a plain copy.
        if False:
            yield self
        return ListIotDpsResourceKeysForKeyNameResult(
            key_name=self.key_name,
            primary_key=self.primary_key,
            rights=self.rights,
            secondary_key=self.secondary_key)
def list_iot_dps_resource_keys_for_key_name(key_name: Optional[str] = None,
                                            provisioning_service_name: Optional[str] = None,
                                            resource_group_name: Optional[str] = None,
                                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListIotDpsResourceKeysForKeyNameResult:
    """
    List the primary/secondary keys for one named shared-access policy of an
    Azure IoT Device Provisioning Service instance.

    :param str key_name: Logical key name to get key-values for.
    :param str provisioning_service_name: Name of the provisioning service.
    :param str resource_group_name: The name of the resource group that contains the provisioning service.
    """
    __args__ = dict()
    __args__['keyName'] = key_name
    __args__['provisioningServiceName'] = provisioning_service_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; `.value` unwraps the raw invoke result.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:devices/latest:listIotDpsResourceKeysForKeyName', __args__, opts=opts, typ=ListIotDpsResourceKeysForKeyNameResult).value

    return AwaitableListIotDpsResourceKeysForKeyNameResult(
        key_name=__ret__.key_name,
        primary_key=__ret__.primary_key,
        rights=__ret__.rights,
        secondary_key=__ret__.secondary_key)
| 37.757009 | 171 | 0.67005 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListIotDpsResourceKeysForKeyNameResult',
'AwaitableListIotDpsResourceKeysForKeyNameResult',
'list_iot_dps_resource_keys_for_key_name',
]
@pulumi.output_type
class ListIotDpsResourceKeysForKeyNameResult:
def __init__(__self__, key_name=None, primary_key=None, rights=None, secondary_key=None):
if key_name and not isinstance(key_name, str):
raise TypeError("Expected argument 'key_name' to be a str")
pulumi.set(__self__, "key_name", key_name)
if primary_key and not isinstance(primary_key, str):
raise TypeError("Expected argument 'primary_key' to be a str")
pulumi.set(__self__, "primary_key", primary_key)
if rights and not isinstance(rights, str):
raise TypeError("Expected argument 'rights' to be a str")
pulumi.set(__self__, "rights", rights)
if secondary_key and not isinstance(secondary_key, str):
raise TypeError("Expected argument 'secondary_key' to be a str")
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="keyName")
def key_name(self) -> str:
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> Optional[str]:
return pulumi.get(self, "primary_key")
@property
@pulumi.getter
def rights(self) -> str:
return pulumi.get(self, "rights")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> Optional[str]:
return pulumi.get(self, "secondary_key")
class AwaitableListIotDpsResourceKeysForKeyNameResult(ListIotDpsResourceKeysForKeyNameResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListIotDpsResourceKeysForKeyNameResult(
key_name=self.key_name,
primary_key=self.primary_key,
rights=self.rights,
secondary_key=self.secondary_key)
def list_iot_dps_resource_keys_for_key_name(key_name: Optional[str] = None,
provisioning_service_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListIotDpsResourceKeysForKeyNameResult:
__args__ = dict()
__args__['keyName'] = key_name
__args__['provisioningServiceName'] = provisioning_service_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:devices/latest:listIotDpsResourceKeysForKeyName', __args__, opts=opts, typ=ListIotDpsResourceKeysForKeyNameResult).value
return AwaitableListIotDpsResourceKeysForKeyNameResult(
key_name=__ret__.key_name,
primary_key=__ret__.primary_key,
rights=__ret__.rights,
secondary_key=__ret__.secondary_key)
| true | true |
1c37967573dfccc4488c0576fd333668c6ce05f2 | 14,794 | py | Python | external/logger.py | yunshengtian/ppo-mujoco | 1989bc5491d2abc3d015d0ec81d34ea166c3352b | [
"MIT"
] | 1 | 2021-01-27T08:59:31.000Z | 2021-01-27T08:59:31.000Z | external/logger.py | yunshengtian/ppo-mujoco | 1989bc5491d2abc3d015d0ec81d34ea166c3352b | [
"MIT"
] | null | null | null | external/logger.py | yunshengtian/ppo-mujoco | 1989bc5491d2abc3d015d0ec81d34ea166c3352b | [
"MIT"
] | 1 | 2021-01-20T07:56:54.000Z | 2021-01-20T07:56:54.000Z | import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
from collections import defaultdict
from contextlib import contextmanager
# Verbosity thresholds for log(); numerically aligned with the stdlib
# `logging` levels (10/20/30/40). DISABLED suppresses everything below it.
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
    """Interface for output sinks that accept a dict of key/value diagnostics."""
    def writekvs(self, kvs):
        raise NotImplementedError
class SeqWriter(object):
    """Interface for output sinks that accept a sequence of strings."""
    def writeseq(self, seq):
        raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
    """Writes key/value diagnostics as an ASCII table and raw sequences as
    space-separated text, to a path or an already-open stream."""

    def __init__(self, filename_or_file):
        """
        :param filename_or_file: path to open for writing, or an already-open
            writable file-like object (e.g. ``sys.stdout``).
        """
        if isinstance(filename_or_file, str):
            self.file = open(filename_or_file, 'wt')
            self.own_file = True  # we opened it, so close() must close it
        else:
            # Bug fix: this sink only ever writes, so require a 'write'
            # method. The previous check for 'read' rejected valid
            # write-only streams and accepted read-only ones.
            assert hasattr(filename_or_file, 'write'), 'expected file or str, got %s'%filename_or_file
            self.file = filename_or_file
            self.own_file = False

    def writekvs(self, kvs):
        """Render kvs as a bordered '| key | value |' table, sorted by key."""
        # Create strings for printing
        key2str = {}
        for (key, val) in sorted(kvs.items()):
            if hasattr(val, '__float__'):
                # Numeric values get a compact, left-justified %g rendering.
                valstr = '%-8.3g' % val
            else:
                valstr = str(val)
            key2str[self._truncate(key)] = self._truncate(valstr)

        # Find max widths
        if len(key2str) == 0:
            print('WARNING: tried to write empty key-value dict')
            return
        else:
            keywidth = max(map(len, key2str.keys()))
            valwidth = max(map(len, key2str.values()))

        # Write out the data
        dashes = '-' * (keywidth + valwidth + 7)
        lines = [dashes]
        for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
            lines.append('| %s%s | %s%s |' % (
                key,
                ' ' * (keywidth - len(key)),
                val,
                ' ' * (valwidth - len(val)),
            ))
        lines.append(dashes)
        self.file.write('\n'.join(lines) + '\n')

        # Flush the output to the file
        self.file.flush()

    def _truncate(self, s):
        # Keep table cells readable: clip long strings to 30 chars + ellipsis.
        maxlen = 30
        return s[:maxlen-3] + '...' if len(s) > maxlen else s

    def writeseq(self, seq):
        """Write the elements of seq separated by single spaces, newline-terminated."""
        seq = list(seq)
        for (i, elem) in enumerate(seq):
            self.file.write(elem)
            if i < len(seq) - 1: # add space unless this is the last one
                self.file.write(' ')
        self.file.write('\n')
        self.file.flush()

    def close(self):
        """Close the underlying file only if this writer opened it."""
        if self.own_file:
            self.file.close()
class JSONOutputFormat(KVWriter):
    """Serializes each writekvs() call as a single JSON object per line (JSONL)."""

    def __init__(self, filename):
        self.file = open(filename, 'wt')

    def writekvs(self, kvs):
        # Numpy scalars expose a .dtype attribute and are not JSON
        # serializable; coerce them to plain floats in place (kvs is
        # deliberately mutated, matching the historical behavior).
        for key in sorted(kvs):
            value = kvs[key]
            if hasattr(value, 'dtype'):
                kvs[key] = float(value)
        self.file.write(json.dumps(kvs) + '\n')
        self.file.flush()

    def close(self):
        self.file.close()
class CSVOutputFormat(KVWriter):
    """
    Appends one CSV row per writekvs() call.

    The key set may grow over time: when a new key appears, the whole file is
    rewritten in place with an extended header and the old rows padded with
    empty trailing columns.
    """
    def __init__(self, filename):
        # 'w+t' because writekvs() must re-read the file when the header
        # has to be extended with newly seen keys.
        self.file = open(filename, 'w+t')
        self.keys = []
        self.sep = ','
    def writekvs(self, kvs):
        # Add our current row to the history
        extra_keys = list(kvs.keys() - self.keys)
        extra_keys.sort()
        if extra_keys:
            self.keys.extend(extra_keys)
            self.file.seek(0)
            lines = self.file.readlines()
            self.file.seek(0)
            # Rewrite the header line with the enlarged key set ...
            for (i, k) in enumerate(self.keys):
                if i > 0:
                    self.file.write(',')
                self.file.write(k)
            self.file.write('\n')
            # ... then re-emit every previous data row (lines[0] was the old
            # header) padded with one empty column per newly added key.
            # NOTE(review): keys/values containing the separator are not
            # escaped — verify callers never log strings with commas.
            for line in lines[1:]:
                self.file.write(line[:-1])
                self.file.write(self.sep * len(extra_keys))
                self.file.write('\n')
        # Emit the current row; missing keys become empty cells.
        for (i, k) in enumerate(self.keys):
            if i > 0:
                self.file.write(',')
            v = kvs.get(k)
            if v is not None:
                self.file.write(str(v))
        self.file.write('\n')
        self.file.flush()
    def close(self):
        self.file.close()
class TensorBoardOutputFormat(KVWriter):
    """
    Dumps key/value pairs into TensorBoard's numeric format.
    """
    def __init__(self, dir):
        os.makedirs(dir, exist_ok=True)
        self.dir = dir
        self.step = 1  # monotonically increasing; one step per writekvs() call
        prefix = 'events'
        path = osp.join(osp.abspath(dir), prefix)
        # Imports are done lazily so tensorflow is only required when this
        # output format is actually selected.
        # NOTE(review): pywrap_tensorflow.EventsWriter is a private TF1 API;
        # confirm the pinned tensorflow version still provides it.
        import tensorflow as tf
        from tensorflow.python import pywrap_tensorflow
        from tensorflow.core.util import event_pb2
        from tensorflow.python.util import compat
        self.tf = tf
        self.event_pb2 = event_pb2
        self.pywrap_tensorflow = pywrap_tensorflow
        self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))

    def writekvs(self, kvs):
        """Write every (k, v) pair as a simple_value summary at the current step."""
        def summary_val(k, v):
            kwargs = {'tag': k, 'simple_value': float(v)}
            return self.tf.Summary.Value(**kwargs)
        summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
        event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
        event.step = self.step # is there any reason why you'd want to specify the step?
        self.writer.WriteEvent(event)
        self.writer.Flush()
        self.step += 1

    def close(self):
        if self.writer:
            self.writer.Close()
            self.writer = None  # guard against double-close
def make_output_format(format, ev_dir, log_suffix=''):
    """Create one output writer for the given format string, creating ev_dir
    if needed; raises ValueError for an unrecognized format."""
    os.makedirs(ev_dir, exist_ok=True)
    # Dispatch table: each entry lazily constructs the matching writer.
    factories = {
        'stdout': lambda: HumanOutputFormat(sys.stdout),
        'log': lambda: HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix)),
        'json': lambda: JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix)),
        'csv': lambda: CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix)),
        'tensorboard': lambda: TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix)),
    }
    if format not in factories:
        raise ValueError('Unknown format specified: %s' % (format,))
    return factories[format]()
# ================================================================
# API
# ================================================================
def logkv(key, val):
    """
    Log a value of some diagnostic
    Call this once for each diagnostic quantity, each iteration
    If called many times, last value will be used.
    """
    get_current().logkv(key, val)

def logkv_mean(key, val):
    """
    The same as logkv(), but if called many times, values averaged.
    """
    get_current().logkv_mean(key, val)

def logkvs(d):
    """
    Log a dictionary of key-value pairs
    """
    for (k, v) in d.items():
        logkv(k, v)

def dumpkvs():
    """
    Write all of the diagnostics from the current iteration
    """
    return get_current().dumpkvs()

def getkvs():
    """Return the current iteration's key->value mapping (not a copy)."""
    return get_current().name2val

def log(*args, level=INFO):
    """
    Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
    """
    get_current().log(*args, level=level)

def debug(*args):
    """log() at DEBUG verbosity."""
    log(*args, level=DEBUG)

def info(*args):
    """log() at INFO verbosity."""
    log(*args, level=INFO)

def warn(*args):
    """log() at WARN verbosity."""
    log(*args, level=WARN)

def error(*args):
    """log() at ERROR verbosity."""
    log(*args, level=ERROR)

def set_level(level):
    """
    Set logging threshold on current logger.
    """
    get_current().set_level(level)

def set_comm(comm):
    """Set the MPI communicator used by dumpkvs() to average stats across ranks."""
    get_current().set_comm(comm)

def get_dir():
    """
    Get directory that log files are being written to.
    will be None if there is no output directory (i.e., if you didn't call start)
    """
    return get_current().get_dir()

# Backwards-compatible aliases for the older OpenAI Baselines API names.
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
    """Accumulate the wall-clock time spent in the body under the key
    'wait_<scopename>' on the current logger."""
    start = time.time()
    try:
        yield
    finally:
        elapsed = time.time() - start
        get_current().name2val['wait_' + scopename] += elapsed
def profile(n):
    """
    Decorator factory: time every call of the wrapped function and record it
    under 'wait_<n>' via profile_kv.

    Usage:
    @profile("my_func")
    def my_func(): code
    """
    # Local import keeps the module's import-time dependencies unchanged.
    import functools

    def decorator_with_name(func):
        # functools.wraps preserves the wrapped function's __name__,
        # docstring and signature metadata (previously clobbered by the
        # anonymous wrapper).
        @functools.wraps(func)
        def func_wrapper(*args, **kwargs):
            with profile_kv(n):
                return func(*args, **kwargs)
        return func_wrapper
    return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
    # Lazily create the default logger on first use so the free functions
    # (logkv, dumpkvs, ...) work without an explicit configure() call.
    if Logger.CURRENT is None:
        _configure_default_logger()

    return Logger.CURRENT
class Logger(object):
    """
    Accumulates per-iteration diagnostics and fans them out to a set of
    output writers when dumpkvs() is called.
    """
    DEFAULT = None  # A logger with no output files. (See right below class definition)
                    # So that you can still log to the terminal without setting up any output files
    CURRENT = None  # Current logger being used by the free functions above

    def __init__(self, dir, output_formats, comm=None):
        # name2val/name2cnt together back the running means of logkv_mean().
        self.name2val = defaultdict(float)  # values this iteration
        self.name2cnt = defaultdict(int)
        self.level = INFO
        self.dir = dir
        self.output_formats = output_formats
        self.comm = comm  # optional MPI communicator for cross-rank averaging

    # Logging API, forwarded
    # ----------------------------------------
    def logkv(self, key, val):
        """Record val for key; a later call in the same iteration overwrites it."""
        self.name2val[key] = val

    def logkv_mean(self, key, val):
        """Fold val into a running mean for key (incremental average update)."""
        oldval, cnt = self.name2val[key], self.name2cnt[key]
        self.name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)
        self.name2cnt[key] = cnt + 1

    def dumpkvs(self):
        """Flush accumulated diagnostics to every KVWriter, then clear them."""
        if self.comm is None:
            d = self.name2val
        else:
            # Weighted-average stats across MPI ranks; logkv_mean counts act
            # as the weights (plain logkv entries default to weight 1).
            from external import mpi_util
            d = mpi_util.mpi_weighted_mean(self.comm,
                {name : (val, self.name2cnt.get(name, 1))
                    for (name, val) in self.name2val.items()})
            if self.comm.rank != 0:
                d['dummy'] = 1 # so we don't get a warning about empty dict
        out = d.copy() # Return the dict for unit testing purposes
        for fmt in self.output_formats:
            if isinstance(fmt, KVWriter):
                fmt.writekvs(d)
        self.name2val.clear()
        self.name2cnt.clear()
        return out

    def log(self, *args, level=INFO):
        """Write args to the sequence-style outputs if level passes the threshold."""
        if self.level <= level:
            self._do_log(args)

    # Configuration
    # ----------------------------------------
    def set_level(self, level):
        self.level = level

    def set_comm(self, comm):
        self.comm = comm

    def get_dir(self):
        return self.dir

    def close(self):
        for fmt in self.output_formats:
            fmt.close()

    # Misc
    # ----------------------------------------
    def _do_log(self, args):
        # Only sequence writers (stdout / text log files) get free-form text.
        for fmt in self.output_formats:
            if isinstance(fmt, SeqWriter):
                fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
    """Return this process's MPI rank from the launcher's environment
    variables, or 0 when not running under MPI.

    Environment variables are consulted instead of importing mpi4py because
    importing it would trigger MPI_Init() as a side effect.
    """
    # PMI_RANK (MPICH/Intel) takes precedence over OMPI_COMM_WORLD_RANK (Open MPI).
    for varname in ('PMI_RANK', 'OMPI_COMM_WORLD_RANK'):
        rank = os.environ.get(varname)
        if rank is not None:
            return int(rank)
    return 0
def configure(dir=None, format_strs=None, comm=None, log_suffix=''):
    """
    If comm is provided, average all numerical stats across that comm

    dir defaults to $OPENAI_LOGDIR, then to a timestamped temp directory.
    format_strs defaults to $OPENAI_LOG_FORMAT ('stdout,log,csv') on rank 0
    and to $OPENAI_LOG_FORMAT_MPI ('log') on every other rank.
    """
    if dir is None:
        dir = os.getenv('OPENAI_LOGDIR')
    if dir is None:
        dir = osp.join(tempfile.gettempdir(),
            datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
    assert isinstance(dir, str)
    dir = os.path.expanduser(dir)
    os.makedirs(os.path.expanduser(dir), exist_ok=True)

    rank = get_rank_without_mpi_import()
    if rank > 0:
        # Give every non-root MPI rank its own set of log files.
        log_suffix = log_suffix + "-rank%03i" % rank

    if format_strs is None:
        if rank == 0:
            format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
        else:
            format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
    format_strs = filter(None, format_strs)  # drop empty strings from the env value
    output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]

    Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
    if output_formats:
        log('Logging to %s'%dir)
def _configure_default_logger():
    # Build a logger from environment defaults and remember it so that
    # reset() can fall back to it later.
    configure()
    Logger.DEFAULT = Logger.CURRENT
def reset():
    """Close the current logger (if it is not the default) and reinstall the default."""
    if Logger.CURRENT is not Logger.DEFAULT:
        Logger.CURRENT.close()
        Logger.CURRENT = Logger.DEFAULT
        log('Reset logger')
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
    """Temporarily install a freshly configured logger; restore the previous
    one (closing the temporary logger) when the block exits."""
    prevlogger = Logger.CURRENT
    configure(dir=dir, format_strs=format_strs, comm=comm)
    try:
        yield
    finally:
        Logger.CURRENT.close()
        Logger.CURRENT = prevlogger
# ================================================================
def _demo():
    # Manual smoke test exercising the module's public API end to end.
    info("hi")
    debug("shouldn't appear")
    set_level(DEBUG)
    debug("should appear")
    dir = "/tmp/testlogging"
    if os.path.exists(dir):
        shutil.rmtree(dir)
    configure(dir=dir)
    logkv("a", 3)
    logkv("b", 2.5)
    dumpkvs()
    logkv("b", -2.5)
    logkv("a", 5.5)
    dumpkvs()
    info("^^^ should see a = 5.5")
    logkv_mean("b", -22.5)
    logkv_mean("b", -44.4)
    logkv("a", 5.5)
    dumpkvs()
    # NOTE(review): the mean of -22.5 and -44.4 is -33.45, not -33.3 as the
    # message below claims (message left unchanged; it is runtime output).
    info("^^^ should see b = -33.3")

    logkv("b", -2.5)
    dumpkvs()

    # Exercises HumanOutputFormat's 30-char cell truncation.
    logkv("a", "longasslongasslongasslongasslongasslongassvalue")
    dumpkvs()
# ================================================================
# Readers
# ================================================================
def read_json(fname):
    """Read a JSONL progress file (one JSON object per line, as written by
    JSONOutputFormat) into a pandas DataFrame."""
    import pandas
    with open(fname, 'rt') as fh:
        records = [json.loads(line) for line in fh]
    return pandas.DataFrame(records)
def read_csv(fname):
    """Read a progress CSV (as written by CSVOutputFormat) into a DataFrame.

    Lines starting with '#' are treated as comments and skipped.
    """
    import pandas
    return pandas.read_csv(fname, index_col=None, comment='#')
def read_tb(path):
    """
    path : a tensorboard file OR a directory, where we will find all TB files
           of the form events.*

    Returns a pandas DataFrame with one column per tag, sorted by tag name,
    and one row per step (step s lands in row s-1; gaps are NaN).
    """
    import pandas
    import numpy as np
    from glob import glob
    import tensorflow as tf
    if osp.isdir(path):
        fnames = glob(osp.join(path, "events.*"))
    elif osp.basename(path).startswith("events."):
        fnames = [path]
    else:
        raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s"%path)
    tag2pairs = defaultdict(list)
    maxstep = 0
    for fname in fnames:
        # NOTE(review): tf.train.summary_iterator is a TF1-era API; confirm
        # the pinned tensorflow version still exposes it.
        for summary in tf.train.summary_iterator(fname):
            if summary.step > 0:
                for v in summary.summary.value:
                    pair = (summary.step, v.simple_value)
                    tag2pairs[v.tag].append(pair)
                maxstep = max(summary.step, maxstep)
    # Dense (step x tag) matrix; unrecorded (step, tag) cells stay NaN.
    data = np.empty((maxstep, len(tag2pairs)))
    data[:] = np.nan
    tags = sorted(tag2pairs.keys())
    for (colidx,tag) in enumerate(tags):
        pairs = tag2pairs[tag]
        for (step, value) in pairs:
            data[step-1, colidx] = value
    return pandas.DataFrame(data, columns=tags)
if __name__ == "__main__":
_demo()
| 29.411531 | 122 | 0.56773 | import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, 'wt')
self.own_file = True
else:
assert hasattr(filename_or_file, 'read'), 'expected file or str, got %s'%filename_or_file
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, '__float__'):
valstr = '%-8.3g' % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
if len(key2str) == 0:
print('WARNING: tried to write empty key-value dict')
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
dashes = '-' * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append('| %s%s | %s%s |' % (
key,
' ' * (keywidth - len(key)),
val,
' ' * (valwidth - len(val)),
))
lines.append(dashes)
self.file.write('\n'.join(lines) + '\n')
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[:maxlen-3] + '...' if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1:
self.file.write(' ')
self.file.write('\n')
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'wt')
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, 'dtype'):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + '\n')
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'w+t')
self.keys = []
self.sep = ','
def writekvs(self, kvs):
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
self.file.write(k)
self.file.write('\n')
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write('\n')
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write('\n')
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = 'events'
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {'tag': k, 'simple_value': float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = self.step
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=''):
os.makedirs(ev_dir, exist_ok=True)
if format == 'stdout':
return HumanOutputFormat(sys.stdout)
elif format == 'log':
return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix))
elif format == 'json':
return JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix))
elif format == 'csv':
return CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix))
elif format == 'tensorboard':
return TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix))
else:
raise ValueError('Unknown format specified: %s' % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
get_current().logkv(key, val)
def logkv_mean(key, val):
get_current().logkv_mean(key, val)
def logkvs(d):
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = 'wait_' + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
from external import mpi_util
d = mpi_util.mpi_weighted_mean(self.comm,
{name : (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()})
if self.comm.rank != 0:
d['dummy'] = 1 # so we don't get a warning about empty dict
out = d.copy()
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
for varname in ['PMI_RANK', 'OMPI_COMM_WORLD_RANK']:
if varname in os.environ:
return int(os.environ[varname])
return 0
def configure(dir=None, format_strs=None, comm=None, log_suffix=''):
if dir is None:
dir = os.getenv('OPENAI_LOGDIR')
if dir is None:
dir = osp.join(tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
else:
format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log('Logging to %s'%dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
def _demo():
info("hi")
debug("shouldn't appear")
set_level(DEBUG)
debug("should appear")
dir = "/tmp/testlogging"
if os.path.exists(dir):
shutil.rmtree(dir)
configure(dir=dir)
logkv("a", 3)
logkv("b", 2.5)
dumpkvs()
logkv("b", -2.5)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see a = 5.5")
logkv_mean("b", -22.5)
logkv_mean("b", -44.4)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see b = -33.3")
logkv("b", -2.5)
dumpkvs()
logkv("a", "longasslongasslongasslongasslongasslongassvalue")
dumpkvs()
# ================================================================
# Readers
# ================================================================
def read_json(fname):
import pandas
ds = []
with open(fname, 'rt') as fh:
for line in fh:
ds.append(json.loads(line))
return pandas.DataFrame(ds)
def read_csv(fname):
import pandas
return pandas.read_csv(fname, index_col=None, comment='
def read_tb(path):
import pandas
import numpy as np
from glob import glob
import tensorflow as tf
if osp.isdir(path):
fnames = glob(osp.join(path, "events.*"))
elif osp.basename(path).startswith("events."):
fnames = [path]
else:
raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s"%path)
tag2pairs = defaultdict(list)
maxstep = 0
for fname in fnames:
for summary in tf.train.summary_iterator(fname):
if summary.step > 0:
for v in summary.summary.value:
pair = (summary.step, v.simple_value)
tag2pairs[v.tag].append(pair)
maxstep = max(summary.step, maxstep)
data = np.empty((maxstep, len(tag2pairs)))
data[:] = np.nan
tags = sorted(tag2pairs.keys())
for (colidx,tag) in enumerate(tags):
pairs = tag2pairs[tag]
for (step, value) in pairs:
data[step-1, colidx] = value
return pandas.DataFrame(data, columns=tags)
if __name__ == "__main__":
_demo()
| true | true |
1c3799c35730a1160f93225e698987adb9c3c071 | 4,558 | py | Python | kafka-python/json_test.py | pengfei99/KafkaPyClient | b18b361aedec9b58eef27c1d6f97346a64a1f154 | [
"Apache-2.0"
] | null | null | null | kafka-python/json_test.py | pengfei99/KafkaPyClient | b18b361aedec9b58eef27c1d6f97346a64a1f154 | [
"Apache-2.0"
] | null | null | null | kafka-python/json_test.py | pengfei99/KafkaPyClient | b18b361aedec9b58eef27c1d6f97346a64a1f154 | [
"Apache-2.0"
] | null | null | null | import json
def main():
msg = """{"tableName":"students","dbName":"default","owner":"pliu","createTime":1647683673,"lastAccessTime":0,"retention":0,"sd":{"cols":[{"name":"student_id","type":"int","comment":null,"setType":true,"setName":true,"setComment":false},{"name":"firstname","type":"string","comment":null,"setType":true,"setName":true,"setComment":false},{"name":"lastname","type":"string","comment":null,"setType":true,"setName":true,"setComment":false},{"name":"year","type":"string","comment":null,"setType":true,"setName":true,"setComment":false},{"name":"major","type":"string","comment":null,"setType":true,"setName":true,"setComment":false}],"location":"file:/home/pliu/hive_data/sample_data","inputFormat":"org.apache.hadoop.mapred.TextInputFormat","outputFormat":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","compressed":false,"numBuckets":-1,"serdeInfo":{"name":null,"serializationLib":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","parameters":{"field.delim":",","serialization.format":","},"description":null,"serializerClass":null,"deserializerClass":null,"serdeType":null,"setParameters":true,"parametersSize":2,"setName":false,"setDescription":false,"setSerdeType":false,"setSerializationLib":true,"setSerializerClass":false,"setDeserializerClass":false},"bucketCols":[],"sortCols":[],"parameters":{},"skewedInfo":{"skewedColNames":[],"skewedColValues":[],"skewedColValueLocationMaps":{},"setSkewedColNames":true,"setSkewedColValues":true,"setSkewedColValueLocationMaps":true,"skewedColNamesSize":0,"skewedColNamesIterator":[],"skewedColValuesSize":0,"skewedColValuesIterator":[],"skewedColValueLocationMapsSize":0},"storedAsSubDirectories":false,"colsSize":5,"setParameters":true,"setLocation":true,"setInputFormat":true,"parametersSize":0,"setCols":true,"colsIterator":[{"name":"student_id","type":"int","comment":null,"setType":true,"setName":true,"setComment":false},{"name":"firstname","type":"string","comment":null,"setType":true,"setName":true,"setComment":false},{"name"
:"lastname","type":"string","comment":null,"setType":true,"setName":true,"setComment":false},{"name":"year","type":"string","comment":null,"setType":true,"setName":true,"setComment":false},{"name":"major","type":"string","comment":null,"setType":true,"setName":true,"setComment":false}],"setSkewedInfo":true,"setOutputFormat":true,"setCompressed":false,"setNumBuckets":true,"bucketColsSize":0,"bucketColsIterator":[],"sortColsSize":0,"sortColsIterator":[],"setStoredAsSubDirectories":true,"setSortCols":true,"setSerdeInfo":true,"setBucketCols":true},"partitionKeys":[],"parameters":{"totalSize":"62","EXTERNAL":"TRUE","numFiles":"1","transient_lastDdlTime":"1647683673","bucketing_version":"2","comment":"Student Names"},"viewOriginalText":null,"viewExpandedText":null,"tableType":"EXTERNAL_TABLE","privileges":{"userPrivileges":{"pliu":[{"privilege":"INSERT","createTime":-1,"grantor":"pliu","grantorType":"USER","grantOption":true,"setPrivilege":true,"setGrantOption":true,"setCreateTime":true,"setGrantor":true,"setGrantorType":true},{"privilege":"SELECT","createTime":-1,"grantor":"pliu","grantorType":"USER","grantOption":true,"setPrivilege":true,"setGrantOption":true,"setCreateTime":true,"setGrantor":true,"setGrantorType":true},{"privilege":"UPDATE","createTime":-1,"grantor":"pliu","grantorType":"USER","grantOption":true,"setPrivilege":true,"setGrantOption":true,"setCreateTime":true,"setGrantor":true,"setGrantorType":true},{"privilege":"DELETE","createTime":-1,"grantor":"pliu","grantorType":"USER","grantOption":true,"setPrivilege":true,"setGrantOption":true,"setCreateTime":true,"setGrantor":true,"setGrantorType":true}]},"groupPrivileges":null,"rolePrivileges":null,"setUserPrivileges":true,"setGroupPrivileges":false,"setRolePrivileges":false,"userPrivilegesSize":1,"groupPrivilegesSize":0,"rolePrivilegesSize":0},"temporary":false,"rewriteEnabled":false,"creationMetadata":null,"catName":"hive","ownerType":"USER","partitionKeysSize":0,"setCatName":true,"setParameters":true,"setPart
itionKeys":true,"setSd":true,"setPrivileges":true,"setDbName":true,"setTableName":true,"setCreateTime":true,"setLastAccessTime":false,"parametersSize":6,"setRetention":false,"partitionKeysIterator":[],"setTemporary":true,"setRewriteEnabled":false,"setOwner":true,"setViewOriginalText":false,"setViewExpandedText":false,"setTableType":true,"setCreationMetadata":false,"setOwnerType":true}"""
tmp_table = json.loads(msg)
print(tmp_table['tableName'])
print(tmp_table["dbName"])
if __name__ == "__main__":
main()
| 350.615385 | 4,394 | 0.753401 | import json
def main():
msg = """{"tableName":"students","dbName":"default","owner":"pliu","createTime":1647683673,"lastAccessTime":0,"retention":0,"sd":{"cols":[{"name":"student_id","type":"int","comment":null,"setType":true,"setName":true,"setComment":false},{"name":"firstname","type":"string","comment":null,"setType":true,"setName":true,"setComment":false},{"name":"lastname","type":"string","comment":null,"setType":true,"setName":true,"setComment":false},{"name":"year","type":"string","comment":null,"setType":true,"setName":true,"setComment":false},{"name":"major","type":"string","comment":null,"setType":true,"setName":true,"setComment":false}],"location":"file:/home/pliu/hive_data/sample_data","inputFormat":"org.apache.hadoop.mapred.TextInputFormat","outputFormat":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","compressed":false,"numBuckets":-1,"serdeInfo":{"name":null,"serializationLib":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","parameters":{"field.delim":",","serialization.format":","},"description":null,"serializerClass":null,"deserializerClass":null,"serdeType":null,"setParameters":true,"parametersSize":2,"setName":false,"setDescription":false,"setSerdeType":false,"setSerializationLib":true,"setSerializerClass":false,"setDeserializerClass":false},"bucketCols":[],"sortCols":[],"parameters":{},"skewedInfo":{"skewedColNames":[],"skewedColValues":[],"skewedColValueLocationMaps":{},"setSkewedColNames":true,"setSkewedColValues":true,"setSkewedColValueLocationMaps":true,"skewedColNamesSize":0,"skewedColNamesIterator":[],"skewedColValuesSize":0,"skewedColValuesIterator":[],"skewedColValueLocationMapsSize":0},"storedAsSubDirectories":false,"colsSize":5,"setParameters":true,"setLocation":true,"setInputFormat":true,"parametersSize":0,"setCols":true,"colsIterator":[{"name":"student_id","type":"int","comment":null,"setType":true,"setName":true,"setComment":false},{"name":"firstname","type":"string","comment":null,"setType":true,"setName":true,"setComment":false},{"name"
:"lastname","type":"string","comment":null,"setType":true,"setName":true,"setComment":false},{"name":"year","type":"string","comment":null,"setType":true,"setName":true,"setComment":false},{"name":"major","type":"string","comment":null,"setType":true,"setName":true,"setComment":false}],"setSkewedInfo":true,"setOutputFormat":true,"setCompressed":false,"setNumBuckets":true,"bucketColsSize":0,"bucketColsIterator":[],"sortColsSize":0,"sortColsIterator":[],"setStoredAsSubDirectories":true,"setSortCols":true,"setSerdeInfo":true,"setBucketCols":true},"partitionKeys":[],"parameters":{"totalSize":"62","EXTERNAL":"TRUE","numFiles":"1","transient_lastDdlTime":"1647683673","bucketing_version":"2","comment":"Student Names"},"viewOriginalText":null,"viewExpandedText":null,"tableType":"EXTERNAL_TABLE","privileges":{"userPrivileges":{"pliu":[{"privilege":"INSERT","createTime":-1,"grantor":"pliu","grantorType":"USER","grantOption":true,"setPrivilege":true,"setGrantOption":true,"setCreateTime":true,"setGrantor":true,"setGrantorType":true},{"privilege":"SELECT","createTime":-1,"grantor":"pliu","grantorType":"USER","grantOption":true,"setPrivilege":true,"setGrantOption":true,"setCreateTime":true,"setGrantor":true,"setGrantorType":true},{"privilege":"UPDATE","createTime":-1,"grantor":"pliu","grantorType":"USER","grantOption":true,"setPrivilege":true,"setGrantOption":true,"setCreateTime":true,"setGrantor":true,"setGrantorType":true},{"privilege":"DELETE","createTime":-1,"grantor":"pliu","grantorType":"USER","grantOption":true,"setPrivilege":true,"setGrantOption":true,"setCreateTime":true,"setGrantor":true,"setGrantorType":true}]},"groupPrivileges":null,"rolePrivileges":null,"setUserPrivileges":true,"setGroupPrivileges":false,"setRolePrivileges":false,"userPrivilegesSize":1,"groupPrivilegesSize":0,"rolePrivilegesSize":0},"temporary":false,"rewriteEnabled":false,"creationMetadata":null,"catName":"hive","ownerType":"USER","partitionKeysSize":0,"setCatName":true,"setParameters":true,"setPart
itionKeys":true,"setSd":true,"setPrivileges":true,"setDbName":true,"setTableName":true,"setCreateTime":true,"setLastAccessTime":false,"parametersSize":6,"setRetention":false,"partitionKeysIterator":[],"setTemporary":true,"setRewriteEnabled":false,"setOwner":true,"setViewOriginalText":false,"setViewExpandedText":false,"setTableType":true,"setCreationMetadata":false,"setOwnerType":true}"""
tmp_table = json.loads(msg)
print(tmp_table['tableName'])
print(tmp_table["dbName"])
if __name__ == "__main__":
main()
| true | true |
1c3799d44bc7ac749bd8a851ed33c9e6a417e9f2 | 1,774 | py | Python | UserCode/jzhang/bubble_finder_test.py | RunzZhang/SBCcode | e75b8e751cec5fb2c28950edef0c82f005caedcb | [
"MIT"
] | 4 | 2018-08-27T18:02:34.000Z | 2020-06-09T21:19:04.000Z | UserCode/jzhang/bubble_finder_test.py | RunzZhang/SBCcode | e75b8e751cec5fb2c28950edef0c82f005caedcb | [
"MIT"
] | null | null | null | UserCode/jzhang/bubble_finder_test.py | RunzZhang/SBCcode | e75b8e751cec5fb2c28950edef0c82f005caedcb | [
"MIT"
] | 4 | 2019-06-20T21:36:26.000Z | 2020-11-10T17:23:14.000Z | # python sbc_pmttest_processall.py [run_list]
# if run_list is provided, the runs in the list will be processed; otherwise
# the runs in the script will be processed
import SBCcode.AnalysisModules.ImageAnalysis as ia
import SBCcode.DataHandling.WriteBinary as wb
import numpy as np
# import SBCcode as sbc
import os
import re
import sys
# datadir = '/bluearc/storage/SBC-17-data'
#recondir = '/bluearc/storage/recon/devel/SBC-15/output'
datadir = '/mnt/XENON_DAQ/SBC-17-data'
recondir = '.'
# ~ runlist = os.listdir(datadir)
# ~ runlist = filter(lambda fn: (not re.search('^\d+_\d+$', fn) is None) and
# ~ os.path.isdir(os.path.join(datadir, fn)),
# ~ runlist)
# ~ runlist = filter(lambda fn: os.path.exists(os.path.join(datadir,
# ~ *[fn, 'DAQversion.txt'])), runlist)
if len(sys.argv) > 1:
runlist = sys.argv[1:]
else:
runlist = ['20170625_0']
for runname in runlist:
runid_str = runname.split('_')
runid = np.int32(runid_str)
rundir = os.path.join(datadir,runname)
eventdirlist = os.listdir(rundir)
eventdirlist = filter(lambda fn: (not re.search('^\d+$', fn) is None) and
os.path.isdir(os.path.join(rundir, fn)),
eventdirlist)
eventlist = [int(x) for x in list(eventdirlist)]
eventlist = [21]
if not os.path.isdir(recondir):
os.mkdir(recondir)
bubbleList = []
for ev in eventlist:
bubbleList.append(ia.BubbleFinder(os.path.join(datadir,runname), ev,
12, 3, 15, 4).bubbles)
#print(bubbleList)
wb.WriteBinaryNtupleFile(os.path.join(recondir,'ImageAnalysis_' + runname + '.bin'), bubbleList,
rowdef=1, initialkeys=['runid', 'ev'], drop_first_dim=True)
| 34.784314 | 100 | 0.638106 |
import SBCcode.AnalysisModules.ImageAnalysis as ia
import SBCcode.DataHandling.WriteBinary as wb
import numpy as np
import os
import re
import sys
datadir = '/mnt/XENON_DAQ/SBC-17-data'
recondir = '.'
if len(sys.argv) > 1:
runlist = sys.argv[1:]
else:
runlist = ['20170625_0']
for runname in runlist:
runid_str = runname.split('_')
runid = np.int32(runid_str)
rundir = os.path.join(datadir,runname)
eventdirlist = os.listdir(rundir)
eventdirlist = filter(lambda fn: (not re.search('^\d+$', fn) is None) and
os.path.isdir(os.path.join(rundir, fn)),
eventdirlist)
eventlist = [int(x) for x in list(eventdirlist)]
eventlist = [21]
if not os.path.isdir(recondir):
os.mkdir(recondir)
bubbleList = []
for ev in eventlist:
bubbleList.append(ia.BubbleFinder(os.path.join(datadir,runname), ev,
12, 3, 15, 4).bubbles)
wb.WriteBinaryNtupleFile(os.path.join(recondir,'ImageAnalysis_' + runname + '.bin'), bubbleList,
rowdef=1, initialkeys=['runid', 'ev'], drop_first_dim=True)
| true | true |
1c379ab34d3650c0fbce680f9a59effe54787ad0 | 857 | py | Python | restriction_sites.py | Tiago-Minuzzi/lab-stuff | b4cbca8c578e3cc4035df5686254d9254a876413 | [
"MIT"
] | null | null | null | restriction_sites.py | Tiago-Minuzzi/lab-stuff | b4cbca8c578e3cc4035df5686254d9254a876413 | [
"MIT"
] | null | null | null | restriction_sites.py | Tiago-Minuzzi/lab-stuff | b4cbca8c578e3cc4035df5686254d9254a876413 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created by Tiago Minuzzi
"""
import sys
from Bio import SeqIO
from Bio.Restriction import *
INFILE=sys.argv[1]
with open(INFILE) as fasta:
for record in SeqIO.parse(fasta, 'fasta'):
fid = record.id
sequencia = record.seq
tamanho = len(record.seq)
# Find restriction sites
sitiosHIII = HindIII.search(sequencia)
sitiosERI = EcoRI.search(sequencia)
allsites= sitiosHIII+sitiosERI
allsites=list(set(allsites))
allsites.sort()
allsites.insert(0,0)
for i,j in zip(allsites,allsites[1:]+[None]):
sitio=f'{i+1}:{j}'
sitio=sitio.replace('None',str(len(sequencia)))
corte=sequencia[i:j]
tam=len(corte)
print(f'>{fid}|pos={sitio}|length={tam}\n{corte}')
| 28.566667 | 62 | 0.590432 |
import sys
from Bio import SeqIO
from Bio.Restriction import *
INFILE=sys.argv[1]
with open(INFILE) as fasta:
for record in SeqIO.parse(fasta, 'fasta'):
fid = record.id
sequencia = record.seq
tamanho = len(record.seq)
sitiosHIII = HindIII.search(sequencia)
sitiosERI = EcoRI.search(sequencia)
allsites= sitiosHIII+sitiosERI
allsites=list(set(allsites))
allsites.sort()
allsites.insert(0,0)
for i,j in zip(allsites,allsites[1:]+[None]):
sitio=f'{i+1}:{j}'
sitio=sitio.replace('None',str(len(sequencia)))
corte=sequencia[i:j]
tam=len(corte)
print(f'>{fid}|pos={sitio}|length={tam}\n{corte}')
| true | true |
1c379b44da55f2f427dc8bdf12f7b203223a0aba | 2,990 | py | Python | run.py | felix2072/pytorch-CycleGAN-and-pix2pix | 4980106ceab5e1eb7bb20c2b492d007b6310d9e1 | [
"BSD-3-Clause"
] | null | null | null | run.py | felix2072/pytorch-CycleGAN-and-pix2pix | 4980106ceab5e1eb7bb20c2b492d007b6310d9e1 | [
"BSD-3-Clause"
] | null | null | null | run.py | felix2072/pytorch-CycleGAN-and-pix2pix | 4980106ceab5e1eb7bb20c2b492d007b6310d9e1 | [
"BSD-3-Clause"
] | null | null | null | import socket
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
import time
from util import util
UDP_IP = "127.0.0.1"
OUT_PORT = 5004
IN_PORT = 5005
buf = 1024
timeout = 3
if __name__ == '__main__':
opt = TestOptions().parse() # get test options
# hard-code some parameters for test
opt.num_threads = 0 # test code only supports num_threads = 0
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
model = create_model(opt) # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
while True:
print ("-------------------------")
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
time.sleep(0.03)
info = "need file"
print(info)
sock.sendto(info.encode(), (UDP_IP, OUT_PORT))
time.sleep(0.04)
info = "end"
print(info)
sock.sendto(info.encode(), (UDP_IP, OUT_PORT))
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((UDP_IP, IN_PORT))
while True:
data, addr = sock.recvfrom(1024)
if data.decode("utf-8") == "image saved":
print("load base image")
break
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
#print("dataset :%s was created" % dataset)
for i, data in enumerate(dataset):
if i >= opt.num_test: # only apply our model to opt.num_test images.
break
model.set_input(data) # unpack data from data loader
model.test(i) # run inference
visuals = model.get_current_visuals() # get image results
for label, im_data in visuals.items():
im = util.tensor2im(im_data)
save_path = './datasets/{}/test_6result/fake{}.jpg'.format(opt.name,i)
util.save_image(im, save_path, aspect_ratio=opt.aspect_ratio)
info = "fake is ready"
print(info)
sock.sendto(info.encode(), (UDP_IP, OUT_PORT))
time.sleep(0.04)
info = "end"
print(info)
sock.sendto(info.encode(), (UDP_IP, OUT_PORT))
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((UDP_IP, IN_PORT))
while True:
print("wait for VL to load image")
data, addr = sock.recvfrom(1024)
if data.decode("utf-8") == "got fake":
print("fake image")
break
| 36.463415 | 123 | 0.600669 | import socket
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
import time
from util import util
UDP_IP = "127.0.0.1"
OUT_PORT = 5004
IN_PORT = 5005
buf = 1024
timeout = 3
if __name__ == '__main__':
opt = TestOptions().parse()
opt.num_threads = 0
opt.batch_size = 1
opt.serial_batches = True
opt.no_flip = True
opt.display_id = -1
model = create_model(opt)
model.setup(opt)
while True:
print ("-------------------------")
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
time.sleep(0.03)
info = "need file"
print(info)
sock.sendto(info.encode(), (UDP_IP, OUT_PORT))
time.sleep(0.04)
info = "end"
print(info)
sock.sendto(info.encode(), (UDP_IP, OUT_PORT))
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((UDP_IP, IN_PORT))
while True:
data, addr = sock.recvfrom(1024)
if data.decode("utf-8") == "image saved":
print("load base image")
break
dataset = create_dataset(opt)
for i, data in enumerate(dataset):
if i >= opt.num_test:
break
model.set_input(data)
model.test(i)
visuals = model.get_current_visuals()
for label, im_data in visuals.items():
im = util.tensor2im(im_data)
save_path = './datasets/{}/test_6result/fake{}.jpg'.format(opt.name,i)
util.save_image(im, save_path, aspect_ratio=opt.aspect_ratio)
info = "fake is ready"
print(info)
sock.sendto(info.encode(), (UDP_IP, OUT_PORT))
time.sleep(0.04)
info = "end"
print(info)
sock.sendto(info.encode(), (UDP_IP, OUT_PORT))
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((UDP_IP, IN_PORT))
while True:
print("wait for VL to load image")
data, addr = sock.recvfrom(1024)
if data.decode("utf-8") == "got fake":
print("fake image")
break
| true | true |
1c379c378fffabea367feee717680ccc02e4754d | 200,004 | py | Python | tests/api_test.py | SCiarella/jax | a7c9b6d11fa833c748d72b3ccc11baeed9c0248c | [
"Apache-2.0"
] | null | null | null | tests/api_test.py | SCiarella/jax | a7c9b6d11fa833c748d72b3ccc11baeed9c0248c | [
"Apache-2.0"
] | 6 | 2022-01-03T08:14:15.000Z | 2022-02-14T08:13:40.000Z | tests/api_test.py | SCiarella/jax | a7c9b6d11fa833c748d72b3ccc11baeed9c0248c | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import collections.abc
from contextlib import contextmanager
import copy
import enum
from functools import partial
import operator
import re
import subprocess
import sys
import types
import unittest
import warnings
import weakref
import functools
import itertools as it
import operator as op
from absl import logging
from absl.testing import absltest, parameterized
import numpy as np
import concurrent.futures
import jax
import jax.numpy as jnp
from jax import float0, jit, grad, device_put, jacfwd, jacrev, hessian
from jax import core, dtypes, lax
from jax._src import api
from jax.core import Primitive
from jax.errors import UnexpectedTracerError
from jax.interpreters import ad
from jax.interpreters import xla
from jax.interpreters import pxla
from jax.interpreters.sharded_jit import PartitionSpec as P
import jax._src.lib
from jax._src.lib import xla_client
from jax._src import test_util as jtu
from jax import tree_util
from jax import linear_util as lu
import jax._src.util
from jax._src.ad_checkpoint import saved_residuals
from jax.ad_checkpoint import checkpoint as new_checkpoint, checkpoint_name
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
python_version = (sys.version_info[0], sys.version_info[1])
numpy_version = tuple(map(int, np.__version__.split('.')[:3]))
class CPPJitTest(jtu.BufferDonationTestCase):
"""Shared tests between the Python and the C++ jax,jit implementations.
Because the Python implementation supports more features, we need to have the
Python tests that extend the C++ tests (and not the other way around).
"""
@property
def jit(self):
# Right now, the CPP tests also test the Python code-path when jaxlib is
# too old.
# TODO(jblespiau,phawkins): Remove this when jaxlib has been released.
# This is in the future, because we are making a breaking change to
# Tensorflow.
return api._cpp_jit
@unittest.skipIf(jax._src.lib._xla_extension_version < 40,
"Test requires jaxlib 0.1.73")
def test_jit_repr(self):
def my_function():
return
jitted = jit(my_function)
self.assertEqual(repr(jitted), f"<CompiledFunction of {repr(my_function)}>")
@unittest.skipIf(jax._src.lib._xla_extension_version < 40,
"Test requires jaxlib 0.1.73")
def test_jit_repr_errors(self):
class Callable:
def __call__(self): pass
def __repr__(self):
raise ValueError("invalid repr")
# repr succeeds when underlying function repr fails.
jitted = jit(Callable())
self.assertEqual(repr(jitted), "<CompiledFunction>")
# repr succeeds when object is malformed.
del jitted.__wrapped__
self.assertEqual(repr(jitted), "<CompiledFunction>")
def test_jit_of_noncallable(self):
self.assertRaisesRegex(TypeError, "Expected a callable value.*",
lambda: self.jit(3))
def test_jit_of_generator(self):
def gen(x):
yield x
self.assertRaisesRegex(TypeError,
"Expected a function, got a generator function.*",
lambda: self.jit(gen))
@parameterized.parameters([
# Integer support
(1, 2, 3, 4, 5),
# Numpy array support
(
np.asarray(1, np.int32),
np.asarray(2, np.int32),
np.asarray(3, np.int32),
np.asarray(4, np.int32),
np.asarray(5, np.int32),
),
])
def test_jit_static_args(self, one, two, three, four, five):
side = []
def f(x, y, z, flag=False, flag2=False):
del flag2 # unused
assert flag
side.append(None)
return 100 * x + 10 * y + z
f1 = self.jit(f, static_argnums=(3, 4))
assert f1(one, two, three, True, False) == 123
assert len(side) == 1
assert f1(one, two, three, True, False) == 123
assert len(side) == 1 # Obvious cache hit.
assert f1(two, one, three, True, False) == 213
assert len(side) == 1 # Should cache hit because same signature.
assert f1(two, one, three, True, True) == 213
assert len(side) == 2
side[:] = []
f2 = self.jit(f, static_argnums=(0, 2, 3, 4))
assert f2(1, 2, 3, True, False) == 123
assert len(side) == 1
assert f2(1, 3, 3, True, False) == 133
assert len(side) == 1
assert f2(2, 2, 3, True, False) == 223
assert len(side) == 2
assert f2(2, 4, 3, True, False) == 243
assert len(side) == 2
assert f2(2, 4, 3, True, True) == 243
assert len(side) == 3
assert f2(2, 5, 3, True, True) == 253
assert len(side) == 3
def test_static_args_equality(self):
class A():
def __hash__(self):
return 1
def __eq__(self, other):
return isinstance(other, A)
side = []
def f(x, static_arg):
del static_arg
side.append(None)
return x * 100
f1 = self.jit(f, static_argnums=(1,))
self.assertEqual(f1(1, A()), 100)
self.assertLen(side, 1)
self.assertEqual(f1(1, A()), 100)
self.assertLen(side, 1)
if self.jit == api._cpp_jit:
f1_cpp = getattr(f1, "_cpp_jitted_f", f1)
self.assertEqual(f1_cpp._cache_size(), 1)
@parameterized.parameters([
(1, 2, 3),
(
np.asarray(1, np.int32),
np.asarray(2, np.int32),
np.asarray(3, np.int32),
),
])
def test_jit_kwargs(self, one, two, three):
side = []
# For the CPP jit, we need to clear the cache to prevent cache hits between
# parameterized tests.
if hasattr(self.jit, "cache_clear"):
self.jit.cache_clear()
def f(x, y, z):
side.append(None)
return 100 * x + 10 * y + z
f = self.jit(f)
assert f(one, two, three) == 123
assert len(side) == 1
assert f(one, two, three) == 123
assert len(side) == 1
assert f(one, two, z=three) == 123
assert len(side) == 2 # actually recompiles from kwarg
assert f(one, two, z=three) == 123
assert len(side) == 2 # but should still cache
f(one, two, z=np.zeros(3)) # doesn't crash
if config.x64_enabled:
# In the above call, three is of a new type (int64), thus it should
# trigger a new compilation.
assert len(side) == 3
def test_jit_device(self):
device = jax.devices()[-1]
x = self.jit(lambda x: x, device=device)(3.)
self.assertIsInstance(x, xla.DeviceArray)
self.assertEqual(x.device_buffer.device(), device)
def test_complex_support(self):
self.assertEqual(self.jit(lambda x: x + 1)(1 + 1j), 2 + 1j)
def test_jit_with_many_args_works(self):
@self.jit
def f(args_list):
return sum(args_list)
self.assertEqual(f(list(range(500))), sum(range(500)))
# Jit and Donate arguments
def test_jit_donate_argnums_warning_raised(self):
x = jnp.array([1.0, 2.0], jnp.float32)
y = jnp.array([1, 2], jnp.int32)
f = self.jit(lambda x, y: x.sum() + y.sum(), donate_argnums=(0, 1))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
f(x, y)
self.assertLen(w, 1)
self.assertTrue(issubclass(w[-1].category, UserWarning))
self.assertIn(
"Some donated buffers were not usable: f32[2]{0}, s32[2]{0}",
str(w[-1].message))
@jtu.skip_on_devices("cpu") # In/out aliasing not supported on CPU.
def test_jit_donate_argnums_invalidates_input(self):
# We can't just use `lambda x: x` because JAX simplifies this away to an
# empty XLA computation.
move = self.jit(lambda x: x + x - x, donate_argnums=0)
x = jnp.ones([])
y = move(x)
self.assertDeleted(x)
self.assertEqual(y, 1.)
@jtu.skip_on_devices("cpu") # In/out aliasing not supported on CPU.
def test_jit_donate_argnums_static_argnums(self):
jit_fun = self.jit(
lambda a, b, c, d: ((a + b + c), (a + b + d)),
static_argnums=(0, 1),
donate_argnums=(2, 3))
c = jax.device_put(jnp.array([1., 1.]))
d = jax.device_put(jnp.array([1., 1., 1.]))
e, f = jit_fun(1, 2, c, d)
np.testing.assert_allclose(e, jnp.array([4., 4.]))
np.testing.assert_allclose(f, jnp.array([4., 4., 4.]))
self.assertDeleted(c)
self.assertDeleted(d)
@jtu.skip_on_devices("cpu") # In/out aliasing not supported on CPU.
def test_jnp_array_copy(self):
# https://github.com/google/jax/issues/3412
@partial(self.jit, donate_argnums=(0,))
def _test(array):
return array.at[0].set(77)
x = jnp.asarray([0, 1])
x_copy = jnp.array(x, copy=True)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_test(x) # donation
# Gives: RuntimeError: Invalid argument: CopyToHostAsync() called on invalid buffer.
print(x_copy) # doesn't crash
def test_jit_global_cache(self):
def f(x):
assert python_should_be_executing
return x
python_should_be_executing = True
self.jit(f)(2)
python_should_be_executing = False
self.jit(f)(3)
def test_jit_shallow_copy(self):
def f(x):
return copy.copy(x)
self.jit(f)(1)
def test_jit_deep_copy(self):
def f(x):
return copy.deepcopy(x)
self.jit(f)(1)
def test_disable_jit(self):
effects = []
@self.jit
def f(x):
effects.append(1)
return x
with api.disable_jit():
f(2)
f(2)
assert len(effects) == 2
f(2)
f(2)
assert len(effects) == 3
def test_static_argnum_on_method(self):
class A:
@functools.partial(self.jit, static_argnums=(0,))
def my_func_jit(self, x):
return x+2
A().my_func_jit(3)
def test_static_argnum_on_static_method_is_not_supported(self):
with self.assertRaisesRegex(TypeError, "Expected a callable value"):
class A:
@functools.partial(self.jit, static_argnums=(0,))
@classmethod
def my_classmethod_jit(cls, x):
return x+2
def test_staticmethod_is_not_supported(self):
with self.assertRaisesRegex(TypeError,
"staticmethod arguments are not supported"):
class A:
@functools.partial(self.jit)
@staticmethod
def my_staticmethod_jit(x):
return x + 2
def test_concurrent_jit(self):
@self.jit
def f(x):
return x + x - 3.
xs = [np.random.randn(i) for i in range(10)]
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [executor.submit(partial(f, x)) for x in xs]
ys = [f.result() for f in futures]
for x, y in zip(xs, ys):
self.assertAllClose(x * 2 - 3., y)
def test_trivial_computations(self):
x = jnp.array([1, 2, 3])
y = self.jit(lambda x: x)(x)
self.assertIs(x, y)
z1, z2 = self.jit(lambda x: (x, x))(x)
self.assertIs(z1, z2)
x1, x2 = jnp.array([1, 2]), jnp.array([2, 3])
z1, z2, z3 = self.jit(lambda x, y: (y, 1, x))(x1, x2)
self.assertIs(z1, x2)
self.assertIs(z3, x1)
self.assertEqual(z2, 1)
def test_trivial_computations_with_tokens(self):
@self.jit
def noop(arr, token):
return arr, token
arr = jax.numpy.ones(10)
token = jax.lax.create_token()
self.assertEqual(token, noop(arr, token)[1])
def test_jit_bad_input(self):
def f(x):
return x
self.assertRaisesRegex(
TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
lambda: self.jit(f)("foo"))
def test_jit_on_all_devices(self):
# Verifies we can run the same computation on every device present, even
# if they are, for example, different models of GPU.
data = np.random.rand(1000).astype(np.float32)
f = self.jit(jnp.negative)
for device in jax.local_devices():
x = device_put(data, device=device)
np.testing.assert_array_equal(-data, f(x))
def test_jit_nested_donate_ignored(self):
jit_fun = self.jit(lambda x: self.jit(lambda y: y**2, donate_argnums=0)(x))
a = jax.device_put(jnp.array(1))
# NOTE(mattjj): stopped raising error here and instead just ignored
# with self.assertRaisesRegex(ValueError, "nested.*not supported"):
# jit_fun(a)
jit_fun(a) # doesn't crash
def test_jit_reference_dropping(self):
x = jnp.ones(10)
f = (lambda x: lambda: x)(x) # reference to x in f's closure
g = self.jit(f)
x = weakref.ref(x) # no more strong ref to x in this scope
assert x() is not None # x is still around
f() # f runs
g() # g runs
g() # g runs a second time
del f # delete the raw callable
assert x() is not None # x is still around
g() # g still runs
del g # no more references to x
assert x() is None # x is gone
def test_jit_raises_on_first_invocation_on_non_hashable_static_argnum(self):
if self.jit != api._python_jit:
raise unittest.SkipTest("this test only applies to _python_jit")
f = lambda x, y: x + 3
jitted_f = self.jit(f, static_argnums=(1,))
msg = ("Non-hashable static arguments are not supported, as this can lead "
"to unexpected cache-misses. Static argument (index 1) of type "
"<class 'numpy.ndarray'> for function <lambda> is non-hashable.")
with self.assertRaisesRegex(ValueError, re.escape(msg)):
jitted_f(1, np.asarray(1))
def test_cpp_jit_raises_on_non_hashable_static_argnum(self):
if self.jit != api._cpp_jit:
raise unittest.SkipTest("this test only applies to _cpp_jit")
f = lambda x, y: x + 3
jitted_f = api._cpp_jit(f, static_argnums=[1])
jitted_f(1, 1)
msg = ("Non-hashable static arguments are not supported. An error occured "
".*while trying to hash an object of type "
"<class 'numpy\\.ndarray'>, 1. The error was:\nTypeError: "
"unhashable type: 'numpy\\.ndarray'")
with self.assertRaisesRegex(ValueError, msg):
jitted_f(1, np.asarray(1))
class HashableWithoutEq:
def __hash__(self):
return 1
def __eq__(self, other):
raise NotImplementedError(
"A Python error is as is, without stack trace")
with self.assertRaisesRegex(
ValueError,
re.escape("static arguments should be comparable using __eq__")):
jitted_f(1, HashableWithoutEq())
  def test_cpp_jitted_function_returns_PyBuffer(self):
    """C++ jit returns the C++ DeviceArray type, not a Python wrapper."""
    if self.jit != api._cpp_jit:
      raise unittest.SkipTest("this test only applies to _cpp_jit")
    jitted_f = self.jit(lambda a: a + 1)
    jitted_f(1)  # warm up the cache so the fast path is exercised
    self.assertIsInstance(jitted_f(2), xla._CppDeviceArray)
  @jtu.skip_on_devices("cpu")
  def test_explicit_backend(self):
    """jit(backend=...) places results on the requested platform."""
    f = lambda x: x + 1
    jitted_f = jit(f, backend=jtu.device_under_test())
    jitted_f_cpu = jit(f, backend="cpu")
    result = jitted_f(1.)
    result_cpu = jitted_f_cpu(1.)
    self.assertEqual(result.device_buffer.platform(), jtu.device_under_test())
    self.assertEqual(result_cpu.device_buffer.platform(), "cpu")
  @jtu.skip_on_devices("cpu")
  def test_device_to_device_copy_between_backends(self):
    """Outputs of one backend can be fed to a jit on another backend."""
    # b/186624243
    f = lambda x: x + 1
    jitted_f = jit(f, backend=jtu.device_under_test())
    jitted_f_cpu = jit(f, backend="cpu")
    x = np.arange(30).reshape(1, 10, 3)
    # Round-trip values between the backends; each hop adds 1.
    result = jitted_f(x)
    result_cpu = jitted_f_cpu(result)
    result_2 = jitted_f(result_cpu)
    result_cpu_2 = jitted_f_cpu(result_2)
    self.assertAllClose(result_2, x + 3)
    self.assertAllClose(result_cpu_2, x + 4)
  @jtu.skip_on_devices("cpu")
  def test_mismatched_nested_backends(self):
    """Nested jits with conflicting explicit backends raise ValueError."""
    @partial(jit, backend=jtu.device_under_test())
    def f(x):
      return jit(lambda x: x + 1, backend="cpu")(x)
    with self.assertRaisesRegex(
        ValueError,
        f"Outer-jit backend specification {jtu.device_under_test()} must match "
        f"explicit inner-jit backend specification cpu."):
      f(1.)
  def test_omnistaging(self):
    """With omnistaging, jit traces even the PRNG-key side effects."""
    # See https://github.com/google/jax/issues/5206
    # TODO(frostig): remove once we always enable_custom_prng
    def _prng_key_as_array(key):
      # Unwrap a PRNGKeyArray to its raw uint32 data when custom PRNG is on.
      return key.unsafe_raw_array() if config.jax_enable_custom_prng else key
    # TODO(frostig): remove once we always enable_custom_prng
    def _array_as_prng_key(arr):
      # Wrap raw uint32 data as a PRNGKeyArray when custom PRNG is on.
      arr = np.array(arr, dtype=np.uint32)
      if config.jax_enable_custom_prng:
        return jax._src.prng.PRNGKeyArray(
            jax._src.prng.threefry_prng_impl, arr)
      else:
        return arr
    key_list = [None]
    def init():
      # Mutates key_list[0] as a side effect; under jit this is traced.
      key, subkey = jax.random.split(key_list[0])
      key_list[0] = key
      return jax.random.normal(subkey, ())
    key_list[0] = _array_as_prng_key([2384771982, 3928867769])
    init()
    self.jit(init)()
    # The jitted call stashes a tracer in key_list[0], showing it was staged.
    self.assertIsInstance(_prng_key_as_array(key_list[0]), core.Tracer)
  def test_jit_wrapped_attributes(self):
    """jit preserves functools.wraps-style metadata and custom attributes."""
    def f(x: int) -> int:
      """docstring of f."""
      return x + 1
    f.some_value = 4
    jf = self.jit(f)
    for attr in ["doc", "name", "module", "qualname", "annotations"]:
      self.assertEqual(
        {attr: getattr(f, f"__{attr}__")},
        {attr: getattr(jf, f"__{attr}__")})
    self.assertEqual(f.some_value, jf.some_value)
  def test_jit_python_builtin(self):
    """jit works on builtins like operator.add (with a static operand)."""
    x = jnp.array([1, 2])
    expected = x + 1
    jit_add = self.jit(operator.add, static_argnums=(1,))
    actual = jit_add(x, 1)
    self.assertArraysEqual(expected, actual)
  def test__infer_argnums_and_argnames(self):
    """_infer_argnums_and_argnames fills in whichever of the two is None."""
    def f(x, y=1):
      pass
    # Neither given: both stay empty.
    argnums, argnames = api._infer_argnums_and_argnames(
        f, argnums=None, argnames=None)
    assert argnums == ()
    assert argnames == ()
    # Only argnums given: argnames inferred from the signature.
    argnums, argnames = api._infer_argnums_and_argnames(
        f, argnums=0, argnames=None)
    assert argnums == (0,)
    assert argnames == ('x',)
    # Only argnames given: argnums inferred from the signature.
    argnums, argnames = api._infer_argnums_and_argnames(
        f, argnums=None, argnames='y')
    assert argnums == (1,)
    assert argnames == ('y',)
    # Both given: taken as-is, even if inconsistent.
    argnums, argnames = api._infer_argnums_and_argnames(
        f, argnums=0, argnames='y')  # no validation
    assert argnums == (0,)
    assert argnames == ('y',)
    def g(x, y, *args):
      pass
    # *args positions have no names, so only 'y' maps back to a name.
    argnums, argnames = api._infer_argnums_and_argnames(
        g, argnums=(1, 2), argnames=None)
    assert argnums == (1, 2)
    assert argnames == ('y',)
    def h(x, y, **kwargs):
      pass
    # **kwargs names have no positions, so argnums stays empty.
    argnums, argnames = api._infer_argnums_and_argnames(
        h, argnums=None, argnames=('foo', 'bar'))
    assert argnums == ()
    assert argnames == ('foo', 'bar')
  def test_jit_with_static_argnames(self):
    """static_argnums and static_argnames both accept positional or kwarg calls."""
    def f(x):
      assert x == 'foo'
      return 1
    f_nums = self.jit(f, static_argnums=0)
    assert f_nums('foo') == 1
    assert f_nums(x='foo') == 1
    f_names = self.jit(f, static_argnames='x')
    assert f_names('foo') == 1
    assert f_names(x='foo') == 1
  def test_new_static_argnum_on_keyword_arguments(self):
    """A static positional arg may be supplied by keyword."""
    f = self.jit(lambda x: x, static_argnums=0)
    y = f(x=4)
    assert y == 4
  def test_new_static_argnum_with_default_arguments(self):
    """A static arg may be omitted and take its default value."""
    f = self.jit(lambda x=4: x, static_argnums=0)
    y = f()
    assert y == 4
  def test_jit_with_mismatched_static_argnames(self):
    """Disagreeing static_argnums/static_argnames are both honored as given."""
    x_is_tracer, y_is_tracer = False, False
    def f(x, y):
      # The flags encode which arguments we expect to be traced (non-static).
      assert isinstance(x, core.Tracer) == x_is_tracer
      assert isinstance(y, core.Tracer) == y_is_tracer
      return 1
    # If both static_argnums and static_argnames are provided, they are allowed
    # to disagree and `jit` will respect the user's choices.
    f_nums = self.jit(f, static_argnums=1, static_argnames=())
    x_is_tracer, y_is_tracer = True, False
    assert f_nums(2, 'foo') == 1
    x_is_tracer, y_is_tracer = True, True
    assert f_nums(1, y=2) == 1
    f_names = self.jit(f, static_argnums=(), static_argnames='y')
    x_is_tracer, y_is_tracer = True, True
    assert f_names(2, 3) == 1
    x_is_tracer, y_is_tracer = True, False
    assert f_names(1, y='foo') == 1
    f_mixed = self.jit(f, static_argnums=(1,), static_argnames='x')
    x_is_tracer, y_is_tracer = True, False
    assert f_mixed(2, 'foo') == 1
    x_is_tracer, y_is_tracer = True, True
    assert f_mixed(1, y=3) == 1
    x_is_tracer, y_is_tracer = False, True
    assert f_mixed(x='foo', y=3) == 1
  # TODO(zhangqiaorjc): Test pruning constants after DCE pass prunes primitive
  # applications.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_num_args={}".format(num_args),
       "num_args": num_args}
      for num_args in [2, 3, 4]))
  def test_jit_with_pruned_args(self, num_args):
    """Unused jit arguments are pruned: only one device_put should occur."""
    def f(*args):
      used = np.array(2)
      return args[1] + used  # only args[1] is actually used
    f_pruned = self.jit(f)
    args = range(num_args)
    with jtu.count_device_put() as count:
      np.testing.assert_allclose(f_pruned(*args), 3)
    self.assertEqual(count[0], 1)
  @unittest.skipIf(jax._src.lib._xla_extension_version <= 36,
                   "Test requires jaxlib 0.1.71")
  def testBuffersAreFreedPromptly(self):
    """Intermediate input buffers are garbage collected during execution."""
    # Regression test for a bug where garbage collection was delayed too long
    # for NumPy buffers that are aliased zero-copy by the runtime.
    @self.jit
    def f(x):
      return x + 1
    refs = []
    x = np.ones((10000,), np.float32)
    for step in range(1000):
      x = f(x)
      refs.append(weakref.ref(x))
    x = np.asarray(x)
    # We expect most of the input buffers to have been garbage
    # collected in parallel with the execution. We can't call
    # block_until_ready() here because it would force a garbage collection.
    live_refs = len([ref for ref in refs if ref() is not None])
    self.assertLessEqual(live_refs, 100)
  def test_jit_lower_compile(self):
    """jit(f).lower(...).compile() yields a callable executable."""
    def f(x):
      return jnp.sqrt(x ** 2) + 1.
    f_jit = self.jit(f)
    f_low = f_jit.lower(1.)
    f_exe = f_low.compile()
    self.assertAllClose(f_exe(1.), 2.)
  def test_jit_lower_compile_in_tree_mismatch(self):
    """Calling a compiled executable with a different pytree structure errors."""
    def f(x):
      return jnp.sqrt(x ** 2) + 1.
    f_jit = self.jit(f)
    f_low = f_jit.lower(1.)
    f_exe = f_low.compile()
    self.assertRaisesRegex(
        TypeError, "function compiled for .*, called with .*",
        lambda: f_exe([1.]))
  def test_jit_lower_compile_trivial(self):
    """lower/compile works for a trivial (identity) computation."""
    def f(x): return x
    out = self.jit(f).lower(1.).compile()(4.)
    self.assertAllClose(out, 4.)
  def test_jit_lower_compile_trivial_in_tree_mismatch(self):
    """Tree-structure mismatch errors also apply to trivial computations."""
    def f(x): return x
    f_exe = self.jit(f).lower(1.).compile()
    self.assertRaisesRegex(
        TypeError, "function compiled for .*, called with .*",
        lambda: f_exe([4.]))
  def test_jit_lower_compile_arg_type_mismatch(self):
    """Calling a compiled executable with the wrong dtype errors clearly."""
    def f(x):
      return jnp.sqrt(x ** 2) + 1.
    x = jnp.array(1, dtype=int)
    x_f32 = x.astype(jnp.float32)
    x_i32 = x.astype(jnp.int32)
    f_exe = self.jit(f).lower(x_f32).compile()
    self.assertRaisesRegex(
        TypeError,
        "Computation compiled for input types:\n.*float32.*\n"
        "called with:\n.*int32.*",
        lambda: f_exe(x_i32))
  def test_jit_lower_compile_multi_arg(self):
    """lower/compile handles multiple arguments."""
    def f(*args):
      x, *_ = args
      return jnp.sqrt(x ** 2) + 1.
    f_exe = self.jit(f).lower(1., 1.).compile()
    self.assertAllClose(f_exe(1., 1.), 2.)
  def test_jit_lower_compile_trivial_multi_arg(self):
    """lower/compile handles multiple arguments for trivial computations."""
    def f(*args):
      x, *_ = args
      return x
    f_exe = self.jit(f).lower(1., 1.).compile()
    self.assertAllClose(f_exe(1., 1.), 1.)
class PythonJitTest(CPPJitTest):
  """Runs the whole CPPJitTest suite against the pure-Python jit."""
  @property
  def jit(self):
    # Select the Python fallback implementation instead of the C++ fast path.
    return api._python_jit
class APITest(jtu.JaxTestCase):
  def test_grad_bad_input(self):
    """grad rejects non-JAX-typed inputs (e.g. strings) with a TypeError."""
    def f(x):
      return x
    self.assertRaisesRegex(
        TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
        lambda: grad(f)("foo"))
def test_grad_argnums(self):
def f(x, y, z, flag=False):
assert flag
return 1.0 * x + 2.0 * y + 3.0 * z
assert grad(f)(1.0, 1.0, 1.0, flag=True) == 1.0
assert grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == 2.0
assert grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (3.0, 1.0)
  def test_value_and_grad_argnums(self):
    """value_and_grad returns (value, grads) honoring argnums."""
    def f(x, y, z, flag=False):
      assert flag
      return 1.0 * x + 2.0 * y + 3.0 * z
    y = f(1.0, 1.0, 1.0, flag=True)
    assert api.value_and_grad(f)(1.0, 1.0, 1.0, flag=True) == (y, 1.0)
    assert api.value_and_grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == (y, 2.0)
    assert api.value_and_grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (y, (3.0, 1.0))
  def test_grad_of_jit(self):
    """grad(jit(f)) traces f only once; later calls hit the jit cache."""
    side = []
    @jit
    def f(x):
      side.append(None)  # records each trace of f
      return x * x
    assert grad(f)(1.0) == 2.0
    assert len(side) == 1
    assert grad(f)(2.0) == 4.0
    assert len(side) == 1  # second call reuses the cached trace
  def test_jit_of_grad(self):
    """jit(grad(f)) also traces f only once across calls."""
    side = []
    @jit
    def f(x):
      side.append(None)  # records each trace of f
      return x * x
    g = jit(grad(f))
    assert g(1.0) == 2.0
    assert len(side) == 1
    assert g(2.0) == 4.0
    assert len(side) == 1  # second call reuses the cached trace
  def test_bad_input(self):
    """Both grad and jit reject string inputs with the same TypeError."""
    def f(x):
      return x
    self.assertRaisesRegex(
      TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
      lambda: grad(f)("foo"))
    self.assertRaisesRegex(
      TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
      lambda: jit(f)("foo"))
  def test_grad_tuple_output(self):
    """grad of a tuple-valued function raises: output must be scalar."""
    jtu.check_raises(lambda: grad(lambda x: (x,x))(1.0), TypeError,
                     "Gradient only defined for scalar-output functions. ")
  def test_grad_unit_output(self):
    """grad of an empty-tuple-valued function raises: output must be scalar."""
    jtu.check_raises(lambda: grad(lambda x: ())(np.zeros(3)), TypeError,
                     "Gradient only defined for scalar-output functions. ")
  def test_grad_nonscalar_output(self):
    """grad of an array-valued function raises: output must be scalar."""
    jtu.check_raises(lambda: grad(lambda x: x)(np.zeros(3)), TypeError,
                     "Gradient only defined for scalar-output functions. ")
  def test_unwrapped_numpy(self):
    """Calling raw numpy (np.exp) on a tracer raises a conversion error."""
    def f(x):
      return np.exp(x)
    with self.assertRaisesRegex(Exception, "The numpy.ndarray conversion .*"):
      grad(f)(np.zeros(3))
  def test_binop_mismatch(self):
    """Shape-incompatible add raises the same error eagerly and under grad."""
    def f(x, y):
      return x + y
    jtu.check_raises(
        lambda: f(jnp.zeros(3), jnp.zeros(4)),
        TypeError,
        "add got incompatible shapes for broadcasting: (3,), (4,).")
    jtu.check_raises(
        lambda: grad(f)(np.zeros(3), np.zeros(4)),
        TypeError,
        "add got incompatible shapes for broadcasting: (3,), (4,).")
  def test_dot_mismatch(self):
    """Shape-incompatible dot raises a TypeError under grad."""
    def f(x, y):
      return jnp.dot(x, y)
    self.assertRaisesRegex(
      TypeError, "Incompatible shapes for dot: got \\(3L?,\\) and \\(4L?,\\).",
      lambda: grad(f)(np.zeros(3), np.zeros(4)))
  def test_abstract_error_message(self):
    """Casting a tracer via float/complex/int suggests using .astype instead."""
    for castfun in [float, complex, int]:
      def f(x):
        return castfun(x)
      self.assertRaisesRegex(
          TypeError,
          f"[Tt]ry using `x.astype\\({castfun.__name__}\\)`",
          lambda: jit(f)(1.0))
  def test_switch_value_jit(self):
    """Branching on a tracer works under grad but errors under jit."""
    def f(x):
      y = x > 0
      if y:
        return x
      else:
        return -x
    assert grad(f)(1.0) == 1.0
    assert grad(f)(-1.0) == -1.0
    with self.assertRaisesRegex(core.ConcretizationTypeError,
                                "Abstract tracer value"):
      jit(f)(1)
  def test_list_index_err(self):
    """Indexing a list with a tracer errors unless the arg is static."""
    L = [1, 2, 3]
    def f(n):
      return L[n]
    assert jit(f, static_argnums=(0,))(0) == L[0]
    self.assertRaisesRegex(
        TypeError,
        r"The __index__\(\) method was called on the JAX Tracer object.*",
        lambda: jit(f)(0))
  def test_range_err(self):
    """range() over a tracer errors unless the bound is static."""
    def f(x, n):
      for i in range(n):
        x = x + i
      return x
    assert jit(f, static_argnums=(1,))(0, 5) == 10
    self.assertRaisesRegex(
        TypeError,
        r"The __index__\(\) method was called on the JAX Tracer object.*",
        lambda: jit(f)(0, 5))
  def test_cast_int(self):
    """int() on a tracer raises a concretization/interpretation error."""
    f = lambda x: int(x)
    self.assertRaisesRegex(
        TypeError,
        "('(?:JaxprTracer|DynamicJaxprTracer)' object cannot be interpreted as an integer"
        "|Abstract tracer value encountered where concrete value is expected.*)", lambda: jit(f)(0))
  def test_casts(self):
    """hex()/oct() on a tracer raise the __index__ tracer error."""
    for castfun in [hex, oct]:
      f = lambda x: castfun(x)
      self.assertRaisesRegex(
          TypeError,
          r"The __index__\(\) method was called on the JAX Tracer object.*", lambda: jit(f)(0))
  def test_unimplemented_interpreter_rules(self):
    """A fresh Primitive reports which interpreter rule is missing, per mode."""
    foo_p = Primitive('foo')
    def foo(x):
      return foo_p.bind(x)
    # No rules defined at all: each transformation names its missing rule.
    jtu.check_raises(lambda: foo(1.0), NotImplementedError,
                     "Evaluation rule for 'foo' not implemented")
    jtu.check_raises(lambda: jit(foo)(1.0), NotImplementedError,
                     "Abstract evaluation for 'foo' not implemented")
    jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,
                     "Differentiation rule for 'foo' not implemented")
    # Adding abstract eval gets jit further, to the missing XLA rule.
    foo_p.def_abstract_eval(lambda x: x)
    jtu.check_raises(lambda: jit(foo)(1.0), NotImplementedError,
                     "XLA translation rule for primitive 'foo' not found")
    # Adding impl + jvp gets grad further, to the missing transpose rule.
    foo_p.def_impl(lambda x: x)
    ad.defjvp(foo_p, lambda g, x: foo(g))
    jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,
                     "Transpose rule (for reverse-mode differentiation) for 'foo' not implemented")
  def test_is_subclass(self):
    """JAX array classes subclass jnp.ndarray but never np.ndarray."""
    self.assertTrue(issubclass(xla.DeviceArray, jnp.ndarray))
    self.assertTrue(issubclass(xla._CppDeviceArray, jnp.ndarray))
    self.assertTrue(issubclass(pxla.ShardedDeviceArray, jnp.ndarray))
    self.assertTrue(issubclass(pxla._ShardedDeviceArray, jnp.ndarray))
    self.assertFalse(issubclass(np.ndarray, jnp.ndarray))
    self.assertFalse(issubclass(xla.DeviceArray, np.ndarray))
    self.assertFalse(issubclass(xla._CppDeviceArray, np.ndarray))
    self.assertFalse(issubclass(pxla.ShardedDeviceArray, np.ndarray))
    self.assertFalse(issubclass(pxla._ShardedDeviceArray, np.ndarray))
  def test_is_instance(self):
    """Inside jit/vmap, values are jnp.ndarray instances, not np.ndarray."""
    def f(x):
      self.assertIsInstance(x, jnp.ndarray)
      self.assertNotIsInstance(x, np.ndarray)
      return x + 2
    jit(f)(3)
    jax.vmap(f)(np.arange(3))
  def test_device_put_and_get(self):
    """device_put/device_get round-trip arrays and nested pytrees."""
    x = np.arange(12.).reshape((3, 4)).astype("float32")
    dx = api.device_put(x)
    self.assertIsInstance(dx, xla.DeviceArray)
    self.assertIsInstance(dx, jnp.ndarray)
    self.assertNotIsInstance(dx, np.ndarray)
    x2 = api.device_get(dx)
    self.assertNotIsInstance(x2, jnp.ndarray)
    self.assertIsInstance(x2, np.ndarray)
    assert np.all(x == x2)
    # Pytree containers are preserved structurally on the round trip.
    y = [x, (2 * x, 3 * x)]
    dy = api.device_put(y)
    y2 = api.device_get(dy)
    self.assertIsInstance(y2, list)
    self.assertIsInstance(y2[0], np.ndarray)
    assert np.all(y2[0] == x)
    self.assertIsInstance(y2[1], tuple)
    self.assertIsInstance(y2[1][0], np.ndarray)
    assert np.all(y2[1][0] == 2 * x)
    self.assertIsInstance(y2[1][1], np.ndarray)
    assert np.all(y2[1][1] == 3 * x)
  def test_device_get_scalar(self):
    """device_get passes plain Python scalars through untouched."""
    x = np.arange(12.).reshape((3, 4)).astype("float32")
    x = api.device_put(x)
    self.assertIsInstance(x, xla.DeviceArray)
    y = [x, 2]
    y2 = api.device_get(y)
    self.assertIsInstance(y2, list)
    self.assertIsInstance(y2[0], np.ndarray)
    assert np.all(y2[0] == x)
    self.assertIsInstance(y2[1], int)
    self.assertEqual(y2[1], 2)
  @parameterized.parameters([(3,)], [(2, 0)])
  def test_device_put_across_devices(self, shape):
    """device_put(device=...) moves data between local devices."""
    if len(api.local_devices()) < 2:
      raise unittest.SkipTest("this test requires multiple devices")
    d1, d2 = api.local_devices()[:2]
    data = np.random.randn(*shape).astype(np.float32)
    x = api.device_put(data, device=d1)
    self.assertEqual(x.device_buffer.device(), d1)
    y = api.device_put(x, device=d2)
    self.assertEqual(y.device_buffer.device(), d2)
    np.testing.assert_array_equal(data, np.array(y))
    # Make sure these don't crash
    api.device_put(x)
    api.device_put(y)
  @jtu.skip_on_devices("cpu")
  def test_device_put_across_platforms(self):
    """device_put(device=cpu) moves values off the default accelerator."""
    default_device = jax.devices()[0]
    cpu_device = jax.devices("cpu")[0]
    np_arr = np.array([1,2,3])
    scalar = 1
    device_arr = jnp.array([1,2,3])
    assert device_arr.device_buffer.device() is default_device
    for val in [np_arr, device_arr, scalar]:
      x = api.device_put(val, device=cpu_device)
      self.assertEqual(x.device_buffer.device(), cpu_device)
  @jtu.skip_on_devices("tpu")
  def test_jacobian(self):
    """jacfwd and jacrev agree; for a linear map both equal the matrix."""
    R = np.random.RandomState(0).randn
    A = R(4, 3)
    x = R(3)
    f = lambda x: jnp.dot(A, x)
    assert np.allclose(jacfwd(f)(x), A)
    assert np.allclose(jacrev(f)(x), A)
    f = lambda x: jnp.tanh(jnp.dot(A, x))
    assert np.allclose(jacfwd(f)(x), jacrev(f)(x))
  @jtu.skip_on_devices("tpu")
  def test_hessian(self):
    """Hessian of the quadratic form x^T A x is A + A^T."""
    R = np.random.RandomState(0).randn
    A = R(4, 4)
    x = R(4)
    f = lambda x: jnp.dot(x, jnp.dot(A, x))
    assert np.allclose(hessian(f)(x), A + A.T)
  def test_std_basis(self):
    """_std_basis builds identity-like basis pytrees for jacobian columns."""
    basis = api._std_basis(jnp.zeros(3))
    assert getattr(basis, "shape", None) == (3, 3)
    assert np.allclose(basis, np.eye(3))
    basis = api._std_basis(jnp.zeros((3, 3)))
    assert getattr(basis, "shape", None) == (9, 3, 3)
    assert np.allclose(basis, np.eye(9).reshape(9, 3, 3))
    # For a pytree input the leading axis spans all 16 = 1 + 3 + 12 entries.
    basis = api._std_basis([0., (jnp.zeros(3), jnp.zeros((3, 4)))])
    assert isinstance(basis, list) and len(basis) == 2
    assert getattr(basis[0], "shape", None) == (16,)
    assert isinstance(basis[1], tuple) and len(basis[1]) == 2
    assert getattr(basis[1][0], "shape", None) == (16, 3)
    assert getattr(basis[1][1], "shape", None) == (16, 3, 4)
  @jtu.skip_on_devices("tpu")
  def test_jacobian_on_pytrees(self):
    """jacfwd/jacrev handle pytree inputs, outputs, and argnums tuples."""
    for jacfun in [jacfwd, jacrev]:
      ans = jacfun(lambda x, y: (x, y))(0., 1.)
      expected = (1., 0.)
      self.assertAllClose(ans, expected, check_dtypes=False)
      ans = jacfun(lambda x, y: (x, y), 1)(0., 1.)
      expected = (0., 1.)
      self.assertAllClose(ans, expected, check_dtypes=False)
      ans = jacfun(lambda x, y: (x, y), (0, 1))(0., 1.)
      expected = ((1., 0.),
                  (0., 1.),)
      self.assertAllClose(ans, expected, check_dtypes=False)
      ans = jacfun(lambda x: x[:2])((1., 2., 3.))
      expected = ((1., 0., 0.),
                  (0., 1., 0.))
      self.assertAllClose(ans, expected, check_dtypes=False)
      R = np.random.RandomState(0).randn
      x = R(2)
      y = R(3)
      ans = jacfun(lambda x, y: {'x': x, 'xy': jnp.outer(x, y)})(x, y)
      expected = {'x': np.eye(2),
                  'xy': np.kron(np.eye(2), y[:, None]).reshape(2, 3, 2)}
      self.assertAllClose(ans, expected, check_dtypes=False)
  @jtu.skip_on_devices("tpu")
  def test_hessian_on_pytrees(self):
    """hessian supports pytree (tuple) inputs."""
    ans = hessian(lambda x: jnp.array(x)**2)((1., 2.))
    expected = ((np.array([2., 0.]), np.array([0., 0.])),
                (np.array([0., 0.]), np.array([0., 2.])))
    self.assertAllClose(ans, expected, check_dtypes=False)
  @jtu.skip_on_devices("tpu")
  def test_issue1372(self):
    """All jacfwd/jacrev compositions produce correctly-shaped Hessian blocks."""
    def quad(x):
      return jnp.dot(x, x)
    def f(x, u):
      return quad(x) + quad(u)
    x, u = jnp.ones(5), jnp.ones(2)
    rev = jacrev
    fwd = jacfwd
    # Diagonal entries
    self.assertEqual(rev(rev(f, 0), 0)(x, u).shape, (5, 5))
    self.assertEqual(rev(fwd(f, 0), 0)(x, u).shape, (5, 5))
    self.assertEqual(fwd(rev(f, 0), 0)(x, u).shape, (5, 5))
    self.assertEqual(fwd(fwd(f, 0), 0)(x, u).shape, (5, 5))
    self.assertEqual(rev(rev(f, 1), 1)(x, u).shape, (2, 2))
    self.assertEqual(rev(fwd(f, 1), 1)(x, u).shape, (2, 2))
    self.assertEqual(fwd(rev(f, 1), 1)(x, u).shape, (2, 2))
    self.assertEqual(fwd(fwd(f, 1), 1)(x, u).shape, (2, 2))
    # Off-diagonal entries by reverse-mode on the outside
    self.assertEqual(rev(rev(f, 1), 0)(x, u).shape, (2, 5))
    self.assertEqual(rev(fwd(f, 1), 0)(x, u).shape, (2, 5))
    self.assertEqual(rev(rev(f, 0), 1)(x, u).shape, (5, 2))
    self.assertEqual(rev(fwd(f, 0), 1)(x, u).shape, (5, 2))
    # Off-diagonal entries by forward-mode on the outside
    self.assertEqual(fwd(rev(f, 1), 0)(x, u).shape, (2, 5))
    self.assertEqual(fwd(fwd(f, 1), 0)(x, u).shape, (2, 5))
    self.assertEqual(fwd(rev(f, 0), 1)(x, u).shape, (5, 2))
    self.assertEqual(fwd(fwd(f, 0), 1)(x, u).shape, (5, 2))
  def test_large_device_constant(self):
    """jit handles large (2e6-element) device constants."""
    ans = jit(lambda x: 2 * x)(jnp.ones(int(2e6)))  # doesn't crash
    self.assertAllClose(ans, np.ones(int(2e6)) * 2., check_dtypes=False)
  def test_grad_and_aux_basic(self):
    """grad(..., has_aux=True) returns (gradient, auxiliary output)."""
    g, aux = grad(lambda x: (x**3, [x**2]), has_aux=True)(3.)
    self.assertAllClose(g, grad(lambda x: x**3)(3.))
    self.assertAllClose(aux, [9.], check_dtypes=False)
  def test_grad_and_aux_error(self):
    """has_aux=True requires the function to return a two-element tuple."""
    with self.assertRaisesRegex(TypeError, "two-element tuple"):
      grad(lambda x: (1, 2, 3), has_aux=True)(1.)
    with self.assertRaisesRegex(TypeError, "two-element tuple"):
      grad(lambda x: x, has_aux=True)(1.)
    with self.assertRaisesRegex(TypeError, "two-element tuple"):
      grad(lambda x: (x,), has_aux=True)(1.)
  def test_grad_and_aux_nested(self):
    """Aux outputs can themselves be differentiated, also under jit."""
    def f(x):
      g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x)
      return aux[0]
    f2 = lambda x: x**3
    self.assertEqual(grad(f)(4.), grad(f2)(4.))
    self.assertEqual(jit(grad(f))(4.), grad(f2)(4.))
    self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.))
    def f(x):
      g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x)
      return aux[0] * jnp.sin(x)
    f2 = lambda x: x**3 * jnp.sin(x)
    self.assertEqual(grad(f)(4.), grad(f2)(4.))
    self.assertEqual(jit(grad(f))(4.), grad(f2)(4.))
    self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.))
  def test_grad_and_aux_constant(self):
    """Constant (non-traced) aux values come back as plain values."""
    g, aux = grad(lambda x: (x**3, [4.]), has_aux=True)(4.)
    self.assertEqual(g, grad(lambda x: x**3)(4.))
    self.assertEqual(aux, [4.])
    g, aux = grad(lambda x: (x**3, [x**2, 4.]), has_aux=True)(4.)
    self.assertEqual(g, grad(lambda x: x**3)(4.))
    self.assertEqual(aux, [4.**2, 4.])
  def test_grad_and_aux_no_tracers(self):
    """Aux outputs are fully concretized: no tracers leak to the caller."""
    # see https://github.com/google/jax/issues/1950
    def f(x):
      aux = dict(identity=x, p1=x+1)
      return x ** 2, aux
    _, aux = jax.grad(f, has_aux=True)(3.)
    self.assertIsInstance(aux, dict)
    for val in aux.values():
      self.assertNotIsInstance(val, core.Tracer)
  def test_jvp_mismatched_arguments(self):
    """jvp validates tree structure, dtype, and shape of primals vs tangents."""
    self.assertRaisesRegex(
      TypeError,
      ("primal and tangent arguments to jax.jvp must have the same tree "
       "structure"),
      lambda: api.jvp(lambda x, y: x * y, (np.float32(2),), ()))
    # If primals and tangents must both be tuples or both lists
    self.assertRaisesRegex(
      TypeError,
      ("primal and tangent arguments to jax.jvp must have the same tree "
       "structure"),
      lambda: api.jvp(lambda x, y: x * y, (np.float32(2),), [np.float32(2)]))
    self.assertRaisesRegex(
      TypeError,
      "primal and tangent arguments to jax.jvp do not match.",
      lambda: api.jvp(lambda x: -x, (np.float16(2),), (np.float32(4),)))
    # If primals and tangents are not of the same shape then raise error
    fun = lambda x: x+1
    with self.assertRaisesRegex(
      ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.array([1.,2.,3.]),), (jnp.array([1.,2.,3.,4.]),))
    with self.assertRaisesRegex(
      ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.float32(10.),), (jnp.array([1.,2.,3.], dtype=jnp.float32),))
    with self.assertRaisesRegex(
      ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.array([1.,2.,3.], dtype=jnp.float32),), (jnp.float32(20.),))
    with self.assertRaisesRegex(
      ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.array([1.,2.,3.]),), (20.,))
  def test_jvp_non_tuple_arguments(self):
    """jvp requires primals and tangents to be tuples or lists."""
    def f(x, y): return x + y
    self.assertRaisesRegex(
        TypeError,
        "primal and tangent arguments to jax.jvp must be tuples or lists; found float and tuple.",
        lambda: api.jvp(f, 0., (1.,)))
    self.assertRaisesRegex(
        TypeError,
        "primal and tangent arguments to jax.jvp must be tuples or lists; found tuple and ndarray.",
        lambda: api.jvp(f, (0.,), np.array([1., 2.])))
  def test_vjp_mismatched_arguments(self):
    """vjp pullback validates the cotangent's tree structure and type."""
    _, pullback = api.vjp(lambda x, y: x * y, np.float32(3), np.float32(4))
    self.assertRaisesRegex(
      TypeError,
      "Tree structure of cotangent input.*does not match",
      lambda: pullback((np.float32(7), np.float32(100))))
    self.assertRaisesRegex(
      TypeError,
      "Type of cotangent input to vjp pullback.*is not the expected tangent type",
      lambda: pullback((np.float16(42))))
  def test_vjp_bad_cotangent_shape(self):
    """vjp pullback rejects cotangents whose shape differs from the primal."""
    x = np.ones((2, 5), dtype=np.float32)
    y = np.ones((5, 3), dtype=np.float32)
    def f_jax(x, y):
      return jnp.matmul(x, y)
    res, pullback = jax.vjp(f_jax, x, y)
    with self.assertRaisesRegex(
        ValueError,
        "Shape of cotangent input to vjp pullback function .* must be the same as the shape of corresponding primal input .*"):
      pullback(np.ones((2, 4), dtype=np.float32))
  def test_jvp_jit_cached(self):
    """Bug in caching in presence of JVP and JIT."""
    def func(x):
      def inner(y):
        return y * x
      # Must have two calls to the inner jit (the second one hits the cache)
      res1 = api.jit(inner)(4.)
      res2 = api.jit(inner)(5.)
      return res1 + res2
    self.assertAllClose((45., 9.), api.jvp(func, (5.,), (1.,)))
  def test_linear_transpose_abstract(self):
    """linear_transpose accepts duck-typed (shape/dtype only) example args."""
    x = types.SimpleNamespace(shape=(3,), dtype=np.dtype(np.float32))
    y = jnp.arange(3, dtype=np.float32)
    transpose_fun = api.linear_transpose(lambda x: 2 * x, x)
    z, = transpose_fun(y)
    self.assertArraysEqual(2 * y, z, check_dtypes=True)
  def test_linear_transpose_integer(self):
    """linear_transpose works for integer-valued linear functions."""
    f = lambda x: 2 * x
    transpose = api.linear_transpose(f, 1)
    actual, = transpose(3)
    expected = 6
    self.assertEqual(actual, expected)
  def test_linear_transpose_error(self):
    """linear_transpose validates dtypes and cotangent trees/types."""
    with self.assertRaisesRegex(
        TypeError, "linear_transpose only supports"):
      api.linear_transpose(lambda x: 2. * x, 1)
    transpose_fun = api.linear_transpose(lambda x: [x, x], 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent tree does not match"):
      transpose_fun(1.0)
    transpose_fun = api.linear_transpose(lambda x: jnp.stack([x, x]), 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent type does not match"):
      transpose_fun(1.0)
    transpose_fun = api.linear_transpose(lambda x: 1j * x, 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent type does not match"):
      transpose_fun(1.0)
    transpose_fun = api.linear_transpose(lambda x: x, 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent type does not match"):
      transpose_fun(1j)
  def test_linear_transpose_complex(self):
    """Transpose of complex scalar multiplication is (non-conjugate) transpose."""
    f = lambda x: (1 + 2j) * x
    transpose = api.linear_transpose(f, 1j)
    actual, = transpose(3 + 4j)
    expected = -5 + 10j
    self.assertEqual(actual, expected)
  def test_linear_transpose_zeros(self):
    """Transpose of a projection puts symbolic zeros in untouched slots."""
    f = lambda x: x[0]
    transpose = api.linear_transpose(f, [1., 2.])
    actual, = transpose(3.)
    expected = [3., 0.]
    self.assertEqual(actual, expected)
  def test_complex_grad_raises_error(self):
    """grad of complex input errors unless holomorphic=True is given."""
    self.assertRaises(TypeError, lambda: grad(lambda x: jnp.sin(x))(1 + 2j))
  def test_holomorphic_grad(self):
    """grad(..., holomorphic=True) computes the complex derivative."""
    out = grad(lambda x: jnp.sin(x), holomorphic=True)(1 + 2j)
    expected = 2.0327230070196656 - 3.0518977991518j
    self.assertAllClose(out, expected, check_dtypes=False)
  def test_nonholomorphic_grad(self):
    """grad handles real-valued functions of complex inputs."""
    zs = 0.5j * np.arange(5) + np.arange(5)
    def f(z):
      return jnp.sum(jnp.cos(jnp.abs(z)))
    ans = grad(f)(zs)
    expected = np.array([ 0.          +0.j,
                         -0.80430663+0.40215331j,
                         -0.70368982+0.35184491j,
                          0.1886467 -0.09432335j,
                          0.86873727-0.43436864j])
    self.assertAllClose(ans, expected, check_dtypes=False,
                        atol=jtu.default_gradient_tolerance,
                        rtol=jtu.default_gradient_tolerance)
  def test_complex_output_jacrev_raises_error(self):
    """jacrev of a complex-output function on complex input raises."""
    self.assertRaises(TypeError, lambda: jacrev(lambda x: jnp.sin(x))(1 + 2j))
  def test_nonholomorphic_jacrev(self):
    """jacrev agrees with grad for real-valued functions of complex inputs."""
    # code based on https://github.com/google/jax/issues/603
    zs = 0.5j * np.arange(5) + np.arange(5)
    def f(z):
      return jnp.cos(jnp.linalg.norm(2 * z))
    ans = jacrev(f)(zs)
    expected = grad(f)(zs)
    self.assertAllClose(ans, expected)
  def test_heterogeneous_jacfwd(self):
    """jacfwd handles pytree inputs with mixed dtypes (f16/f32)."""
    # See https://github.com/google/jax/issues/7157
    # See https://github.com/google/jax/issues/7780
    x = np.array([2.0], dtype=np.float16)
    y = np.array([3.0], dtype=np.float32)
    a = (x, y)
    def f(tup):
      jtu._check_dtypes_match(tup, a)
      x, y = tup
      return x, y, x + y
    actual = jacfwd(f)(a)
    # In forward mode each row of the jacobian carries the output's dtype.
    desired = ((np.array(1., dtype=np.float16), np.array(0., dtype=np.float16)),
               (np.array(0., dtype=np.float32), np.array(1., dtype=np.float32)),
               (np.array(1., dtype=np.float32), np.array(1., dtype=np.float32)))
    jtu._check_dtypes_match(actual, desired)
    jtu.check_eq(actual, desired)
  def test_heterogeneous_jacrev(self):
    """jacrev handles pytree inputs with mixed dtypes (f16/f32)."""
    # See https://github.com/google/jax/issues/7157
    # See https://github.com/google/jax/issues/7780
    x = np.array([2.0], dtype=np.float16)
    y = np.array([3.0], dtype=np.float32)
    a = (x, y)
    def f(tup):
      jtu._check_dtypes_match(tup, a)
      x, y = tup
      return x, y, x + y
    actual = jacrev(f)(a)
    # In reverse mode each column of the jacobian carries the input's dtype.
    desired = ((np.array(1., dtype=np.float16), np.array(0., dtype=np.float32)),
               (np.array(0., dtype=np.float16), np.array(1., dtype=np.float32)),
               (np.array(1., dtype=np.float16), np.array(1., dtype=np.float32)))
    jtu._check_dtypes_match(actual, desired)
    jtu.check_eq(actual, desired)
  def test_heterogeneous_grad(self):
    """grad handles pytree inputs mixing complex and real leaves."""
    # See https://github.com/google/jax/issues/7157
    x = np.array(1.0+1j)
    y = np.array(2.0)
    a = (x, y)
    def f(tup):
      jtu._check_dtypes_match(tup, a)
      x, y = tup
      return jnp.square(jnp.abs(x)) + y
    actual = grad(f)(a)
    desired = (np.array(2 - 2j), np.array(1.))
    jtu._check_dtypes_match(actual, desired)
    jtu.check_eq(actual, desired)
  def test_complex_input_jacfwd_raises_error(self):
    """jacfwd on complex input (without holomorphic) raises a TypeError."""
    self.assertRaises(TypeError, lambda: jacfwd(lambda x: jnp.sin(x))(1 + 2j))
def test_legacy_devicearray_repr(self):
dx = device_put(3.)
str(dx.item()) # doesn't crash
def test_devicearray_repr(self):
x = device_put(jnp.zeros(3))
self.assertIsInstance(x, xla.DeviceArray)
repr(x) # doesn't crash
x = device_put(jnp.ones(3) + 1j * jnp.ones(3))
self.assertIsInstance(x, xla.DeviceArray)
repr(x) # doesn't crash
def test_devicearray_delete(self):
x = device_put(1.)
x.delete()
self.assertRaisesRegex(RuntimeError, "DeviceArray has been deleted.",
lambda: repr(x))
def test_devicearray_block_until_ready(self):
x = device_put(1.)
y = x.block_until_ready()
# Tests mostly that block_until_ready() does not produce an error.
self.assertTrue(y is x)
def test_devicearray_weakref_friendly(self):
x = device_put(1.)
y = weakref.ref(x)
self.assertEqual(y(), 1.)
del x
self.assertIsNone(y())
  def test_namedtuple_transparency(self):
    """namedtuples are treated as pytrees by grad and jit."""
    # See https://github.com/google/jax/issues/446
    Point = collections.namedtuple("Point", ["x", "y"])
    def f(pt):
      return jnp.sqrt(pt.x ** 2 + pt.y ** 2)
    pt = Point(1., 2.)
    f(pt)  # doesn't crash
    g = api.grad(f)(pt)
    self.assertIsInstance(g, Point)  # gradient pytree preserves the type
    f_jit = api.jit(f)
    self.assertAllClose(f(pt), f_jit(pt), check_dtypes=False)
  def test_namedtuple_subclass_transparency(self):
    """namedtuple subclasses also work as pytree inputs to grad."""
    # See https://github.com/google/jax/issues/806
    Point = collections.namedtuple("Point", ["x", "y"])
    class ZeroPoint(Point):
      def is_zero(self):
        return (self.x == 0) and (self.y == 0)
    pt = ZeroPoint(0., 0.)
    def f(pt):
      return 0. if pt.is_zero() else jnp.sqrt(pt.x ** 2 + pt.y ** 2)
    f(pt)  # doesn't crash
    _ = api.grad(f)(pt)
    # NOTE(review): this asserts on the input `pt`, which is trivially a
    # ZeroPoint; presumably the grad output `_` was meant — confirm intent.
    self.assertIsInstance(pt, ZeroPoint)
  @parameterized.parameters(1, 2, 3)
  def test_shape_dtype_struct(self, i):
    """ShapeDtypeStruct exposes shape, dtype, ndim, size, len, and repr."""
    s = api.ShapeDtypeStruct(shape=(i, 2, 3), dtype=jnp.float32)
    self.assertEqual(s.shape, (i, 2, 3))
    self.assertEqual(s.dtype, jnp.float32)
    self.assertEqual(s.ndim, 3)
    self.assertEqual(s.size, i * 2 * 3)
    self.assertLen(s, i)
    for f in (str, repr):
      self.assertEqual(
          f(s), "ShapeDtypeStruct(shape=({}, 2, 3), dtype=float32)".format(i))
  def test_shape_dtype_struct_scalar(self):
    """A scalar ShapeDtypeStruct has empty shape, size 1, and no len()."""
    s = api.ShapeDtypeStruct(shape=(), dtype=jnp.float32)
    self.assertEmpty(s.shape)
    self.assertEqual(s.size, 1)
    self.assertEqual(s.ndim, 0)
    with self.assertRaisesRegex(TypeError, "len[(][)] of unsized object"):
      _ = len(s)
  def test_shape_dtype_struct_hash(self):
    """ShapeDtypeStruct hashing depends on (shape, dtype)."""
    s1 = api.ShapeDtypeStruct(shape=(2, 3), dtype=jnp.float32)
    s2 = api.ShapeDtypeStruct(shape=(2, 3), dtype=jnp.float32)
    s3 = api.ShapeDtypeStruct(shape=(2, 4), dtype=jnp.float32)
    self.assertEqual(hash(s1), hash(s2))
    self.assertNotEqual(hash(s1), hash(s3))
def test_eval_shape(self):
def fun(x, y):
return jnp.tanh(jnp.dot(x, y) + 3.)
x = jnp.ones((2, 3))
y = jnp.ones((3, 4))
out_shape = api.eval_shape(fun, x, y)
self.assertEqual(out_shape.shape, (2, 4))
def test_eval_shape_constants(self):
def fun():
x = jnp.ones((2, 3))
y = jnp.ones((3, 4))
return jnp.tanh(jnp.dot(x, y) + 3.)
out_shape = api.eval_shape(fun)
self.assertEqual(out_shape.shape, (2, 4))
def test_eval_shape_tuple_unpacking(self):
def fun(x, y):
a, b = x
return a + b + y
x = (jnp.ones(2), jnp.ones(2))
y = 3.
out_shape = api.eval_shape(fun, x, y)
self.assertEqual(out_shape.shape, (2,))
def test_eval_shape_tuple_itemgetting(self):
def fun(x, y):
return x[0] + x[1] + y
x = (jnp.ones(2), jnp.ones(2))
y = 3.
out_shape = api.eval_shape(fun, x, y)
self.assertEqual(out_shape.shape, (2,))
def test_eval_shape_output_dict(self):
def fun(x, y):
return {'hi': x[0] + x[1] + y}
x = (jnp.ones(2), jnp.ones(2))
y = 3.
out_shape = api.eval_shape(fun, x, y)
out_shape = tree_util.tree_map(np.shape, out_shape)
self.assertEqual(out_shape, {'hi': (2,)})
def test_eval_shape_shape_error(self):
def fun(x, y):
return jnp.tanh(jnp.dot(x, y) + 3.)
x = jnp.ones((3, 3))
y = jnp.ones((4, 4))
self.assertRaises(TypeError, lambda: api.eval_shape(fun, x, y))
  def test_eval_shape_duck_typing(self):
    """eval_shape accepts any object exposing .shape and .dtype attributes."""
    def fun(A, b, x):
      return jnp.dot(A, x) + b
    class MyArgArray(object):
      def __init__(self, shape, dtype):
        self.shape = shape
        self.dtype = np.dtype(dtype)
    A = MyArgArray((3, 4), jnp.float32)
    b = MyArgArray((5,), jnp.float32)
    x = MyArgArray((4, 5), jnp.float32)
    out_shape = api.eval_shape(fun, A, b, x)
    self.assertEqual(out_shape.shape, (3, 5))
  def test_eval_shape_duck_typing2(self):
    """eval_shape accepts dict subclasses whose attrs alias their items."""
    # https://github.com/google/jax/issues/5683
    class EasyDict(dict):
      def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__dict__ = self
    x = EasyDict(shape=(3,), dtype=np.dtype('float32'))
    out_shape = api.eval_shape(lambda x: x, x)  # doesn't crash
    self.assertEqual(out_shape.shape, (3,))
  def test_eval_shape_names(self):
    """eval_shape propagates named shapes: psum over 'i' removes that axis."""
    def fun(x, y):
      return lax.psum(x, 'i') + y
    class MyArgArray(object):
      def __init__(self, shape, dtype, named_shape):
        self.shape = shape
        self.dtype = jnp.dtype(dtype)
        self.named_shape = named_shape
    x = MyArgArray((3, 2), jnp.float32, {'i': 10})
    y = MyArgArray((3, 2), jnp.float32, {'j': 5})
    # Both named axes must be in the axis env for the trace to resolve them.
    with core.extend_axis_env('i', 10, None):
      with core.extend_axis_env('j', 5, None):
        out_shape = api.eval_shape(fun, x, y)
    self.assertEqual(out_shape.named_shape, {'j': 5})
  def test_issue_871(self):
    """Linearized functions reject tangents inconsistent with the primals."""
    T = jnp.array([[1., 2.], [3., 4.], [5., 6.]])
    x = jnp.array([1, 2, 3])
    msg = ("linearized function called on tangent values inconsistent with "
           "the original primal values")
    y, f_jvp = api.linearize(jnp.sum, x)
    with self.assertRaisesRegex(ValueError, msg):
      f_jvp(T)
    # Same check with the function wrapped in jit.
    y, f_jvp = api.linearize(api.jit(jnp.sum), x)
    with self.assertRaisesRegex(ValueError, msg):
      f_jvp(T)
  def test_grad_of_int_errors(self):
    """grad on an integer input raises unless allow_int=True is passed."""
    # Errors without allow_int=True
    dfn = grad(lambda x: x ** 2)
    self.assertRaisesRegex(
      TypeError,
      (r"grad requires real- or complex-valued inputs \(input dtype that is a "
       r"sub-dtype of np.inexact\), but got int.*."),
      lambda: dfn(3))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_jvp_of_int_identity(self):
    """jvp of identity on an int primal yields a float0 tangent."""
    primals = (1,)
    tangents = (np.zeros(shape=(), dtype=float0),)
    _, out = api.jvp(lambda x: x, primals, tangents)
    self.assertEqual(out, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_jvp_of_int_add(self):
    """jvp through integer addition keeps the float0 tangent."""
    primals = (2,)
    tangents = (np.zeros(shape=(), dtype=float0),)
    _, out_tangent = api.jvp(lambda x: x+1, primals, tangents)
    self.assertEqual(out_tangent, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_jit_jvp_of_int(self):
    """jvp of a jitted int function also produces a float0 tangent."""
    primals = (2,)
    tangents = (np.zeros(shape=(), dtype=float0),)
    _, out_tangent = api.jvp(jax.jit(lambda x: x+1), primals, tangents)
    self.assertEqual(out_tangent, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_vjp_of_int_index(self):
    """vjp wrt an integer index argument yields a float0 cotangent."""
    primal, fn_vjp = api.vjp(lambda x, i: x[i], np.ones(2)*2, 1)
    tangent_x, tangent_i = fn_vjp(1.)
    self.assertEqual(primal, 2.)
    self.assertAllClose(tangent_x, jnp.array([0., 1.]))
    self.assertEqual(tangent_i, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_vjp_of_int_shapes(self):
    """float0 cotangents for int inputs keep the input's original shape."""
    out, fn_vjp = api.vjp(lambda x: lax.reshape(x, (2, 2)), np.ones((4, 1),
                                                                    dtype=int))
    tangent, = fn_vjp(out)
    self.assertArraysEqual(tangent, np.zeros(shape=(4, 1), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_jit_vjp_of_int(self):
    """A jitted vjp function over int inputs returns float0 cotangents."""
    primal, fn_vjp = api.vjp(lambda x, y: x+y, 2, 1)
    tangent_x, tangent_i = jax.jit(fn_vjp)(1)
    self.assertEqual(primal, 3)
    self.assertEqual(tangent_x, np.zeros(shape=(), dtype=float0))
    self.assertEqual(tangent_i, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_vjp_of_int_fulllike(self):
    """vjp of lax.full_like with an int fill value yields float0 for it."""
    # Regression test for tangent and cotangent mismatch in convert_element_type
    # transpose rule wrt a ConstVar
    f = lax.full_like
    out, vjp = api.vjp(f, np.zeros((2, 2)), 1)
    self.assertAllClose(out, jnp.ones((2, 2)))
    tangent_x, tangent_y = vjp(out)
    self.assertAllClose(tangent_x, jnp.zeros((2, 2)))
    self.assertEqual(tangent_y, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_grad_of_int(self):
    """grad(..., allow_int=True) on an int input returns a float0 zero."""
    # Need real-valued output, but testing integer input.
    out = api.grad(lambda x: x+0., allow_int=True)(1)
    self.assertEqual(out, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_grad_of_bool(self):
    """value_and_grad with allow_int=True handles a boolean predicate input."""
    def cond(pred):
      return lax.cond(pred, lambda _: 1., lambda _: 2., 1.)
    value, grd = api.value_and_grad(cond, allow_int=True)(True)
    self.assertEqual(value, 1.)
    self.assertEqual(grd, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_grad_of_int_index(self):
    """Gradient wrt an int index argument is a float0 zero."""
    grad_x, grad_i = api.grad(lambda x, i: x[i], argnums=(0, 1),
                              allow_int=True)(np.ones(2), 1)
    self.assertAllClose(grad_x, jnp.array([0., 1.]))
    self.assertEqual(grad_i, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_jit_grad_of_int(self):
    """jit of a grad function with int argnums preserves float0 results."""
    grad_f = api.grad(lambda x, i: x[i], argnums=(0, 1), allow_int=True)
    grad_x, grad_i = jax.jit(grad_f)(np.ones(2), 1)
    self.assertAllClose(grad_x, jnp.array([0., 1.]))
    self.assertEqual(grad_i, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_float0_reshape(self):
    """float0 arrays support dtype-agnostic ops like reshape and transpose."""
    # dtype-agnostic operations are supported
    float0_array = jax.grad(lambda x: jnp.sum(x+0.),
                            allow_int=True)(np.ones((2, 4), dtype=int))
    self.assertArraysEqual(float0_array.reshape((4, 2)),
                           np.zeros((4, 2), dtype=float0))
    self.assertArraysEqual(float0_array.transpose(),
                           np.zeros((4, 2), dtype=float0))
  def test_float0_error(self):
    """Arithmetic on float0 arrays raises via both array and lax dispatch."""
    # float0 is incompatible with other dtypes
    float0_array = jax.grad(lambda x: x+0., allow_int=True)(1)
    error_text = "float0s do not support any operations by design"
    with self.assertRaisesRegex(TypeError, error_text):
      # dispatch via DeviceArray
      _ = float0_array + jnp.zeros(())
    with self.assertRaisesRegex(TypeError, error_text):
      # dispatch via lax
      _ = lax.add(float0_array, jnp.zeros(()))
  def test_grad_complex_result_errors(self):
    """grad rejects complex-valued outputs without holomorphic=True."""
    dfn = grad(lambda x: x ** 2 + 1j)
    self.assertRaisesRegex(
      TypeError,
      (r"grad requires real-valued outputs \(output dtype that is a "
       r"sub-dtype of np.floating\), but got complex.*"),
      lambda: dfn(3.))
  def test_holomorphic_grad_of_float_errors(self):
    """grad(..., holomorphic=True) rejects real-valued inputs."""
    dfn = grad(lambda x: x ** 2, holomorphic=True)
    self.assertRaisesRegex(
      TypeError,
      (r"grad with holomorphic=True requires inputs with complex dtype, "
       r"but got float.*"),
      lambda: dfn(3.))
  def test_holomorphic_jacrev_of_float_errors(self):
    """jacrev(..., holomorphic=True) rejects real-valued inputs."""
    dfn = jacrev(lambda x: x ** 2, holomorphic=True)
    self.assertRaisesRegex(
      TypeError,
      (r"jacrev with holomorphic=True requires inputs with complex dtype, "
       r"but got float.*"),
      lambda: dfn(3.))
  def test_holomorphic_jacfwd_of_float_errors(self):
    """jacfwd(..., holomorphic=True) rejects real-valued inputs."""
    dfn = jacfwd(lambda x: x ** 2, holomorphic=True)
    self.assertRaisesRegex(
      TypeError,
      (r"jacfwd with holomorphic=True requires inputs with complex dtype, "
       r"but got float.*"),
      lambda: dfn(3.))
  def test_jacfwd_of_complex_errors(self):
    """jacfwd rejects complex inputs without holomorphic=True."""
    dfn = jacfwd(lambda x: x ** 2)
    self.assertRaisesRegex(
      TypeError,
      (r"jacfwd requires real-valued inputs \(input dtype that is a "
       r"sub-dtype of np.floating\), but got complex.*"),
      lambda: dfn(3. + 1j))
  def test_xla_computation(self):
    """Smoke-tests xla_computation's docstring examples by grepping HLO text."""
    # these tests basically check the examples in the xla_computation docstring
    def e(x):
      return jnp.sin(jnp.cos(x))
    c = api.xla_computation(e)(2.)
    self.assertIn('cosine', c.as_hlo_text())
    self.assertIn('sine', c.as_hlo_text())
    # A single named axis produces one all-reduce over all replicas.
    def f(x):
      return x - lax.psum(x, 'i')
    axis_env = [('i', 4)]
    c = api.xla_computation(f, axis_env=axis_env)(2)
    self.assertIn('all-reduce', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1,2,3}}', c.as_hlo_text())
    # Two nested axes produce distinct replica groupings per reduction.
    def g(x):
      rowsum = lax.psum(x, 'i')
      colsum = lax.psum(x, 'j')
      allsum = lax.psum(x, ('i', 'j'))
      return rowsum, colsum, allsum
    axis_env = [('i', 4), ('j', 2)]
    c = api.xla_computation(g, axis_env=axis_env)(5.)
    self.assertIn('all-reduce', c.as_hlo_text())
    self.assertIn('replica_groups={{0,2,4,6},{1,3,5,7}}', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1},{2,3},{4,5},{6,7}}', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1,2,3,4,5,6,7}}', c.as_hlo_text())
    # axis_index_groups restricts a psum to explicit subgroups.
    def h(x):
      rowsum = lax.psum(x, 'i', axis_index_groups=[[0, 1], [2, 3]])
      colsum = lax.psum(x, 'j')
      return rowsum, colsum
    axis_env = [('i', 4), ('j', 2)]
    c = api.xla_computation(h, axis_env=axis_env)(5.)
    self.assertIn('all-reduce', c.as_hlo_text())
    self.assertIn('replica_groups={{0,2},{4,6},{1,3},{5,7}}', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1},{2,3},{4,5},{6,7}}', c.as_hlo_text())
  def test_xla_computation_args(self):
    """tuple_args=True packs all parameters into a single tuple parameter."""
    def foo(x, y, z):
      return x + y + z
    c = api.xla_computation(foo)(1., 2., 3.)
    self.assertEqual(len(c.program_shape().parameter_shapes()), 3)
    c = api.xla_computation(foo, tuple_args=True)(1., 2., 3.)
    param_shapes = c.program_shape().parameter_shapes()
    self.assertEqual(len(param_shapes), 1)
    self.assertEqual(param_shapes[0].xla_element_type(),
                     xla_client.PrimitiveType.TUPLE)
  def test_xla_computation_duck_typing(self):
    """xla_computation accepts ShapeDtypeStruct placeholders as arguments."""
    def foo(x, y, z):
      return x + y + z
    x = jax.ShapeDtypeStruct((), np.float32)
    y = jax.ShapeDtypeStruct((), np.float32)
    z = jax.ShapeDtypeStruct((), np.float32)
    c = api.xla_computation(foo)(x, y, z)
    self.assertEqual(len(c.program_shape().parameter_shapes()), 3)
    c = api.xla_computation(foo, tuple_args=True)(1., 2., 3.)
    param_shapes = c.program_shape().parameter_shapes()
    self.assertEqual(len(param_shapes), 1)
    self.assertEqual(param_shapes[0].xla_element_type(),
                     xla_client.PrimitiveType.TUPLE)
  def test_staging_out_multi_replica(self):
    """xla_computation can stage out a pmapped function without crashing."""
    def f(x):
      return api.pmap(jnp.mean)(x)
    xla_comp = api.xla_computation(f)
    xla_comp(jnp.arange(8)).as_hlo_text()  # doesn't crash
  def test_xla_computation_instantiate_constant_outputs(self):
    """Constant-only outputs are materialized with their full shape."""
    def f():
      return jnp.zeros((3, 4))
    xla_comp = api.xla_computation(f)()
    out_shape, = xla_comp.program_shape().result_shape().tuple_shapes()
    self.assertEqual(out_shape.dimensions(), (3, 4))
  def test_xla_computation_static_argnums(self):
    """Static args are baked in as constants, not HLO parameters."""
    def f(x, y):
      return x + y
    xla_comp = api.xla_computation(f, static_argnums=(1,))(2, 3)
    hlo_text = xla_comp.as_hlo_text()
    self.assertIn("constant(3)", hlo_text)
    # The static arguments should be removed from the function being compiled,
    # thus the function should have only a single argument.
    self.assertIn("parameter.1", hlo_text)
    self.assertNotIn("parameter.2", hlo_text)
  def test_xla_computation_return_shape(self):
    """return_shape=True yields a pytree of ShapeDtypeStructs for outputs."""
    _, shape_tree = api.xla_computation(lambda x: (x + 1, jnp.zeros(2, jnp.float32)),
                                        return_shape=True)(np.int32(1))
    expected = (api.ShapeDtypeStruct(shape=(), dtype=jnp.int32),
                api.ShapeDtypeStruct(shape=(2,), dtype=jnp.float32))
    self.assertEqual(shape_tree, expected)
  def test_xla_computation_partitioned(self):
    """in_parts/out_parts emit the expected sharding annotations in HLO."""
    def f(x, y):
      return jnp.dot(x, y) + 1
    x = jax.ShapeDtypeStruct((8, 8), np.float32)
    y = jax.ShapeDtypeStruct((8, 16), np.float32)
    xla_comp = api.xla_computation(f, in_parts=(P(2, 2), None),
                                   out_parts=P(4, 1))(x, y)
    hlo_text = xla_comp.as_hlo_text()
    self.assertIn('sharding={devices=[2,2]0,1,2,3}', hlo_text)
    self.assertIn('sharding={replicated}', hlo_text)
    self.assertIn('sharding={{devices=[4,1]0,1,2,3}}', hlo_text)
  def test_xla_computation_replicated_and_partitioned(self):
    """Mixed partitioned/replicated outputs get matching HLO shardings."""
    def f(x, y):
      return jnp.dot(x, y), lax.psum(x, 'i')
    x = jax.ShapeDtypeStruct((8, 8), np.float32)
    y = jax.ShapeDtypeStruct((8, 16), np.float32)
    axis_env = [('i', 4)]
    xla_comp = api.xla_computation(f, axis_env=axis_env,
                                   in_parts=(P(2, 2), None),
                                   out_parts=(P(4, 1), None))(x, y)
    hlo_text = xla_comp.as_hlo_text()
    self.assertIn('all-reduce', hlo_text)
    self.assertIn('replica_groups={{0,1,2,3}}', hlo_text)
    self.assertIn('sharding={devices=[2,2]0,1,2,3}', hlo_text)
    self.assertIn('sharding={replicated}', hlo_text)
    self.assertIn('sharding={{devices=[4,1]0,1,2,3}, {replicated}}', hlo_text)
  def test_xla_computation_psum_constant(self):
    """psum of a Python constant stages out without crashing."""
    f = lambda: jax.lax.psum(1, "i")
    api.xla_computation(f, axis_env=[("i", 2)])()  # doesn't crash
  @jtu.skip_on_devices("cpu", "gpu")
  @jtu.ignore_warning(message="Some donated buffers were not usable")
  def test_xla_computation_donate_argnums(self):
    """donate_argnums is accepted by xla_computation without crashing."""
    api.xla_computation(lambda x: None, donate_argnums=(0,))(3)  # doesn't crash
  def test_xla_computation_lower_fun_axis_env(self):
    """Collectives lowered via lower_fun see the axis env (all_gather case)."""
    axis_name = 'i'
    def fn(x):
      y = lax.all_gather(
          x, axis_name=axis_name)
      return y * lax.axis_index(axis_name).astype(jnp.float32)
    input_x = jnp.ones((5,6,4))
    axis_env = [(axis_name, api.local_device_count())]
    _ = api.xla_computation(fn, axis_env=axis_env, backend='cpu')(input_x)
  def test_xla_computation_axis_env(self):
    """axis_env is threaded through nested control flow (scan) in staging."""
    def fn(x):
      z = x * jax.lax.axis_index('i').astype(jnp.float32)
      def inner_fn(carry, a):
        return carry + a, ()
      return jax.lax.scan(inner_fn, jnp.zeros_like(z[0]), z)
    x = jnp.ones((5, 6, 4))
    _ = jax.xla_computation(fn, axis_env=(('i', 8),), backend='cpu')(x)
def test_concurrent_device_get_and_put(self):
def f(x):
for _ in range(100):
y = jax.device_put(x)
x = jax.device_get(y)
return x
xs = [np.random.randn(i) for i in range(10)]
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [executor.submit(partial(f, x)) for x in xs]
ys = [f.result() for f in futures]
for x, y in zip(xs, ys):
self.assertAllClose(x, y)
  def test_dtype_warning(self):
    """Requesting a 64-bit dtype warns exactly once per call when x64 is off."""
    # cf. issue #1230
    if config.x64_enabled:
      raise unittest.SkipTest("test only applies when x64 is disabled")
    def check_warning(warn, nowarn):
      # `warn` must emit the dtype warning; `nowarn` must stay silent.
      with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        nowarn()  # get rid of extra startup warning
        prev_len = len(w)
        nowarn()
        assert len(w) == prev_len
        warn()
        assert len(w) > 0
        msg = str(w[-1].message)
        expected_prefix = "Explicitly requested dtype "
        self.assertEqual(expected_prefix, msg[:len(expected_prefix)])
        # A warning call must not make subsequent nowarn calls noisy.
        prev_len = len(w)
        nowarn()
        assert len(w) == prev_len
    check_warning(lambda: jnp.array([1, 2, 3], dtype="float64"),
                  lambda: jnp.array([1, 2, 3], dtype="float32"))
    check_warning(lambda: jnp.array([1, 2, 3], dtype="float64"),
                  lambda: jnp.array([1, 2, 3], dtype=float))
    check_warning(lambda: jnp.ones(3, dtype=np.float64),
                  lambda: jnp.ones(3))
    check_warning(lambda: jnp.ones(3, dtype=np.float64),
                  lambda: jnp.ones(3, dtype=float))
    check_warning(lambda: jnp.ones_like(3, dtype=np.int64),
                  lambda: jnp.ones_like(3, dtype=np.int32))
    check_warning(lambda: jnp.zeros(3, dtype="int64"),
                  lambda: jnp.zeros(3, dtype="int32"))
    check_warning(lambda: jnp.zeros_like(3, dtype="float64"),
                  lambda: jnp.zeros_like(3, dtype="float32"))
    check_warning(lambda: jnp.full((2, 3), 1, dtype="int64"),
                  lambda: jnp.full((2, 3), 1))
    check_warning(lambda: jnp.ones(3).astype("float64"),
                  lambda: jnp.ones(3).astype("float32"))
    check_warning(lambda: jnp.eye(3, dtype=np.float64),
                  lambda: jnp.eye(3))
    check_warning(lambda: jnp.arange(3, dtype=np.float64),
                  lambda: jnp.arange(3, dtype=np.float32))
    check_warning(lambda: jnp.linspace(0, 3, dtype=np.float64),
                  lambda: jnp.linspace(0, 3, dtype=np.float32))
    check_warning(lambda: jnp.tri(2, dtype="float64"),
                  lambda: jnp.tri(2, dtype="float32"))
    check_warning(lambda: jnp.arange(1).astype("float64"),
                  lambda: jnp.arange(1).astype(float))
    check_warning(lambda: jnp.arange(1.0).astype("int64"),
                  lambda: jnp.arange(1.0).astype(int))
  def test_error_for_invalid_dtype(self):
    """Non-numeric numpy arrays (e.g. strings) are rejected by lax ops."""
    with self.assertRaisesRegex(TypeError, ".*not a valid JAX array type.*"):
      lax.add(jnp.array(7), np.array("hello"))
  def test_vmap_preserves_docstr(self):
    """vmap wraps the original docstring into the vectorized function's doc."""
    def superfun(a):
      """Does things with stuff."""
      pass
    self.assertRegex(api.vmap(superfun).__doc__, "\n".join([
        "Vectorized version of superfun.*",
        "",
        "Original documentation:",
        "",
        superfun.__doc__,
    ]))
  def test_vmap_in_axes_list(self):
    """in_axes given as a list behaves identically to a tuple."""
    # https://github.com/google/jax/issues/2367
    dictionary = {'a': 5., 'b': jnp.ones(2)}
    x = jnp.zeros(3)
    y = jnp.arange(3.)
    def f(dct, x, y):
      return dct['a'] + dct['b'] + x + y
    out1 = api.vmap(f, (None, 0, 0))(dictionary, x, y)
    out2 = api.vmap(f, [None, 0, 0])(dictionary, x, y)
    self.assertAllClose(out1, out2)
  def test_vmap_in_axes_tree_prefix_error(self):
    """An in_axes spec that is not a tree prefix of the args raises."""
    # https://github.com/google/jax/issues/795
    value_tree = jnp.ones(3)
    self.assertRaisesRegex(
        ValueError,
        "vmap in_axes specification must be a tree prefix of the corresponding "
        r"value, got specification \(0, 0\) for value tree "
        + re.escape(f"{tree_util.tree_structure((value_tree,))}."),
        lambda: api.vmap(lambda x: x, in_axes=(0, 0))(value_tree)
    )
  def test_vmap_in_axes_leaf_types(self):
    """in_axes leaves must be ints or None; arrays are rejected."""
    with self.assertRaisesRegex(
        TypeError, r"vmap in_axes must be an int, None, or .*"):
      api.vmap(lambda x: x, in_axes=(jnp.array([1., 2.]),))(jnp.array([1., 2.]))
  def test_vmap_out_axes_leaf_types(self):
    """out_axes leaves must be ints or None; arrays are rejected."""
    with self.assertRaisesRegex(
        TypeError, r"vmap out_axes must be an int, None, or .*"):
      api.vmap(lambda x: x, out_axes=(jnp.array([1., 2.]),))(jnp.array([1., 2.]))
  def test_vmap_unbatched_object_passthrough_issue_183(self):
    """Non-array objects pass through vmap unbatched when in_axes is None."""
    # https://github.com/google/jax/issues/183
    fun = lambda f, x: f(x)
    vfun = api.vmap(fun, (None, 0))
    ans = vfun(lambda x: x + 1, jnp.arange(3))
    self.assertAllClose(ans, np.arange(1, 4), check_dtypes=False)
  def test_vmap_mismatched_axis_sizes_error_message_issue_705(self):
    """vmap error messages spell out which args have inconsistent axis sizes."""
    # https://github.com/google/jax/issues/705
    def h(a, b):
      return jnp.sum(a) + jnp.sum(b)
    X = np.random.randn(10, 4)
    U = np.random.randn(10, 2)
    # Two args, mismatched mapped-axis sizes: both args are listed.
    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:\n"
        r"arg 0 has shape \(10, 4\) and axis 0 is to be mapped" "\n"
        r"arg 1 has shape \(10, 2\) and axis 1 is to be mapped" "\n"
        "so\n"
        "arg 0 has an axis to be mapped of size 10\n"
        "arg 1 has an axis to be mapped of size 2"):
      api.vmap(h, in_axes=(0, 1))(X, U)
    # Three args: agreeing args are grouped in the message.
    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:\n"
        r"arg 0 has shape \(10, 4\) and axis 0 is to be mapped" "\n"
        r"arg 1 has shape \(10, 2\) and axis 1 is to be mapped" "\n"
        r"arg 2 has shape \(10, 4\) and axis 0 is to be mapped" "\n"
        "so\n"
        "args 0, 2 have axes to be mapped of size 10\n"
        "arg 1 has an axis to be mapped of size 2"):
      api.vmap(lambda x, y, z: None, in_axes=(0, 1, 0))(X, U, X)
    # Pytree args: the message falls back to a tree of axis sizes.
    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:\n"
        "the tree of axis sizes is:\n"
        r"\(10, \[2, 2\]\)"):
      api.vmap(h, in_axes=(0, 1))(X, [U, U])
    error = (r"vmap was requested to map its argument along axis 0, which "
             r"implies that its rank should be at least 1, but is only 0 "
             r"\(its shape is \(\)\)")
    with self.assertRaisesRegex(ValueError, error):
      # The mapped inputs cannot be scalars
      api.vmap(lambda x: x)(1.)
    with self.assertRaisesRegex(
        ValueError, "vmap must have at least one non-None value in in_axes"):
      # If the output is mapped, there must be a non-None in_axes
      api.vmap(lambda x: x, in_axes=None)(jnp.array([1., 2.]))
    error = (r"vmap was requested to map its argument along axis 1, which "
             r"implies that its rank should be at least 2, but is only 1 "
             r"\(its shape is \(2,\)\)")
    with self.assertRaisesRegex(ValueError, error):
      api.vmap(lambda x: x, in_axes=1)(jnp.array([1., 2.]))
    # Error is: TypeError: only integer scalar arrays can be converted to a scalar index
    with self.assertRaisesRegex(
        ValueError,
        "vmap out_axes specification must be a tree prefix of the "
        "corresponding value.*"):
      api.vmap(lambda x: x, in_axes=0, out_axes=(2, 3))(jnp.array([1., 2.]))
    with self.assertRaisesRegex(
        ValueError,
        r"vmap has mapped output \(axis_name=foo\) but out_axes is None"):
      # If the output is mapped (user-named axis), then there must be some
      # out_axes specified.
      api.vmap(lambda x: x, out_axes=None, axis_name="foo")(jnp.array([1., 2.]))
    with self.assertRaisesRegex(
        ValueError,
        "vmap has mapped output but out_axes is None"):
      # If the output is mapped (unnamed axis), then there must be some out_axes
      # specified.
      api.vmap(lambda x: x, out_axes=None)(jnp.array([1., 2.]))
  def test_vmap_structured_in_axes(self):
    """in_axes may be a pytree (tuples, namedtuples, dicts, OrderedDicts)."""
    A, B, C, D = 2, 3, 4, 5
    K = 6  # batch size
    x = np.ones((K, A, B))  # batch axis in different locations
    y = np.ones((B, K, C))
    z = np.ones((C, D, K))
    def foo(tree_arg):
      x, (y, z) = tree_arg
      return jnp.dot(x, jnp.dot(y, z))
    tree = (x, (y, z))
    vfoo = api.vmap(foo, in_axes=((0, (1, 2)),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))
    # Same structure expressed as a namedtuple.
    Point = collections.namedtuple("Point", ["x", "y"])
    tree = (x, Point(y, z))
    vfoo = api.vmap(foo, in_axes=((0, Point(1, 2)),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))
    # Same structure expressed as a dict.
    def foo(tree_arg):
      x, dct = tree_arg
      y, z = dct['a'], dct['b']
      return jnp.dot(x, jnp.dot(y, z))
    tree = (x, {'a': y, 'b': z})
    vfoo = api.vmap(foo, in_axes=((0, {'a': 1, 'b': 2}),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))
    # Same structure expressed as an OrderedDict.
    tree = (x, collections.OrderedDict([('a', y), ('b', z)]))
    vfoo = api.vmap(
        foo, in_axes=((0, collections.OrderedDict([('a', 1), ('b', 2)])),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))
  def test_vmap_in_axes_bool_error(self):
    """Booleans are rejected as in_axes for vmap despite being int-like."""
    # https://github.com/google/jax/issues/6372
    with self.assertRaisesRegex(TypeError, "must be an int"):
      api.vmap(lambda x: x, in_axes=False)(jnp.zeros(3))
  def test_pmap_in_axes_bool_error(self):
    """Booleans are rejected as in_axes for pmap despite being int-like."""
    # https://github.com/google/jax/issues/6372
    with self.assertRaisesRegex(TypeError, "must be an int"):
      api.pmap(lambda x: x, in_axes=False)(jnp.zeros(1))
  def test_pmap_global_cache(self):
    """Repeated pmap calls with equal configs hit the compilation cache."""
    def f(x, y):
      return x, y
    x = np.ones((1, 1, 1))
    # All defaults
    with jtu.assert_num_jit_and_pmap_compilations(1):
      for _ in range(2):
        api.pmap(f)(x, x)
    # With axis name
    with jtu.assert_num_jit_and_pmap_compilations(1):
      for _ in range(2):
        api.pmap(f, 'i')(x, x)
    # With in_axes and out_axes
    for x_in, y_in, x_out, y_out in it.product(*((0, 1, 2) for _ in range(4))):
      with jtu.assert_num_jit_and_pmap_compilations(1):
        for _ in range(2):
          api.pmap(f, 'i', in_axes=(x_in, y_in), out_axes=(x_out, y_out))(x, x)
    # Forward-mode AD on the outside
    with jtu.assert_num_jit_and_pmap_compilations(1):
      for _ in range(2):
        api.jvp(api.pmap(f), (x, x), (x, x))
    # Reverse-mode AD on the outside. One compilation for forward, one for backward.
    with jtu.assert_num_jit_and_pmap_compilations(2):
      for _ in range(2):
        api.vjp(api.pmap(f), x, x)[1]((x, x))
def test_device_array_repr(self):
rep = jnp.ones(()) + 1.
self.assertStartsWith(repr(rep), "DeviceArray")
  def test_device_array_hash(self):
    """DeviceArrays are explicitly unhashable."""
    rep = jnp.ones(()) + 1.
    self.assertIsInstance(rep, jax.interpreters.xla.DeviceArray)
    self.assertNotIsInstance(rep, collections.abc.Hashable)
    with self.assertRaisesRegex(TypeError, 'unhashable type'):
      hash(rep)
  def test_grad_without_enough_args_error_message(self):
    """grad errors clearly when argnums exceed the positional args given."""
    # https://github.com/google/jax/issues/1696
    def f(x, y): return x + y
    df = api.grad(f, argnums=0)
    self.assertRaisesRegex(
        TypeError,
        "differentiating with respect to argnums=0 requires at least 1 "
        "positional arguments to be passed by the caller, but got only 0 "
        "positional arguments.",
        lambda: partial(df, x=0.)(y=1.))
  def test_grad_of_jit_compilation_caching(self):
    """grad-of-jit compiles twice total (fwd + bwd), then caches, per logs."""
    if not hasattr(self, "assertLogs"):
      raise unittest.SkipTest("test requires assertLogs (python 3)")
    lax.add(1, 2)  # make sure some initial warnings are already printed
    sin = api.jit(jnp.sin)
    prev_level = logging.get_verbosity()
    try:
      logging.set_verbosity('DEBUG')
      with self.assertLogs(level=logging.DEBUG) as l:
        ans1 = api.grad(sin)(2.)
        ans2 = api.grad(sin)(3.)
    finally:
      # Always restore verbosity so other tests aren't affected.
      logging.set_verbosity(prev_level)
    self.assertLen(l.output, 2)
    self.assertAllClose(ans1, np.cos(2.), check_dtypes=False)
    self.assertAllClose(ans2, np.cos(3.), check_dtypes=False)
  def test_grad_of_jit_compilation_caching2(self):
    """Same caching check as above, using compile counters instead of logs."""
    # Like the above test, but instead of logging use our compile counters.
    @api.jit
    def f(x):
      return jnp.sin(x)
    with jtu.count_jit_and_pmap_compiles() as count:  # noqa: F841
      _ = jax.grad(f)(3.)
    self.assertEqual(count[0], 2)  # one for fwd, one for bwd
    with jtu.count_jit_and_pmap_compiles() as count:  # noqa: F841
      _ = jax.grad(f)(3.)
      _ = jax.grad(f)(4.)
    self.assertEqual(count[0], 0)  # cache hits on both fwd and bwd
  def test_grad_does_not_unflatten_tree_with_none(self):
    """grad never unflattens a registered pytree with None children."""
    # https://github.com/google/jax/issues/7546
    class CustomNode(list):
      pass
    def unflatten(unused_aux_data, children):
      # The unflatten callback verifies children are real values, not None.
      self.assertIsNotNone(children[0])
      return CustomNode(children)
    tree_util.register_pytree_node(CustomNode, lambda x: (x, None), unflatten)
    grad(lambda x: x[0])(CustomNode([0.]))
  def test_trivial_computations(self):
    """jit of identity-like functions returns input buffers unchanged (is)."""
    x = jnp.array([1, 2, 3])
    y = api.jit(lambda x: x)(x)
    self.assertIs(x, y)
    z1, z2 = api.jit(lambda x: (x, x))(x)
    self.assertIs(z1, z2)
    x1, x2 = jnp.array([1, 2]), jnp.array([2, 3])
    z1, z2, z3 = api.jit(lambda x, y: (y, 1, x))(x1, x2)
    self.assertIs(z1, x2)
    self.assertIs(z3, x1)
    self.assertEqual(z2, 1)
  def test_nested_jit_hoisting(self):
    """An inner jit is staged as a single xla_call inside the outer jaxpr."""
    @api.jit
    def f(x, y):
      z = 2 * x
      return y + z, 3
    @api.jit
    def g(x):
      return f(2, x)
    # Temporarily wrap xla.jaxpr_subcomp to capture the jaxprs it lowers.
    jaxpr_subcomp = xla.jaxpr_subcomp
    jaxprs = []
    def jaxpr_subcomp_and_collect(c, jaxpr, *args, **kwargs):
      jaxprs.append(jaxpr)
      return jaxpr_subcomp(c, jaxpr, *args, **kwargs)
    try:
      xla.jaxpr_subcomp = jaxpr_subcomp_and_collect
      ans = g(3)
    finally:
      # Always restore the patched function.
      xla.jaxpr_subcomp = jaxpr_subcomp
    self.assertEqual(ans, (7, 3))
    self.assertLen(jaxprs, 2)
    outer_jaxpr, inner_jaxpr = jaxprs
    self.assertLen(outer_jaxpr.eqns, 1)
    self.assertEqual(outer_jaxpr.eqns[0].primitive.name, 'xla_call')
    subjaxpr_1 = outer_jaxpr.eqns[0].params["call_jaxpr"]
    self.assertEqual(str(subjaxpr_1), str(inner_jaxpr))
    self.assertLen(inner_jaxpr.eqns, 2)
    self.assertEqual(inner_jaxpr.eqns[-2].primitive.name, 'mul')
    self.assertEqual(inner_jaxpr.eqns[-1].primitive.name, 'add')
  def test_primitive_compilation_cache(self):
    """Repeated calls of the same primitive compile only once."""
    with jtu.count_primitive_compiles() as count:
      lax.add(1, 2)
      lax.add(2, 3)
    self.assertEqual(count[0], 1)
  def test_arange_jit(self):
    """arange indexed by a traced array works under jit."""
    # see https://github.com/google/jax/issues/553
    def fun(x):
      r = jnp.arange(x.shape[0])[x]
      return r
    jit(fun)(jnp.array([0, 1, 2], dtype=jnp.int32))  # doesn't crash
  def helper_save_tracer(self, x):
    """Deliberately leak the traced value onto self for escape tests below."""
    self._saved_tracer = x
    return x
  def test_escaped_tracers_different_top_level_traces(self):
    """Reusing a tracer from a previous jit trace raises UnexpectedTracerError."""
    api.jit(self.helper_save_tracer)(0.)
    with self.assertRaisesRegex(
        UnexpectedTracerError, "Encountered an unexpected tracer"):
      api.jit(lambda x: self._saved_tracer)(0.)
  def test_escaped_tracers_cant_lift_sublevels(self):
    """Passing a leaked tracer as an argument to a new jit raises."""
    api.jit(self.helper_save_tracer)(0.)
    with self.assertRaisesRegex(
        UnexpectedTracerError,
        re.compile(
          "Encountered an unexpected tracer",
          re.DOTALL)):
      api.jit(lambda x: x)(self._saved_tracer)
  def test_escaped_tracers_tracer_from_higher_level(self):
    """A tracer leaked from grad mentions 'higher level' in the error."""
    api.grad(self.helper_save_tracer)(0.)
    with self.assertRaisesRegex(
        UnexpectedTracerError,
        re.compile(
          "Encountered an unexpected tracer.*Tracer from a higher level",
          re.DOTALL)):
      api.grad(lambda x: x)(self._saved_tracer)
  def test_escaped_tracers_incompatible_sublevel(self):
    """Using a tracer leaked from an inner jit inside an outer jit raises."""
    def func1(x):
      api.jit(self.helper_save_tracer)(0.)
      # Use the tracer
      return x + self._saved_tracer
    with self.assertRaisesRegex(
        UnexpectedTracerError,
        re.compile("Encountered an unexpected tracer",
                   re.DOTALL)):
      api.jit(func1)(2.)
  def test_escaped_tracers_cant_lift(self):
    """Using a tracer leaked from an inner grad inside an outer grad raises."""
    def func1(x):
      api.grad(self.helper_save_tracer)(0.)
      return x + self._saved_tracer
    with self.assertRaisesRegex(
        UnexpectedTracerError,
        re.compile("Encountered an unexpected tracer.*Can't lift",
                   re.DOTALL)):
      api.grad(func1)(2.)
  def test_escaped_tracers_not_among_input_tracers(self):
    """A leaked inner-grad tracer of the outer input raises a specific error."""
    def func1(x):
      api.grad(self.helper_save_tracer)(x)
      # Use the tracer
      return x + self._saved_tracer
    with self.assertRaisesRegex(
        UnexpectedTracerError,
        re.compile(
          "Encountered an unexpected tracer.*Tracer not among input tracers",
          re.DOTALL)):
      api.jit(func1)(2.)
  def test_escaped_tracer_omnistaging(self):
    """A tracer leaked via nonlocal is reported with its creation line."""
    count = 1
    @jit
    def f():
      nonlocal count
      count = jnp.add(count, 1)
    f()  # leaked a tracer! but currently undetected
    def f(x, c):
      # Touching the leaked tracer inside a later trace triggers the error.
      jnp.add(count, 1)
      return None, None
    @jit
    def g():
      lax.scan(f, None, None, length=2)
    with self.assertRaisesRegex(UnexpectedTracerError,
                                "was created on line"):
      g()
  def test_escaped_tracer_omnistaging_top_trace(self):
    """A tracer leaked from a top-level scan is caught with a creation line."""
    count = 1
    def f(_, __):
      nonlocal count
      count = jnp.add(count, 1)
      return None, None
    lax.scan(f, None, None, length=2)  # leaked a tracer! (of level 1!)
    with self.assertRaisesRegex(UnexpectedTracerError,
                                "was created on line"):
      # The following call will try and raise the ones array to the count tracer
      # level, which is no longer live.
      jax.jit(jnp.add)(jnp.ones(()), count)
  def test_escaped_tracer_transform_name(self):
    """The leak error names the transform (jit/pmap/eval_shape) that traced."""
    with self.assertRaisesRegex(UnexpectedTracerError,
                                "for jit"):
      jax.jit(self.helper_save_tracer)(1)
      _ = self._saved_tracer+1
    with self.assertRaisesRegex(UnexpectedTracerError,
                                "for pmap"):
      jax.pmap(self.helper_save_tracer)(jnp.ones((1, 2)))
      _ = self._saved_tracer+1
    with self.assertRaisesRegex(UnexpectedTracerError,
                                "for eval_shape"):
      jax.eval_shape(self.helper_save_tracer, 1)
      _ = self._saved_tracer+1
  def test_escaped_tracer_shape_dtype(self):
    """The leak error includes the escaped tracer's shape and dtype."""
    with self.assertRaisesRegex(core.UnexpectedTracerError,
                                r"shape \(4, 3\) and dtype int32"):
      jax.jit(self.helper_save_tracer)(jnp.ones((4, 3), dtype=jnp.int32))
      _ = self._saved_tracer+1
  def test_pmap_static_kwarg_error_message(self):
    """Passing a static-broadcast arg by keyword to pmap raises clearly."""
    # https://github.com/google/jax/issues/3007
    def f(a, b):
      return a + b
    g = jax.pmap(f, static_broadcasted_argnums=(1,))
    msg = (r"pmapped function has static_broadcasted_argnums=\(1,\) but was "
           r"called with only 1 positional argument. All static broadcasted "
           r"arguments must be passed positionally.")
    with self.assertRaisesRegex(ValueError, msg):
      g(jnp.ones((1, 1)), b=1)
  def test_vmap_unmapped_last(self):
    """out_axes=-1 works even when the output does not depend on the input."""
    @partial(jax.vmap, out_axes=-1)
    def f(x):
      return np.zeros((2,))
    f(np.zeros((5,)))
  # TODO(jakevdp): re-enable this if possible.
  @unittest.skipIf(True, "broken by convert_element_type change.")
  def test_xla_constant_dedup(self):
    """A constant used twice appears only once in the HLO (currently skipped)."""
    y = np.array([7, 14], dtype=np.float32)
    def f(x):
      return x + y + y
    x = np.array([1, 2], dtype=np.float32)
    hlo_lines = jax.xla_computation(f)(x).as_hlo_text().split('\n')
    hlo_lines = set([s.strip() for s in hlo_lines])
    self.assertIn('constant.1 = f32[2]{0} constant({7, 14})', hlo_lines)
    self.assertNotIn('constant.2 = f32[2]{0} constant({7, 14})', hlo_lines)
  def test_eval_context(self):
    """core.eval_context allows concrete evaluation inside a jit trace."""
    @jit
    def f():
      with core.eval_context():
        assert jnp.add(1, 1) == 2
    f()  # doesn't crash
def test_concrete_error_because_arg_unary(self):
@jax.jit
def f(x):
if x > 0:
return x
else:
return 0
msg = r"on the value of the argument 'x'"
with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
f(1)
  def test_concrete_error_because_arg_binary(self):
    """Branching on two traced arguments names both in the error."""
    @jax.jit
    def f(x, y):
      if x > y:
        return x
      else:
        return y
    msg = r"on the values of the arguments 'x' and 'y'"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f(1, 2)
  def test_concrete_error_because_arg_ternary(self):
    """Only the arguments actually branched on are named in the error."""
    @jax.jit
    def f(x, y, z):
      if x > z:
        return x
      else:
        return y
    msg = r"on the values of the arguments 'x' and 'z'"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f(1, 2, 3)
    # The message is the same regardless of positional/keyword passing.
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f(1, 2, z=3)
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f(1, y=2, z=3)
  def test_concrete_error_because_arg_varargs(self):
    """Branching on *args values names 'args' in the error."""
    @jax.jit
    def f(*args):
      x, y, z = args
      if x > z:
        return x
      else:
        return y
    msg = r"on the values of the argument 'args'"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f(1, 2, 3)
  def test_concrete_error_because_arg_kwargs(self):
    """Branching on **kwargs values names 'kwargs' in the error."""
    @jax.jit
    def f(**kwargs):
      x, y, z = kwargs['x'], kwargs['y'], kwargs['z']
      if x > z:
        return x
      else:
        return y
    msg = r"on the values of the argument 'kwargs'"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f(x=1, y=2, z=3)
  def test_concrete_error_because_arg_pytree(self):
    """Branching on a leaf of a pytree arg names the whole pytree arg."""
    @jax.jit
    def f(xy, z):
      x, y = xy
      if x > 0:
        return x
      else:
        return y
    msg = r"on the value of the argument 'xy'"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f((1, 2), z=3)
  def test_concrete_error_because_const(self):
    """Branching on a traced constant points at the offending source lines."""
    @jax.jit
    def f():
      assert jnp.add(1, 1) > 0
    msg = "on these lines"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
      f()
  def test_xla_computation_zeros_doesnt_device_put(self):
    """Staging jnp.zeros with xla_computation performs no device transfers."""
    with jtu.count_device_put() as count:
      api.xla_computation(lambda: jnp.zeros(3))()
    self.assertEqual(count[0], 0)
  def test_join_concrete_arrays_with_omnistaging(self):
    """lattice_join of two ConcreteArrays inside jit must not crash.

    Regression test for https://github.com/google/jax/issues/4622.
    """
    x = jnp.array([1., 2., 3.])
    y = jnp.array([1., 2., 4.])
    @jit
    def f():
      core.lattice_join(core.ConcreteArray(x), core.ConcreteArray(y))
    f()  # doesn't crash
  def test_linearize_aval_error(self):
    """The jvp function returned by linearize validates tangent avals:
    matching dtypes (or float0 for int primals) are accepted, mismatched
    tangent types raise ValueError.

    Regression test for https://github.com/google/jax/issues/4622.
    """
    f = lambda x: x
    # these should not error
    _, f_jvp = api.linearize(f, 1.)
    f_jvp(1.)
    _, f_jvp = api.linearize(f, np.ones(2, np.int32))
    f_jvp(np.zeros(2, float0))
    # these should error
    _, f_jvp = api.linearize(f, 1.)
    with self.assertRaisesRegex(ValueError, "tangent values inconsistent"):
      f_jvp(1)
    _, f_jvp = api.linearize(f, np.ones(2, np.int32))
    with self.assertRaisesRegex(ValueError, "tangent values inconsistent"):
      f_jvp(np.ones(2, np.int32))
  def test_grad_of_token_consuming_primitive(self):
    """Differentiating through a primitive that consumes an XLA token must
    not crash in the VJP.

    Regression test for https://github.com/google/jax/issues/5463.
    """
    tokentest_p = core.Primitive("tokentest")
    tokentest_p.def_impl(partial(xla.apply_primitive, tokentest_p))
    tokentest_p.def_abstract_eval(lambda x, y: x)
    xla.translations[tokentest_p] = lambda c, x, y: x
    # token operand has no tangent, hence the None JVP rule
    ad.defjvp(tokentest_p, (lambda g, x, token: x), None)
    token = jax.lax.create_token(123)
    arr = jnp.ones((3, 2))
    res, vjp_fun = jax.vjp(lambda x: tokentest_p.bind(x, token), arr)
    # Should not crash.
    vjp_fun(arr)
def test_jit_returning_token(self):
x = jax.jit(jax.lax.create_token)(1.0)
self.assertIsInstance(x, jax.interpreters.xla.Token)
  def test_leak_checker_catches_a_jit_leak(self):
    """Storing a traced value in an outer list from inside jit is a leak
    the leak checker must report."""
    with jax.checking_leaks():
      lst = []
      @jit
      def f(x):
        lst.append(x)  # leaks the tracer into enclosing scope
        return x
      with self.assertRaisesRegex(Exception, r"Leaked"):
        f(3)
  def test_leak_checker_catches_a_pmap_leak(self):
    """Same leak pattern as the jit case, but under pmap."""
    with jax.checking_leaks():
      lst = []
      @api.pmap
      def f(x):
        lst.append(x)  # leaks the tracer into enclosing scope
        return x
      with self.assertRaisesRegex(Exception, r"Leaked"):
        f(np.ones(1))
  def test_leak_checker_catches_a_grad_leak(self):
    """Leaking a JVP tracer out of a function being differentiated must be
    reported as a leaked trace."""
    with jax.checking_leaks():
      lst = []
      def f(x):
        lst.append(x)  # leaks the tracer into enclosing scope
        return x
      with self.assertRaisesRegex(Exception, r"Leaked trace"):
        api.grad(f)(3.)
  def test_leak_checker_avoids_false_positives(self):
    """Ordinary (non-leaking) uses of jit/vmap/grad/pmap must not trip the
    leak checker."""
    with jax.checking_leaks():
      @jit
      def f(x):
        return x
      f(3)  # doesn't crash
      api.vmap(f)(np.arange(3))  # doesn't crash
      api.grad(f)(3.)  # doesn't crash

      @api.pmap
      def f(x):
        return x
      f(np.ones(1))  # doesn't crash
      api.vmap(f)(np.ones((1, 1)))  # doesn't crash
  def test_leak_checker_catches_a_scan_leak(self):
    """Leaking the scan carry tracer out of the scanned body is reported."""
    with jax.checking_leaks():
      lst = []
      # `lst.append(c) or ...` leaks the carry tracer, then evaluates sin(c)
      to_scan = lambda c, x: (lst.append(c) or jnp.sin(c), None)
      with self.assertRaisesRegex(Exception, r"Leaked trace"):
        lax.scan(to_scan, 1., np.arange(3.))
  def test_leak_checker_avoids_false_positives_scan(self):
    """A clean scan body must not trip the leak checker."""
    with jax.checking_leaks():
      to_scan = lambda c, x: (jnp.sin(c), None)
      lax.scan(to_scan, 1., np.arange(3.))  # doesn't crash
  def test_leak_checker_avoids_false_positives_scan_jvp(self):
    """jvp of a function containing scan must not trip the leak checker."""
    with jax.checking_leaks():
      to_scan = lambda c, x: (c, None)
      def f(x):
        lax.scan(to_scan, x, None, length=1)
      api.jvp(f, (3.,), (1.,))  # doesn't crash
  def test_leak_checker_avoids_false_positives_scan_vmap(self):
    """vmap over scan (carry independent of input) must not trip the leak
    checker."""
    with jax.checking_leaks():
      to_scan = lambda c, _: (1., None)
      @api.vmap
      def f(x):
        lax.scan(to_scan, x, None, length=1)
      f(np.arange(5.))  # doesn't crash
  def test_leak_checker_avoids_false_positives_scan_vmap_2(self):
    """vmap over scan (carry passed through) must not trip the leak
    checker."""
    with jax.checking_leaks():
      to_scan = lambda c, _: (c, None)
      @api.vmap
      def f(x):
        lax.scan(to_scan, x, None, length=1)
      f(np.arange(5.))  # doesn't crash
  def test_leak_checker_catches_a_sublevel_leak(self):
    """Leaking a tracer from an inner jit into the enclosing jit's scope is
    reported as a leaked sublevel."""
    with jax.checking_leaks():
      @jit
      def f(x):
        lst = []
        @jit
        def g(x):
          lst.append(x)  # leaks the inner tracer to the outer trace
          return x
        x = g(x)
        return x
      with self.assertRaisesRegex(Exception, r"Leaked sublevel"):
        f(3)
  def test_leak_checker_avoids_false_positive_custom_jvp(self):
    """Calling a custom_jvp function from inside jit must not trip the leak
    checker.

    Regression test for https://github.com/google/jax/issues/5636.
    """
    with jax.checking_leaks():
      @api.custom_jvp
      def t(y):
        return y
      def t_jvp(p, t):
        pass
      t.defjvp(t_jvp)

      @jit
      def s(y):
        return t(y)
      s(3)  # doesn't crash
def test_default_backend(self):
first_local_device = api.local_devices()[0]
self.assertEqual(first_local_device.platform, api.default_backend())
  def test_dunder_jax_array(self):
    """Objects implementing __jax_array__ are accepted by jnp ops and by
    jit/grad/pmap transformations, and behave like the wrapped array for
    jnp.isscalar/size/shape/dtype.

    See https://github.com/google/jax/pull/4725.
    """
    class AlexArray:
      def __init__(self, jax_val):
        self.jax_val = jax_val
      def __jax_array__(self):
        return self.jax_val
      dtype = property(lambda self: self.jax_val.dtype)
      shape = property(lambda self: self.jax_val.shape)

    x = AlexArray(jnp.array([1., 2., 3.]))
    y = jnp.sin(x)
    self.assertAllClose(y, jnp.sin(jnp.array([1., 2., 3.])))
    y = api.grad(api.jit(lambda x: jnp.sin(x).sum()))(x)
    self.assertAllClose(y, jnp.cos(jnp.array([1., 2., 3.])))

    x = AlexArray(jnp.array([[1., 2., 3.]]))
    y = api.pmap(jnp.sin)(x)
    self.assertAllClose(y, jnp.sin(jnp.array([[1., 2., 3.]])))

    x = jnp.array(1)
    a = AlexArray(x)
    for f in [jnp.isscalar, jnp.size, jnp.shape, jnp.dtype]:
      self.assertEqual(f(x), f(a))
  def test_constant_handler_mro(self):
    """Constant handlers must resolve through the MRO so an IntEnum value
    returned from a pmapped function is handled as an int.

    Regression test for https://github.com/google/jax/issues/6129.
    """
    class Foo(enum.IntEnum):
      bar = 1

    @api.pmap
    def f(_):
      return Foo.bar

    ans = f(jnp.arange(1))  # doesn't crash
    expected = jnp.arange(1) + 1
    self.assertAllClose(ans, expected)
  def test_large_python_ints(self):
    """Python ints too large for any integer dtype raise OverflowError in
    jnp ops, but can still be explicitly converted to a float type."""
    with self.assertRaises(OverflowError):
      jnp.multiply(2 ** 100, 3.)

    out = lax.convert_element_type(2 ** 100, jnp.float32)  # doesn't crash
    self.assertArraysEqual(out, np.float32(2 ** 100))
  def test_dot_precision_context_manager(self):
    """jax.default_matmul_precision controls the precision recorded in the
    jaxpr for dot ops; an explicit precision= argument overrides it."""
    x = jnp.zeros((2, 2))

    with jax.default_matmul_precision(None):
      jnp.dot(x, x)  # doesn't crash
      jaxpr = jax.make_jaxpr(jnp.dot)(x, x)
    self.assertIn('precision=None', str(jaxpr))

    with jax.default_matmul_precision("bfloat16"):
      x @ x  # doesn't crash
      jaxpr = jax.make_jaxpr(op.matmul)(x, x)
    self.assertIn('Precision.DEFAULT', str(jaxpr))

    with jax.default_matmul_precision("tensorfloat32"):
      jnp.dot(x, x)  # doesn't crash
      jaxpr = jax.make_jaxpr(jnp.dot)(x, x)
    self.assertIn('Precision.HIGH', str(jaxpr))

    with jax.default_matmul_precision("float32"):
      jnp.dot(x, x)  # doesn't crash
      jaxpr = jax.make_jaxpr(jnp.dot)(x, x)
    self.assertIn('Precision.HIGHEST', str(jaxpr))

    # an explicit per-call precision wins over the context manager
    dot = partial(jnp.dot, precision=lax.Precision.HIGHEST)
    with jax.default_matmul_precision("tensorfloat32"):
      dot(x, x)  # doesn't crash
      jaxpr = jax.make_jaxpr(dot)(x, x)
    self.assertIn('Precision.HIGHEST', str(jaxpr))
  def test_dot_precision_flag(self):
    """The jax_default_matmul_precision flag can be set both via FLAGS
    assignment and via config.update; each must take effect and be cleanly
    restorable."""
    x = jnp.zeros((2, 2))

    # set via direct FLAGS assignment
    prev_val = config._read("jax_default_matmul_precision")
    try:
      config.FLAGS.jax_default_matmul_precision = "tensorfloat32"
      jnp.dot(x, x)  # doesn't crash
      jaxpr = jax.make_jaxpr(jnp.dot)(x, x)
    finally:
      config.FLAGS.jax_default_matmul_precision = prev_val
    self.assertIn('Precision.HIGH', str(jaxpr))
    self.assertEqual(prev_val, config._read("jax_default_matmul_precision"))

    # set via config.update
    prev_val = config._read("jax_default_matmul_precision")
    try:
      config.update('jax_default_matmul_precision','tensorfloat32')
      jnp.dot(x, x)  # doesn't crash
      jaxpr = jax.make_jaxpr(jnp.dot)(x, x)
    finally:
      config.update('jax_default_matmul_precision', prev_val)
    self.assertIn('Precision.HIGH', str(jaxpr))
    self.assertEqual(prev_val, config._read("jax_default_matmul_precision"))
  def test_dot_precision_forces_retrace(self):
    """Changing the default matmul precision (by context manager or flag)
    must invalidate jit/cond tracing caches exactly once per change."""
    num_traces = 0

    def g(x):
      nonlocal num_traces
      num_traces += 1
      return jnp.dot(x, x)
    def f_cond(x):
      return lax.cond(True, g, g, x)

    @jax.jit
    def f_jit(x):
      nonlocal num_traces
      num_traces += 1
      return jnp.dot(x, x)

    for f in [f_jit, f_cond]:
      precision = config.jax_default_matmul_precision
      try:
        num_traces = 0
        x = jnp.zeros((2, 2))
        f(x)
        self.assertEqual(num_traces, 1)
        f(x)  # cache hit: no retrace
        self.assertEqual(num_traces, 1)
        with jax.default_matmul_precision("tensorfloat32"):
          f(x)  # precision change forces one retrace
          self.assertEqual(num_traces, 2)
          FLAGS.jax_default_matmul_precision = "float32"
          f(x)  # flag change inside context also forces a retrace
          self.assertGreaterEqual(num_traces, 2)
        nt = num_traces
        f(x)  # back outside the context: one more retrace, then stable
        self.assertEqual(num_traces, nt + 1)
        f(x)
        self.assertEqual(num_traces, nt + 1)
      finally:
        FLAGS.jax_default_matmul_precision = precision
  def test_rank_promotion_forces_retrace(self):
    """Changing jax_numpy_rank_promotion (context manager or flag) must
    invalidate jit/cond tracing caches exactly once per change."""
    num_traces = 0

    def g(x):
      nonlocal num_traces
      num_traces += 1
      return x + x
    def f_cond(x):
      return lax.cond(True, g, g, x)

    @jax.jit
    def f_jit(x):
      nonlocal num_traces
      num_traces += 1
      return x + x

    for f in [f_jit, f_cond]:
      allow_promotion = config.jax_numpy_rank_promotion
      try:
        num_traces = 0
        @jax.jit
        def f(x):
          nonlocal num_traces
          num_traces += 1
          return x + x
        x = jnp.zeros((2, 2))
        f(x)
        self.assertEqual(num_traces, 1)
        f(x)  # cache hit: no retrace
        self.assertEqual(num_traces, 1)
        with jax.numpy_rank_promotion("warn"):
          f(x)  # rank-promotion change forces one retrace
          self.assertEqual(num_traces, 2)
          FLAGS.jax_numpy_rank_promotion = "raise"
          f(x)  # flag change inside context also forces a retrace
          self.assertGreaterEqual(num_traces, 2)
        nt = num_traces
        f(x)  # back outside the context: one more retrace, then stable
        self.assertEqual(num_traces, nt + 1)
        f(x)
        self.assertEqual(num_traces, nt + 1)
      finally:
        FLAGS.jax_numpy_rank_promotion = allow_promotion
  def test_backward_pass_ref_dropping(self):
    """The backward pass must drop references to cotangents as it consumes
    them: by the time the second rev rule runs, the first rule's output
    should already have been garbage collected (checked via weakrefs)."""
    refs = []

    @api.custom_vjp
    def f(x):
      return x
    def f_fwd(x):
      return x, None
    def f_rev(_, g):
      # the previously produced cotangent must already be collectable
      assert len(refs) != 2 or refs[0]() is None
      zero = np.zeros(())
      refs.append(weakref.ref(zero))
      return (zero,)
    f.defvjp(f_fwd, f_rev)

    api.grad(lambda x: f(f(f(x))))(1.)
  def test_custom_vjp_scan_batching_edge_case(self):
    """vmap of a scan over a custom_vjp function, differentiated in reverse
    mode, must not crash and must have correct gradients.

    Regression test for https://github.com/google/jax/issues/5832.
    """
    @jax.custom_vjp
    def mul(x, coeff): return x * coeff
    def mul_fwd(x, coeff): return mul(x, coeff), (x, coeff)
    def mul_bwd(res, g):
      x, coeff = res
      g_x = g * coeff
      g_coeff = (x * g).sum()
      return g_x, g_coeff
    mul.defvjp(mul_fwd, mul_bwd)

    def scan_over_mul(x, coeff):
      def f_(x, t):
        return mul(x, coeff), None
      y, _ = jax.lax.scan(f_, x, jnp.arange(3))
      return y

    key = jax.random.PRNGKey(0)
    key1, key2 = jax.random.split(key, 2)
    x_batch = jax.random.normal(key1, (3, 2))
    covector_batch = jax.random.normal(key2, (3, 2))
    coeff = jnp.array(1.)

    batched_scan_over_mul = jax.vmap(scan_over_mul, in_axes=(0, None), out_axes=0)
    res, vjp_fun = jax.vjp(batched_scan_over_mul, x_batch, coeff)
    vjp_fun(covector_batch)  # doesn't crash
    jtu.check_grads(batched_scan_over_mul, (x_batch, coeff), order=2,
                    modes=['rev'])
  def test_jit_inline(self):
    """jit(inline=False) leaves an xla_call in the jaxpr; inline=True
    inlines the call away."""
    @partial(api.jit, inline=False)
    def f(x):
      return x * 2
    jaxpr = api.make_jaxpr(f)(3)
    self.assertIn('xla_call', str(jaxpr))

    @partial(api.jit, inline=True)
    def f(x):
      return x * 2
    jaxpr = api.make_jaxpr(f)(3)
    self.assertNotIn('xla_call', str(jaxpr))
  # Repro for https://github.com/google/jax/issues/7229.
  def test_compute_with_large_transfer(self):
    """Jitted compute interleaved with large (possibly unaligned) host
    transfers must match the un-jitted result."""
    def f(x, delta):
      return x + jnp.asarray(delta, x.dtype)

    # A large and potentially unaligned array to trigger non-zero-copy and
    # async device array copy.
    xs = np.random.uniform(0., 1., size=(10, 131, 111, 3)).astype(np.float32)
    for x in xs:
      delta = np.random.uniform(-0.5, 0.5, size=())
      jitted_f = api.jit(f)
      np.testing.assert_allclose(jitted_f(x, delta), f(x, delta))
  def test_vjp_fun_jit(self):
    """The function returned by vjp can itself be returned from and passed
    into jitted functions."""
    # test that the function returned by vjp can be returned
    # from and passed to jitted functions
    f = lambda x: 2. * x

    @partial(jit, static_argnums=0)
    def linearize_vjp(f, x):
      _, vjp_fun = api.vjp(f, x)
      return vjp_fun

    linearized = linearize_vjp(f, 1.)
    actual = jit(lambda f, x: f(x))(linearized, 3.)
    expected = (6.,)
    self.assertEqual(actual, expected)
  def test_linearize_fun_jit(self):
    """The jvp function returned by linearize can itself be returned from
    and passed into jitted functions."""
    # test that the function returned by linearize can be returned
    # from and passed to jitted functions
    f = lambda x: 2. * x

    @partial(jit, static_argnums=0)
    def linearize(f, x):
      _, jvp_fun = api.linearize(f, x)
      return jvp_fun

    linearized = linearize(f, 1.)
    actual = jit(lambda f, x: f(x))(linearized, 3.)
    expected = 6.
    self.assertEqual(actual, expected)
  def test_linear_transpose_fun_jit(self):
    """The function returned by linear_transpose can itself be returned
    from and passed into jitted functions."""
    # test that the function returned by linear_transpose can be returned
    # from and passed to jitted functions
    f = lambda x: 2. * x

    @partial(jit, static_argnums=0)
    def transpose(f, x):
      return api.linear_transpose(f, x)

    transposed = transpose(f, 1.)
    actual = jit(lambda f, x: f(x))(transposed, 3.)
    expected = (6.,)
    self.assertEqual(actual, expected)
def test_leaked_tracer_issue_7613(self):
# from https://github.com/google/jax/issues/7613
import numpy.random as npr
def sigmoid(x):
return 1. / (1. + jnp.exp(-x))
x = jnp.ones((50,))
A = jnp.array(npr.randn(50, 50))
@jax.jit
def loss(A, x):
h = jax.nn.sigmoid(A * x)
return jnp.sum((h - x)**2)
with jax.checking_leaks():
_ = jax.grad(loss)(A, x) # doesn't crash
  def test_vmap_caching(self):
    """Repeated hessian-of-jit evaluations must hit the compilation cache:
    after a warmup phase, the compile count must stop growing.

    Regression test for https://github.com/google/jax/issues/7621.
    """
    f = lambda x: jnp.square(x).mean()
    jf = jax.jit(f)
    x = jax.random.uniform(jax.random.PRNGKey(0), shape=(8, 4))

    with jtu.count_jit_and_pmap_compiles() as count:  # noqa: F841
      for _ in range(5):
        jax.hessian(jf)(x).block_until_ready()

      n = count[0]
      # The exact number of compilations may vary depending on the number of
      # jit decorators in the function above, but it should not grow after an
      # initial warmup phase.
      for _ in range(5):
        jax.hessian(jf)(x).block_until_ready()

    self.assertEqual(count[0], n)
  def test_jnp_array_doesnt_device_put(self):
    """Tracing jnp.array(3) (e.g. under make_jaxpr) must not transfer data
    to a device."""
    with jtu.count_device_put() as count:
      api.make_jaxpr(lambda: jnp.array(3))()
    self.assertEqual(count[0], 0)
class RematTest(jtu.JaxTestCase):
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', api.remat),
          ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_basic(self, remat):
    """remat: primal and linearized values are unchanged, and the backward
    pass recomputes the forward (1 sin, 2 cos impl calls) rather than
    reusing saved residuals."""
    @remat
    def g(x):
      return lax.sin(lax.sin(x)), 3.

    def f(x):
      x, _ = g(x)
      return x

    ans = f(2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans, f_lin = api.linearize(f, 2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = f_lin(3.)
    expected = np.cos(np.sin(2.)) * np.cos(2.) * 3.
    self.assertAllClose(ans, expected, check_dtypes=False)

    # swap in counting impl rules to verify rematerialization actually
    # re-executes sin once and cos twice on the backward pass
    sin_calls = []
    cos_calls = []
    sin_impl = lax.sin_p.impl
    cos_impl = lax.cos_p.impl
    try:
      lax.sin_p.def_impl(lambda x: sin_calls.append(1) or sin_impl(x))
      lax.cos_p.def_impl(lambda x: cos_calls.append(1) or cos_impl(x))
      f_lin(3.)
    finally:
      lax.sin_p.def_impl(sin_impl)
      lax.cos_p.def_impl(cos_impl)
    self.assertEqual(len(sin_calls), 1)
    self.assertEqual(len(cos_calls), 2)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', api.remat),
          ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_freevars(self, remat):
    """remat of a closure capturing a traced free variable (y) linearizes
    identically to the unremattered function."""
    def f1(x):
      y = 2 * jnp.sin(x)
      z = jnp.cos(x) * jnp.sin(y)
      return z

    def f2(x):
      y = 2 * jnp.sin(x)
      z = remat(lambda x: jnp.cos(x) * jnp.sin(y))(x)  # y is a free variable
      return z

    ans, f_lin = api.linearize(f2, 2.)
    expected, f_lin_expected = api.linearize(f1, 2.)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = f_lin(3.)
    expected = f_lin_expected(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_remat_grad_python_control_flow(self):
    """remat(concrete=True) permits Python control flow on argument values
    while remaining differentiable."""
    @partial(api.remat, concrete=True)
    def g(x):
      if x > 0:
        return lax.sin(x), 3.
      else:
        return lax.cos(x), 4.

    def f(x):
      x, _ = g(x)
      return x

    ans = f(2.)
    expected = np.sin(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(f)(2.)
    expected = np.cos(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', api.remat),
          ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_jit(self, remat):
    """remat composes with jit in either order: jit-of-remat and
    jit-of-grad-of-remat both give correct values and gradients."""
    @remat
    def g(x):
      return lax.sin(lax.sin(x))

    def f_(x):
      return g(x)
    f = api.jit(f_)

    ans = f(2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(f)(2.)
    expected = np.cos(np.sin(2.)) * np.cos(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jit(api.grad(f_))(2.)
    expected = np.cos(np.sin(2.)) * np.cos(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', api.remat),
          ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_vmap(self, remat):
    """remat composes with vmap and with forward/reverse Jacobians."""
    @remat
    def g(x):
      return lax.sin(lax.sin(x))

    x = np.arange(3.)

    ans = api.vmap(g)(x)
    expected = np.sin(np.sin(x))
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jacfwd(g)(x)
    expected = np.diag(np.cos(np.sin(x)) * np.cos(x))
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jacrev(g)(x)
    expected = np.diag(np.cos(np.sin(x)) * np.cos(x))
    self.assertAllClose(ans, expected, check_dtypes=False)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', api.remat),
          ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_higher_order_autodiff(self, remat):
    """Second-order grad of a rematted function matches the unremattered
    second-order grad."""
    def f(x):
      return lax.cos(lax.sin(x))
    g = remat(f)

    ans = api.grad(api.grad(g))(3.)
    expected = api.grad(api.grad(f))(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_remat_scan(self):
    """remat of a scan body gives the same values/grads as without remat,
    and the residual jaxprs contain the rematerialized cosine."""
    to_scan = lambda c, x: (jnp.sin(c), None)

    def f_noremat(x):
      y, _ = lax.scan(to_scan, x, np.arange(3.))
      return y

    def f_yesremat(x):
      y, _ = lax.scan(api.remat(to_scan), x, np.arange(3.))
      return y

    ans = f_yesremat(4.)
    expected = f_noremat(4.)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(f_yesremat)(4.)
    expected = api.grad(f_noremat)(4.)
    self.assertAllClose(ans, expected, check_dtypes=False)

    jaxpr = api.make_jaxpr(api.linearize(f_yesremat, 4.)[1])(1.)
    scan_eqn, = jaxpr.jaxpr.eqns
    self.assertIn(' cos ', str(scan_eqn.params['jaxpr']))

    jaxpr = api.make_jaxpr(api.vjp(f_yesremat, 4.)[1])(1.)
    scan_eqn, = jaxpr.jaxpr.eqns
    self.assertIn(' cos ', str(scan_eqn.params['jaxpr']))
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', api.remat),
          ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_no_redundant_flops(self, remat):
    """grad of jit-of-remat must not evaluate sin more than once.

    See https://github.com/google/jax/pull/1749#issuecomment-558267584.
    """
    @api.jit
    def g(x):
      return f(2., x)

    @remat
    def f(x, y):
      return jnp.sin(x) * y

    # We swap out sin_p's impl rule to count how many times it's invoked
    called = []
    sin_impl = lax.sin_p.impl
    try:
      lax.sin_p.def_impl(lambda x: called.append(1) or sin_impl(x))
      api.grad(g)(3.)
    finally:
      lax.sin_p.def_impl(sin_impl)
    num_calls = len(called)
    self.assertLessEqual(num_calls, 1)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', api.remat),
          ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_binomial_checkpointing(self, remat):
    """Recursive binomial checkpointing (remat applied at every split of a
    function chain) preserves values and gradients."""
    def binom_checkpoint(funs):
      # recursively halve the chain, rematting each composed half
      if len(funs) == 1:
        return funs[0]
      else:
        f1 = binom_checkpoint(funs[:len(funs)//2])
        f2 = binom_checkpoint(funs[len(funs)//2:])
        return remat(lambda x: f1(f2(x)))

    f1 = binom_checkpoint([jnp.sin, jnp.sin, jnp.sin, jnp.sin])
    f2 = lambda x: jnp.sin(jnp.sin(jnp.sin(jnp.sin(x))))
    x = 4.
    self.assertAllClose(f1(x), f2(x), check_dtypes=False)
    self.assertAllClose(api.grad(f1)(x), api.grad(f2)(x), check_dtypes=False)
  def test_remat_symbolic_zeros(self):
    """grad through a scan whose rematted body carries a symbolic-zero
    output must not crash.

    Code from https://github.com/google/jax/issues/1907.
    """
    key = jax.random.PRNGKey(0)
    key, split = jax.random.split(key)
    n = 5

    def func(D0):
      def shift(R, dR, **unused_kwargs):
        return R + dR

      def apply_fn(R):
        return D0 * R

      Rinit = jax.random.uniform(split, (n,3), minval=0.0, maxval=5.0,
                                 dtype=jnp.float32)

      def move(R,i):
        F = apply_fn(R)
        return shift(R, 0.001 * F), jnp.array([0.])

      move = api.remat(move)
      R, temp = lax.scan(move, Rinit, jnp.arange(2))
      return R[0, 0]

    api.grad(func)(5.0)  # doesn't crash
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', api.remat),
          ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_jit2(self, remat):
    """A rematted zero-argument closure over a jit-traced value works."""
    @api.jit
    def f(x):
      y = 2 * x

      @remat
      def g():
        return y  # closes over the traced intermediate

      return g()

    self.assertAllClose(f(3), 6, check_dtypes=False)
  def test_remat_nontrivial_env(self):
    """remat inside jit-of-scan with keyword defaults and static args must
    not crash.

    Simplified from https://github.com/google/jax/issues/2030.
    """
    @api.remat
    def foo(state, dt=0.5, c=1):
      u, u_t = state
      u_tt = c**2 * u
      u_t = u_t + u_tt * dt
      return (u, u_t)

    @partial(api.jit, static_argnums=(1,))
    def _multi_step(state, count, dt, c):
      f = lambda s, _: (foo(s, dt, c), _)
      return lax.scan(f, state, None, count)

    def multi_step(state, count, dt=1/jnp.sqrt(2), c=1):
      return _multi_step(state, count, dt, c)

    def loss(u0, target, steps, dt=1/jnp.sqrt(2), c=1):
      init = (u0, jnp.zeros_like(u0))
      (uf, _), _ = multi_step(init, steps, dt, c)
      return ((uf - target) ** 2).mean()

    target = jnp.zeros((128, 128))
    u0 = jnp.ones_like(target)
    loss(u0, target, 10)  # doesn't crash
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', api.remat),
          ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_jit3(self, remat):
    """grad of remat with fan-out data dependence (einsum chains, nested
    jitted calls) must not crash.

    Regression test for https://github.com/google/jax/issues/2180.
    """
    def f(w, x):
      a = jnp.dot(x, w)
      b = jnp.einsum("btd,bTd->btT", a, a)
      c = jnp.einsum("btT,btd->btd", b, a)
      return jnp.sum(c)

    w = jnp.ones([1, 1])
    x = jnp.ones([1, 1, 1])
    f = remat(f)
    api.grad(f)(w, x)  # doesn't crash

    @api.jit
    def mul(a, b):
      return a * b

    def f(w, x):
      a = mul(w, x)
      b = mul(a, a)
      return b

    w = 1.
    x = 1.
    f = remat(f)
    api.grad(f)(w, x)  # doesn't crash
  def test_remat_scan2(self):
    """grad of remat-of-(nested scan) must not crash.

    Regression test for https://github.com/google/jax/issues/1963.
    """
    def scan_bug(x0):
      f = lambda x, _: (x + 1, None)
      def scanned_f(x, _):
        return lax.scan(f, x, xs=None, length=1)[0], None
      x, _ = jax.remat(scanned_f)(x0, None)
      return x

    jax.grad(scan_bug)(1.0)  # doesn't crash
  def test_remat_jit_static_argnum_omnistaging(self):
    """jit with a static argument over a core.call-wrapped function using
    Python control flow on that static argument must not crash.

    Regression test for https://github.com/google/jax/issues/2833.
    """
    # NOTE(mattjj): after #3370, this test doesn't actually call remat...
    def named_call(f):
      def named_f(*args):
        f_ = lu.wrap_init(lambda: (f(*args),))
        out, = core.call_p.bind(f_)
        return out
      return named_f

    def f(a_bool, y):
      if a_bool:
        return y + 1
      else:
        return y

    api.jit(named_call(f), static_argnums=0)(True, 1)  # no crash
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', api.remat),
          ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_remat_eval_counter(self, remat):
    """Counts evaluations of a custom primitive to verify how remat trades
    recomputation: a nested remat recomputes on the transpose pass, while a
    core.call boundary avoids the re-evaluation.

    Regression test for https://github.com/google/jax/issues/2737.
    """
    add_one_p = Primitive('add_one')
    add_one = add_one_p.bind

    num_evals = 0

    @contextmanager
    def assertEvals(n):
      # asserts exactly n impl-rule evaluations occur inside the block
      start = num_evals
      yield
      assert num_evals - start == n

    def add_one_impl(x):
      nonlocal num_evals
      num_evals += 1
      return x + 1
    add_one_p.def_impl(add_one_impl)

    def add_one_jvp(pin, tin):
      pout = add_one(pin[0])
      return pout, pout * tin[0]
    ad.primitive_jvps[add_one_p] = add_one_jvp

    add_one_p.def_abstract_eval(lambda x: x)

    v = np.zeros((1,))

    f = remat(add_one)
    g = remat(lambda x: add_one(f(x)))

    # 2 calls needed to evaluate g
    with assertEvals(2):
      _, vjp = jax.vjp(g, v)
    # 2 calls made while transposing g, 1 call made while transposing f
    with assertEvals(3):
      vjp(v)

    @jax._src.util.curry
    def call(f, *args):
      return jax.core.call(
          jax.linear_util.wrap_init(lambda *args: [f(*args)]),
          *args, name='foo')[0]

    f = call(add_one)
    g = remat(lambda x: add_one(f(x)))

    # 2 calls needed to evaluate g
    with assertEvals(2):
      _, vjp = jax.vjp(g, v)
    # 2 calls made while transposing g, no reevaluation for transposition of f
    with assertEvals(2):
      vjp(v)
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', api.remat),
          ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_escaped_tracer_remat(self, remat):
    """Mutating global state from inside a rematted function raises an
    UnexpectedTracerError mentioning "global state".

    Regression test for b/169779185.
    """
    def f():
      seq = [jnp.zeros([])]
      def g():
        seq[0] += 1  # this is line 7 btw
        return seq[0]

      remat(g)()
      remat(g)()

    with self.assertRaisesRegex(UnexpectedTracerError, "global state"):
      api.jit(f)()
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', api.remat),
          ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_no_cse_widget_on_primals(self, remat):
    """The anti-CSE widget (a while/conditional wrapper in the HLO) must
    appear only in the gradient computation, not in the primal one."""
    @remat
    def g(x):
      return lax.sin(lax.sin(x)), 3.

    def f(x):
      x, _ = g(x)
      return x

    c = api.xla_computation(f)(2.)
    self.assertNotIn('while', c.as_hlo_text())
    self.assertNotIn('conditional', c.as_hlo_text())

    c = api.xla_computation(grad(f))(2.)
    text = c.as_hlo_text()
    self.assertTrue('while' in text or 'conditional' in text)
  def test_no_cse_widget_with_prevent_cse_false(self):
    """With prevent_cse=False, no anti-CSE widget appears even in the
    gradient's HLO."""
    @partial(api.remat, prevent_cse=False)
    def g(x):
      return lax.sin(lax.sin(x)), 3.

    def f(x):
      x, _ = g(x)
      return x

    c = api.xla_computation(f)(2.)
    self.assertNotIn('while', c.as_hlo_text())
    self.assertNotIn('conditional', c.as_hlo_text())

    c = api.xla_computation(grad(f))(2.)
    self.assertNotIn('while', c.as_hlo_text())
    self.assertNotIn('conditional', c.as_hlo_text())
  @parameterized.named_parameters(
      {"testcase_name": f"_{policy_name}", "policy": policy,
       "in_jaxpr2": in_jaxpr2, "not_in_jaxpr2": not_in_jaxpr2}
      for policy_name, policy, in_jaxpr2, not_in_jaxpr2 in [
          ('save_anything', lambda *_, **__: True, [], [' sin ', ' cos ']),
          ('save_nothing',  lambda *_, **__: False, [' sin ', ' cos '], []),
          ('save_sin',      lambda p, *_, **__: str(p) == 'sin', [' cos '], [' sin ']),
      ])
  def test_remat_custom_policy(self, policy, in_jaxpr2, not_in_jaxpr2):
    """A custom remat policy determines exactly which primitives appear in
    the linearized (backward) jaxpr, without changing values or grads."""
    for square in [lambda x: x * x, api.jit(lambda x: x * x)]:
      f = api.remat(lambda x: jnp.sin(square(jnp.sin(x))),
                    policy=policy)
      y, f_lin = api.linearize(f, 1.)
      ydot = f_lin(2.)
      jaxpr_text = str(f_lin.func.args[0])
      for substr in in_jaxpr2:
        self.assertIn(substr, jaxpr_text)
      for substr in not_in_jaxpr2:
        self.assertNotIn(substr, jaxpr_text)
      y_expected, ydot_expected = api.jvp(lambda x: jnp.sin(square(jnp.sin(x))),
                                          [1.], [2.])
      self.assertAllClose(y, y_expected)
      self.assertAllClose(ydot, ydot_expected)
      jtu.check_grads(f, (3.,), order=2, modes=['fwd', 'rev'])
  def test_remat_custom_policy_save_cos(self):
    """A save-cos policy on sin(sin(x)) leaves neither sin nor cos in the
    linearized jaxpr (the cosines are saved residuals)."""
    save_cos = lambda prim, *_, **__: str(prim) == 'cos'
    f = api.remat(lambda x: jnp.sin(jnp.sin(x)),  # different function
                  policy=save_cos)
    _, f_lin = api.linearize(f, 1.)
    jaxpr_text = str(f_lin.func.args[0])
    self.assertNotIn(' sin ', jaxpr_text)
    self.assertNotIn(' cos ', jaxpr_text)
    jtu.check_grads(f, (3.,), order=2, modes=['fwd', 'rev'])
  def test_remat_checkpoint_dots(self):
    """checkpoint_dots policy: matmuls are saved while elementwise sines
    are rematerialized (2 sin and 6 dot ops in the backward jaxpr)."""
    @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(x):
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      return x

    _, f_lin = api.linearize(f, jnp.ones((2, 2)))
    jaxpr_text = str(f_lin.func.args[0])
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    self.assertEqual(jaxpr_text.count(' dot_'), 6)
    jtu.check_grads(f, (jnp.ones((2, 2)),), order=2, modes=['fwd', 'rev'])
  def test_remat_checkpoint_dots_with_no_batch_dims(self):
    """checkpoint_dots_with_no_batch_dims saves plain (unbatched) matmuls:
    2 sin and 6 dot_general ops in the backward jaxpr."""
    @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots_with_no_batch_dims)
    def f(x):
      x = jnp.einsum('ij,jk->ik', x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      x = jnp.einsum('ij,jk->ik', x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      x = jnp.einsum('ij,jk->ik', x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      return x

    _, f_lin = api.linearize(f, jnp.ones((2, 2)))
    jaxpr_text = str(f_lin.func.args[0])
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    self.assertEqual(jaxpr_text.count(' dot_general'), 6)
    jtu.check_grads(f, (jnp.ones((2, 2)),), order=2, modes=['fwd', 'rev'])
  def test_remat_checkpoint_dots_with_no_batch_dims2(self):
    """With a batch dimension ('n') in the einsum, the no-batch-dims policy
    does NOT save the matmuls: 9 dot_general ops in the backward jaxpr."""
    @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots_with_no_batch_dims)
    def f(x):
      x = jnp.einsum('nij,njk->nik', x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      x = jnp.einsum('nij,njk->nik', x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      x = jnp.einsum('nij,njk->nik', x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      return x

    _, f_lin = api.linearize(f, jnp.ones((3, 2, 2)))
    jaxpr_text = str(f_lin.func.args[0])
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    self.assertEqual(jaxpr_text.count(' dot_general'), 9)
    jtu.check_grads(f, (jnp.ones((3, 2, 2)),), order=2, modes=['fwd', 'rev'])
def test_remat_checkpoint_dots_jit(self):
@api.jit
@partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)
def f(x):
x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
x = jnp.sin(x * 1e-3)
x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
x = jnp.sin(x * 1e-3)
x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
x = jnp.sin(x * 1e-3)
return x
_, f_lin = api.linearize(f, jnp.ones((2, 2)))
jaxpr_text = str(f_lin.func.args[0])
self.assertEqual(jaxpr_text.count(' sin '), 2)
self.assertEqual(jaxpr_text.count(' dot_'), 6)
jtu.check_grads(f, (jnp.ones((2, 2)),), order=2, modes=['fwd', 'rev'])
  def test_remat_checkpoint_dots_inside_scan(self):
    """checkpoint_dots inside a scan body: the scan carry effectively saves
    one sine, and saved primal matmuls leave six backward dots."""
    x = jnp.ones((5,))

    def f(W):
      @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)
      def f(x):
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        return x

      def body(x, _): return f(x), None
      return lax.scan(body, x, None, length=2)[0]

    _, f_vjp = api.vjp(f, jnp.ones((5, 5)))
    jaxpr_text = str(f_vjp.args[0].func.args[1])

    # Two sine calls in the backward pass because while we don't save sines
    # within the (rematted) body function, we can save the scan carry, which
    # effectively saves one sine. Three cosines for the Jacobian coefficients.
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    self.assertEqual(jaxpr_text.count(' cos '), 3)
    # Six calls to dot_general in the backward pass because we save the primal
    # matmuls and only compute the backward pass ones (two for each primal one).
    self.assertEqual(jaxpr_text.count(' dot_'), 6)

    jtu.check_grads(api.jit(f), (jnp.ones((5, 5)),), order=2,
                    modes=['fwd', 'rev'])
  def test_remat_custom_jvp_policy(self):
    """A remat policy interacts correctly with a custom_jvp function, both
    directly and inside a scan (checked via check_grads)."""
    @api.custom_jvp
    def sin(x):
      return jnp.sin(x)
    def sin_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return sin(x), jnp.cos(x) * g
    sin.defjvp(sin_jvp)

    @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(x):
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = sin(x * 1e-3)
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = sin(x * 1e-3)
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = sin(x * 1e-3)
      return x

    jtu.check_grads(f, (3.,), order=2, modes=['fwd', 'rev'])

    def g(x):
      return lax.scan(lambda x, _: (f(x), None), x, None, length=2)[0]
    jtu.check_grads(g, (3.,), order=2, modes=['fwd', 'rev'])
  def test_remat_custom_vjp_policy(self):
    """A remat policy interacts correctly with a custom_vjp function and a
    named_call wrapper, directly and inside a scan (rev mode only)."""
    @api.custom_vjp
    def sin(x):
      return jnp.sin(x)
    def sin_fwd(x):
      return sin(x), x
    def sin_bwd(x, y_bar):
      return (jnp.cos(x) * y_bar,)
    sin.defvjp(sin_fwd, sin_bwd)

    @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(x):
      @partial(api.named_call, name="dot")
      def dot2(y, z):
        return jnp.dot(x, jnp.dot(y, z, precision=lax.Precision.HIGHEST),
                       precision=lax.Precision.HIGHEST)

      x = dot2(x, x)
      x = sin(x * 1e-3)
      x = dot2(x, x)
      x = sin(x * 1e-3)
      x = dot2(x, x)
      x = sin(x * 1e-3)
      return x

    jtu.check_grads(f, (3.,), order=2, modes=['rev'])

    def g(x):
      return lax.scan(lambda x, _: (f(x), None), x, None, length=2)[0]
    jtu.check_grads(g, (3.,), order=2, modes=['rev'])
  def test_remat_dropvar_policy(self):
    """grad of a rematted function that itself calls grad (producing
    dropped variables in the jaxpr) must not crash."""
    def f(x):
      return x, x

    @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def g(x):
      x = api.grad(lambda x: f(x)[0])(x)
      return x

    api.grad(g)(3.)
  def test_remat_custom_jvp_linear_policy(self):
    """A remat policy over a linear custom_jvp function (sum) must keep
    gradients correct, directly and inside a scan."""
    @api.custom_jvp
    def sum(x):
      return jnp.sum(x, axis=0)
    @sum.defjvp
    def sum_jvp(primals, tangents):
      (x,), (xdot,) = primals, tangents
      return sum(x), sum(xdot)

    @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(x):
      return sum(x)
    jtu.check_grads(f, (jnp.ones(3),), order=2, modes=['fwd', 'rev'])

    def g(x):
      return lax.scan(lambda _, x: (None, f(x)), None, x)[1]
    jtu.check_grads(g, (jnp.ones((2, 3)),), order=2, modes=['fwd', 'rev'])
def test_constants_not_hoisted(self):
  """Constants created inside a remat'd function are not saved as residuals."""
  # The old implementation of remat worked by data dependence, and so
  # (potentially large) constants would not be rematerialized and could be
  # wastefully instantiated. This test checks that the newer remat
  # implementation avoids that. See https://github.com/google/jax/pull/8191.

  # no residuals from constants created inside jnp.einsum
  @partial(new_checkpoint, policy=lambda *_, **__: False)
  def f(x):
    return jnp.einsum('ii->i', x)
  res_avals = saved_residuals(f, jnp.ones((2, 2)))
  self.assertLen(res_avals, 0)

  # no residuals from jnp.zeros
  @partial(new_checkpoint, policy=lambda *_, **__: False)
  def f(x):
    return jnp.zeros_like(x) * x
  res_avals = saved_residuals(f, jnp.ones((2, 2)))
  self.assertLen(res_avals, 0)

  # no residuals from jnp.zeros, but input must be saved
  @partial(new_checkpoint, policy=lambda *_, **__: False)
  def f(x):
    return jnp.zeros_like(x) * jnp.sin(x)
  res_avals = saved_residuals(f, jnp.ones((2, 2)))
  self.assertLen(res_avals, 1)
def test_name_denylist(self):
  """save_any_names_but_these saves all named values except the listed ones."""
  def f(x):
    y = checkpoint_name(jnp.multiply(2., 2.), 'y')
    z = checkpoint_name(jnp.multiply(2., 2.), 'z')
    w = checkpoint_name(jnp.multiply(2., 2.), 'w')
    u = jnp.multiply(2., 2.)  # unnamed: never saveable under these policies
    return (((x * y) * z) * w) * u

  policy = jax.checkpoint_policies.save_any_names_but_these('y', 'z', 'w')
  res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
  self.assertLen(res, 0)  # can't save anything

  policy = jax.checkpoint_policies.save_any_names_but_these('z', 'w')
  res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
  self.assertLen(res, 1)  # can save only y

  policy = jax.checkpoint_policies.save_any_names_but_these('w')
  res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
  self.assertLen(res, 2)  # can save y and z

  policy = jax.checkpoint_policies.save_any_names_but_these()
  res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
  self.assertLen(res, 3)  # can save y, z, and w
def test_name_allowlist(self):
  """save_only_these_names saves exactly the listed named values."""
  def f(x):
    y = checkpoint_name(jnp.multiply(2., 2.), 'y')
    z = checkpoint_name(jnp.multiply(2., 2.), 'z')
    w = checkpoint_name(jnp.multiply(2., 2.), 'w')
    u = jnp.multiply(2., 2.)  # unnamed: never saveable under these policies
    return (((x * y) * z) * w) * u

  policy = jax.checkpoint_policies.save_only_these_names('y', 'z', 'w')
  res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
  self.assertLen(res, 3)  # can save y, z, and w

  policy = jax.checkpoint_policies.save_only_these_names('z', 'w')
  res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
  self.assertLen(res, 2)  # can save z and w

  policy = jax.checkpoint_policies.save_only_these_names('w')
  res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
  self.assertLen(res, 1)  # can save w

  policy = jax.checkpoint_policies.save_only_these_names()
  res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
  self.assertLen(res, 0)  # can't save anything!
def test_saved_residuals_utility(self):
  """saved_residuals reports each residual with a provenance description."""
  def f(x, y):
    x1, x2 = x
    z = checkpoint_name(jnp.sin(3.), 'z')
    # np.array([3.]) becomes a closed-over constant residual.
    return z * ((x1 * x2) * y) * np.array([3.])

  res = saved_residuals(f, (2., 3.), y=4.)
  self.assertLen(res, 6)
  # Entries are (aval, description) pairs in a deterministic order:
  # the constant, the arguments, the named value, then intermediates.
  self.assertEqual(res[0][0].shape, (1,))
  self.assertEqual(res[0][1], "from a constant")
  self.assertEqual(res[1][0].shape, ())
  self.assertEqual(res[1][1], "from the argument 'x'")
  self.assertEqual(res[2][0].shape, ())
  self.assertEqual(res[2][1], "from the argument 'x'")
  self.assertEqual(res[3][0].shape, ())
  self.assertEqual(res[3][1], "from the argument 'y'")
  self.assertEqual(res[4][0].shape, ())
  self.assertStartsWith(res[4][1], "named 'z'")
  self.assertEqual(res[5][0].shape, ())
def test_saved_residuals_utility_literals(self):
  """A scalar literal operand is not itself reported; one residual remains."""
  double = lambda x: x * 2.
  residuals = saved_residuals(double, 3.)
  self.assertLen(residuals, 1)
  (aval, _), = residuals
  self.assertEqual(aval.shape, ())
@parameterized.named_parameters(
    {"testcase_name": f"{suffix}", "remat": remat}
    for suffix, remat in [
        ('', api.remat),
        ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
        ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
    ])
def test_checkpoint_dropvars(self, remat):
  """Each remat variant tolerates a jitted call whose first output is dropped."""
  @remat
  def f(x):
    _, x = api.jit(lambda: (x, x))()
    return x

  _ = api.grad(f)(3.)  # doesn't crash
def test_dce_keeps_eqns_with_used_outputs_but_no_used_inputs(self):
  """DCE keeps an equation whose output is used even if it consumes no inputs."""
  @new_checkpoint
  def f(x):
    # jitted thunk: an equation with zero inputs but a used output.
    c = jax.jit(lambda: 3.)()
    return c * x

  _ = jax.grad(f)(3.)  # doesn't crash
@parameterized.named_parameters(
    {"testcase_name": f"{suffix}", "remat": remat}
    for suffix, remat in [
        ('', api.remat),
        ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
        ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
    ])
def test_unit_dropvar_consistency_regression(self, remat):
  """Regression: unit-typed arguments and dropvars stay consistent under remat."""
  @partial(remat, policy=lambda *_, **__: False)
  def f(u, x):
    # Second output (the closed-over unit u) is dropped.
    x, _ = jax.jit(lambda x: (x, u))(x)
    return x

  # NOTE(review): core.unit is a legacy sentinel; this pins old-jax behavior.
  _ = api.linearize(partial(f, core.unit), 3.)
class JaxprTest(jtu.JaxTestCase):
  """Tests for api.make_jaxpr: tracing Python callables into jaxprs."""

  def test_scalar_literals(self):
    """Scalar literals are inlined in the jaxpr, not lifted to constvars."""
    jaxpr = api.make_jaxpr(lambda x: x + 2)(42)
    self.assertLen(jaxpr.jaxpr.constvars, 0)

  def test_abstract_inputs(self):
    """make_jaxpr accepts duck-typed objects exposing .shape and .dtype."""
    jaxpr = api.make_jaxpr(lambda x: x + 2.)(
        types.SimpleNamespace(shape=(), dtype=np.dtype(np.float32)))
    self.assertEqual(jaxpr.in_avals[0].shape, ())
    self.assertEqual(jaxpr.in_avals[0].dtype, np.float32)

  def test_const(self):
    """Array constants become constvars; scalar literals stay inline."""
    def fun(x):
      return (x, 1., np.zeros(1, dtype=jnp.float32))
    expected = "{ lambda a:f32[1]; b:f32[]. let in (b, 1.0, a) }"
    jaxpr = api.make_jaxpr(fun)(jnp.float32(0.))
    self.assertMultiLineStrippedEqual(expected, str(jaxpr))

  def test_cond(self):
    """lax.cond traces to a single cond primitive with merged branch jaxprs."""
    def f(x):
      return lax.cond(x >= 0.,
                      x + 1.,
                      lambda xt: xt + x,
                      x + 2.,
                      lambda xf: xf - x)
    # assertMultiLineStrippedEqual strips each line, so only token content
    # of the printed jaxpr is pinned here, not its indentation.
    expected = """{ lambda ; a:f32[]. let
        b:bool[] = ge a 0.0
        c:f32[] = add a 1.0
        d:f32[] = add a 2.0
        e:i32[] = convert_element_type[new_dtype=int32 weak_type=False] b
        f:f32[] = cond[
          branches=(
            { lambda ; g_:f32[] h:f32[] i:f32[] j:f32[]. let
                k:f32[] = sub j h
              in (k,) }
            { lambda ; l:f32[] m_:f32[] n:f32[] o:f32[]. let
                p:f32[] = add n l
              in (p,) }
          )
          linear=(False, False, False, False)
        ] e a a c d
      in (f,) }"""
    jaxpr = api.make_jaxpr(f)(jnp.float32(3.))
    self.assertMultiLineStrippedEqual(expected, str(jaxpr))

  def test_make_jaxpr_static_argnums(self):
    """Static arguments are baked into the jaxpr as literals."""
    def f(x, y):
      return x + y

    jaxpr = api.make_jaxpr(f, static_argnums=(1,))(2, 3)
    self.assertIn('3', str(jaxpr))

  def test_make_jaxpr_return_shape(self):
    """return_shape=True also yields the output ShapeDtypeStruct pytree."""
    _, shape_tree = api.make_jaxpr(lambda x: (x + 1, jnp.zeros(2, jnp.float32)),
                                   return_shape=True)(np.int32(1))
    expected = (api.ShapeDtypeStruct(shape=(), dtype=jnp.int32),
                api.ShapeDtypeStruct(shape=(2,), dtype=jnp.float32))
    self.assertEqual(shape_tree, expected)

  def test_make_jaxpr_axis_env(self):
    """axis_env lets collectives like psum trace outside pmap/vmap."""
    def f(x):
      return x - lax.psum(x, 'i')

    jaxpr = api.make_jaxpr(f, axis_env=[('i', 4)])(2)
    self.assertIn('psum', str(jaxpr))

  def test_make_jaxpr_named(self):
    """Named shapes on inputs propagate through tracing; psum removes them."""
    def f(x):
      return x - lax.psum(x, 'i')

    x = api.ShapeDtypeStruct(
        shape=(2, 3), dtype=jnp.dtype(jnp.float32), named_shape={'i': 10})
    jaxpr = api.make_jaxpr(f, axis_env=[('i', 10)])(x)
    named_shapes = [v.aval.named_shape for v in jaxpr.jaxpr.eqns[1].invars]
    self.assertEqual(named_shapes, [{'i': 10}, {}])

  @parameterized.parameters(True, False)
  def test_vjp_reduce_axes_jaxpr(self, gy_batched):
    """vjp(..., reduce_axes=...) inserts psums to sum per-example cotangents."""
    def f(w, x):
      return jnp.sin(jnp.dot(x, w))

    w = api.ShapeDtypeStruct(
        shape=(3, 4), dtype=jnp.float32, named_shape={})
    x = api.ShapeDtypeStruct(
        shape=(3,), dtype=jnp.float32, named_shape={'batch': 2})
    gy = api.ShapeDtypeStruct(
        shape=(4,), dtype=jnp.float32,
        named_shape={'batch': 2} if gy_batched else {})

    # per-example: cotangent for w keeps the batch named axis, no psum
    jaxpr, shapes = api.make_jaxpr(
        lambda w, x, gy: api.vjp(f, w, x)[1](gy), axis_env=[('batch', 2)],
        return_shape=True)(w, x, gy)
    expected = (api.ShapeDtypeStruct(
        shape=(3, 4), dtype=jnp.float32, named_shape={'batch': 2}), x)
    self.assertEqual(shapes, expected)
    self.assertNotIn('psum', str(jaxpr))

    # reduced: the batch axis is summed out via psum
    jaxpr, shapes = api.make_jaxpr(
        lambda w, x, gy: api.vjp(f, w, x, reduce_axes=('batch',))[1](gy),
        axis_env=[('batch', 2)],
        return_shape=True)(w, x, gy)
    expected = (w, x)
    self.assertEqual(shapes, expected)
    self.assertIn('psum', str(jaxpr))
class CustomJVPTest(jtu.JaxTestCase):
  """Tests for the jax.custom_jvp forward-mode differentiation API."""

  def test_basic(self):
    """The defjvp rule (2*cos) replaces sin's true derivative in jvp/grad."""
    @api.custom_jvp
    def f(x):
      return jnp.sin(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)

    x = 3.
    self.assertAllClose(f(x), jnp.sin(x))
    self.assertAllClose(api.jvp(f, (x,), (1.,)),
                        (jnp.sin(x), 2 * jnp.cos(x)))
    self.assertAllClose(api.grad(f)(x), 2 * jnp.cos(x))

  def test_invariance(self):
    """The custom rule is used consistently under nested jvp applications."""
    @api.custom_jvp
    def f(x):
      return jnp.cos(2 * x) / 2.
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return (f(x), 3 * g)
    f.defjvp(f_jvp)
    def f2(x):
      y, _ = api.jvp(f, (x,), (x,))
      return y
    def f3(x):
      y, _ = api.jvp(f2, (x,), (x,))
      return y
    x = 1.
    self.assertAllClose(api.jvp(f, (x,), (x,)),
                        api.jvp(f2, (x,), (x,)),
                        check_dtypes=False)
    self.assertAllClose(api.jvp(f, (x,), (x,)),
                        api.jvp(f3, (x,), (x,)),
                        check_dtypes=False)

  def test_python_control_flow(self):
    """Python-level branching on concrete values works in both f and its rule."""
    @api.custom_jvp
    def f(x):
      if x > 0:
        return jnp.sin(x)
      else:
        return jnp.cos(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      if x > 0:
        return f(x), 2 * g
      else:
        return f(x), 3 * g
    f.defjvp(f_jvp)
    x = 2.
    self.assertAllClose(f(x), jnp.sin(x))
    self.assertAllClose(f(-x), jnp.cos(-x))
    self.assertAllClose(api.jvp(f, (x,), (1.,)),
                        (jnp.sin(x), 2.),
                        check_dtypes=False)
    self.assertAllClose(api.jvp(f, (-x,), (1.,)),
                        (jnp.cos(-x), 3.),
                        check_dtypes=False)
    self.assertAllClose(api.grad(f)(x), 2., check_dtypes=False)
    self.assertAllClose(api.grad(f)(-x), 3., check_dtypes=False)

  def test_vmap(self):
    """custom_jvp composes with vmap and jvp in every nesting order."""
    @api.custom_jvp
    def f(x):
      assert jnp.ndim(x) == 0  # rule sees scalars even under vmap
      return jnp.sin(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      assert jnp.ndim(x) == jnp.ndim(g) == 0
      return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)

    x = jnp.arange(3.)
    xx = jnp.arange(6.).reshape(2, 3)

    # vmap of f
    self.assertAllClose(api.vmap(f)(x), jnp.sin(x))
    self.assertAllClose(api.vmap(api.vmap(f))(xx), jnp.sin(xx))

    # vmap of jvp of f
    self.assertAllClose(api.vmap(lambda x: api.jvp(f, (x,), (x,)))(x),
                        (jnp.sin(x), 2 * jnp.cos(x) * x))
    self.assertAllClose(api.vmap(api.vmap(lambda x: api.jvp(f, (x,), (x,))))(xx),
                        (jnp.sin(xx), 2 * jnp.cos(xx) * xx))

    # jvp of vmap of f
    self.assertAllClose(api.jvp(api.vmap(f), (x,), (x,)),
                        (jnp.sin(x), 2 * jnp.cos(x) * x))
    self.assertAllClose(api.jvp(api.vmap(api.vmap(f)), (xx,), (xx,)),
                        (jnp.sin(xx), 2 * jnp.cos(xx) * xx))

    # vmap of jvp of vmap of f
    self.assertAllClose(api.vmap(lambda x: api.jvp(api.vmap(f), (x,), (x,)))(xx),
                        (jnp.sin(xx), 2 * jnp.cos(xx) * xx))

  def test_jit(self):
    """custom_jvp composes with jit in either order."""
    @api.custom_jvp
    def f(x):
      return jnp.sin(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)

    x = 3.

    # jit
    self.assertAllClose(api.jit(f)(x), jnp.sin(x))
    self.assertAllClose(api.jit(api.jit(f))(x), jnp.sin(x))

    # jit of jvp
    self.assertAllClose(api.jit(lambda x: api.jvp(f, (x,), (x,)))(x),
                        (jnp.sin(x), 2 * jnp.cos(x) * x),
                        check_dtypes=False)

    # jvp of jit
    self.assertAllClose(api.jvp(api.jit(f), (x,), (x,)),
                        (jnp.sin(x), 2 * jnp.cos(x) * x),
                        check_dtypes=False)

  def test_pytrees(self):
    """Dict-valued (pytree) inputs and outputs flow through the custom rule."""
    @api.custom_jvp
    def f(x):
      return {'b': jnp.sin(x['a'])}
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), {'b': 2 * jnp.cos(x['a']) * g['a']}
    f.defjvp(f_jvp)
    x = {'a': 3.}
    self.assertAllClose(f(x)['b'], jnp.sin(x['a']))
    self.assertAllClose(api.jvp(f, (x,), (x,)),
                        ({'b': jnp.sin(x['a'])},
                         {'b': 2 * jnp.cos(x['a']) * x['a']}),
                        check_dtypes=False)

  def test_kwargs(self):
    """Keyword arguments to a custom_jvp function do not crash jvp."""
    # from https://github.com/google/jax/issues/1938
    @api.custom_jvp
    def my_fun(x, y, c=1.):
      return c * (x + y)
    def my_jvp(primals, tangents):
      x, y, c = primals
      t_x, t_y, t_c = tangents
      return my_fun(x, y, c), t_c
    my_fun.defjvp(my_jvp)
    f = lambda x, y: jnp.square(my_fun(x, y, c=2.)).sum()
    f(10., 5.)  # doesn't crash
    api.jvp(f, (10., 5.), (1., 1.))  # doesn't crash

  def test_initial_style(self):
    """custom_jvp inside lax.scan differentiates correctly at higher orders."""
    @api.custom_jvp
    def f(x):
      return 3 * x
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * g
    f.defjvp(f_jvp)

    def foo(x):
      out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
      return out

    ans = api.grad(foo)(3.)
    expected = 2.
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(api.jit(foo))(3.)
    expected = 2.
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jit(api.grad(foo))(3.)
    expected = 2.
    self.assertAllClose(ans, expected, check_dtypes=False)

    # second derivative of the (linear) rule is zero
    ans = api.grad(api.grad(foo))(3.)
    expected = 0.
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(api.grad(api.jit(foo)))(3.)
    expected = 0.
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(api.jit(api.grad(foo)))(3.)
    expected = 0.
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jit(api.grad(api.grad(foo)))(3.)
    expected = 0.
    self.assertAllClose(ans, expected, check_dtypes=False)

  def test_initial_style_vmap(self):
    """custom_jvp inside scan composes with vmap/jit/grad in all orders."""
    @api.custom_jvp
    def f(x):
      assert jnp.ndim(x) == 0
      return 3 * x
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * g
    f.defjvp(f_jvp)

    def foo(x):
      out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
      return out

    ans = api.vmap(foo)(jnp.ones(3))
    expected = 3. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.vmap(api.jit(foo))(jnp.ones(3))
    expected = 3. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jit(api.vmap(foo))(jnp.ones(3))
    expected = 3. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(lambda x: api.vmap(api.jit(foo))(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(lambda x: api.jit(api.vmap(foo))(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(api.jit(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jit(api.grad(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

  def test_initial_style_vmap_with_collective(self):
    """A collective (psum) inside a custom_jvp works under vmap axis binding."""
    @api.custom_jvp
    def f(x):
      return lax.psum(x, 'foo')

    @f.defjvp
    def f_jvp(xs, ts):
      x, = xs
      t, = ts
      return lax.psum(x, 'foo'), t

    def g(x):
      jaxpr = api.make_jaxpr(f)(x)
      return core.eval_jaxpr(jaxpr.jaxpr, [], x)[0]

    v = api.vmap(lambda _, x: g(x), axis_name='foo', in_axes=(0, None),
                 out_axes=None)(jnp.arange(4.), 2.)
    self.assertAllClose(v, 8.)

  def test_closed_over_tracers_error_message(self):
    """Closing a custom_jvp over an outer tracer raises CustomJVPException."""
    def f(x):
      @api.custom_jvp
      def g(y):
        return x + y
      def g_jvp(primals, tangents):
        return g(x), 2 * primals[0]
      g.defjvp(g_jvp)
      return g(1.)

    self.assertRaises(ad.CustomJVPException, lambda: api.jvp(f, (3.,), (1.,)))
    self.assertRaises(ad.CustomJVPException, lambda: api.grad(f)(3.))

  def test_nondiff_arg(self):
    """nondiff_argnums passes the static argument straight to the rule."""
    @partial(api.custom_jvp, nondiff_argnums=(0,))
    def app(f, x):
      return f(x)
    def app_jvp(f, primals, tangents):
      (x,), (t,) = primals, tangents
      return app(f, x), 3 * t
    app.defjvp(app_jvp)

    ans = app(lambda x: 2 * x, 1)
    expected = 2
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jvp(lambda x: app(lambda y: 2 * y, x), (1.,), (1.,))
    expected = (2., 3.)
    self.assertAllClose(ans, expected, check_dtypes=False)

  def test_nondiff_arg_jit_tracer(self):
    """A jit tracer may appear in a nondiff argument position."""
    @partial(api.custom_jvp, nondiff_argnums=(0,))
    def f(x, y):
      return x * y
    def f_jvp(x, primals, tangents):
      (y,), (t_y,) = primals, tangents
      return f(x, y), 5 * t_y
    f.defjvp(f_jvp)

    @jit
    def g(x, y):
      return f(x, y)

    ans = api.jvp(lambda y: g(2., y), (3.,), (1.,))
    expected = (6., 5.)
    self.assertAllClose(ans, expected, check_dtypes=False)

  def test_nondiff_arg_hiding_jvp_tracer(self):
    """Hiding a JVP tracer inside a nondiff (static) arg must be detected."""
    def f(x):
      @partial(api.custom_jvp, nondiff_argnums=(0,))
      def g(h, x):
        return h(x)
      @g.defjvp
      def g_jvp(h, primals, tangents):
        x, = primals
        t, = tangents
        return g(h, x), 2. * t
      h = lambda y: x + y  # capture x
      return g(h, x)

    with self.assertRaisesRegex(ad.CustomJVPException, "Detected differentiation"):
      api.jvp(f, (2.,), (1.,))

  def test_vmap_axes(self):
    raise unittest.SkipTest("TODO")  # TODO(mattjj): write test

  def test_pmap(self):
    raise unittest.SkipTest("TODO")  # TODO(mattjj): write test

  def test_missing_jvp_rule_error_message(self):
    """Calling or differentiating without a defjvp raises AttributeError."""
    @api.custom_jvp
    def foo(x):
      return x ** 2

    self.assertRaisesRegex(
        AttributeError,
        r"No JVP defined for custom_jvp function foo using defjvp.",
        lambda: foo(2))
    self.assertRaisesRegex(
        AttributeError,
        r"No JVP defined for custom_jvp function foo using defjvp.",
        lambda: api.jvp(foo, (2.,), (1.,)))
    self.assertRaisesRegex(
        AttributeError,
        r"No JVP defined for custom_jvp function foo using defjvp.",
        lambda: api.grad(foo)(2.))

  def test_jvp_rule_inconsistent_pytree_structures_error_message(self):
    """Primal/tangent outputs with different pytree structures raise TypeError."""
    @api.custom_jvp
    def f(x):
      return (x**2,)
    @f.defjvp
    def foo_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return f(x), [2 * x * t, x]

    f(2.)  # doesn't crash
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom JVP rule must produce primal and tangent outputs "
            "with equal container (pytree) structures, but got "
            "{} and {} respectively.".format(
                tree_util.tree_structure((1,)),
                tree_util.tree_structure([1, 2]))
        ),
        lambda: api.jvp(f, (2.,), (1.,)))

  def test_primal_tangent_aval_disagreement_error_message(self):
    """Primal/tangent outputs with mismatched shapes raise TypeError."""
    @api.custom_jvp
    def f(x):
      return x ** 2
    @f.defjvp
    def foo_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return f(x), jnp.reshape(t, (1,))

    f(2.)  # doesn't crash
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom JVP rule must produce primal and tangent outputs "
            "with equal shapes and dtypes, but got float32[] and float32[1] "
            "respectively."),
        lambda: api.jvp(f, (jnp.float32(2.),), (jnp.float32(1.),)))

  def test_jvp_rule_doesnt_return_pair_error_message(self):
    """A rule returning a single value instead of a pair raises TypeError."""
    # https://github.com/google/jax/issues/2516
    @api.custom_jvp
    def f(x):
      return x ** 2
    @f.defjvp
    def foo_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return t

    f(2.)  # doesn't crash
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom JVP rule must produce a pair (list or tuple of length two) "
            "representing primal and tangent outputs, got 1.0"),
        lambda: api.jvp(f, (2.,), (1.,)))

  def test_multiple_rule_invocations(self):
    """Repeated rule invocation inside scan carries state correctly."""
    @jax.custom_jvp
    def expit(x):
      return 1 / (1 + lax.exp(-x))

    @expit.defjvp
    def _expit_jvp(primals, tangents):
      (x,), (t,) = primals, tangents
      ans = expit(x)
      t_out = t * ans * (1 - ans)
      return ans, t_out

    def scanned_fun(c, _):
      return [expit(c[0])] + [c[i-1] + c[i] for i in range(1, len(c))], None

    def foo(x):
      c, _ = lax.scan(scanned_fun, [x, 0., 0., 0., 0.], None, length=10)
      return c[-1]

    # just make sure these don't crash
    foo(3.)
    grad(foo)(3.)
    grad(lambda x: jax.vmap(foo)(x).sum())(jnp.arange(3.))

  def test_hard_stuff(self):
    arr = jnp.ones((5, 2, 2))
    api.jit(jax.vmap(jnp.linalg.det))(arr)  # doesn't crash

  def test_hard_stuff2(self):
    """Identity-tangent rule around lax.tie_in survives jit/vmap/grad combos."""
    @jax.custom_jvp
    def f(x):
      # NOTE(review): lax.tie_in is a legacy primitive; pins old-jax behavior.
      return lax.tie_in(x, np.zeros(x.shape, x.dtype))

    @f.defjvp
    def f_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return f(x), t

    # don't crash
    jax.jit(jax.vmap(f))(jnp.arange(3.))
    jax.jit(jax.vmap(jax.grad(f)))(jnp.arange(3.))
    jax.jit(jax.grad(lambda x: jax.vmap(f)(x).sum()))(jnp.arange(3.))
    jax.grad(lambda x: jax.vmap(f)(x).sum())(jnp.arange(3.))
    jax.jvp(jax.vmap(f), (jnp.arange(3.),), (jnp.ones(3),))

  def test_hard_stuff3(self):
    """A select-based relu rule composes with scan, vmap, jit, and grad."""
    @jax.custom_jvp
    def relu(x):
      return jnp.maximum(x, 0)

    @relu.defjvp
    def _relu_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return relu(x), lax.select(x > 0, t, lax.full_like(t, 0))

    def scanned_fun(c, _):
      return [relu(c[0])] + [c[i-1] + c[i] for i in range(1, len(c))], None

    def f(x):
      c, _ = lax.scan(scanned_fun, [x, 0., 0., 0., 0.], None, length=10)
      return c[-1]

    # don't crash
    jax.jit(jax.vmap(f))(jnp.arange(3.))
    jax.jit(jax.vmap(jax.grad(f)))(jnp.arange(3.))
    jax.jit(jax.grad(lambda x: jax.vmap(f)(x).sum()))(jnp.arange(3.))
    jax.grad(lambda x: jax.vmap(f)(x).sum())(jnp.arange(3.))
    jax.jvp(jax.jit(jax.vmap(f)), (jnp.arange(3.),), (jnp.ones(3),))

  def test_eval_shape(self):
    """eval_shape works on a custom_jvp function and its grad."""
    @jax.custom_jvp
    def expit(x):
      return 1 / (1 + lax.exp(-x))

    @expit.defjvp
    def _expit_jvp(primals, tangents):
      (x,), (t,) = primals, tangents
      ans = expit(x)
      t_out = t * ans * (1 - ans)
      return ans, t_out

    # don't crash
    api.eval_shape(expit, jnp.ones((2, 3)))
    api.eval_shape(api.grad(lambda x: expit(x).sum()), jnp.ones((2, 3)))

  def test_jaxpr_zeros(self):
    """Symbolic-zero tangents inside scan do not break the custom rule."""
    # from https://github.com/google/jax/issues/2657
    @api.custom_jvp
    def f(A, b):
      return A @ b

    def f_jvp(primals, tangents):
      A, b = primals
      dA, db = tangents
      z = f(A, b)
      dz = A @ db + dA @ b
      return z, dz

    f.defjvp(f_jvp)

    def experiment(theta):
      def step(q, _):
        z = f(jnp.eye(3), jnp.ones(3) * theta)
        q += z[0]
        return q, q

      q = 0.
      q, _ = lax.scan(step, q, None, 4)
      return q

    grad(experiment)(1.)  # doesn't crash

  def test_linear_in_scan(self):
    """A linear custom rule used as a scan carry differentiates correctly."""
    @api.custom_jvp
    def f(x):
      return -x

    @f.defjvp
    def f_jvp(primals, tangents):
      x, = primals
      x_dot, = tangents
      return f(x), f(x_dot)

    def foo(x):
      out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
      return out

    ans = api.grad(foo)(3.)
    expected = -1.
    self.assertAllClose(ans, expected, check_dtypes=False)

  def test_custom_jvps_first_rule_is_none(self):
    """defjvps accepts None for arguments with no per-argument rule."""
    # https://github.com/google/jax/issues/3389
    @api.custom_jvp
    def f(x, y):
      return x ** 2 * y

    f.defjvps(None, lambda x_dot, primal_out, x, y: 2 * x * y * x_dot)
    ans = grad(f, 1)(2., 3.)  # doesn't crash
    expected = 12.
    self.assertAllClose(ans, expected, check_dtypes=False)

  def test_concurrent_initial_style(self):
    """Concurrent tracing of scan-based grads from multiple threads is safe."""
    # https://github.com/google/jax/issues/3843
    def unroll(param, sequence):
      def scan_f(prev_state, inputs):
        return prev_state, jax.nn.sigmoid(param * inputs)
      return jnp.sum(jax.lax.scan(scan_f, None, sequence)[1])

    def run():
      return jax.grad(unroll)(jnp.array(1.0), jnp.array([1.0]))

    expected = run()

    # we just don't want this to crash
    n_workers = 2
    with concurrent.futures.ThreadPoolExecutor(max_workers=n_workers) as e:
      futures = []
      for _ in range(n_workers):
        futures.append(e.submit(run))
      results = [f.result() for f in futures]
    for ans in results:
      self.assertAllClose(ans, expected)

  def test_nondiff_argnums_vmap_tracer(self):
    """vmap tracers may flow through nondiff argument positions (PRNG keys)."""
    # https://github.com/google/jax/issues/3964
    @partial(jax.custom_jvp, nondiff_argnums=(0, 2))
    def sample(shape, param, seed):
      return jax.random.uniform(key=seed, shape=shape, minval=param)

    @sample.defjvp
    def sample_jvp(shape, seed, primals, tangents):
      param, = primals
      dparam, = tangents
      dparam = jnp.broadcast_to(dparam, shape)
      samples = sample(shape, param, seed)
      return samples, samples * dparam  # dummy jvp for proof of concept

    # check these don't crash
    jax.vmap(lambda seed: sample((2,3), 1., seed))(
        jax.random.split(jax.random.PRNGKey(1), 10))
    jax.jvp(lambda x: sample((2, 3), x, jax.random.PRNGKey(1)),
            (1.,), (1.,))

  def test_fun_with_nested_calls_2(self):
    """Nested custom_jvp wrappers with closures survive jit and vmap."""
    def call(f, *args):
      f = api.custom_jvp(f)
      f.defjvp(lambda primals, tangents: (f(*primals), sum(tangents)))
      return f(*args)

    def fun_with_nested_calls_2(x):
      def bar(y):
        def baz(w):
          q = call(lambda x: y, x)
          q = q + call(lambda: y)
          q = q + call(lambda y: w + y, y)
          q = call(lambda w: call(jnp.sin, x) * y, 1.0) + q
          return q
        return api.jit(baz)(x)
      return call(bar, x)

    # test these don't crash
    self.assertAllClose(api.jit(fun_with_nested_calls_2)(3.),
                        fun_with_nested_calls_2(3.))
    api.vmap(fun_with_nested_calls_2)(jnp.arange(3.))

  def test_closure_with_vmap(self):
    """A custom_jvp defined per-seed inside a vmapped closure doesn't crash."""
    # https://github.com/google/jax/issues/3822
    alpha = np.float32(2.)

    def sample(seed):
      @api.custom_jvp
      def f(alpha):
        return jax.random.gamma(seed, alpha, shape=[])

      @f.defjvp
      def f_jvp(primal, tangent):
        alpha = primal
        dalpha = tangent
        sample = f(alpha)
        partial_alpha = lax.random_gamma_grad(alpha, sample)
        return sample, partial_alpha * dalpha
      return f(alpha)

    api.vmap(sample)(jax.random.split(jax.random.PRNGKey(1), 3))  # don't crash

  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_float0(self):
    """Integer primals get float0 tangents regardless of the rule's output."""
    @api.custom_jvp
    def f(x, y):
      return x, y
    def f_jvp(primals, _):
      # we need a defined (non-float0) tangent to trigger the rule
      return primals, (2., 1)
    f.defjvp(f_jvp)

    primals = (2., 3)
    tangents = (np.ones(()), np.zeros((), float0),)
    expected_tangents = (2., np.zeros((), float0))
    self.assertArraysEqual(api.jvp(f, primals, tangents),
                           (primals, expected_tangents))

  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_float0_initial_style(self):
    """float0 tangents for integer inputs also work through lax.scan."""
    @api.custom_jvp
    def f(x, y):
      return x, y
    def f_jvp(primals, _):
      x, y = primals
      return (x, y), (2., 1)
    f.defjvp(f_jvp)

    def foo(x, y):
      out, _ = lax.scan(lambda c, _: (f(*c), None), (x, y), None, length=1)
      return out

    primals = (2., 3)
    tangents = (np.ones(()), np.zeros((), float0),)
    expected_tangents = (2., np.zeros((), float0))
    self.assertArraysEqual(api.jvp(foo, primals, tangents),
                           (primals, expected_tangents))

  def test_remat(self):
    """remat of a custom_jvp function uses the custom rule when regrading."""
    @api.custom_jvp
    def f(x):
      return jnp.sin(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)

    @api.remat
    def g(x):
      return f(f(x))

    ans = g(2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(g)(2.)
    # two applications of the custom rule contribute a factor of 2 each
    expected = 4. * api.grad(lambda x: jnp.sin(jnp.sin(x)))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)

  def test_remat_higher_order(self):
    """remat of custom_jvp matches plain derivatives at second/third order."""
    @api.custom_jvp
    def f(x):
      return jnp.sin(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)

    def g(x):
      return f(f(x))

    ans = api.grad(api.grad(api.remat(g)))(2.)
    expected = api.grad(api.grad(g))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(api.remat(api.grad(g)))(2.)
    expected = api.grad(api.grad(g))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(api.grad(api.grad(api.remat(g))))(2.)
    expected = api.grad(api.grad(api.grad(g)))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)

  def test_initial_style_vmap_2(self):
    # This is like test_initial_style_vmap except the primal function closes
    # over an array constant.
    y = jnp.array([1., 2., 3.])

    @api.custom_jvp
    def f(x):
      assert jnp.ndim(x) == 0
      return 3 * x * jnp.sum(y)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * g
    f.defjvp(f_jvp)

    def foo(x):
      out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
      return out

    ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(lambda x: api.vmap(api.jit(foo))(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(lambda x: api.jit(api.vmap(foo))(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(api.jit(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jit(api.grad(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

  def test_custom_jvp_vmap_broadcasting_interaction(self):
    """vmap over a closure plus value_and_grad keeps scalar shapes."""
    # https://github.com/google/jax/issues/6452
    def f2(y, z):
      v1 = z
      v2 = jnp.sum(y) + z
      return jnp.logaddexp(v1, v2)

    def f1(y, z):
      v = api.vmap(lambda _y: f2(_y, z))(y)
      return jnp.sum(v)

    y = jnp.ones((3, 2))
    f = lambda z: f1(y, z)
    z = 0.1
    val, g = api.value_and_grad(f)(z)
    self.assertEqual(val.shape, ())
    self.assertEqual(g.shape, ())

  def test_custom_jvp_vmap_broadcasting_interaction_2(self):
    """Doubly-vmapped custom_jvp with a scalar closure grad stays scalar."""
    # https://github.com/google/jax/issues/5849
    @api.custom_jvp
    def transform(box, R):
      if jnp.isscalar(box) or box.size == 1:
        return R * box
      elif box.ndim == 2:
        return jnp.einsum('ij,j->i', box, R)
      raise ValueError()

    @transform.defjvp
    def transform_jvp(primals, tangents):
      box, R = primals
      dbox, dR = tangents
      return (transform(box, R), dR + transform(dbox, R))

    def periodic_general(box):
      def displacement_fn(Ra, Rb, **kwargs):
        _box = kwargs.get('box', box)
        return transform(_box, Ra - Rb)
      return displacement_fn

    N = 250

    scalar_box = 1.0
    displacement = periodic_general(scalar_box)

    key = jax.random.PRNGKey(0)
    R = jax.random.uniform(key, (N, 2))

    def energy_fn(box):
      d = partial(displacement, box=box)
      d = api.vmap(api.vmap(d, (None, 0)), (0, None))
      return jnp.sum(d(R, R) ** 2)

    self.assertEqual(grad(energy_fn)(scalar_box).shape, ())

  def test_custom_jvp_implicit_broadcasting(self):
    """jacfwd/jacrev of a custom_jvp simplex projection agree with the truth."""
    # https://github.com/google/jax/issues/6357
    if config.x64_enabled:
      raise unittest.SkipTest("test only applies when x64 is disabled")

    @jax.custom_jvp
    def projection_unit_simplex(x: jnp.ndarray) -> jnp.ndarray:
      """Projection onto the unit simplex."""
      s = 1.0
      n_features = x.shape[0]
      u = jnp.sort(x)[::-1]
      cssv = jnp.cumsum(u) - s
      ind = jnp.arange(n_features) + 1
      cond = u - cssv / ind > 0
      idx = jnp.count_nonzero(cond)
      threshold = cssv[idx - 1] / idx.astype(x.dtype)
      return jax.nn.relu(x - threshold)

    @projection_unit_simplex.defjvp
    def projection_unit_simplex_jvp(primals, tangents):
      x, = primals
      x_dot, = tangents
      primal_out = projection_unit_simplex(x)
      supp = primal_out > 0
      card = jnp.count_nonzero(supp)
      tangent_out = supp * x_dot - (jnp.dot(supp, x_dot) / card) * supp
      return primal_out, tangent_out

    rng = np.random.RandomState(0)
    x = rng.rand(5).astype(np.float32)

    J_rev = jax.jacrev(projection_unit_simplex)(x)
    J_fwd = jax.jacfwd(projection_unit_simplex)(x)

    p = projection_unit_simplex(x)
    support = (p > 0).astype(jnp.int32)
    cardinality = jnp.count_nonzero(support)
    J_true = jnp.diag(support) - jnp.outer(support, support) / cardinality
    self.assertAllClose(J_true, J_fwd)
    self.assertAllClose(J_true, J_rev)

    proj = jax.vmap(projection_unit_simplex)

    def fun(X):
      return jnp.sum(proj(X) ** 2)

    rng = np.random.RandomState(0)
    X = rng.rand(4, 5).astype(np.float32)
    U = rng.rand(4, 5)
    U /= np.sqrt(np.sum(U ** 2))
    U = U.astype(np.float32)

    # finite-difference check of the directional derivative
    eps = 1e-3
    dir_deriv_num = (fun(X + eps * U) - fun(X - eps * U)) / (2 * eps)
    dir_deriv = jnp.vdot(jax.grad(fun)(X), U)
    self.assertAllClose(dir_deriv, dir_deriv_num, atol=1e-3)

  def test_vmap_inside_defjvp(self):
    """vmap used inside a defjvp rule body doesn't break grad."""
    # https://github.com/google/jax/issues/3201
    seed = 47
    key = jax.random.PRNGKey(seed)
    mat = jax.random.normal(key, (2, 3))

    @jax.custom_jvp
    def f(mat, aux):
      num_rows, num_cols = mat.shape
      return jnp.ones((num_rows, 1)) / num_cols

    @f.defjvp
    def f_jvp(primals, tangents):
      mat, aux = primals
      vec, _ = tangents
      output = f(*primals)
      num_rows, num_cols = mat.shape
      size = num_rows * num_cols
      # -----
      bd_mat = mat.reshape(1, 1, num_rows, num_cols)
      bd_mat = jnp.tile(bd_mat, reps=(num_rows, num_cols))
      bd_mat = bd_mat.reshape(size, num_rows, num_cols)
      # -----
      rowsum = jnp.sum(mat, axis=1, keepdims=True)
      colsum = jnp.sum(mat, axis=0, keepdims=True)
      bd_rowsum = jnp.tile(rowsum, reps=(1, num_rows))
      bd_colsum = jnp.tile(colsum, reps=(num_cols, 1))
      # -----
      bd_vec = vec.reshape(size, 1)
      # -----
      def operate(mx, val):
        buf = 0
        for i in range(2):
          buf = buf + jnp.matmul(mx, bd_colsum) / jnp.power(aux, i)
        buf = jnp.matmul(bd_rowsum, buf)
        return buf * val
      # -----
      # Vectorizing would raise a shape error in the original issue.
      bd_buf = jax.vmap(operate, in_axes=(0, 0), out_axes=0)(bd_mat, bd_vec)
      # -----
      bd_buf = bd_buf / aux
      jvp = jnp.sum(bd_buf, axis=0)
      jvp = jnp.mean(jvp, axis=1, keepdims=True)
      # -----
      # JVP ends successfully, but in the original issue still raised an error
      return (output, jvp)

    jax.grad(lambda mat, aux: jnp.sum(f(mat, aux)))(mat, 0.5)  # doesn't crash

  def test_custom_jvp_unbroadcasting(self):
    """Broadcast cotangents are unbroadcast back to the scalar input shape."""
    # https://github.com/google/jax/issues/3056
    a = jnp.array([1., 1.])

    @jax.custom_jvp
    def f(x):
      return a * x

    @f.defjvp
    def f_jvp(primals, tangents):
      x, = primals
      dx, = tangents
      return a * x, a * dx

    shape = grad(lambda x: jnp.sum(f(x)))(jnp.array(1.)).shape
    self.assertEqual(shape, ())
class CustomVJPTest(jtu.JaxTestCase):
def test_basic(self):
  """The defvjp backward rule (2*cos) replaces sin's true derivative."""
  @api.custom_vjp
  def f(x):
    return jnp.sin(x)
  def f_fwd(x):
    # Save cos(x) as the residual for the backward pass.
    return f(x), jnp.cos(x)
  def f_rev(cos_x, g):
    return (2 * cos_x * g,)
  f.defvjp(f_fwd, f_rev)

  x = 3.
  self.assertAllClose(f(x), jnp.sin(x))
  self.assertAllClose(api.grad(f)(x), 2 * jnp.cos(x))
  self.assertAllClose(api.value_and_grad(f)(x),
                      (jnp.sin(x), 2 * jnp.cos(x)))
def test_invariance(self):
  """The custom VJP is used consistently under nested value_and_grad."""
  @api.custom_vjp
  def f(x):
    return jnp.cos(2 * x) / 2.
  def f_fwd(x):
    return (f(x), x)
  def f_rev(x, g):
    return (g * 3,)
  f.defvjp(f_fwd, f_rev)
  def f2(x):
    y, _ = api.value_and_grad(f)(x)
    return y
  def f3(x):
    y, _ = api.value_and_grad(f2)(x)
    return y
  x = 1.
  self.assertAllClose(f(x), f2(x), check_dtypes=False)
  self.assertAllClose(f(x), f3(x), check_dtypes=False)
  self.assertAllClose(api.grad(f)(x), api.grad(f2)(x),
                      check_dtypes=False)
  self.assertAllClose(api.grad(f)(x), api.grad(f3)(x),
                      check_dtypes=False)
def test_python_control_flow(self):
  """Python-level branching on concrete argument values works inside the
  primal, fwd, and bwd functions of a custom_vjp."""
  @api.custom_vjp
  def f(x):
    return jnp.sin(x) if x > 0 else jnp.cos(x)

  def f_fwd(x):
    # Both branches return the same thing; the branch itself exercises
    # Python control flow during the forward pass.
    if x > 0:
      return f(x), x
    else:
      return f(x), x

  def f_rev(x, g):
    return (2 * g,) if x > 0 else (3 * g,)

  f.defvjp(f_fwd, f_rev)
  x = 2.
  self.assertAllClose(f(x), jnp.sin(x))
  self.assertAllClose(f(-x), jnp.cos(-x))
  self.assertAllClose(api.value_and_grad(f)(x), (jnp.sin(x), 2.),
                      check_dtypes=False)
  self.assertAllClose(api.value_and_grad(f)(-x), (jnp.cos(-x), 3.),
                      check_dtypes=False)
def test_vmap(self):
@api.custom_vjp
def f(x):
assert jnp.ndim(x) == 0
return jnp.sin(x)
def f_fwd(x):
assert jnp.ndim(x) == 0
return f(x), jnp.cos(x)
def f_rev(cos_x, g):
return (2 * cos_x * g,)
f.defvjp(f_fwd, f_rev)
x = jnp.arange(3.)
xx = jnp.arange(6.).reshape(2, 3)
# vmap of f
self.assertAllClose(api.vmap(f)(x), jnp.sin(x))
self.assertAllClose(api.vmap(api.vmap(f))(xx), jnp.sin(xx))
# vmap of grad of f
self.assertAllClose(api.vmap(api.grad(f))(x), 2 * jnp.cos(x))
self.assertAllClose(api.vmap(api.value_and_grad(f))(x),
(jnp.sin(x), 2 * jnp.cos(x)))
self.assertAllClose(api.vmap(api.vmap(api.grad(f)))(xx), 2 * jnp.cos(xx))
self.assertAllClose(api.vmap(api.vmap(api.value_and_grad(f)))(xx),
(jnp.sin(xx), 2 * jnp.cos(xx)))
# grad of vmap of f
self.assertAllClose(api.grad(lambda x: api.vmap(f)(x).sum())(x),
2 * jnp.cos(x))
self.assertAllClose(api.grad(lambda x: api.vmap(api.vmap(f))(x).sum())(xx),
2 * jnp.cos(xx))
# vmap of grad of vmap of f
self.assertAllClose(api.vmap(api.grad(lambda x: api.vmap(f)(x).sum()))(xx),
2 * jnp.cos(xx))
def test_jit(self):
  """jit composes with custom_vjp in both orders (jit-of-grad and
  grad-of-jit), and nests."""
  @api.custom_vjp
  def f(x):
    return jnp.sin(x)

  f.defvjp(lambda x: (f(x), jnp.cos(x)),
           lambda cos_x, g: (2 * cos_x * g,))

  x = 3.
  expected_grad = 2 * jnp.cos(x)
  # jit of the primal (and nested jit).
  self.assertAllClose(api.jit(f)(x), jnp.sin(x))
  self.assertAllClose(api.jit(api.jit(f))(x), jnp.sin(x))
  # jit of grad must still use the custom bwd rule.
  self.assertAllClose(api.jit(api.grad(f))(x), expected_grad,
                      check_dtypes=False)
  # grad of jit must also use the custom bwd rule.
  self.assertAllClose(api.grad(api.jit(f))(x), expected_grad,
                      check_dtypes=False)
def test_pytrees(self):
  """custom_vjp handles dict (pytree) inputs, outputs, and residuals."""
  @api.custom_vjp
  def f(x):
    return {'b': jnp.sin(x['a'])}

  def fwd(x):
    residuals = {'r': jnp.cos(x['a'])}
    return f(x), residuals

  def bwd(residuals, g):
    return ({'a': 2 * residuals['r'] * g['b']},)

  f.defvjp(fwd, bwd)
  x = {'a': 3.}
  self.assertAllClose(f(x)['b'], jnp.sin(x['a']))
  self.assertAllClose(api.grad(lambda x: f(x)['b'])(x),
                      {'a': 2 * jnp.cos(x['a'])})
def test_jvp_error(self):
  # custom_vjp only supports reverse mode; applying forward-mode autodiff
  # (jvp) must raise a clear TypeError — directly, under vmap, and under
  # jit.
  @api.custom_vjp
  def f(x):
    return jnp.sin(x)
  def f_fwd(x):
    return f(x), jnp.cos(x)
  def f_rev(cos_x, g):
    return (2 * cos_x * g,)
  f.defvjp(f_fwd, f_rev)
  # Direct jvp.
  self.assertRaisesRegex(
      TypeError,
      r"can't apply forward-mode autodiff \(jvp\) to a custom_vjp function.",
      lambda: api.jvp(f, (3.,), (1.,)))
  # jvp through vmap.
  self.assertRaisesRegex(
      TypeError,
      r"can't apply forward-mode autodiff \(jvp\) to a custom_vjp function.",
      lambda: api.jvp(api.vmap(f), (jnp.arange(3.),), (jnp.ones(3),)))
  # jvp through jit.
  self.assertRaisesRegex(
      TypeError,
      r"can't apply forward-mode autodiff \(jvp\) to a custom_vjp function.",
      lambda: api.jvp(jit(f), (3.,), (1.,)))
def test_kwargs(self):
  # from https://github.com/google/jax/issues/1938
  # Regression test: keyword and defaulted arguments flow through
  # custom_vjp without error.  Only absence of crashes is asserted.
  @api.custom_vjp
  def my_fun(x, y, c=1.):
    return c * (x + y)
  # NOTE(review): the fwd rule computes my_fun(c, y, c) rather than
  # my_fun(x, y, c).  Since this test checks only that the calls don't
  # crash (not gradient values), this appears harmless — confirm before
  # relying on the fwd value.
  my_fun.defvjp(lambda x, y, c=1.: (my_fun(c, y, c), None),
                lambda _, g: (g, g, g))
  f = lambda x, y: jnp.square(my_fun(x, y, c=2.)).sum()
  f(10., 5.)  # doesn't crash
  api.grad(f)(10., 5.)  # doesn't crash
def test_initial_style(self):
  """custom_vjp works under initial-style control flow (lax.scan),
  including second-order differentiation."""
  @api.custom_vjp
  def f(x):
    return jnp.sin(x)

  f.defvjp(lambda x: (f(x), jnp.cos(x)),
           lambda cos_x, g: (2 * cos_x * g,))

  def foo(x):
    result, _ = lax.scan(lambda carry, _: (f(carry), None), x, None,
                         length=1)
    return result

  point = 3.
  # First derivative comes from the custom (2x) bwd rule.
  self.assertAllClose(api.grad(foo)(point), 2. * jnp.cos(point),
                      check_dtypes=False)
  # Second derivative differentiates the bwd rule itself.
  self.assertAllClose(api.grad(api.grad(foo))(point),
                      -2. * jnp.sin(point))
def test_initial_style_vmap(self):
@api.custom_vjp
def f(x):
assert jnp.ndim(x) == 0
return 3 * x
def f_fwd(x):
return f(x), jnp.cos(x)
def f_rev(cos_x, g):
return (2 * cos_x * g,)
f.defvjp(f_fwd, f_rev)
def foo(x):
out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
return out
ans = api.vmap(foo)(jnp.arange(3.))
expected = 3. * jnp.arange(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.arange(3.))
expected = 2. * jnp.cos(jnp.arange(3.))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_nondiff_arg(self):
  """A function-valued nondiff argument threads through custom_vjp and
  its fwd/bwd rules."""
  @partial(api.custom_vjp, nondiff_argnums=(0,))
  def app(f, x):
    return f(x)

  def app_fwd(f, x):
    return app(f, x), jnp.cos(x)

  def app_rev(f, cos_x, g):
    return (cos_x * g,)

  app.defvjp(app_fwd, app_rev)

  double = lambda v: 2 * v
  self.assertAllClose(app(double, 1), 2, check_dtypes=False)
  self.assertAllClose(api.value_and_grad(lambda x: app(double, x))(1.),
                      (2., jnp.cos(1.)), check_dtypes=False)
def test_closed_over_tracer(self):
# This test is similar to test_nondiff_arg_tracer except it uses lexical
# closure rather than the nondiff_argnums mechanism. We decided to disallow
# tracers in nondiff_argnums to greatly simplify bookkeeping while still
# supporting the cases for which it is necessary.
def outer(x):
@api.custom_vjp
def f(y):
return x * y
def f_fwd(y):
return f(y), jnp.cos(y)
def f_rev(cos_y, g):
return (cos_y * g,)
f.defvjp(f_fwd, f_rev)
return f
@jit
def g(x, y):
return outer(x)(y)
ans = g(2, 3.)
expected = 6.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(g, 1)(2., 3.)
expected = jnp.cos(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_closed_over_tracer2(self):
def outer(x):
@api.custom_vjp
def f(y):
return x * y
def f_fwd(y):
return f(y), jnp.cos(y)
def f_rev(cos_y, g):
return (cos_y * g,)
f.defvjp(f_fwd, f_rev)
return f
@api.vmap
def g(x):
return outer(x)(3.)
ans = g(np.arange(3.))
expected = np.arange(3.) * 3
self.assertAllClose(ans, expected, check_dtypes=False)
def test_closed_over_tracer3(self):
def outer(x):
@api.custom_vjp
def f(y):
return x * y
def f_fwd(y):
return f(y), (x, jnp.cos(y))
def f_rev(res, g):
x, cos_y = res
return (cos_y * g * x,)
f.defvjp(f_fwd, f_rev)
return api.grad(f)
@api.vmap
def g(x):
return outer(x)(3.)
ans = g(np.arange(3.))
expected = np.cos(3.) * np.arange(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_nondiff_arg_tracer_error(self):
# This is similar to the old (now skipped) test_nondiff_arg_tracer, except
# we're testing for the error message that that usage pattern now raises.
@partial(api.custom_vjp, nondiff_argnums=(0,))
def f(x, y):
return x * y
def f_fwd(x, y):
return f(x, y), jnp.cos(y)
def f_rev(x, cos_y, g):
return (cos_y * g,)
f.defvjp(f_fwd, f_rev)
@jit
def g(x, y):
return f(x, y)
with self.assertRaisesRegex(UnexpectedTracerError, "custom_vjp"):
_ = g(2, 3.)
with self.assertRaisesRegex(UnexpectedTracerError, "custom_vjp"):
_ = api.grad(g, 1)(2., 3.)
def test_vmap_axes(self):
raise unittest.SkipTest("TODO") # TODO(mattjj): write test
def test_pmap(self):
raise unittest.SkipTest("TODO") # TODO(mattjj): write test
def test_missing_vjp_rule_error(self):
@api.custom_vjp
def foo(x):
return x ** 2
self.assertRaisesRegex(
AttributeError,
r"No VJP defined for custom_vjp function foo using defvjp.",
lambda: foo(2))
self.assertRaisesRegex(
AttributeError,
r"No VJP defined for custom_vjp function foo using defvjp.",
lambda: api.grad(foo)(2.))
def test_vjp_rule_inconsistent_pytree_structures_error(self):
@api.custom_vjp
def f(x):
return x
def foo_fwd(x):
return x, None
def foo_bwd(_, g):
return (g, g)
f.defvjp(foo_fwd, foo_bwd)
f(2) # doesn't crash
self.assertRaisesRegex(
TypeError,
re.escape(
"Custom VJP rule must produce an output with the same container "
"(pytree) structure as the args tuple of the primal function, "
"and in particular must produce a tuple of length equal to the "
"number of arguments to the primal function, but got VJP output "
"structure {} for primal input structure {}.".format(
tree_util.tree_structure((1, 1)),
tree_util.tree_structure((1,)))
),
lambda: api.grad(f)(2.))
def test_vjp_bwd_returns_non_tuple_error(self):
@api.custom_vjp
def f(x):
return x
def foo_fwd(x):
return x, None
def foo_bwd(_, g):
return 2. * g # Should be a tuple
f.defvjp(foo_fwd, foo_bwd)
with self.assertRaisesRegex(TypeError, "Custom VJP rule .* must produce a tuple"):
api.grad(f)(3.)
def test_issue2511(self):
arr = jnp.ones((5, 2, 2))
foo = lambda x: api.vmap(jnp.linalg.det, (0,))(x)
api.jit(foo)(arr) # doesn't crash
def test_lowering_out_of_traces(self):
# https://github.com/google/jax/issues/2578
class F(collections.namedtuple("F", ["a"])):
def __call__(self, x):
return jax.nn.relu(self.a) * x
@jax.jit
def g(f, x):
return f(x)
jax.grad(g, argnums=(1,))(F(2.0), 0.) # doesn't crash
def test_clip_gradient(self):
# https://github.com/google/jax/issues/2784
@api.custom_vjp
def _clip_gradient(lo, hi, x):
return x # identity function when not differentiating
def clip_gradient_fwd(lo, hi, x):
return x, (lo, hi,)
def clip_gradient_bwd(res, g):
lo, hi = res
return (None, None, jnp.clip(g, lo, hi),)
_clip_gradient.defvjp(clip_gradient_fwd, clip_gradient_bwd)
def clip_gradient(x):
lo = -0.1
hi = x + 0.1
return _clip_gradient(lo, hi, x)
g = jax.grad(clip_gradient)(0.1) # doesn't crash
self.assertAllClose(g, jnp.array(0.2))
def test_nestable_vjp(self):
# Verify that https://github.com/google/jax/issues/3667 is resolved.
def f(x):
return x ** 2
@api.custom_vjp
def g(x):
return f(x)
def g_fwd(x):
y, f_vjp = api.vjp(f, x)
return y, f_vjp
def g_bwd(f_vjp, y_bar):
return f_vjp(y_bar)
g.defvjp(g_fwd, g_bwd)
# Check that VJP can be nested in simple situations. For this to pass,
# vjp has to return a PyTree.
_, g_vjp = api.vjp(g, 1.0)
y, = g_vjp(1.0)
self.assertAllClose(y, jnp.array(2.0))
# Check that VJP can be nested in complex situations. For this to pass,
# vjp can't treat the closed-over tracer x as a static argument.
@jit
def z(x):
_, g_vjp = api.vjp(g, x)
return g_vjp
y, = z(1.0)(3.0)
self.assertAllClose(y, jnp.array(6.0))
def test_initial_style_vmap_2(self):
# https://github.com/google/jax/issues/4173
x = jnp.ones((10, 3))
# Create the custom function
@api.custom_vjp
def custom_fun(x):
return x.sum()
def forward(x):
return x.sum(), (jnp.ones_like(x),)
def backward(res, g):
return g * res[0],
custom_fun.defvjp(forward, backward)
def train_fun(x):
def summed_fun(x):
return api.vmap(custom_fun)(x).sum()
return api.grad(summed_fun)(x)
def scan_body(carry, inputs):
x = carry
return carry, train_fun(x)
scan_range = jnp.arange(4)
lax.scan(scan_body, x, scan_range) # don't crash
def test_initial_style_vmap_3(self):
# This is like test_initial_style_vmap except the primal function closes
# over an array constant.
y = jnp.array([1., 2., 3.])
@api.custom_vjp
def f(x):
assert jnp.ndim(x) == 0
return 3 * x * jnp.sum(y)
def f_fwd(x):
return f(x), jnp.cos(x)
def f_rev(cos_x, g):
return (2 * cos_x * g,)
f.defvjp(f_fwd, f_rev)
def foo(x):
out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
return out
ans = api.vmap(foo)(jnp.arange(3.))
expected = 3. * jnp.arange(3.) * 6
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.arange(3.))
expected = 2. * jnp.cos(jnp.arange(3.))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_initial_style_vmap_with_collective(self):
@api.custom_vjp
def f(x):
return lax.psum(x, 'foo')
def f_fwd(x):
return lax.psum(x, 'foo'), None
def f_bwd(res, dx):
return dx
f.defvjp(f_fwd, f_bwd)
def g(x):
jaxpr = api.make_jaxpr(f)(x)
return core.eval_jaxpr(jaxpr.jaxpr, [], x)[0]
out = api.vmap(lambda _, x: g(x), axis_name='foo', in_axes=(0, None),
out_axes=None)(jnp.arange(4.), 2.)
self.assertAllClose(out, 8.)
def test_bwd_closes_over_tracer(self):
def f(y):
@jax.custom_vjp
def f(x):
return 2. * jnp.sin(x)
def fwd(x):
return f(x), ()
def bwd(_, g):
return (2. * jnp.cos(y) * g,) # capture!
f.defvjp(fwd, bwd)
return jax.grad(f)(1.)
ans = jax.jit(f)(2.)
self.assertAllClose(ans, 2. * jnp.cos(2.))
ans = jax.vmap(f)(jnp.arange(3.))
self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
ans = jax.jit(jax.vmap(f))(jnp.arange(3.))
self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
ans = jax.vmap(jax.jit(f))(jnp.arange(3.))
self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
ans = jax.grad(f)(4.)
self.assertAllClose(ans, -2. * jnp.sin(4.))
def test_fwd_closes_over_tracer(self):
def f(y):
@jax.custom_vjp
def f(x):
return 2. * jnp.sin(x)
def fwd(x):
return f(x), y
def bwd(y, g):
return (2. * jnp.cos(y) * g,) # capture!
f.defvjp(fwd, bwd)
return jax.grad(f)(1.)
ans = jax.jit(f)(2.)
self.assertAllClose(ans, 2. * jnp.cos(2.))
ans = jax.vmap(f)(jnp.arange(3.))
self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
ans = jax.jit(jax.vmap(f))(jnp.arange(3.))
self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
ans = jax.vmap(jax.jit(f))(jnp.arange(3.))
self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
ans = jax.grad(f)(4.)
self.assertAllClose(ans, -2. * jnp.sin(4.))
@unittest.skipIf(numpy_version == (1, 21, 0),
"https://github.com/numpy/numpy/issues/19305")
def test_float0(self):
@api.custom_vjp
def f(x, _):
return x
def f_fwd(x, _):
# we need a defined (non-float0) tangent to trigger the rule
return x, (2., 1)
def f_rev(*_):
return (2., 1)
f.defvjp(f_fwd, f_rev)
x = 2.
y = 3
self.assertEqual(api.grad(f, allow_int=True, argnums=(0, 1))(x, y),
(2., np.zeros(shape=(), dtype=float0)))
@unittest.skipIf(numpy_version == (1, 21, 0),
"https://github.com/numpy/numpy/issues/19305")
def test_float0_initial_style(self):
@api.custom_vjp
def f(x):
return x
def f_fwd(x):
return x, (2., x)
def f_rev(*_):
return ((2., 1),)
f.defvjp(f_fwd, f_rev)
def foo(x, y):
out, _ = lax.scan(lambda c, _: (f(c), None), (x, y), None, length=1)
return out[0]
x = 2.
y = 3
self.assertEqual(api.grad(foo, allow_int=True, argnums=(0, 1))(x, y),
(2., np.zeros(shape=(), dtype=float0)))
def test_remat(self):
@api.custom_vjp
def f(x):
return jnp.sin(x)
def f_fwd(x):
return f(x), jnp.cos(x)
def f_rev(cos_x, g):
return (2 * cos_x * g,)
f.defvjp(f_fwd, f_rev)
@api.remat
def g(x):
return f(f(x))
ans = g(2.)
expected = np.sin(np.sin(2.))
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(g)(2.)
expected = 4. * api.grad(lambda x: jnp.sin(jnp.sin(x)))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_remat_higher_order(self):
@api.custom_vjp
def f(x):
return jnp.sin(x)
def f_fwd(x):
return f(x), jnp.cos(x)
def f_rev(cos_x, g):
return (2 * cos_x * g,)
f.defvjp(f_fwd, f_rev)
def g(x):
return f(f(x))
ans = api.grad(api.grad(api.remat(g)))(2.)
expected = api.grad(api.grad(g))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.remat(api.grad(g)))(2.)
expected = api.grad(api.grad(g))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.grad(api.grad(api.remat(g))))(2.)
expected = api.grad(api.grad(api.grad(g)))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_bwd_nones(self):
@api.custom_vjp
def f(x, y):
return x * jnp.sin(y)
def f_fwd(x, y):
return f(x, y), jnp.cos(y)
def f_rev(cos, g):
return (None, 2 * cos * g)
f.defvjp(f_fwd, f_rev)
ans = api.grad(lambda x: f(x, x))(3.)
expected = 2 * jnp.cos(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_bwd_nones_vmap(self):
@api.custom_vjp
def f(x, y):
return x * jnp.sin(y)
def f_fwd(x, y):
return f(x, y), jnp.cos(y)
def f_rev(cos, g):
return (None, 2 * cos * g)
f.defvjp(f_fwd, f_rev)
ans = api.grad(lambda x: api.vmap(f)(x, x).sum())(jnp.arange(3.))
expected = 2 * jnp.cos(jnp.arange(3.))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_bwd_nones_pytree(self):
@api.custom_vjp
def f(xs, y):
x1, x2 = xs
return x1 * x2 * jnp.sin(y)
def f_fwd(xs, y):
return f(xs, y), jnp.cos(y)
def f_rev(cos, g):
return (None, 2 * cos * g)
f.defvjp(f_fwd, f_rev)
ans = api.grad(lambda x: f((x, x), x))(3.)
expected = 2 * jnp.cos(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_custom_vjp_closure_4521(self):
# https://github.com/google/jax/issues/4521
@api.custom_vjp
def g(x, y):
return None
def g_fwd(x, y):
return None, y
def g_bwd(residuals, z_bar):
assert False
g.defvjp(g_fwd, g_bwd)
def f(xs, y):
v_g = api.vmap(g, in_axes=(0, None), out_axes=None)
v_g(xs, y)
def scan_body(xs, _):
y = jnp.zeros(1)
_, vjp_f = api.vjp(f, xs, y)
vjp_f(None)
return xs, None
lax.scan(scan_body, jnp.ones(5), None, 100) # doesn't crash
def test_float0_bwd_none(self):
@api.custom_vjp
def f(i, x):
return jnp.sin(x)
def f_fwd(i, x):
return f(i, x), jnp.cos(x)
def f_rev(cos_x, g):
return (None, 2 * cos_x * g)
f.defvjp(f_fwd, f_rev)
ans = api.grad(f, 1)(jnp.array([1, 2]), 3.) # doesn't crash
expected = 2 * jnp.cos(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_custom_gradient(self):
@api.custom_gradient
def f(x):
return x ** 2, lambda g: (g * x,)
self.assertAllClose(f(3.), 9., check_dtypes=False)
self.assertAllClose(api.grad(f)(3.), 3., check_dtypes=False)
self.assertAllClose(api.grad(api.grad(f))(3.), 1., check_dtypes=False)
def test_custom_gradient_2(self):
@api.custom_gradient
def f(x, y):
return x * y, lambda g: (y, x)
self.assertAllClose(f(3., 4.), 12., check_dtypes=False)
self.assertAllClose(api.grad(f, argnums=(0, 1))(3., 4.), (4., 3.),
check_dtypes=False)
def test_custom_gradient_3(self):
@api.custom_gradient
def f(x):
vjp = lambda g: (jnp.cos(x) * jnp.array([3., 4., 5.]),)
return jnp.sum(jnp.sin(x)), vjp
self.assertAllClose(f(jnp.arange(3)), jnp.sum(jnp.sin(jnp.arange(3.))),
check_dtypes=False)
self.assertAllClose(
api.grad(f)(jnp.arange(3.)),
api.grad(lambda x: jnp.sum(jnp.sin(x)))(jnp.arange(3.)) * jnp.array([3., 4., 5.]),
check_dtypes=False)
def test_custom_gradient_can_return_singleton_value_in_vjp(self):
@api.custom_gradient
def f(x):
return x ** 2, lambda g: g * x
self.assertAllClose(f(3.), 9., check_dtypes=False)
self.assertAllClose(api.grad(f)(3.), 3., check_dtypes=False)
self.assertAllClose(api.grad(api.grad(f))(3.), 1., check_dtypes=False)
def test_closure_convert(self):
def cos_after(fn, x):
converted_fn, aux_args = api.closure_convert(fn, x)
self.assertLessEqual(len(aux_args), 1)
return _cos_after(converted_fn, x, *aux_args)
@partial(api.custom_vjp, nondiff_argnums=(0,))
def _cos_after(fn, x, *args):
return jnp.cos(fn(x, *args))
def fwd(fn, x, *args):
y = _cos_after(fn, x, *args)
return y, (x, args)
def rev(fn, res, g):
x, args = res
x_bar = 17. * x
args_bars = [42. * a for a in args]
return (x_bar, *args_bars)
_cos_after.defvjp(fwd, rev)
def dist(c, x):
return jnp.sum((x - c) ** 2.)
def solve(c, x):
def closure(x):
return dist(c, x)
return cos_after(closure, x)
c, x = 2. * jnp.ones(2), jnp.ones(2)
expected = jnp.cos(dist(c, x))
self.assertAllClose(solve(c, x), expected, check_dtypes=False)
g_c, g_x = api.grad(solve, argnums=(0, 1))(c, x)
self.assertAllClose(g_c, 42. * c, check_dtypes=False)
self.assertAllClose(g_x, 17. * x, check_dtypes=False)
def test_closure_convert_mixed_consts(self):
# Like test_closure_convert, but close over values that
# participate in AD as well as values that do not.
# See https://github.com/google/jax/issues/6415
def cos_after(fn, x):
converted_fn, aux_args = api.closure_convert(fn, x)
self.assertLessEqual(len(aux_args), 1)
return _cos_after(converted_fn, x, *aux_args)
@partial(api.custom_vjp, nondiff_argnums=(0,))
def _cos_after(fn, x, *args):
return jnp.cos(fn(x, *args))
def fwd(fn, x, *args):
y = _cos_after(fn, x, *args)
return y, (x, args)
def rev(fn, res, g):
x, args = res
x_bar = 17. * x
args_bars = [42. * a for a in args]
return (x_bar, *args_bars)
_cos_after.defvjp(fwd, rev)
def dist(c, s, x):
return jnp.sum(s * (x - c) ** 2.)
def solve(c, s, x):
def closure(x):
return dist(c, s, x)
return cos_after(closure, x)
c, s, x = 2. * jnp.ones(2), 3. * jnp.ones(2), jnp.ones(2)
expected = jnp.cos(dist(c, s, x))
self.assertAllClose(solve(c, s, x), expected, check_dtypes=False)
g_c, g_x = api.grad(solve, argnums=(0, 2))(c, s, x)
self.assertAllClose(g_c, 42. * c, check_dtypes=False)
self.assertAllClose(g_x, 17. * x, check_dtypes=False)
def test_float0_cotangents_automatically_handled(self):
@jax.custom_vjp
def f(x, y):
return x
def f_fwd(x, y):
return x, None
def f_bwd(_, zbar):
return (0., 1)
f.defvjp(f_fwd, f_bwd)
jax.jit(lambda x: jax.vjp(f, 0., x)[1](1.))(1) # doesn't crash
class CustomTransposeTest(jtu.JaxTestCase):
def transpose(self, f, x_example):
  """Return the linear transpose of ``f`` (specialized at ``x_example``)
  as a plain function of a single cotangent."""
  def transposed(cotangent):
    primal_ct, = api.linear_transpose(f, x_example)(cotangent)
    return primal_ct
  return transposed
def test_linear_call(self):
def f(x, y):
def fn(r, x): return x / r
def tp(r, t): return t / r
return x + api.linear_call(fn, tp, y, x)
def f_ref(x, y):
return x + x / y
x = jnp.ones(2) * 6.
y = jnp.ones(2) * 3.
self.assertAllClose(f(x, y), f_ref(x, y))
f1 = lambda x: f(x, y)
f1_ref = lambda x: f_ref(x, y)
self.assertAllClose(self.transpose(f1, x)(x),
self.transpose(f1_ref, x)(x))
def test_linear_call_incorrect_transpose(self):
def f(x, y):
def fn(r, x): return x / r
def tp(r, t): return t / (2. * r) # nb: not the true transpose
return x + api.linear_call(fn, tp, y, x)
def f_ref(x, y):
return x + x / y
x = jnp.ones(2) * 6.
y = jnp.ones(2) * 3.
self.assertAllClose(f(x, y), f_ref(x, y))
f1 = lambda x: f(x, y)
f1_ref = lambda x: f_ref(x, 2. * y) # nb: double the reference divisor
self.assertAllClose(self.transpose(f1, x)(x),
self.transpose(f1_ref, x)(x))
def test_linear_call_transpose_transpose_transpose(self):
def fn(r, x): return x / r
def tp(r, t): return t / (2. * r) # nb: untrue transpose
def f_(x, y):
return x + api.linear_call(fn, tp, y, x)
x = jnp.ones(2) * 6.
y = jnp.ones(2) * 3.
f = lambda x: f_(x, y)
ft = self.transpose(f, x)
ftt = self.transpose(ft, x)
fttt = self.transpose(ftt, x)
self.assertAllClose(ft(x), x + tp(y, x))
self.assertAllClose(f(x), ftt(x))
self.assertAllClose(ft(x), fttt(x))
def test_linear_call_scalar_to_vector(self):
def f(c, x):
def fn(_, x):
return [x, x]
def tp(_, t):
t1, t2 = t
return t1 + t2
return api.linear_call(fn, tp, (), c * x)
def f_ref(c, x):
return [c * x, c * x]
c, x = 2., 3.
t = [4., 5.]
self.assertAllClose(f(c, x), f_ref(c, x))
self.assertAllClose(self.transpose(partial(f, c), x)(t),
self.transpose(partial(f_ref, c), x)(t))
def test_linear_call_nested(self):
# identity function with an untrue transpose of 0
def id_(x):
def f(_, x): return x
def t(_, t): return 0.
return api.linear_call(f, t, (), x)
# identity function with an untrue transpose of 7, and where both
# forward and transpose have custom transpositions that should
# never end up invoked.
def f(x):
def f_(_, x): return id_(x)
def t_(_, t): return id_(7.)
return api.linear_call(f_, t_, (), x)
x = 5.
id_t = self.transpose(id_, x)
id_tt = self.transpose(id_t, x)
ft = self.transpose(f, x)
ftt = self.transpose(ft, x)
fttt = self.transpose(ftt, x)
self.assertAllClose(id_(x), x)
self.assertAllClose(id_t(x), 0.)
self.assertAllClose(id_tt(x), x)
self.assertAllClose(f(x), x)
self.assertAllClose(ft(x), 7.)
self.assertAllClose(ftt(x), x)
self.assertAllClose(fttt(x), 7.)
def test_linear_call_jit(self):
def f(x, y):
def fn(r, x): return x / r
def tp(r, t): return t / r
return x + api.linear_call(fn, tp, y, x)
x = jnp.ones(2) * 6.
y = jnp.ones(2) * 3.
self.assertAllClose(f(x, y), jax.jit(f)(x, y))
f1 = lambda x: f(x, y)
self.assertAllClose(self.transpose(f1, x)(x),
jax.jit(self.transpose(f1, x))(x))
class InvertibleADTest(jtu.JaxTestCase):
@jtu.ignore_warning(message="Values that an @invertible function closes")
def test_invertible_basic(self):
def f(x):
return lax.mul(lax.mul(lax.exp(x), 4.), x)
finv = jax.invertible(f)
x = jnp.ones((5,))
jaxpr = jax.make_jaxpr(lambda p, ct: jax.vjp(finv, p)[1](ct))(x, x)
# expected = """
# { lambda ; a b.
# let c = exp a
# d = mul c 4.0
# e = mul d a
# f = mul b a
# g = div e a
# h = mul b g
# i = mul f 4.0
# j = div g 4.0
# k = mul f j
# _ = reduce_sum[ axes=(0,) ] k
# _ = log j
# l = mul i j
# m = add_any h l
# in (m,) }
# """
# self.assertMultiLineStrippedEqual(expected, str(jaxpr)) # no jaxpr test
self.assertIn('div', str(jaxpr))
self.assertIn('log', str(jaxpr)) # assumes no DCE
self.assertAllClose(jax.value_and_grad(lambda x: np.sum(f(x)))(x),
jax.value_and_grad(lambda x: np.sum(finv(x)))(x),
check_dtypes=True)
def test_invertible_blocks(self):
# NB: This is the reversible ResNet block
def mk_reversible_block(f, g):
@jax.custom_ivjp
def rev_block(x1, x2):
y1 = f(x2) + x1
y2 = g(y1) + x2
return y1, y2
@rev_block.defivjp
def rev_block_ivjp(xs, ys, dys):
(y1, y2) = ys
(dy1, dy2) = dys
dgo, dx2 = dy2, dy2
go, gvjp = jax.vjp(g, y1)
dy1 += gvjp(dgo)[0]
del gvjp
x2 = y2 - go
dfo, dx1 = dy1, dy1
fo, fvjp = jax.vjp(f, x2)
dx2 += fvjp(dfo)[0]
del fvjp
x1 = y1 - fo
return (x1, x2), (dx1, dx2)
return rev_block
rev_block = mk_reversible_block(jnp.sin, jnp.cos)
def g(x1, x2):
for i in range(2):
x1, x2 = rev_block(x1, x2)
return x1, x2
def reduce(f, x1, x2):
y1, y2 = f(x1, x2)
return np.sum(y1) + np.sum(y2)
x = np.ones((1,))
# FIXME: This breaks when argnums is left as default (i.e. 0), because JVP prunes
# zero tangents from call primitives.
self.assertAllClose(jax.value_and_grad(partial(reduce, jax.invertible(g)), argnums=(0, 1))(x, x + 2),
jax.value_and_grad(partial(reduce, g), argnums=(0, 1))(x, x + 2),
check_dtypes=True)
def test_invertible_partial_diff(self):
# Check that we don't have to differentiate with respect to inputs
# of the invertible function.
def f(x, y):
return lax.mul(lax.mul(lax.exp(x), 4.), x), lax.add(y, 4.)
finv = jax.invertible(f)
o = np.ones((5,))
self.assertAllClose(jax.value_and_grad(lambda x: np.sum(f(x, o)[0]))(o),
jax.value_and_grad(lambda x: np.sum(finv(x, o)[0]))(o),
check_dtypes=True)
def test_invertible_pytree(self):
def f(x, y):
return lax.add(lax.mul(lax.exp(x[0]), x[1]), y)
finv = jax.invertible(f)
o = np.ones((5,))
self.assertAllClose(jax.value_and_grad(lambda x: np.sum(f((x, x), x)[0]))(o),
jax.value_and_grad(lambda x: np.sum(finv((x, x), x)[0]))(o),
check_dtypes=True)
class BufferDonationTest(jtu.BufferDonationTestCase):
  """Tests for donate_argnums buffer donation under pmap."""

  @jtu.skip_on_devices("cpu")  # In/out aliasing not supported on CPU.
  def test_pmap_donate_argnums_invalidates_input(self):
    # Donating argument 0 should invalidate (delete) the input buffer once
    # the pmapped computation has consumed it, while the result is correct.
    move = api.pmap(lambda x: x + x - x, donate_argnums=0)
    n = jax.local_device_count()
    x = api.pmap(lambda x: x)(jnp.ones([n]))
    y = move(x)
    self.assertDeleted(x)
    np.testing.assert_allclose(y, [1.] * n)

  def test_pmap_nested_donate_ignored(self):
    # Donation requested by a pmap nested under jit is silently ignored
    # rather than raising.
    pmap_fun = jit(lambda x: api.pmap(lambda y: y ** 2, donate_argnums=0)(x))
    a = api.pmap(lambda x: x)(jnp.array([1]))
    # NOTE(mattjj): stopped raising error here and instead just ignored
    # with self.assertRaisesRegex(ValueError, "nested.*not supported"):
    #   pmap_fun(a)
    pmap_fun(a)  # doesn't crash
class NamedCallTest(jtu.JaxTestCase):
def test_default_name(self):
@api.named_call
def my_test_function(x):
return x**2
@jax.jit
def f(x):
return my_test_function(x)
c = jax.xla_computation(f)(2)
self.assertIn("my_test_function", c.as_hlo_text())
def test_non_jaxtype_arg(self):
# For the test to fail without the invalid JaxType filter we need to pass
# in a valid JaxType that forces the invalid Jaxtype to be raised to an
# abstract value.
def f(not_a_jaxtype, a_jaxtype):
# then Jax needs to try and evaluate the abstractified non-JaxType
if not_a_jaxtype:
return a_jaxtype
return 0
f = api.named_call(f, name="test")
out = jax.jit(f, static_argnums=(0,))("not a Jaxtype", 1)
self.assertEqual(out, 1)
@parameterized.parameters(jax.jit, jax.grad, jax.vmap, jax.remat)
def test_jax_transforms(self, transform):
f = jnp.sum
x = jnp.array([1.])
unnamed_out = transform(f)(x)
named_out = transform(api.named_call(f, name="test"))(x)
self.assertEqual(unnamed_out, named_out)
def test_static_argnums(self):
f = api.named_call(lambda x, y: y if x else None, name="test")
f = jax.jit(f, static_argnums=(0,))
out = f(True, 5)
self.assertEqual(out, 5)
def test_partial_eval(self):
f = api.named_call(lambda x, y: y if x else None, name="test")
f = jax.jit(functools.partial(f, True))
out = f(5)
self.assertEqual(out, 5)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_jit_type={}_func={}".format(jit_type, func),
"jit_type": jit_type, "func": func}
for func in ['identity', 'asarray', 'device_put']
for jit_type in [None, "python", "cpp"]
if not (jit_type is None and func == 'identity')))
def test_integer_overflow(self, jit_type, func):
funcdict = {
'identity': lambda x: x,
'asarray': jnp.asarray,
'device_put': api.device_put,
}
jit = {
'python': api._python_jit,
'cpp': api._cpp_jit,
None: lambda x: x,
}
f = jit[jit_type](funcdict[func])
int_dtype = dtypes.canonicalize_dtype(jnp.int_)
int_max = np.iinfo(int_dtype).max
int_min = np.iinfo(int_dtype).min
self.assertEqual(f(int_max).dtype, int_dtype)
self.assertEqual(f(int_min).dtype, int_dtype)
self.assertRaises(OverflowError, f, int_max + 1)
self.assertRaises(OverflowError, f, int_min - 1)
class BackendsTest(jtu.JaxTestCase):
  """Tests for backend-selection warning behavior on startup."""

  @unittest.skipIf(not sys.executable, "test requires sys.executable")
  @jtu.skip_on_devices("gpu", "tpu")
  def test_cpu_warning_suppression(self):
    # The "No GPU/TPU found" warning should appear on a plain CPU-only
    # startup, but be suppressed when the user explicitly selects the cpu
    # platform.  Fresh subprocess interpreters keep import-time state clean.
    warning_expected = (
        "import jax; "
        "jax.numpy.arange(10)")
    warning_not_expected = (
        "import jax; "
        "jax.config.update('jax_platform_name', 'cpu'); "
        "jax.numpy.arange(10)")
    result = subprocess.run([sys.executable, '-c', warning_expected],
                            check=True, capture_output=True)
    assert "No GPU/TPU found" in result.stderr.decode()
    result = subprocess.run([sys.executable, '-c', warning_not_expected],
                            check=True, capture_output=True)
    assert "No GPU/TPU found" not in result.stderr.decode()
# Allow running this test file directly (outside the usual test runner).
if __name__ == '__main__':
  absltest.main(testLoader=jtu.JaxTestLoader())
# (non-Python residue from data extraction: "| 31.378099 | 127 | 0.607553" —
# table columns, commented out so the text no longer reads as broken code)
import collections
import collections.abc
from contextlib import contextmanager
import copy
import enum
from functools import partial
import operator
import re
import subprocess
import sys
import types
import unittest
import warnings
import weakref
import functools
import itertools as it
import operator as op
from absl import logging
from absl.testing import absltest, parameterized
import numpy as np
import concurrent.futures
import jax
import jax.numpy as jnp
from jax import float0, jit, grad, device_put, jacfwd, jacrev, hessian
from jax import core, dtypes, lax
from jax._src import api
from jax.core import Primitive
from jax.errors import UnexpectedTracerError
from jax.interpreters import ad
from jax.interpreters import xla
from jax.interpreters import pxla
from jax.interpreters.sharded_jit import PartitionSpec as P
import jax._src.lib
from jax._src.lib import xla_client
from jax._src import test_util as jtu
from jax import tree_util
from jax import linear_util as lu
import jax._src.util
from jax._src.ad_checkpoint import saved_residuals
from jax.ad_checkpoint import checkpoint as new_checkpoint, checkpoint_name
from jax.config import config
# Parse JAX's command-line flags via absl before any tests run.
config.parse_flags_with_absl()
FLAGS = config.FLAGS
# Version tuples used by version-dependent test skips below.
python_version = (sys.version_info[0], sys.version_info[1])
numpy_version = tuple(map(int, np.__version__.split('.')[:3]))
class CPPJitTest(jtu.BufferDonationTestCase):
@property
def jit(self):
return api._cpp_jit
@unittest.skipIf(jax._src.lib._xla_extension_version < 40,
"Test requires jaxlib 0.1.73")
def test_jit_repr(self):
def my_function():
return
jitted = jit(my_function)
self.assertEqual(repr(jitted), f"<CompiledFunction of {repr(my_function)}>")
@unittest.skipIf(jax._src.lib._xla_extension_version < 40,
"Test requires jaxlib 0.1.73")
def test_jit_repr_errors(self):
class Callable:
def __call__(self): pass
def __repr__(self):
raise ValueError("invalid repr")
jitted = jit(Callable())
self.assertEqual(repr(jitted), "<CompiledFunction>")
del jitted.__wrapped__
self.assertEqual(repr(jitted), "<CompiledFunction>")
def test_jit_of_noncallable(self):
  """Passing a non-callable to jit raises a descriptive TypeError."""
  with self.assertRaisesRegex(TypeError, "Expected a callable value.*"):
    self.jit(3)
  def test_jit_of_generator(self):
    """jit rejects generator functions with a clear TypeError."""
    def gen(x):
      yield x
    self.assertRaisesRegex(TypeError,
                           "Expected a function, got a generator function.*",
                           lambda: self.jit(gen))
  @parameterized.parameters([
      (1, 2, 3, 4, 5),
      (
          np.asarray(1, np.int32),
          np.asarray(2, np.int32),
          np.asarray(3, np.int32),
          np.asarray(4, np.int32),
          np.asarray(5, np.int32),
      ),
  ])
  def test_jit_static_args(self, one, two, three, four, five):
    """Changing a static argument retraces; changing a traced one does not.

    `side` records one entry per trace of `f`, so its length counts
    compilations.
    """
    side = []
    def f(x, y, z, flag=False, flag2=False):
      del flag2
      assert flag
      side.append(None)
      return 100 * x + 10 * y + z
    f1 = self.jit(f, static_argnums=(3, 4))
    assert f1(one, two, three, True, False) == 123
    assert len(side) == 1
    # Identical call: cache hit.
    assert f1(one, two, three, True, False) == 123
    assert len(side) == 1
    # Different traced values, same statics: still a cache hit.
    assert f1(two, one, three, True, False) == 213
    assert len(side) == 1
    # Different static value (flag2): forces a retrace.
    assert f1(two, one, three, True, True) == 213
    assert len(side) == 2
    side[:] = []
    # With x and z also static, changing x or z retraces; changing y doesn't.
    f2 = self.jit(f, static_argnums=(0, 2, 3, 4))
    assert f2(1, 2, 3, True, False) == 123
    assert len(side) == 1
    assert f2(1, 3, 3, True, False) == 133
    assert len(side) == 1
    assert f2(2, 2, 3, True, False) == 223
    assert len(side) == 2
    assert f2(2, 4, 3, True, False) == 243
    assert len(side) == 2
    assert f2(2, 4, 3, True, True) == 243
    assert len(side) == 3
    assert f2(2, 5, 3, True, True) == 253
    assert len(side) == 3
  def test_static_args_equality(self):
    """Static args are compared by __eq__/__hash__, not identity, for cache hits."""
    class A():
      def __hash__(self):
        return 1
      def __eq__(self, other):
        return isinstance(other, A)
    side = []
    def f(x, static_arg):
      del static_arg
      side.append(None)
      return x * 100
    f1 = self.jit(f, static_argnums=(1,))
    self.assertEqual(f1(1, A()), 100)
    self.assertLen(side, 1)
    # A distinct but __eq__-equal instance must hit the cache (no retrace).
    self.assertEqual(f1(1, A()), 100)
    self.assertLen(side, 1)
    if self.jit == api._cpp_jit:
      # The C++ path exposes its cache; confirm only one entry exists.
      f1_cpp = getattr(f1, "_cpp_jitted_f", f1)
      self.assertEqual(f1_cpp._cache_size(), 1)
  @parameterized.parameters([
      (1, 2, 3),
      (
          np.asarray(1, np.int32),
          np.asarray(2, np.int32),
          np.asarray(3, np.int32),
      ),
  ])
  def test_jit_kwargs(self, one, two, three):
    """Positional vs keyword calling convention is part of the cache key."""
    side = []
    if hasattr(self.jit, "cache_clear"):
      self.jit.cache_clear()
    def f(x, y, z):
      side.append(None)
      return 100 * x + 10 * y + z
    f = self.jit(f)
    assert f(one, two, three) == 123
    assert len(side) == 1
    assert f(one, two, three) == 123
    assert len(side) == 1
    # Switching z to a keyword argument retraces once...
    assert f(one, two, z=three) == 123
    assert len(side) == 2
    # ...and is then cached for further keyword calls.
    assert f(one, two, z=three) == 123
    assert len(side) == 2
    f(one, two, z=np.zeros(3))
    if config.x64_enabled:
      # In the above call, three is of a new type (int64), thus it should
      # trigger a new compilation.
      # NOTE(review): the new argument is actually a float64 array of shape
      # (3,), not an int64 scalar — this comment looks stale; confirm.
      assert len(side) == 3
  def test_jit_device(self):
    """The `device` argument pins the jitted output to that device."""
    device = jax.devices()[-1]
    x = self.jit(lambda x: x, device=device)(3.)
    self.assertIsInstance(x, xla.DeviceArray)
    self.assertEqual(x.device_buffer.device(), device)
  def test_complex_support(self):
    """jit handles complex scalar inputs and outputs."""
    self.assertEqual(self.jit(lambda x: x + 1)(1 + 1j), 2 + 1j)
  def test_jit_with_many_args_works(self):
    """jit handles a pytree argument containing many (500) leaves."""
    @self.jit
    def f(args_list):
      return sum(args_list)
    self.assertEqual(f(list(range(500))), sum(range(500)))
  # Tests of jit's donate_argnums (input buffer donation) behavior.
def test_jit_donate_argnums_warning_raised(self):
x = jnp.array([1.0, 2.0], jnp.float32)
y = jnp.array([1, 2], jnp.int32)
f = self.jit(lambda x, y: x.sum() + y.sum(), donate_argnums=(0, 1))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
f(x, y)
self.assertLen(w, 1)
self.assertTrue(issubclass(w[-1].category, UserWarning))
self.assertIn(
"Some donated buffers were not usable: f32[2]{0}, s32[2]{0}",
str(w[-1].message))
  @jtu.skip_on_devices("cpu")  # In/out aliasing not supported on CPU.
  def test_jit_donate_argnums_invalidates_input(self):
    """A donated argument's device buffer is deleted after the call."""
    # We can't just use `lambda x: x` because JAX simplifies this away to an
    # empty computation with nothing to donate into.
    # NOTE(review): second comment line reconstructs a truncated original —
    # confirm wording against upstream.
    move = self.jit(lambda x: x + x - x, donate_argnums=0)
    x = jnp.ones([])
    y = move(x)
    self.assertDeleted(x)
    self.assertEqual(y, 1.)
@jtu.skip_on_devices("cpu")
def test_jit_donate_argnums_static_argnums(self):
jit_fun = self.jit(
lambda a, b, c, d: ((a + b + c), (a + b + d)),
static_argnums=(0, 1),
donate_argnums=(2, 3))
c = jax.device_put(jnp.array([1., 1.]))
d = jax.device_put(jnp.array([1., 1., 1.]))
e, f = jit_fun(1, 2, c, d)
np.testing.assert_allclose(e, jnp.array([4., 4.]))
np.testing.assert_allclose(f, jnp.array([4., 4., 4.]))
self.assertDeleted(c)
self.assertDeleted(d)
  @jtu.skip_on_devices("cpu")
  def test_jnp_array_copy(self):
    """jnp.array(x, copy=True) makes a buffer independent of x, so the copy
    survives donation of the original."""
    @partial(self.jit, donate_argnums=(0,))
    def _test(array):
      return array.at[0].set(77)
    x = jnp.asarray([0, 1])
    x_copy = jnp.array(x, copy=True)
    with warnings.catch_warnings():
      warnings.simplefilter("ignore")
      _test(x)  # donation invalidates x's buffer
    # NOTE(review): this only prints the copy (an exception here would fail
    # the test); an explicit assertion that x_copy is still readable may
    # have been intended — confirm.
    print(x_copy)
  def test_jit_global_cache(self):
    """Two separate jit wrappings of the same function share the trace cache."""
    def f(x):
      assert python_should_be_executing
      return x
    python_should_be_executing = True
    self.jit(f)(2)
    python_should_be_executing = False
    # A fresh self.jit(f) wrapper must hit the cache: f's body (whose assert
    # would now fail) is not re-executed.
    self.jit(f)(3)
def test_jit_shallow_copy(self):
def f(x):
return copy.copy(x)
self.jit(f)(1)
def test_jit_deep_copy(self):
def f(x):
return copy.deepcopy(x)
self.jit(f)(1)
def test_disable_jit(self):
effects = []
@self.jit
def f(x):
effects.append(1)
return x
with api.disable_jit():
f(2)
f(2)
assert len(effects) == 2
f(2)
f(2)
assert len(effects) == 3
def test_static_argnum_on_method(self):
class A:
@functools.partial(self.jit, static_argnums=(0,))
def my_func_jit(self, x):
return x+2
A().my_func_jit(3)
def test_static_argnum_on_static_method_is_not_supported(self):
with self.assertRaisesRegex(TypeError, "Expected a callable value"):
class A:
@functools.partial(self.jit, static_argnums=(0,))
@classmethod
def my_classmethod_jit(cls, x):
return x+2
def test_staticmethod_is_not_supported(self):
with self.assertRaisesRegex(TypeError,
"staticmethod arguments are not supported"):
class A:
@functools.partial(self.jit)
@staticmethod
def my_staticmethod_jit(x):
return x + 2
def test_concurrent_jit(self):
@self.jit
def f(x):
return x + x - 3.
xs = [np.random.randn(i) for i in range(10)]
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [executor.submit(partial(f, x)) for x in xs]
ys = [f.result() for f in futures]
for x, y in zip(xs, ys):
self.assertAllClose(x * 2 - 3., y)
def test_trivial_computations(self):
x = jnp.array([1, 2, 3])
y = self.jit(lambda x: x)(x)
self.assertIs(x, y)
z1, z2 = self.jit(lambda x: (x, x))(x)
self.assertIs(z1, z2)
x1, x2 = jnp.array([1, 2]), jnp.array([2, 3])
z1, z2, z3 = self.jit(lambda x, y: (y, 1, x))(x1, x2)
self.assertIs(z1, x2)
self.assertIs(z3, x1)
self.assertEqual(z2, 1)
def test_trivial_computations_with_tokens(self):
@self.jit
def noop(arr, token):
return arr, token
arr = jax.numpy.ones(10)
token = jax.lax.create_token()
self.assertEqual(token, noop(arr, token)[1])
def test_jit_bad_input(self):
def f(x):
return x
self.assertRaisesRegex(
TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
lambda: self.jit(f)("foo"))
def test_jit_on_all_devices(self):
# Verifies we can run the same computation on every device present, even
# if they are, for example, different models of GPU.
data = np.random.rand(1000).astype(np.float32)
f = self.jit(jnp.negative)
for device in jax.local_devices():
x = device_put(data, device=device)
np.testing.assert_array_equal(-data, f(x))
def test_jit_nested_donate_ignored(self):
jit_fun = self.jit(lambda x: self.jit(lambda y: y**2, donate_argnums=0)(x))
a = jax.device_put(jnp.array(1))
# NOTE(mattjj): stopped raising error here and instead just ignored
# with self.assertRaisesRegex(ValueError, "nested.*not supported"):
# jit_fun(a)
jit_fun(a) # doesn't crash
  def test_jit_reference_dropping(self):
    """A jitted wrapper keeps its closed-over data alive exactly as long as
    the wrapper itself is referenced."""
    x = jnp.ones(10)
    f = (lambda x: lambda: x)(x)  # f closes over x
    g = self.jit(f)
    x = weakref.ref(x)      # no more strong ref to x in this scope
    assert x() is not None  # x is still around
    f()                     # f runs
    g()                     # g runs
    g()                     # g runs a second time
    del f                   # delete the raw callable
    assert x() is not None  # x is still around
    g()                     # g still runs
    del g                   # no more references to x
    assert x() is None      # x is gone
def test_jit_raises_on_first_invocation_on_non_hashable_static_argnum(self):
if self.jit != api._python_jit:
raise unittest.SkipTest("this test only applies to _python_jit")
f = lambda x, y: x + 3
jitted_f = self.jit(f, static_argnums=(1,))
msg = ("Non-hashable static arguments are not supported, as this can lead "
"to unexpected cache-misses. Static argument (index 1) of type "
"<class 'numpy.ndarray'> for function <lambda> is non-hashable.")
with self.assertRaisesRegex(ValueError, re.escape(msg)):
jitted_f(1, np.asarray(1))
def test_cpp_jit_raises_on_non_hashable_static_argnum(self):
if self.jit != api._cpp_jit:
raise unittest.SkipTest("this test only applies to _cpp_jit")
f = lambda x, y: x + 3
jitted_f = api._cpp_jit(f, static_argnums=[1])
jitted_f(1, 1)
msg = ("Non-hashable static arguments are not supported. An error occured "
".*while trying to hash an object of type "
"<class 'numpy\\.ndarray'>, 1. The error was:\nTypeError: "
"unhashable type: 'numpy\\.ndarray'")
with self.assertRaisesRegex(ValueError, msg):
jitted_f(1, np.asarray(1))
class HashableWithoutEq:
def __hash__(self):
return 1
def __eq__(self, other):
raise NotImplementedError(
"A Python error is as is, without stack trace")
with self.assertRaisesRegex(
ValueError,
re.escape("static arguments should be comparable using __eq__")):
jitted_f(1, HashableWithoutEq())
def test_cpp_jitted_function_returns_PyBuffer(self):
if self.jit != api._cpp_jit:
raise unittest.SkipTest("this test only applies to _cpp_jit")
jitted_f = self.jit(lambda a: a + 1)
jitted_f(1)
self.assertIsInstance(jitted_f(2), xla._CppDeviceArray)
@jtu.skip_on_devices("cpu")
def test_explicit_backend(self):
f = lambda x: x + 1
jitted_f = jit(f, backend=jtu.device_under_test())
jitted_f_cpu = jit(f, backend="cpu")
result = jitted_f(1.)
result_cpu = jitted_f_cpu(1.)
self.assertEqual(result.device_buffer.platform(), jtu.device_under_test())
self.assertEqual(result_cpu.device_buffer.platform(), "cpu")
@jtu.skip_on_devices("cpu")
def test_device_to_device_copy_between_backends(self):
# b/186624243
f = lambda x: x + 1
jitted_f = jit(f, backend=jtu.device_under_test())
jitted_f_cpu = jit(f, backend="cpu")
x = np.arange(30).reshape(1, 10, 3)
result = jitted_f(x)
result_cpu = jitted_f_cpu(result)
result_2 = jitted_f(result_cpu)
result_cpu_2 = jitted_f_cpu(result_2)
self.assertAllClose(result_2, x + 3)
self.assertAllClose(result_cpu_2, x + 4)
@jtu.skip_on_devices("cpu")
def test_mismatched_nested_backends(self):
@partial(jit, backend=jtu.device_under_test())
def f(x):
return jit(lambda x: x + 1, backend="cpu")(x)
with self.assertRaisesRegex(
ValueError,
f"Outer-jit backend specification {jtu.device_under_test()} must match "
f"explicit inner-jit backend specification cpu."):
f(1.)
def test_omnistaging(self):
# See https://github.com/google/jax/issues/5206
# TODO(frostig): remove once we always enable_custom_prng
def _prng_key_as_array(key):
return key.unsafe_raw_array() if config.jax_enable_custom_prng else key
# TODO(frostig): remove once we always enable_custom_prng
def _array_as_prng_key(arr):
arr = np.array(arr, dtype=np.uint32)
if config.jax_enable_custom_prng:
return jax._src.prng.PRNGKeyArray(
jax._src.prng.threefry_prng_impl, arr)
else:
return arr
key_list = [None]
def init():
key, subkey = jax.random.split(key_list[0])
key_list[0] = key
return jax.random.normal(subkey, ())
key_list[0] = _array_as_prng_key([2384771982, 3928867769])
init()
self.jit(init)()
self.assertIsInstance(_prng_key_as_array(key_list[0]), core.Tracer)
def test_jit_wrapped_attributes(self):
def f(x: int) -> int:
return x + 1
f.some_value = 4
jf = self.jit(f)
for attr in ["doc", "name", "module", "qualname", "annotations"]:
self.assertEqual(
{attr: getattr(f, f"__{attr}__")},
{attr: getattr(jf, f"__{attr}__")})
self.assertEqual(f.some_value, jf.some_value)
def test_jit_python_builtin(self):
x = jnp.array([1, 2])
expected = x + 1
jit_add = self.jit(operator.add, static_argnums=(1,))
actual = jit_add(x, 1)
self.assertArraysEqual(expected, actual)
def test__infer_argnums_and_argnames(self):
def f(x, y=1):
pass
argnums, argnames = api._infer_argnums_and_argnames(
f, argnums=None, argnames=None)
assert argnums == ()
assert argnames == ()
argnums, argnames = api._infer_argnums_and_argnames(
f, argnums=0, argnames=None)
assert argnums == (0,)
assert argnames == ('x',)
argnums, argnames = api._infer_argnums_and_argnames(
f, argnums=None, argnames='y')
assert argnums == (1,)
assert argnames == ('y',)
argnums, argnames = api._infer_argnums_and_argnames(
f, argnums=0, argnames='y') # no validation
assert argnums == (0,)
assert argnames == ('y',)
def g(x, y, *args):
pass
argnums, argnames = api._infer_argnums_and_argnames(
g, argnums=(1, 2), argnames=None)
assert argnums == (1, 2)
assert argnames == ('y',)
def h(x, y, **kwargs):
pass
argnums, argnames = api._infer_argnums_and_argnames(
h, argnums=None, argnames=('foo', 'bar'))
assert argnums == ()
assert argnames == ('foo', 'bar')
def test_jit_with_static_argnames(self):
def f(x):
assert x == 'foo'
return 1
f_nums = self.jit(f, static_argnums=0)
assert f_nums('foo') == 1
assert f_nums(x='foo') == 1
f_names = self.jit(f, static_argnames='x')
assert f_names('foo') == 1
assert f_names(x='foo') == 1
def test_new_static_argnum_on_keyword_arguments(self):
f = self.jit(lambda x: x, static_argnums=0)
y = f(x=4)
assert y == 4
def test_new_static_argnum_with_default_arguments(self):
f = self.jit(lambda x=4: x, static_argnums=0)
y = f()
assert y == 4
def test_jit_with_mismatched_static_argnames(self):
x_is_tracer, y_is_tracer = False, False
def f(x, y):
assert isinstance(x, core.Tracer) == x_is_tracer
assert isinstance(y, core.Tracer) == y_is_tracer
return 1
# If both static_argnums and static_argnames are provided, they are allowed
# to disagree and `jit` will respect the user's choices.
f_nums = self.jit(f, static_argnums=1, static_argnames=())
x_is_tracer, y_is_tracer = True, False
assert f_nums(2, 'foo') == 1
x_is_tracer, y_is_tracer = True, True
assert f_nums(1, y=2) == 1
f_names = self.jit(f, static_argnums=(), static_argnames='y')
x_is_tracer, y_is_tracer = True, True
assert f_names(2, 3) == 1
x_is_tracer, y_is_tracer = True, False
assert f_names(1, y='foo') == 1
f_mixed = self.jit(f, static_argnums=(1,), static_argnames='x')
x_is_tracer, y_is_tracer = True, False
assert f_mixed(2, 'foo') == 1
x_is_tracer, y_is_tracer = True, True
assert f_mixed(1, y=3) == 1
x_is_tracer, y_is_tracer = False, True
assert f_mixed(x='foo', y=3) == 1
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_num_args={}".format(num_args),
"num_args": num_args}
for num_args in [2, 3, 4]))
def test_jit_with_pruned_args(self, num_args):
def f(*args):
used = np.array(2)
return args[1] + used
f_pruned = self.jit(f)
args = range(num_args)
with jtu.count_device_put() as count:
np.testing.assert_allclose(f_pruned(*args), 3)
self.assertEqual(count[0], 1)
  @unittest.skipIf(jax._src.lib._xla_extension_version <= 36,
                   "Test requires jaxlib 0.1.71")
  def testBuffersAreFreedPromptly(self):
    """Intermediate device buffers are freed promptly as references drop.

    Chains 1000 jit calls, weak-referencing each intermediate; at the end
    nearly all of them must already be dead.
    """
    @self.jit
    def f(x):
      return x + 1
    refs = []
    x = np.ones((10000,), np.float32)
    for step in range(1000):
      x = f(x)
      refs.append(weakref.ref(x))
    x = np.asarray(x)
    # We don't call block_until_ready() here because it would force a
    # garbage collection.
    # NOTE(review): comment reconstructed from a truncated original; confirm.
    live_refs = len([ref for ref in refs if ref() is not None])
    self.assertLessEqual(live_refs, 100)
def test_jit_lower_compile(self):
def f(x):
return jnp.sqrt(x ** 2) + 1.
f_jit = self.jit(f)
f_low = f_jit.lower(1.)
f_exe = f_low.compile()
self.assertAllClose(f_exe(1.), 2.)
def test_jit_lower_compile_in_tree_mismatch(self):
def f(x):
return jnp.sqrt(x ** 2) + 1.
f_jit = self.jit(f)
f_low = f_jit.lower(1.)
f_exe = f_low.compile()
self.assertRaisesRegex(
TypeError, "function compiled for .*, called with .*",
lambda: f_exe([1.]))
def test_jit_lower_compile_trivial(self):
def f(x): return x
out = self.jit(f).lower(1.).compile()(4.)
self.assertAllClose(out, 4.)
def test_jit_lower_compile_trivial_in_tree_mismatch(self):
def f(x): return x
f_exe = self.jit(f).lower(1.).compile()
self.assertRaisesRegex(
TypeError, "function compiled for .*, called with .*",
lambda: f_exe([4.]))
def test_jit_lower_compile_arg_type_mismatch(self):
def f(x):
return jnp.sqrt(x ** 2) + 1.
x = jnp.array(1, dtype=int)
x_f32 = x.astype(jnp.float32)
x_i32 = x.astype(jnp.int32)
f_exe = self.jit(f).lower(x_f32).compile()
self.assertRaisesRegex(
TypeError,
"Computation compiled for input types:\n.*float32.*\n"
"called with:\n.*int32.*",
lambda: f_exe(x_i32))
def test_jit_lower_compile_multi_arg(self):
def f(*args):
x, *_ = args
return jnp.sqrt(x ** 2) + 1.
f_exe = self.jit(f).lower(1., 1.).compile()
self.assertAllClose(f_exe(1., 1.), 2.)
def test_jit_lower_compile_trivial_multi_arg(self):
def f(*args):
x, *_ = args
return x
f_exe = self.jit(f).lower(1., 1.).compile()
self.assertAllClose(f_exe(1., 1.), 1.)
class PythonJitTest(CPPJitTest):
  """Re-runs the entire CPPJitTest suite against the pure-Python jit path."""
  @property
  def jit(self):
    """The jit implementation under test: the Python fallback path."""
    return api._python_jit
class APITest(jtu.JaxTestCase):
  def test_grad_bad_input(self):
    """grad rejects a non-JAX-typed (string) argument with a TypeError."""
    def f(x):
      return x
    self.assertRaisesRegex(
        TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
        lambda: grad(f)("foo"))
def test_grad_argnums(self):
def f(x, y, z, flag=False):
assert flag
return 1.0 * x + 2.0 * y + 3.0 * z
assert grad(f)(1.0, 1.0, 1.0, flag=True) == 1.0
assert grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == 2.0
assert grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (3.0, 1.0)
def test_value_and_grad_argnums(self):
def f(x, y, z, flag=False):
assert flag
return 1.0 * x + 2.0 * y + 3.0 * z
y = f(1.0, 1.0, 1.0, flag=True)
assert api.value_and_grad(f)(1.0, 1.0, 1.0, flag=True) == (y, 1.0)
assert api.value_and_grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == (y, 2.0)
assert api.value_and_grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (y, (3.0, 1.0))
  def test_grad_of_jit(self):
    """grad of a jitted function reuses the jit trace cache across calls."""
    side = []
    @jit
    def f(x):
      side.append(None)
      return x * x
    assert grad(f)(1.0) == 2.0
    assert len(side) == 1
    # Second differentiation at a new point must not retrace f.
    assert grad(f)(2.0) == 4.0
    assert len(side) == 1
  def test_jit_of_grad(self):
    """jit(grad(f)) traces once and reuses the compiled gradient."""
    side = []
    @jit
    def f(x):
      side.append(None)
      return x * x
    g = jit(grad(f))
    assert g(1.0) == 2.0
    assert len(side) == 1
    # Second call at a new point: cache hit, no retrace of f.
    assert g(2.0) == 4.0
    assert len(side) == 1
def test_bad_input(self):
def f(x):
return x
self.assertRaisesRegex(
TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
lambda: grad(f)("foo"))
self.assertRaisesRegex(
TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
lambda: jit(f)("foo"))
  def test_grad_tuple_output(self):
    """grad requires a scalar output; a tuple output is rejected."""
    jtu.check_raises(lambda: grad(lambda x: (x,x))(1.0), TypeError,
                     "Gradient only defined for scalar-output functions. ")
  def test_grad_unit_output(self):
    """grad rejects an empty-tuple (unit) output."""
    jtu.check_raises(lambda: grad(lambda x: ())(np.zeros(3)), TypeError,
                     "Gradient only defined for scalar-output functions. ")
  def test_grad_nonscalar_output(self):
    """grad rejects an array-valued (non-scalar) output."""
    jtu.check_raises(lambda: grad(lambda x: x)(np.zeros(3)), TypeError,
                     "Gradient only defined for scalar-output functions. ")
def test_unwrapped_numpy(self):
def f(x):
return np.exp(x)
with self.assertRaisesRegex(Exception, "The numpy.ndarray conversion .*"):
grad(f)(np.zeros(3))
def test_binop_mismatch(self):
def f(x, y):
return x + y
jtu.check_raises(
lambda: f(jnp.zeros(3), jnp.zeros(4)),
TypeError,
"add got incompatible shapes for broadcasting: (3,), (4,).")
jtu.check_raises(
lambda: grad(f)(np.zeros(3), np.zeros(4)),
TypeError,
"add got incompatible shapes for broadcasting: (3,), (4,).")
def test_dot_mismatch(self):
def f(x, y):
return jnp.dot(x, y)
self.assertRaisesRegex(
TypeError, "Incompatible shapes for dot: got \\(3L?,\\) and \\(4L?,\\).",
lambda: grad(f)(np.zeros(3), np.zeros(4)))
def test_abstract_error_message(self):
for castfun in [float, complex, int]:
def f(x):
return castfun(x)
self.assertRaisesRegex(
TypeError,
f"[Tt]ry using `x.astype\\({castfun.__name__}\\)`",
lambda: jit(f)(1.0))
def test_switch_value_jit(self):
def f(x):
y = x > 0
if y:
return x
else:
return -x
assert grad(f)(1.0) == 1.0
assert grad(f)(-1.0) == -1.0
with self.assertRaisesRegex(core.ConcretizationTypeError,
"Abstract tracer value"):
jit(f)(1)
def test_list_index_err(self):
L = [1, 2, 3]
def f(n):
return L[n]
assert jit(f, static_argnums=(0,))(0) == L[0]
self.assertRaisesRegex(
TypeError,
r"The __index__\(\) method was called on the JAX Tracer object.*",
lambda: jit(f)(0))
def test_range_err(self):
def f(x, n):
for i in range(n):
x = x + i
return x
assert jit(f, static_argnums=(1,))(0, 5) == 10
self.assertRaisesRegex(
TypeError,
r"The __index__\(\) method was called on the JAX Tracer object.*",
lambda: jit(f)(0, 5))
def test_cast_int(self):
f = lambda x: int(x)
self.assertRaisesRegex(
TypeError,
"('(?:JaxprTracer|DynamicJaxprTracer)' object cannot be interpreted as an integer"
"|Abstract tracer value encountered where concrete value is expected.*)", lambda: jit(f)(0))
def test_casts(self):
for castfun in [hex, oct]:
f = lambda x: castfun(x)
self.assertRaisesRegex(
TypeError,
r"The __index__\(\) method was called on the JAX Tracer object.*", lambda: jit(f)(0))
def test_unimplemented_interpreter_rules(self):
foo_p = Primitive('foo')
def foo(x):
return foo_p.bind(x)
jtu.check_raises(lambda: foo(1.0), NotImplementedError,
"Evaluation rule for 'foo' not implemented")
jtu.check_raises(lambda: jit(foo)(1.0), NotImplementedError,
"Abstract evaluation for 'foo' not implemented")
jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,
"Differentiation rule for 'foo' not implemented")
foo_p.def_abstract_eval(lambda x: x)
jtu.check_raises(lambda: jit(foo)(1.0), NotImplementedError,
"XLA translation rule for primitive 'foo' not found")
foo_p.def_impl(lambda x: x)
ad.defjvp(foo_p, lambda g, x: foo(g))
jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,
"Transpose rule (for reverse-mode differentiation) for 'foo' not implemented")
def test_is_subclass(self):
self.assertTrue(issubclass(xla.DeviceArray, jnp.ndarray))
self.assertTrue(issubclass(xla._CppDeviceArray, jnp.ndarray))
self.assertTrue(issubclass(pxla.ShardedDeviceArray, jnp.ndarray))
self.assertTrue(issubclass(pxla._ShardedDeviceArray, jnp.ndarray))
self.assertFalse(issubclass(np.ndarray, jnp.ndarray))
self.assertFalse(issubclass(xla.DeviceArray, np.ndarray))
self.assertFalse(issubclass(xla._CppDeviceArray, np.ndarray))
self.assertFalse(issubclass(pxla.ShardedDeviceArray, np.ndarray))
self.assertFalse(issubclass(pxla._ShardedDeviceArray, np.ndarray))
def test_is_instance(self):
def f(x):
self.assertIsInstance(x, jnp.ndarray)
self.assertNotIsInstance(x, np.ndarray)
return x + 2
jit(f)(3)
jax.vmap(f)(np.arange(3))
def test_device_put_and_get(self):
x = np.arange(12.).reshape((3, 4)).astype("float32")
dx = api.device_put(x)
self.assertIsInstance(dx, xla.DeviceArray)
self.assertIsInstance(dx, jnp.ndarray)
self.assertNotIsInstance(dx, np.ndarray)
x2 = api.device_get(dx)
self.assertNotIsInstance(x2, jnp.ndarray)
self.assertIsInstance(x2, np.ndarray)
assert np.all(x == x2)
y = [x, (2 * x, 3 * x)]
dy = api.device_put(y)
y2 = api.device_get(dy)
self.assertIsInstance(y2, list)
self.assertIsInstance(y2[0], np.ndarray)
assert np.all(y2[0] == x)
self.assertIsInstance(y2[1], tuple)
self.assertIsInstance(y2[1][0], np.ndarray)
assert np.all(y2[1][0] == 2 * x)
self.assertIsInstance(y2[1][1], np.ndarray)
assert np.all(y2[1][1] == 3 * x)
def test_device_get_scalar(self):
x = np.arange(12.).reshape((3, 4)).astype("float32")
x = api.device_put(x)
self.assertIsInstance(x, xla.DeviceArray)
y = [x, 2]
y2 = api.device_get(y)
self.assertIsInstance(y2, list)
self.assertIsInstance(y2[0], np.ndarray)
assert np.all(y2[0] == x)
self.assertIsInstance(y2[1], int)
self.assertEqual(y2[1], 2)
@parameterized.parameters([(3,)], [(2, 0)])
def test_device_put_across_devices(self, shape):
if len(api.local_devices()) < 2:
raise unittest.SkipTest("this test requires multiple devices")
d1, d2 = api.local_devices()[:2]
data = np.random.randn(*shape).astype(np.float32)
x = api.device_put(data, device=d1)
self.assertEqual(x.device_buffer.device(), d1)
y = api.device_put(x, device=d2)
self.assertEqual(y.device_buffer.device(), d2)
np.testing.assert_array_equal(data, np.array(y))
# Make sure these don't crash
api.device_put(x)
api.device_put(y)
@jtu.skip_on_devices("cpu")
def test_device_put_across_platforms(self):
default_device = jax.devices()[0]
cpu_device = jax.devices("cpu")[0]
np_arr = np.array([1,2,3])
scalar = 1
device_arr = jnp.array([1,2,3])
assert device_arr.device_buffer.device() is default_device
for val in [np_arr, device_arr, scalar]:
x = api.device_put(val, device=cpu_device)
self.assertEqual(x.device_buffer.device(), cpu_device)
@jtu.skip_on_devices("tpu")
def test_jacobian(self):
R = np.random.RandomState(0).randn
A = R(4, 3)
x = R(3)
f = lambda x: jnp.dot(A, x)
assert np.allclose(jacfwd(f)(x), A)
assert np.allclose(jacrev(f)(x), A)
f = lambda x: jnp.tanh(jnp.dot(A, x))
assert np.allclose(jacfwd(f)(x), jacrev(f)(x))
@jtu.skip_on_devices("tpu")
def test_hessian(self):
R = np.random.RandomState(0).randn
A = R(4, 4)
x = R(4)
f = lambda x: jnp.dot(x, jnp.dot(A, x))
assert np.allclose(hessian(f)(x), A + A.T)
def test_std_basis(self):
basis = api._std_basis(jnp.zeros(3))
assert getattr(basis, "shape", None) == (3, 3)
assert np.allclose(basis, np.eye(3))
basis = api._std_basis(jnp.zeros((3, 3)))
assert getattr(basis, "shape", None) == (9, 3, 3)
assert np.allclose(basis, np.eye(9).reshape(9, 3, 3))
basis = api._std_basis([0., (jnp.zeros(3), jnp.zeros((3, 4)))])
assert isinstance(basis, list) and len(basis) == 2
assert getattr(basis[0], "shape", None) == (16,)
assert isinstance(basis[1], tuple) and len(basis[1]) == 2
assert getattr(basis[1][0], "shape", None) == (16, 3)
assert getattr(basis[1][1], "shape", None) == (16, 3, 4)
@jtu.skip_on_devices("tpu")
def test_jacobian_on_pytrees(self):
for jacfun in [jacfwd, jacrev]:
ans = jacfun(lambda x, y: (x, y))(0., 1.)
expected = (1., 0.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = jacfun(lambda x, y: (x, y), 1)(0., 1.)
expected = (0., 1.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = jacfun(lambda x, y: (x, y), (0, 1))(0., 1.)
expected = ((1., 0.),
(0., 1.),)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = jacfun(lambda x: x[:2])((1., 2., 3.))
expected = ((1., 0., 0.),
(0., 1., 0.))
self.assertAllClose(ans, expected, check_dtypes=False)
R = np.random.RandomState(0).randn
x = R(2)
y = R(3)
ans = jacfun(lambda x, y: {'x': x, 'xy': jnp.outer(x, y)})(x, y)
expected = {'x': np.eye(2),
'xy': np.kron(np.eye(2), y[:, None]).reshape(2, 3, 2)}
self.assertAllClose(ans, expected, check_dtypes=False)
@jtu.skip_on_devices("tpu")
def test_hessian_on_pytrees(self):
ans = hessian(lambda x: jnp.array(x)**2)((1., 2.))
expected = ((np.array([2., 0.]), np.array([0., 0.])),
(np.array([0., 0.]), np.array([0., 2.])))
self.assertAllClose(ans, expected, check_dtypes=False)
@jtu.skip_on_devices("tpu")
def test_issue1372(self):
def quad(x):
return jnp.dot(x, x)
def f(x, u):
return quad(x) + quad(u)
x, u = jnp.ones(5), jnp.ones(2)
rev = jacrev
fwd = jacfwd
self.assertEqual(rev(rev(f, 0), 0)(x, u).shape, (5, 5))
self.assertEqual(rev(fwd(f, 0), 0)(x, u).shape, (5, 5))
self.assertEqual(fwd(rev(f, 0), 0)(x, u).shape, (5, 5))
self.assertEqual(fwd(fwd(f, 0), 0)(x, u).shape, (5, 5))
self.assertEqual(rev(rev(f, 1), 1)(x, u).shape, (2, 2))
self.assertEqual(rev(fwd(f, 1), 1)(x, u).shape, (2, 2))
self.assertEqual(fwd(rev(f, 1), 1)(x, u).shape, (2, 2))
self.assertEqual(fwd(fwd(f, 1), 1)(x, u).shape, (2, 2))
self.assertEqual(rev(rev(f, 1), 0)(x, u).shape, (2, 5))
self.assertEqual(rev(fwd(f, 1), 0)(x, u).shape, (2, 5))
self.assertEqual(rev(rev(f, 0), 1)(x, u).shape, (5, 2))
self.assertEqual(rev(fwd(f, 0), 1)(x, u).shape, (5, 2))
self.assertEqual(fwd(rev(f, 1), 0)(x, u).shape, (2, 5))
self.assertEqual(fwd(fwd(f, 1), 0)(x, u).shape, (2, 5))
self.assertEqual(fwd(rev(f, 0), 1)(x, u).shape, (5, 2))
self.assertEqual(fwd(fwd(f, 0), 1)(x, u).shape, (5, 2))
  def test_large_device_constant(self):
    """jit handles a large (2e6-element) array argument/result correctly."""
    ans = jit(lambda x: 2 * x)(jnp.ones(int(2e6)))
    self.assertAllClose(ans, np.ones(int(2e6)) * 2., check_dtypes=False)
  def test_grad_and_aux_basic(self):
    """has_aux=True returns (grad, aux); aux passes through undifferentiated."""
    g, aux = grad(lambda x: (x**3, [x**2]), has_aux=True)(3.)
    self.assertAllClose(g, grad(lambda x: x**3)(3.))
    self.assertAllClose(aux, [9.], check_dtypes=False)
def test_grad_and_aux_error(self):
with self.assertRaisesRegex(TypeError, "two-element tuple"):
grad(lambda x: (1, 2, 3), has_aux=True)(1.)
with self.assertRaisesRegex(TypeError, "two-element tuple"):
grad(lambda x: x, has_aux=True)(1.)
with self.assertRaisesRegex(TypeError, "two-element tuple"):
grad(lambda x: (x,), has_aux=True)(1.)
def test_grad_and_aux_nested(self):
def f(x):
g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x)
return aux[0]
f2 = lambda x: x**3
self.assertEqual(grad(f)(4.), grad(f2)(4.))
self.assertEqual(jit(grad(f))(4.), grad(f2)(4.))
self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.))
def f(x):
g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x)
return aux[0] * jnp.sin(x)
f2 = lambda x: x**3 * jnp.sin(x)
self.assertEqual(grad(f)(4.), grad(f2)(4.))
self.assertEqual(jit(grad(f))(4.), grad(f2)(4.))
self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.))
def test_grad_and_aux_constant(self):
g, aux = grad(lambda x: (x**3, [4.]), has_aux=True)(4.)
self.assertEqual(g, grad(lambda x: x**3)(4.))
self.assertEqual(aux, [4.])
g, aux = grad(lambda x: (x**3, [x**2, 4.]), has_aux=True)(4.)
self.assertEqual(g, grad(lambda x: x**3)(4.))
self.assertEqual(aux, [4.**2, 4.])
def test_grad_and_aux_no_tracers(self):
# see https://github.com/google/jax/issues/1950
def f(x):
aux = dict(identity=x, p1=x+1)
return x ** 2, aux
_, aux = jax.grad(f, has_aux=True)(3.)
self.assertIsInstance(aux, dict)
for val in aux.values():
self.assertNotIsInstance(val, core.Tracer)
  def test_jvp_mismatched_arguments(self):
    """jvp validates that primals and tangents agree in tree structure,
    container type, dtype, and shape — each mismatch raises."""
    self.assertRaisesRegex(
        TypeError,
        ("primal and tangent arguments to jax.jvp must have the same tree "
         "structure"),
        lambda: api.jvp(lambda x, y: x * y, (np.float32(2),), ()))
    # Primals and tangents must both be tuples or both be lists, not a mix.
    self.assertRaisesRegex(
        TypeError,
        ("primal and tangent arguments to jax.jvp must have the same tree "
         "structure"),
        lambda: api.jvp(lambda x, y: x * y, (np.float32(2),), [np.float32(2)]))
    # dtype mismatch between primal (f16) and tangent (f32).
    self.assertRaisesRegex(
        TypeError,
        "primal and tangent arguments to jax.jvp do not match.",
        lambda: api.jvp(lambda x: -x, (np.float16(2),), (np.float32(4),)))
    # If primals and tangents are not of the same shape then raise error
    fun = lambda x: x+1
    with self.assertRaisesRegex(
        ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.array([1.,2.,3.]),), (jnp.array([1.,2.,3.,4.]),))
    with self.assertRaisesRegex(
        ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.float32(10.),), (jnp.array([1.,2.,3.], dtype=jnp.float32),))
    with self.assertRaisesRegex(
        ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.array([1.,2.,3.], dtype=jnp.float32),), (jnp.float32(20.),))
    with self.assertRaisesRegex(
        ValueError, "jvp called with different primal and tangent shapes"):
      api.jvp(fun, (jnp.array([1.,2.,3.]),), (20.,))
  # jvp rejects primals/tangents that are not passed as tuples or lists.
  def test_jvp_non_tuple_arguments(self):
    def f(x, y): return x + y
    self.assertRaisesRegex(
        TypeError,
        "primal and tangent arguments to jax.jvp must be tuples or lists; found float and tuple.",
        lambda: api.jvp(f, 0., (1.,)))
    self.assertRaisesRegex(
        TypeError,
        "primal and tangent arguments to jax.jvp must be tuples or lists; found tuple and ndarray.",
        lambda: api.jvp(f, (0.,), np.array([1., 2.])))
  # The pullback returned by vjp validates both the pytree structure and the
  # tangent dtype of the cotangent it is given.
  def test_vjp_mismatched_arguments(self):
    _, pullback = api.vjp(lambda x, y: x * y, np.float32(3), np.float32(4))
    self.assertRaisesRegex(
        TypeError,
        "Tree structure of cotangent input.*does not match",
        lambda: pullback((np.float32(7), np.float32(100))))
    self.assertRaisesRegex(
        TypeError,
        "Type of cotangent input to vjp pullback.*is not the expected tangent type",
        lambda: pullback((np.float16(42))))
  # A cotangent whose shape differs from the primal output is rejected.
  def test_vjp_bad_cotangent_shape(self):
    x = np.ones((2, 5), dtype=np.float32)
    y = np.ones((5, 3), dtype=np.float32)
    def f_jax(x, y):
      return jnp.matmul(x, y)
    res, pullback = jax.vjp(f_jax, x, y)
    with self.assertRaisesRegex(
        ValueError,
        "Shape of cotangent input to vjp pullback function .* must be the same as the shape of corresponding primal input .*"):
      pullback(np.ones((2, 4), dtype=np.float32))
  # An inner jit called twice while tracing under jvp: the second call with a
  # new argument value must reuse the cached jit machinery.
  def test_jvp_jit_cached(self):
    def func(x):
      def inner(y):
        return y * x
      # Must have two calls to the inner jit (the second one hits the cache)
      res1 = api.jit(inner)(4.)
      res2 = api.jit(inner)(5.)
      return res1 + res2
    self.assertAllClose((45., 9.), api.jvp(func, (5.,), (1.,)))
  # linear_transpose accepts an abstract example value (anything exposing
  # .shape and .dtype) in place of a concrete array.
  def test_linear_transpose_abstract(self):
    x = types.SimpleNamespace(shape=(3,), dtype=np.dtype(np.float32))
    y = jnp.arange(3, dtype=np.float32)
    transpose_fun = api.linear_transpose(lambda x: 2 * x, x)
    z, = transpose_fun(y)
    self.assertArraysEqual(2 * y, z, check_dtypes=True)
  # Integer example values are supported.
  def test_linear_transpose_integer(self):
    f = lambda x: 2 * x
    transpose = api.linear_transpose(f, 1)
    actual, = transpose(3)
    expected = 6
    self.assertEqual(actual, expected)
  # Error cases: unsupported example dtype combinations and cotangents whose
  # tree structure or type does not match the primal output.
  def test_linear_transpose_error(self):
    with self.assertRaisesRegex(
        TypeError, "linear_transpose only supports"):
      api.linear_transpose(lambda x: 2. * x, 1)
    transpose_fun = api.linear_transpose(lambda x: [x, x], 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent tree does not match"):
      transpose_fun(1.0)
    transpose_fun = api.linear_transpose(lambda x: jnp.stack([x, x]), 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent type does not match"):
      transpose_fun(1.0)
    transpose_fun = api.linear_transpose(lambda x: 1j * x, 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent type does not match"):
      transpose_fun(1.0)
    transpose_fun = api.linear_transpose(lambda x: x, 1.0)
    with self.assertRaisesRegex(TypeError, "cotangent type does not match"):
      transpose_fun(1j)
  # Complex scalar multiply: transpose of (1+2j)*x applied to (3+4j).
  def test_linear_transpose_complex(self):
    f = lambda x: (1 + 2j) * x
    transpose = api.linear_transpose(f, 1j)
    actual, = transpose(3 + 4j)
    expected = -5 + 10j
    self.assertEqual(actual, expected)
def test_linear_transpose_zeros(self):
f = lambda x: x[0]
transpose = api.linear_transpose(f, [1., 2.])
actual, = transpose(3.)
expected = [3., 0.]
self.assertEqual(actual, expected)
  # grad of a complex-output function requires holomorphic=True.
  def test_complex_grad_raises_error(self):
    self.assertRaises(TypeError, lambda: grad(lambda x: jnp.sin(x))(1 + 2j))
  def test_holomorphic_grad(self):
    out = grad(lambda x: jnp.sin(x), holomorphic=True)(1 + 2j)
    expected = 2.0327230070196656 - 3.0518977991518j
    self.assertAllClose(out, expected, check_dtypes=False)
  # grad of a real-valued function of complex inputs works without
  # holomorphic=True.
  def test_nonholomorphic_grad(self):
    zs = 0.5j * np.arange(5) + np.arange(5)
    def f(z):
      return jnp.sum(jnp.cos(jnp.abs(z)))
    ans = grad(f)(zs)
    expected = np.array([ 0. + 0.j,
                         -0.80430663 + 0.40215331j,
                         -0.70368982 + 0.35184491j,
                          0.1886467 - 0.09432335j,
                          0.86873727 - 0.43436864j])
    self.assertAllClose(ans, expected, check_dtypes=False,
                        atol=jtu.default_gradient_tolerance,
                        rtol=jtu.default_gradient_tolerance)
  def test_complex_output_jacrev_raises_error(self):
    self.assertRaises(TypeError, lambda: jacrev(lambda x: jnp.sin(x))(1 + 2j))
  def test_nonholomorphic_jacrev(self):
    # code based on https://github.com/google/jax/issues/603
    zs = 0.5j * np.arange(5) + np.arange(5)
    def f(z):
      return jnp.cos(jnp.linalg.norm(2 * z))
    ans = jacrev(f)(zs)
    expected = grad(f)(zs)
    self.assertAllClose(ans, expected)
  # Mixed-precision pytree input: each jacfwd Jacobian block takes the dtype
  # of its corresponding OUTPUT (see the `desired` literal below).
  def test_heterogeneous_jacfwd(self):
    # See https://github.com/google/jax/issues/7157
    # See https://github.com/google/jax/issues/7780
    x = np.array([2.0], dtype=np.float16)
    y = np.array([3.0], dtype=np.float32)
    a = (x, y)
    def f(tup):
      jtu._check_dtypes_match(tup, a)
      x, y = tup
      return x, y, x + y
    actual = jacfwd(f)(a)
    desired = ((np.array(1., dtype=np.float16), np.array(0., dtype=np.float16)),
               (np.array(0., dtype=np.float32), np.array(1., dtype=np.float32)),
               (np.array(1., dtype=np.float32), np.array(1., dtype=np.float32)))
    jtu._check_dtypes_match(actual, desired)
    jtu.check_eq(actual, desired)
  # Same function under jacrev: each block takes the dtype of its
  # corresponding INPUT instead.
  def test_heterogeneous_jacrev(self):
    # See https://github.com/google/jax/issues/7157
    # See https://github.com/google/jax/issues/7780
    x = np.array([2.0], dtype=np.float16)
    y = np.array([3.0], dtype=np.float32)
    a = (x, y)
    def f(tup):
      jtu._check_dtypes_match(tup, a)
      x, y = tup
      return x, y, x + y
    actual = jacrev(f)(a)
    desired = ((np.array(1., dtype=np.float16), np.array(0., dtype=np.float32)),
               (np.array(0., dtype=np.float16), np.array(1., dtype=np.float32)),
               (np.array(1., dtype=np.float16), np.array(1., dtype=np.float32)))
    jtu._check_dtypes_match(actual, desired)
    jtu.check_eq(actual, desired)
  # grad over a (complex, float) pytree preserves each leaf's dtype.
  def test_heterogeneous_grad(self):
    # See https://github.com/google/jax/issues/7157
    x = np.array(1.0+1j)
    y = np.array(2.0)
    a = (x, y)
    def f(tup):
      jtu._check_dtypes_match(tup, a)
      x, y = tup
      return jnp.square(jnp.abs(x)) + y
    actual = grad(f)(a)
    desired = (np.array(2 - 2j), np.array(1.))
    jtu._check_dtypes_match(actual, desired)
    jtu.check_eq(actual, desired)
  # jacfwd without holomorphic=True rejects complex inputs.
  def test_complex_input_jacfwd_raises_error(self):
    self.assertRaises(TypeError, lambda: jacfwd(lambda x: jnp.sin(x))(1 + 2j))
  def test_legacy_devicearray_repr(self):
    dx = device_put(3.)
    str(dx.item())  # doesn't crash
  def test_devicearray_repr(self):
    x = device_put(jnp.zeros(3))
    self.assertIsInstance(x, xla.DeviceArray)
    repr(x)  # doesn't crash
    x = device_put(jnp.ones(3) + 1j * jnp.ones(3))
    self.assertIsInstance(x, xla.DeviceArray)
    repr(x)  # doesn't crash
  # A deleted DeviceArray must raise on subsequent use, including repr().
  def test_devicearray_delete(self):
    x = device_put(1.)
    x.delete()
    self.assertRaisesRegex(RuntimeError, "DeviceArray has been deleted.",
                           lambda: repr(x))
  # block_until_ready returns the very same array object (fluent style).
  def test_devicearray_block_until_ready(self):
    x = device_put(1.)
    y = x.block_until_ready()
    self.assertTrue(y is x)
def test_devicearray_weakref_friendly(self):
x = device_put(1.)
y = weakref.ref(x)
self.assertEqual(y(), 1.)
del x
self.assertIsNone(y())
  # namedtuples are pytrees: they flow through grad/jit, and grad's result
  # preserves the namedtuple type.
  def test_namedtuple_transparency(self):
    Point = collections.namedtuple("Point", ["x", "y"])
    def f(pt):
      return jnp.sqrt(pt.x ** 2 + pt.y ** 2)
    pt = Point(1., 2.)
    f(pt)  # doesn't crash
    g = api.grad(f)(pt)
    self.assertIsInstance(g, Point)
    f_jit = api.jit(f)
    self.assertAllClose(f(pt), f_jit(pt), check_dtypes=False)
  # Subclasses of namedtuples (with extra methods) also pass through.
  def test_namedtuple_subclass_transparency(self):
    # See https://github.com/google/jax/issues/806
    Point = collections.namedtuple("Point", ["x", "y"])
    class ZeroPoint(Point):
      def is_zero(self):
        return (self.x == 0) and (self.y == 0)
    pt = ZeroPoint(0., 0.)
    def f(pt):
      return 0. if pt.is_zero() else jnp.sqrt(pt.x ** 2 + pt.y ** 2)
    f(pt)  # doesn't crash
    _ = api.grad(f)(pt)
    self.assertIsInstance(pt, ZeroPoint)
  # ShapeDtypeStruct exposes shape/dtype/ndim/size/len and a stable repr.
  @parameterized.parameters(1, 2, 3)
  def test_shape_dtype_struct(self, i):
    s = api.ShapeDtypeStruct(shape=(i, 2, 3), dtype=jnp.float32)
    self.assertEqual(s.shape, (i, 2, 3))
    self.assertEqual(s.dtype, jnp.float32)
    self.assertEqual(s.ndim, 3)
    self.assertEqual(s.size, i * 2 * 3)
    self.assertLen(s, i)
    for f in (str, repr):
      self.assertEqual(
          f(s), "ShapeDtypeStruct(shape=({}, 2, 3), dtype=float32)".format(i))
  # Scalar structs: size 1, ndim 0, and len() raises like it does for a
  # 0-d ndarray.
  def test_shape_dtype_struct_scalar(self):
    s = api.ShapeDtypeStruct(shape=(), dtype=jnp.float32)
    self.assertEmpty(s.shape)
    self.assertEqual(s.size, 1)
    self.assertEqual(s.ndim, 0)
    with self.assertRaisesRegex(TypeError, "len[(][)] of unsized object"):
      _ = len(s)
def test_shape_dtype_struct_hash(self):
s1 = api.ShapeDtypeStruct(shape=(2, 3), dtype=jnp.float32)
s2 = api.ShapeDtypeStruct(shape=(2, 3), dtype=jnp.float32)
s3 = api.ShapeDtypeStruct(shape=(2, 4), dtype=jnp.float32)
self.assertEqual(hash(s1), hash(s2))
self.assertNotEqual(hash(s1), hash(s3))
  # eval_shape computes output shapes without performing any real work.
  def test_eval_shape(self):
    def fun(x, y):
      return jnp.tanh(jnp.dot(x, y) + 3.)
    x = jnp.ones((2, 3))
    y = jnp.ones((3, 4))
    out_shape = api.eval_shape(fun, x, y)
    self.assertEqual(out_shape.shape, (2, 4))
def test_eval_shape_constants(self):
def fun():
x = jnp.ones((2, 3))
y = jnp.ones((3, 4))
return jnp.tanh(jnp.dot(x, y) + 3.)
out_shape = api.eval_shape(fun)
self.assertEqual(out_shape.shape, (2, 4))
  # Tuple arguments can be unpacked inside the traced function.
  def test_eval_shape_tuple_unpacking(self):
    def fun(x, y):
      a, b = x
      return a + b + y
    x = (jnp.ones(2), jnp.ones(2))
    y = 3.
    out_shape = api.eval_shape(fun, x, y)
    self.assertEqual(out_shape.shape, (2,))
def test_eval_shape_tuple_itemgetting(self):
def fun(x, y):
return x[0] + x[1] + y
x = (jnp.ones(2), jnp.ones(2))
y = 3.
out_shape = api.eval_shape(fun, x, y)
self.assertEqual(out_shape.shape, (2,))
  # Pytree (dict) outputs come back as a matching tree of shape structs.
  def test_eval_shape_output_dict(self):
    def fun(x, y):
      return {'hi': x[0] + x[1] + y}
    x = (jnp.ones(2), jnp.ones(2))
    y = 3.
    out_shape = api.eval_shape(fun, x, y)
    out_shape = tree_util.tree_map(np.shape, out_shape)
    self.assertEqual(out_shape, {'hi': (2,)})
  # Incompatible shapes surface as a TypeError at trace time.
  def test_eval_shape_shape_error(self):
    def fun(x, y):
      return jnp.tanh(jnp.dot(x, y) + 3.)
    x = jnp.ones((3, 3))
    y = jnp.ones((4, 4))
    self.assertRaises(TypeError, lambda: api.eval_shape(fun, x, y))
  # Any object exposing .shape and .dtype duck-types as an argument.
  def test_eval_shape_duck_typing(self):
    def fun(A, b, x):
      return jnp.dot(A, x) + b
    class MyArgArray(object):
      def __init__(self, shape, dtype):
        self.shape = shape
        self.dtype = np.dtype(dtype)
    A = MyArgArray((3, 4), jnp.float32)
    b = MyArgArray((5,), jnp.float32)
    x = MyArgArray((4, 5), jnp.float32)
    out_shape = api.eval_shape(fun, A, b, x)
    self.assertEqual(out_shape.shape, (3, 5))
  # Even a dict that exposes shape/dtype as attributes duck-types.
  def test_eval_shape_duck_typing2(self):
    class EasyDict(dict):
      def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__dict__ = self
    x = EasyDict(shape=(3,), dtype=np.dtype('float32'))
    out_shape = api.eval_shape(lambda x: x, x)
    self.assertEqual(out_shape.shape, (3,))
  # eval_shape tracks named (axis) shapes; psum over 'i' removes that name
  # from the output's named_shape.
  def test_eval_shape_names(self):
    def fun(x, y):
      return lax.psum(x, 'i') + y
    class MyArgArray(object):
      def __init__(self, shape, dtype, named_shape):
        self.shape = shape
        self.dtype = jnp.dtype(dtype)
        self.named_shape = named_shape
    x = MyArgArray((3, 2), jnp.float32, {'i': 10})
    y = MyArgArray((3, 2), jnp.float32, {'j': 5})
    with core.extend_axis_env('i', 10, None):
      with core.extend_axis_env('j', 5, None):
        out_shape = api.eval_shape(fun, x, y)
    self.assertEqual(out_shape.named_shape, {'j': 5})
  # The function returned by linearize rejects tangents whose shape is
  # inconsistent with the original primals, with and without jit.
  def test_issue_871(self):
    T = jnp.array([[1., 2.], [3., 4.], [5., 6.]])
    x = jnp.array([1, 2, 3])
    msg = ("linearized function called on tangent values inconsistent with "
           "the original primal values")
    y, f_jvp = api.linearize(jnp.sum, x)
    with self.assertRaisesRegex(ValueError, msg):
      f_jvp(T)
    y, f_jvp = api.linearize(api.jit(jnp.sum), x)
    with self.assertRaisesRegex(ValueError, msg):
      f_jvp(T)
  # grad rejects integer inputs unless allow_int=True is passed.
  def test_grad_of_int_errors(self):
    # Errors without allow_int=True
    dfn = grad(lambda x: x ** 2)
    self.assertRaisesRegex(
        TypeError,
        (r"grad requires real- or complex-valued inputs \(input dtype that is a "
         r"sub-dtype of np.inexact\), but got int.*."),
        lambda: dfn(3))
  # The tests below exercise float0 tangents/cotangents for integer primals.
  # They are guarded against numpy 1.21.0 (see the linked numpy issue).
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_jvp_of_int_identity(self):
    primals = (1,)
    tangents = (np.zeros(shape=(), dtype=float0),)
    _, out = api.jvp(lambda x: x, primals, tangents)
    self.assertEqual(out, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_jvp_of_int_add(self):
    primals = (2,)
    tangents = (np.zeros(shape=(), dtype=float0),)
    _, out_tangent = api.jvp(lambda x: x+1, primals, tangents)
    self.assertEqual(out_tangent, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_jit_jvp_of_int(self):
    primals = (2,)
    tangents = (np.zeros(shape=(), dtype=float0),)
    _, out_tangent = api.jvp(jax.jit(lambda x: x+1), primals, tangents)
    self.assertEqual(out_tangent, np.zeros(shape=(), dtype=float0))
  # vjp wrt an integer index argument yields a float0 cotangent for it.
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_vjp_of_int_index(self):
    primal, fn_vjp = api.vjp(lambda x, i: x[i], np.ones(2)*2, 1)
    tangent_x, tangent_i = fn_vjp(1.)
    self.assertEqual(primal, 2.)
    self.assertAllClose(tangent_x, jnp.array([0., 1.]))
    self.assertEqual(tangent_i, np.zeros(shape=(), dtype=float0))
  # float0 cotangents keep the PRIMAL's shape, not the output's.
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_vjp_of_int_shapes(self):
    out, fn_vjp = api.vjp(lambda x: lax.reshape(x, (2, 2)), np.ones((4, 1),
                                                                    dtype=int))
    tangent, = fn_vjp(out)
    self.assertArraysEqual(tangent, np.zeros(shape=(4, 1), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_jit_vjp_of_int(self):
    primal, fn_vjp = api.vjp(lambda x, y: x+y, 2, 1)
    tangent_x, tangent_i = jax.jit(fn_vjp)(1)
    self.assertEqual(primal, 3)
    self.assertEqual(tangent_x, np.zeros(shape=(), dtype=float0))
    self.assertEqual(tangent_i, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_vjp_of_int_fulllike(self):
    # Regression test for tangent and cotangent mismatch in convert_element_type
    # transpose rule wrt a ConstVar
    f = lax.full_like
    out, vjp = api.vjp(f, np.zeros((2, 2)), 1)
    self.assertAllClose(out, jnp.ones((2, 2)))
    tangent_x, tangent_y = vjp(out)
    self.assertAllClose(tangent_x, jnp.zeros((2, 2)))
    self.assertEqual(tangent_y, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_grad_of_int(self):
    # Need real-valued output, but testing integer input.
    out = api.grad(lambda x: x+0., allow_int=True)(1)
    self.assertEqual(out, np.zeros(shape=(), dtype=float0))
  # Booleans are also supported under allow_int=True.
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_grad_of_bool(self):
    def cond(pred):
      return lax.cond(pred, lambda _: 1., lambda _: 2., 1.)
    value, grd = api.value_and_grad(cond, allow_int=True)(True)
    self.assertEqual(value, 1.)
    self.assertEqual(grd, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_grad_of_int_index(self):
    grad_x, grad_i = api.grad(lambda x, i: x[i], argnums=(0, 1),
                              allow_int=True)(np.ones(2), 1)
    self.assertAllClose(grad_x, jnp.array([0., 1.]))
    self.assertEqual(grad_i, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_jit_grad_of_int(self):
    grad_f = api.grad(lambda x, i: x[i], argnums=(0, 1), allow_int=True)
    grad_x, grad_i = jax.jit(grad_f)(np.ones(2), 1)
    self.assertAllClose(grad_x, jnp.array([0., 1.]))
    self.assertEqual(grad_i, np.zeros(shape=(), dtype=float0))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_float0_reshape(self):
    # dtype-agnostic operations are supported
    float0_array = jax.grad(lambda x: jnp.sum(x+0.),
                            allow_int=True)(np.ones((2, 4), dtype=int))
    self.assertArraysEqual(float0_array.reshape((4, 2)),
                           np.zeros((4, 2), dtype=float0))
    self.assertArraysEqual(float0_array.transpose(),
                           np.zeros((4, 2), dtype=float0))
  def test_float0_error(self):
    # float0 is incompatible with other dtypes
    float0_array = jax.grad(lambda x: x+0., allow_int=True)(1)
    error_text = "float0s do not support any operations by design"
    with self.assertRaisesRegex(TypeError, error_text):
      # dispatch via DeviceArray
      _ = float0_array + jnp.zeros(())
    with self.assertRaisesRegex(TypeError, error_text):
      # dispatch via lax
      _ = lax.add(float0_array, jnp.zeros(()))
  # grad output must be real-valued unless holomorphic=True is used; the
  # holomorphic transforms in turn require complex inputs.
  def test_grad_complex_result_errors(self):
    dfn = grad(lambda x: x ** 2 + 1j)
    self.assertRaisesRegex(
        TypeError,
        (r"grad requires real-valued outputs \(output dtype that is a "
         r"sub-dtype of np.floating\), but got complex.*"),
        lambda: dfn(3.))
  def test_holomorphic_grad_of_float_errors(self):
    dfn = grad(lambda x: x ** 2, holomorphic=True)
    self.assertRaisesRegex(
        TypeError,
        (r"grad with holomorphic=True requires inputs with complex dtype, "
         r"but got float.*"),
        lambda: dfn(3.))
  def test_holomorphic_jacrev_of_float_errors(self):
    dfn = jacrev(lambda x: x ** 2, holomorphic=True)
    self.assertRaisesRegex(
        TypeError,
        (r"jacrev with holomorphic=True requires inputs with complex dtype, "
         r"but got float.*"),
        lambda: dfn(3.))
  def test_holomorphic_jacfwd_of_float_errors(self):
    dfn = jacfwd(lambda x: x ** 2, holomorphic=True)
    self.assertRaisesRegex(
        TypeError,
        (r"jacfwd with holomorphic=True requires inputs with complex dtype, "
         r"but got float.*"),
        lambda: dfn(3.))
  # jacfwd without holomorphic=True rejects complex inputs.
  def test_jacfwd_of_complex_errors(self):
    dfn = jacfwd(lambda x: x ** 2)
    self.assertRaisesRegex(
        TypeError,
        (r"jacfwd requires real-valued inputs \(input dtype that is a "
         r"sub-dtype of np.floating\), but got complex.*"),
        lambda: dfn(3. + 1j))
  def test_xla_computation(self):
    # these tests basically check the examples in the xla_computation docstring
    def e(x):
      return jnp.sin(jnp.cos(x))
    c = api.xla_computation(e)(2.)
    self.assertIn('cosine', c.as_hlo_text())
    self.assertIn('sine', c.as_hlo_text())
    # A psum over a single named axis lowers to a single all-reduce.
    def f(x):
      return x - lax.psum(x, 'i')
    axis_env = [('i', 4)]
    c = api.xla_computation(f, axis_env=axis_env)(2)
    self.assertIn('all-reduce', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1,2,3}}', c.as_hlo_text())
    # Two named axes: replica groups reflect row-, column-, and full sums.
    def g(x):
      rowsum = lax.psum(x, 'i')
      colsum = lax.psum(x, 'j')
      allsum = lax.psum(x, ('i', 'j'))
      return rowsum, colsum, allsum
    axis_env = [('i', 4), ('j', 2)]
    c = api.xla_computation(g, axis_env=axis_env)(5.)
    self.assertIn('all-reduce', c.as_hlo_text())
    self.assertIn('replica_groups={{0,2,4,6},{1,3,5,7}}', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1},{2,3},{4,5},{6,7}}', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1,2,3,4,5,6,7}}', c.as_hlo_text())
    # axis_index_groups restrict the psum to subgroups of the named axis.
    def h(x):
      rowsum = lax.psum(x, 'i', axis_index_groups=[[0, 1], [2, 3]])
      colsum = lax.psum(x, 'j')
      return rowsum, colsum
    axis_env = [('i', 4), ('j', 2)]
    c = api.xla_computation(h, axis_env=axis_env)(5.)
    self.assertIn('all-reduce', c.as_hlo_text())
    self.assertIn('replica_groups={{0,2},{4,6},{1,3},{5,7}}', c.as_hlo_text())
    self.assertIn('replica_groups={{0,1},{2,3},{4,5},{6,7}}', c.as_hlo_text())
  # tuple_args=True packs all arguments into one tuple-typed XLA parameter.
  def test_xla_computation_args(self):
    def foo(x, y, z):
      return x + y + z
    c = api.xla_computation(foo)(1., 2., 3.)
    self.assertEqual(len(c.program_shape().parameter_shapes()), 3)
    c = api.xla_computation(foo, tuple_args=True)(1., 2., 3.)
    param_shapes = c.program_shape().parameter_shapes()
    self.assertEqual(len(param_shapes), 1)
    self.assertEqual(param_shapes[0].xla_element_type(),
                     xla_client.PrimitiveType.TUPLE)
  # ShapeDtypeStruct stand-ins work in place of concrete arrays.
  def test_xla_computation_duck_typing(self):
    def foo(x, y, z):
      return x + y + z
    x = jax.ShapeDtypeStruct((), np.float32)
    y = jax.ShapeDtypeStruct((), np.float32)
    z = jax.ShapeDtypeStruct((), np.float32)
    c = api.xla_computation(foo)(x, y, z)
    self.assertEqual(len(c.program_shape().parameter_shapes()), 3)
    c = api.xla_computation(foo, tuple_args=True)(1., 2., 3.)
    param_shapes = c.program_shape().parameter_shapes()
    self.assertEqual(len(param_shapes), 1)
    self.assertEqual(param_shapes[0].xla_element_type(),
                     xla_client.PrimitiveType.TUPLE)
  # Staging out a pmap-containing function must not require real devices.
  def test_staging_out_multi_replica(self):
    def f(x):
      return api.pmap(jnp.mean)(x)
    xla_comp = api.xla_computation(f)
    xla_comp(jnp.arange(8)).as_hlo_text()  # doesn't crash
  def test_xla_computation_instantiate_constant_outputs(self):
    def f():
      return jnp.zeros((3, 4))
    xla_comp = api.xla_computation(f)()
    out_shape, = xla_comp.program_shape().result_shape().tuple_shapes()
    self.assertEqual(out_shape.dimensions(), (3, 4))
  # Static arguments are baked into the HLO as constants, so only one real
  # parameter remains.
  def test_xla_computation_static_argnums(self):
    def f(x, y):
      return x + y
    xla_comp = api.xla_computation(f, static_argnums=(1,))(2, 3)
    hlo_text = xla_comp.as_hlo_text()
    self.assertIn("constant(3)", hlo_text)
    self.assertIn("parameter.1", hlo_text)
    self.assertNotIn("parameter.2", hlo_text)
  def test_xla_computation_return_shape(self):
    _, shape_tree = api.xla_computation(lambda x: (x + 1, jnp.zeros(2, jnp.float32)),
                                        return_shape=True)(np.int32(1))
    expected = (api.ShapeDtypeStruct(shape=(), dtype=jnp.int32),
                api.ShapeDtypeStruct(shape=(2,), dtype=jnp.float32))
    self.assertEqual(shape_tree, expected)
  # in_parts/out_parts emit XLA sharding annotations in the lowered HLO.
  def test_xla_computation_partitioned(self):
    def f(x, y):
      return jnp.dot(x, y) + 1
    x = jax.ShapeDtypeStruct((8, 8), np.float32)
    y = jax.ShapeDtypeStruct((8, 16), np.float32)
    xla_comp = api.xla_computation(f, in_parts=(P(2, 2), None),
                                   out_parts=P(4, 1))(x, y)
    hlo_text = xla_comp.as_hlo_text()
    self.assertIn('sharding={devices=[2,2]0,1,2,3}', hlo_text)
    self.assertIn('sharding={replicated}', hlo_text)
    self.assertIn('sharding={{devices=[4,1]0,1,2,3}}', hlo_text)
  def test_xla_computation_replicated_and_partitioned(self):
    def f(x, y):
      return jnp.dot(x, y), lax.psum(x, 'i')
    x = jax.ShapeDtypeStruct((8, 8), np.float32)
    y = jax.ShapeDtypeStruct((8, 16), np.float32)
    axis_env = [('i', 4)]
    xla_comp = api.xla_computation(f, axis_env=axis_env,
                                   in_parts=(P(2, 2), None),
                                   out_parts=(P(4, 1), None))(x, y)
    hlo_text = xla_comp.as_hlo_text()
    self.assertIn('all-reduce', hlo_text)
    self.assertIn('replica_groups={{0,1,2,3}}', hlo_text)
    self.assertIn('sharding={devices=[2,2]0,1,2,3}', hlo_text)
    self.assertIn('sharding={replicated}', hlo_text)
    self.assertIn('sharding={{devices=[4,1]0,1,2,3}, {replicated}}', hlo_text)
  def test_xla_computation_psum_constant(self):
    f = lambda: jax.lax.psum(1, "i")
    api.xla_computation(f, axis_env=[("i", 2)])()  # doesn't crash
  @jtu.skip_on_devices("cpu", "gpu")
  @jtu.ignore_warning(message="Some donated buffers were not usable")
  def test_xla_computation_donate_argnums(self):
    api.xla_computation(lambda x: None, donate_argnums=(0,))(3)  # doesn't crash
  # Collectives implemented via lowering rules must see the axis_env too.
  def test_xla_computation_lower_fun_axis_env(self):
    axis_name = 'i'
    def fn(x):
      y = lax.all_gather(
          x, axis_name=axis_name)
      return y * lax.axis_index(axis_name).astype(jnp.float32)
    input_x = jnp.ones((5,6,4))
    axis_env = [(axis_name, api.local_device_count())]
    _ = api.xla_computation(fn, axis_env=axis_env, backend='cpu')(input_x)
  def test_xla_computation_axis_env(self):
    def fn(x):
      z = x * jax.lax.axis_index('i').astype(jnp.float32)
      def inner_fn(carry, a):
        return carry + a, ()
      return jax.lax.scan(inner_fn, jnp.zeros_like(z[0]), z)
    x = jnp.ones((5, 6, 4))
    _ = jax.xla_computation(fn, axis_env=(('i', 8),), backend='cpu')(x)
  # device_put/device_get must be safe to call from many threads at once and
  # round-trip values unchanged.
  def test_concurrent_device_get_and_put(self):
    def f(x):
      for _ in range(100):
        y = jax.device_put(x)
        x = jax.device_get(y)
      return x
    xs = [np.random.randn(i) for i in range(10)]
    with concurrent.futures.ThreadPoolExecutor() as executor:
      futures = [executor.submit(partial(f, x)) for x in xs]
      ys = [f.result() for f in futures]
    for x, y in zip(xs, ys):
      self.assertAllClose(x, y)
  # When x64 is disabled, explicitly requesting a 64-bit dtype must warn,
  # while equivalent 32-bit requests stay silent.
  def test_dtype_warning(self):
    if config.x64_enabled:
      raise unittest.SkipTest("test only applies when x64 is disabled")
    def check_warning(warn, nowarn):
      with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        nowarn()
        prev_len = len(w)
        nowarn()
        assert len(w) == prev_len
        warn()
        assert len(w) > 0
        msg = str(w[-1].message)
        expected_prefix = "Explicitly requested dtype "
        self.assertEqual(expected_prefix, msg[:len(expected_prefix)])
        # The no-warn variant must remain silent even after a warning fired.
        prev_len = len(w)
        nowarn()
        assert len(w) == prev_len
    check_warning(lambda: jnp.array([1, 2, 3], dtype="float64"),
                  lambda: jnp.array([1, 2, 3], dtype="float32"))
    check_warning(lambda: jnp.array([1, 2, 3], dtype="float64"),
                  lambda: jnp.array([1, 2, 3], dtype=float))
    check_warning(lambda: jnp.ones(3, dtype=np.float64),
                  lambda: jnp.ones(3))
    check_warning(lambda: jnp.ones(3, dtype=np.float64),
                  lambda: jnp.ones(3, dtype=float))
    check_warning(lambda: jnp.ones_like(3, dtype=np.int64),
                  lambda: jnp.ones_like(3, dtype=np.int32))
    check_warning(lambda: jnp.zeros(3, dtype="int64"),
                  lambda: jnp.zeros(3, dtype="int32"))
    check_warning(lambda: jnp.zeros_like(3, dtype="float64"),
                  lambda: jnp.zeros_like(3, dtype="float32"))
    check_warning(lambda: jnp.full((2, 3), 1, dtype="int64"),
                  lambda: jnp.full((2, 3), 1))
    check_warning(lambda: jnp.ones(3).astype("float64"),
                  lambda: jnp.ones(3).astype("float32"))
    check_warning(lambda: jnp.eye(3, dtype=np.float64),
                  lambda: jnp.eye(3))
    check_warning(lambda: jnp.arange(3, dtype=np.float64),
                  lambda: jnp.arange(3, dtype=np.float32))
    check_warning(lambda: jnp.linspace(0, 3, dtype=np.float64),
                  lambda: jnp.linspace(0, 3, dtype=np.float32))
    check_warning(lambda: jnp.tri(2, dtype="float64"),
                  lambda: jnp.tri(2, dtype="float32"))
    check_warning(lambda: jnp.arange(1).astype("float64"),
                  lambda: jnp.arange(1).astype(float))
    check_warning(lambda: jnp.arange(1.0).astype("int64"),
                  lambda: jnp.arange(1.0).astype(int))
  # Non-array Python objects (e.g. strings) are rejected by lax ops.
  def test_error_for_invalid_dtype(self):
    with self.assertRaisesRegex(TypeError, ".*not a valid JAX array type.*"):
      lax.add(jnp.array(7), np.array("hello"))
def test_vmap_preserves_docstr(self):
def superfun(a):
pass
self.assertRegex(api.vmap(superfun).__doc__, "\n".join([
"Vectorized version of superfun.*",
"",
"Original documentation:",
"",
superfun.__doc__,
]))
  # in_axes may be given as a list as well as a tuple.
  def test_vmap_in_axes_list(self):
    dictionary = {'a': 5., 'b': jnp.ones(2)}
    x = jnp.zeros(3)
    y = jnp.arange(3.)
    def f(dct, x, y):
      return dct['a'] + dct['b'] + x + y
    out1 = api.vmap(f, (None, 0, 0))(dictionary, x, y)
    out2 = api.vmap(f, [None, 0, 0])(dictionary, x, y)
    self.assertAllClose(out1, out2)
  # An in_axes spec that is not a tree prefix of the arguments is an error.
  def test_vmap_in_axes_tree_prefix_error(self):
    value_tree = jnp.ones(3)
    self.assertRaisesRegex(
        ValueError,
        "vmap in_axes specification must be a tree prefix of the corresponding "
        r"value, got specification \(0, 0\) for value tree "
        + re.escape(f"{tree_util.tree_structure((value_tree,))}."),
        lambda: api.vmap(lambda x: x, in_axes=(0, 0))(value_tree)
    )
  # in_axes / out_axes leaves must be ints or None, not arrays.
  def test_vmap_in_axes_leaf_types(self):
    with self.assertRaisesRegex(
        TypeError, r"vmap in_axes must be an int, None, or .*"):
      api.vmap(lambda x: x, in_axes=(jnp.array([1., 2.]),))(jnp.array([1., 2.]))
  def test_vmap_out_axes_leaf_types(self):
    with self.assertRaisesRegex(
        TypeError, r"vmap out_axes must be an int, None, or .*"):
      api.vmap(lambda x: x, out_axes=(jnp.array([1., 2.]),))(jnp.array([1., 2.]))
  # An unmapped (in_axes=None) non-array argument passes through untouched.
  def test_vmap_unbatched_object_passthrough_issue_183(self):
    fun = lambda f, x: f(x)
    vfun = api.vmap(fun, (None, 0))
    ans = vfun(lambda x: x + 1, jnp.arange(3))
    self.assertAllClose(ans, np.arange(1, 4), check_dtypes=False)
  # Mismatched mapped-axis sizes must produce the detailed, per-argument
  # error messages asserted below.
  def test_vmap_mismatched_axis_sizes_error_message_issue_705(self):
    def h(a, b):
      return jnp.sum(a) + jnp.sum(b)
    X = np.random.randn(10, 4)
    U = np.random.randn(10, 2)
    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:\n"
        r"arg 0 has shape \(10, 4\) and axis 0 is to be mapped" "\n"
        r"arg 1 has shape \(10, 2\) and axis 1 is to be mapped" "\n"
        "so\n"
        "arg 0 has an axis to be mapped of size 10\n"
        "arg 1 has an axis to be mapped of size 2"):
      api.vmap(h, in_axes=(0, 1))(X, U)
    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:\n"
        r"arg 0 has shape \(10, 4\) and axis 0 is to be mapped" "\n"
        r"arg 1 has shape \(10, 2\) and axis 1 is to be mapped" "\n"
        r"arg 2 has shape \(10, 4\) and axis 0 is to be mapped" "\n"
        "so\n"
        "args 0, 2 have axes to be mapped of size 10\n"
        "arg 1 has an axis to be mapped of size 2"):
      api.vmap(lambda x, y, z: None, in_axes=(0, 1, 0))(X, U, X)
    with self.assertRaisesRegex(
        ValueError,
        "vmap got inconsistent sizes for array axes to be mapped:\n"
        "the tree of axis sizes is:\n"
        r"\(10, \[2, 2\]\)"):
      api.vmap(h, in_axes=(0, 1))(X, [U, U])
    # Rank must be at least (requested axis + 1).
    error = (r"vmap was requested to map its argument along axis 0, which "
             r"implies that its rank should be at least 1, but is only 0 "
             r"\(its shape is \(\)\)")
    with self.assertRaisesRegex(ValueError, error):
      api.vmap(lambda x: x)(1.)
    with self.assertRaisesRegex(
        ValueError, "vmap must have at least one non-None value in in_axes"):
      api.vmap(lambda x: x, in_axes=None)(jnp.array([1., 2.]))
    error = (r"vmap was requested to map its argument along axis 1, which "
             r"implies that its rank should be at least 2, but is only 1 "
             r"\(its shape is \(2,\)\)")
    with self.assertRaisesRegex(ValueError, error):
      api.vmap(lambda x: x, in_axes=1)(jnp.array([1., 2.]))
    with self.assertRaisesRegex(
        ValueError,
        "vmap out_axes specification must be a tree prefix of the "
        "corresponding value.*"):
      api.vmap(lambda x: x, in_axes=0, out_axes=(2, 3))(jnp.array([1., 2.]))
    # Mapped outputs (named or unnamed axis) require a non-None out_axes.
    with self.assertRaisesRegex(
        ValueError,
        r"vmap has mapped output \(axis_name=foo\) but out_axes is None"):
      api.vmap(lambda x: x, out_axes=None, axis_name="foo")(jnp.array([1., 2.]))
    with self.assertRaisesRegex(
        ValueError,
        "vmap has mapped output but out_axes is None"):
      api.vmap(lambda x: x, out_axes=None)(jnp.array([1., 2.]))
  # in_axes may be an arbitrary pytree prefix of the arguments, including
  # namedtuples and (ordered) dicts.
  def test_vmap_structured_in_axes(self):
    A, B, C, D = 2, 3, 4, 5
    K = 6  # batch size
    x = np.ones((K, A, B))  # batch axis in different locations
    y = np.ones((B, K, C))
    z = np.ones((C, D, K))
    def foo(tree_arg):
      x, (y, z) = tree_arg
      return jnp.dot(x, jnp.dot(y, z))
    tree = (x, (y, z))
    vfoo = api.vmap(foo, in_axes=((0, (1, 2)),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))
    Point = collections.namedtuple("Point", ["x", "y"])
    tree = (x, Point(y, z))
    vfoo = api.vmap(foo, in_axes=((0, Point(1, 2)),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))
    def foo(tree_arg):
      x, dct = tree_arg
      y, z = dct['a'], dct['b']
      return jnp.dot(x, jnp.dot(y, z))
    tree = (x, {'a': y, 'b': z})
    vfoo = api.vmap(foo, in_axes=((0, {'a': 1, 'b': 2}),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))
    tree = (x, collections.OrderedDict([('a', y), ('b', z)]))
    vfoo = api.vmap(
        foo, in_axes=((0, collections.OrderedDict([('a', 1), ('b', 2)])),))
    self.assertEqual(vfoo(tree).shape, (6, 2, 5))
  # bool is a subclass of int in Python, but in_axes=False must still be
  # rejected explicitly, for both vmap and pmap.
  def test_vmap_in_axes_bool_error(self):
    with self.assertRaisesRegex(TypeError, "must be an int"):
      api.vmap(lambda x: x, in_axes=False)(jnp.zeros(3))
  def test_pmap_in_axes_bool_error(self):
    with self.assertRaisesRegex(TypeError, "must be an int"):
      api.pmap(lambda x: x, in_axes=False)(jnp.zeros(1))
  # Repeated equivalent pmap calls must hit the global compilation cache,
  # including under jvp; vjp adds one extra (backward-pass) compilation.
  def test_pmap_global_cache(self):
    def f(x, y):
      return x, y
    x = np.ones((1, 1, 1))
    # Default arguments: second call is a cache hit.
    with jtu.assert_num_jit_and_pmap_compilations(1):
      for _ in range(2):
        api.pmap(f)(x, x)
    # With an axis name.
    with jtu.assert_num_jit_and_pmap_compilations(1):
      for _ in range(2):
        api.pmap(f, 'i')(x, x)
    # Across all in_axes/out_axes combinations.
    for x_in, y_in, x_out, y_out in it.product(*((0, 1, 2) for _ in range(4))):
      with jtu.assert_num_jit_and_pmap_compilations(1):
        for _ in range(2):
          api.pmap(f, 'i', in_axes=(x_in, y_in), out_axes=(x_out, y_out))(x, x)
    # Under forward-mode AD.
    with jtu.assert_num_jit_and_pmap_compilations(1):
      for _ in range(2):
        api.jvp(api.pmap(f), (x, x), (x, x))
    # Under reverse-mode AD: forward and backward compilations.
    with jtu.assert_num_jit_and_pmap_compilations(2):
      for _ in range(2):
        api.vjp(api.pmap(f), x, x)[1]((x, x))
  def test_device_array_repr(self):
    rep = jnp.ones(()) + 1.
    self.assertStartsWith(repr(rep), "DeviceArray")
  # DeviceArrays are deliberately not hashable.
  def test_device_array_hash(self):
    rep = jnp.ones(()) + 1.
    self.assertIsInstance(rep, jax.interpreters.xla.DeviceArray)
    self.assertNotIsInstance(rep, collections.abc.Hashable)
    with self.assertRaisesRegex(TypeError, 'unhashable type'):
      hash(rep)
  # Passing the differentiated argument by keyword only should produce a
  # helpful argnums error, not an obscure failure.
  def test_grad_without_enough_args_error_message(self):
    def f(x, y): return x + y
    df = api.grad(f, argnums=0)
    self.assertRaisesRegex(
        TypeError,
        "differentiating with respect to argnums=0 requires at least 1 "
        "positional arguments to be passed by the caller, but got only 0 "
        "positional arguments.",
        lambda: partial(df, x=0.)(y=1.))
  # grad-of-jit compiles on first use and then hits the cache for new
  # argument values; verified here via debug-log entries.
  def test_grad_of_jit_compilation_caching(self):
    if not hasattr(self, "assertLogs"):
      raise unittest.SkipTest("test requires assertLogs (python 3)")
    lax.add(1, 2)  # warm up any one-time compilation before capturing logs
    sin = api.jit(jnp.sin)
    prev_level = logging.get_verbosity()
    try:
      logging.set_verbosity('DEBUG')
      with self.assertLogs(level=logging.DEBUG) as l:
        ans1 = api.grad(sin)(2.)
        ans2 = api.grad(sin)(3.)
    finally:
      logging.set_verbosity(prev_level)
    self.assertLen(l.output, 2)  # both compilations happen on the first call
    self.assertAllClose(ans1, np.cos(2.), check_dtypes=False)
    self.assertAllClose(ans2, np.cos(3.), check_dtypes=False)
  # Same property, verified via explicit compile counters.
  def test_grad_of_jit_compilation_caching2(self):
    @api.jit
    def f(x):
      return jnp.sin(x)
    with jtu.count_jit_and_pmap_compiles() as count:
      _ = jax.grad(f)(3.)
    self.assertEqual(count[0], 2)  # two compilations on first use
    with jtu.count_jit_and_pmap_compiles() as count:
      _ = jax.grad(f)(3.)
      _ = jax.grad(f)(4.)
    self.assertEqual(count[0], 0)  # fully cached thereafter
  # Custom pytree unflatteners must never be handed None placeholders.
  def test_grad_does_not_unflatten_tree_with_none(self):
    class CustomNode(list):
      pass
    def unflatten(unused_aux_data, children):
      self.assertIsNotNone(children[0])
      return CustomNode(children)
    tree_util.register_pytree_node(CustomNode, lambda x: (x, None), unflatten)
    grad(lambda x: x[0])(CustomNode([0.]))
  # jit of identity-like functions returns the argument objects themselves.
  def test_trivial_computations(self):
    x = jnp.array([1, 2, 3])
    y = api.jit(lambda x: x)(x)
    self.assertIs(x, y)
    z1, z2 = api.jit(lambda x: (x, x))(x)
    self.assertIs(z1, z2)
    x1, x2 = jnp.array([1, 2]), jnp.array([2, 3])
    z1, z2, z3 = api.jit(lambda x, y: (y, 1, x))(x1, x2)
    self.assertIs(z1, x2)
    self.assertIs(z3, x1)
    self.assertEqual(z2, 1)
def test_nested_jit_hoisting(self):
@api.jit
def f(x, y):
z = 2 * x
return y + z, 3
@api.jit
def g(x):
return f(2, x)
jaxpr_subcomp = xla.jaxpr_subcomp
jaxprs = []
def jaxpr_subcomp_and_collect(c, jaxpr, *args, **kwargs):
jaxprs.append(jaxpr)
return jaxpr_subcomp(c, jaxpr, *args, **kwargs)
try:
xla.jaxpr_subcomp = jaxpr_subcomp_and_collect
ans = g(3)
finally:
xla.jaxpr_subcomp = jaxpr_subcomp
self.assertEqual(ans, (7, 3))
self.assertLen(jaxprs, 2)
outer_jaxpr, inner_jaxpr = jaxprs
self.assertLen(outer_jaxpr.eqns, 1)
self.assertEqual(outer_jaxpr.eqns[0].primitive.name, 'xla_call')
subjaxpr_1 = outer_jaxpr.eqns[0].params["call_jaxpr"]
self.assertEqual(str(subjaxpr_1), str(inner_jaxpr))
self.assertLen(inner_jaxpr.eqns, 2)
self.assertEqual(inner_jaxpr.eqns[-2].primitive.name, 'mul')
self.assertEqual(inner_jaxpr.eqns[-1].primitive.name, 'add')
def test_primitive_compilation_cache(self):
    """Two primitive applications with identical signatures compile once."""
    with jtu.count_primitive_compiles() as compile_count:
        lax.add(1, 2)
        lax.add(2, 3)
    self.assertEqual(compile_count[0], 1)
def test_arange_jit(self):
    """Indexing an arange built from the argument's shape works under jit."""
    def index_with_arange(x):
        return jnp.arange(x.shape[0])[x]

    jit(index_with_arange)(jnp.array([0, 1, 2], dtype=jnp.int32))  # doesn't crash
def helper_save_tracer(self, x):
    """Stash ``x`` on the test instance (used to leak tracers deliberately)."""
    self._saved_tracer = x
    return self._saved_tracer
def test_escaped_tracers_different_top_level_traces(self):
    """A tracer leaked from one jit trace must not be used in another."""
    api.jit(self.helper_save_tracer)(0.)
    expected = "Encountered an unexpected tracer"
    with self.assertRaisesRegex(UnexpectedTracerError, expected):
        api.jit(lambda x: self._saved_tracer)(0.)
def test_escaped_tracers_cant_lift_sublevels(self):
    """Passing a leaked tracer back into jit raises an UnexpectedTracerError."""
    api.jit(self.helper_save_tracer)(0.)
    pattern = re.compile("Encountered an unexpected tracer", re.DOTALL)
    with self.assertRaisesRegex(UnexpectedTracerError, pattern):
        api.jit(lambda x: x)(self._saved_tracer)
def test_escaped_tracers_tracer_from_higher_level(self):
    """A tracer leaked from grad cannot be consumed by a later grad trace."""
    api.grad(self.helper_save_tracer)(0.)
    pattern = re.compile(
        "Encountered an unexpected tracer.*Tracer from a higher level",
        re.DOTALL)
    with self.assertRaisesRegex(UnexpectedTracerError, pattern):
        api.grad(lambda x: x)(self._saved_tracer)
def test_escaped_tracers_incompatible_sublevel(self):
    """Using a tracer leaked at another sublevel inside a jit body raises."""
    def leaky(x):
        api.jit(self.helper_save_tracer)(0.)
        return x + self._saved_tracer  # consume the leaked tracer

    pattern = re.compile("Encountered an unexpected tracer", re.DOTALL)
    with self.assertRaisesRegex(UnexpectedTracerError, pattern):
        api.jit(leaky)(2.)
def test_escaped_tracers_cant_lift(self):
    """A tracer leaked from an inner grad cannot be lifted into the outer trace."""
    def leaky(x):
        api.grad(self.helper_save_tracer)(0.)
        return x + self._saved_tracer

    pattern = re.compile("Encountered an unexpected tracer.*Can't lift",
                         re.DOTALL)
    with self.assertRaisesRegex(UnexpectedTracerError, pattern):
        api.grad(leaky)(2.)
def test_escaped_tracers_not_among_input_tracers(self):
    """A tracer leaked from grad is rejected when it is not an input tracer."""
    def leaky(x):
        api.grad(self.helper_save_tracer)(x)
        return x + self._saved_tracer

    pattern = re.compile(
        "Encountered an unexpected tracer.*Tracer not among input tracers",
        re.DOTALL)
    with self.assertRaisesRegex(UnexpectedTracerError, pattern):
        api.jit(leaky)(2.)
def test_escaped_tracer_omnistaging(self):
    """A tracer leaked via nonlocal state reports its creation line when reused."""
    count = 1

    @jit
    def f():
        nonlocal count
        # Deliberately leak a tracer into the enclosing scope.
        count = jnp.add(count, 1)
    f()

    def f(x, c):
        # Consume the leaked tracer inside a new trace.
        jnp.add(count, 1)
        return None, None

    @jit
    def g():
        lax.scan(f, None, None, length=2)

    with self.assertRaisesRegex(UnexpectedTracerError,
                                "was created on line"):
        g()
def test_escaped_tracer_omnistaging_top_trace(self):
    """A tracer leaked from a top-level scan reports its creation line when reused."""
    count = 1

    def f(_, __):
        nonlocal count
        # Deliberately leak a tracer into the enclosing scope.
        count = jnp.add(count, 1)
        return None, None

    lax.scan(f, None, None, length=2)  # leaks a tracer into `count`

    with self.assertRaisesRegex(UnexpectedTracerError,
                                "was created on line"):
        jax.jit(jnp.add)(jnp.ones(()), count)
def test_escaped_tracer_transform_name(self):
    """Leak errors name the transformation that created the tracer."""
    with self.assertRaisesRegex(UnexpectedTracerError, "for jit"):
        jax.jit(self.helper_save_tracer)(1)
        _ = self._saved_tracer + 1

    with self.assertRaisesRegex(UnexpectedTracerError, "for pmap"):
        jax.pmap(self.helper_save_tracer)(jnp.ones((1, 2)))
        _ = self._saved_tracer + 1

    with self.assertRaisesRegex(UnexpectedTracerError, "for eval_shape"):
        jax.eval_shape(self.helper_save_tracer, 1)
        _ = self._saved_tracer + 1
def test_escaped_tracer_shape_dtype(self):
    """Leak errors report the leaked tracer's shape and dtype."""
    pattern = r"shape \(4, 3\) and dtype int32"
    with self.assertRaisesRegex(core.UnexpectedTracerError, pattern):
        jax.jit(self.helper_save_tracer)(jnp.ones((4, 3), dtype=jnp.int32))
        _ = self._saved_tracer + 1
def test_pmap_static_kwarg_error_message(self):
    """Passing a static_broadcasted argument by keyword gives a clear error."""
    add = jax.pmap(lambda a, b: a + b, static_broadcasted_argnums=(1,))
    msg = (r"pmapped function has static_broadcasted_argnums=\(1,\) but was "
           r"called with only 1 positional argument. All static broadcasted "
           r"arguments must be passed positionally.")
    with self.assertRaisesRegex(ValueError, msg):
        add(jnp.ones((1, 1)), b=1)
def test_vmap_unmapped_last(self):
    """vmap with out_axes=-1 handles outputs that don't depend on the input."""
    constant_out = jax.vmap(lambda x: np.zeros((2,)), out_axes=-1)
    constant_out(np.zeros((5,)))  # doesn't crash
@unittest.skipIf(True, "broken by convert_element_type change.")
def test_xla_constant_dedup(self):
    """Identical XLA constants should appear exactly once in the HLO text."""
    y = np.array([7, 14], dtype=np.float32)
    def f(x):
        # y is captured twice; both uses should share one HLO constant.
        return x + y + y
    x = np.array([1, 2], dtype=np.float32)
    hlo_lines = jax.xla_computation(f)(x).as_hlo_text().split('\n')
    hlo_lines = set([s.strip() for s in hlo_lines])
    self.assertIn('constant.1 = f32[2]{0} constant({7, 14})', hlo_lines)
    # A duplicate of the same constant must not be emitted.
    self.assertNotIn('constant.2 = f32[2]{0} constant({7, 14})', hlo_lines)
def test_eval_context(self):
    """core.eval_context allows eager evaluation inside a jit trace."""
    def check():
        with core.eval_context():
            assert jnp.add(1, 1) == 2

    jit(check)()  # doesn't crash
def test_concrete_error_because_arg_unary(self):
    """Branching on a single traced argument names that argument in the error."""
    @jax.jit
    def f(x):
        return x if x > 0 else 0

    msg = r"on the value of the argument 'x'"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
        f(1)
def test_concrete_error_because_arg_binary(self):
    """Branching on two traced arguments names both in the error."""
    @jax.jit
    def f(x, y):
        return x if x > y else y

    msg = r"on the values of the arguments 'x' and 'y'"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
        f(1, 2)
def test_concrete_error_because_arg_ternary(self):
    """Only the arguments actually branched on are named, however they're passed."""
    @jax.jit
    def f(x, y, z):
        return x if x > z else y

    msg = r"on the values of the arguments 'x' and 'z'"
    for call in (lambda: f(1, 2, 3),
                 lambda: f(1, 2, z=3),
                 lambda: f(1, y=2, z=3)):
        with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
            call()
def test_concrete_error_because_arg_varargs(self):
    """Branching on *args values names the varargs parameter in the error."""
    @jax.jit
    def f(*args):
        x, y, z = args
        return x if x > z else y

    msg = r"on the values of the argument 'args'"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
        f(1, 2, 3)
def test_concrete_error_because_arg_kwargs(self):
    """Branching on **kwargs values names the kwargs parameter in the error."""
    @jax.jit
    def f(**kwargs):
        x, y, z = kwargs['x'], kwargs['y'], kwargs['z']
        return x if x > z else y

    msg = r"on the values of the argument 'kwargs'"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
        f(x=1, y=2, z=3)
def test_concrete_error_because_arg_pytree(self):
    """Branching on a leaf of a pytree argument names the pytree parameter."""
    @jax.jit
    def f(xy, z):
        x, y = xy
        return x if x > 0 else y

    msg = r"on the value of the argument 'xy'"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
        f((1, 2), z=3)
def test_concrete_error_because_const(self):
    """Branching on a traced constant points at the offending source lines."""
    def f():
        assert jnp.add(1, 1) > 0

    msg = "on these lines"
    with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
        jax.jit(f)()
def test_xla_computation_zeros_doesnt_device_put(self):
    """Staging jnp.zeros via xla_computation performs no device transfer."""
    with jtu.count_device_put() as transfers:
        api.xla_computation(lambda: jnp.zeros(3))()
    self.assertEqual(transfers[0], 0)
def test_join_concrete_arrays_with_omnistaging(self):
    """Joining two distinct ConcreteArrays under jit must not crash."""
    # https://github.com/google/jax/issues/4622
    x = jnp.array([1., 2., 3.])
    y = jnp.array([1., 2., 4.])

    @jit
    def f():
        core.lattice_join(core.ConcreteArray(x), core.ConcreteArray(y))

    f()  # doesn't crash
def test_linearize_aval_error(self):
    """linearize rejects tangents whose aval disagrees with the primal's."""
    f = lambda x: x

    # these should be ok
    _, f_jvp = api.linearize(f, 1.)
    f_jvp(1.)
    _, f_jvp = api.linearize(f, np.ones(2, np.int32))
    f_jvp(np.zeros(2, float0))

    # a Python int is not a valid tangent for a float primal
    _, f_jvp = api.linearize(f, 1.)
    with self.assertRaisesRegex(ValueError, "tangent values inconsistent"):
        f_jvp(1)

    # an int array is not a valid tangent for an int primal (float0 expected)
    _, f_jvp = api.linearize(f, np.ones(2, np.int32))
    with self.assertRaisesRegex(ValueError, "tangent values inconsistent"):
        f_jvp(np.ones(2, np.int32))
def test_grad_of_token_consuming_primitive(self):
    """vjp through a custom primitive that takes a token operand works."""
    # Primitive taking (x, token) and returning x; the token carries no data.
    tokentest_p = core.Primitive("tokentest")
    tokentest_p.def_impl(partial(xla.apply_primitive, tokentest_p))
    tokentest_p.def_abstract_eval(lambda x, y: x)
    # NOTE: mutates the global XLA translation table for the test process.
    xla.translations[tokentest_p] = lambda c, x, y: x
    # JVP rule only for the first operand; None marks the token as non-differentiable.
    ad.defjvp(tokentest_p, (lambda g, x, token: x), None)

    token = jax.lax.create_token(123)
    arr = jnp.ones((3, 2))
    res, vjp_fun = jax.vjp(lambda x: tokentest_p.bind(x, token), arr)
    # Should not crash.
    vjp_fun(arr)
def test_jit_returning_token(self):
    """A jitted function may return an XLA token value."""
    tok = jax.jit(jax.lax.create_token)(1.0)
    self.assertIsInstance(tok, jax.interpreters.xla.Token)
def test_leak_checker_catches_a_jit_leak(self):
    """The leak checker flags a tracer appended to an outer list under jit."""
    with jax.checking_leaks():
        leaked = []

        @jit
        def f(x):
            leaked.append(x)  # deliberate leak
            return x

        with self.assertRaisesRegex(Exception, r"Leaked"):
            f(3)
def test_leak_checker_catches_a_pmap_leak(self):
    """The leak checker flags a tracer appended to an outer list under pmap."""
    with jax.checking_leaks():
        leaked = []

        @api.pmap
        def f(x):
            leaked.append(x)  # deliberate leak
            return x

        with self.assertRaisesRegex(Exception, r"Leaked"):
            f(np.ones(1))
def test_leak_checker_catches_a_grad_leak(self):
    """The leak checker flags a tracer stashed during grad tracing."""
    with jax.checking_leaks():
        leaked = []

        def f(x):
            leaked.append(x)  # deliberate leak
            return x

        with self.assertRaisesRegex(Exception, r"Leaked trace"):
            api.grad(f)(3.)
def test_leak_checker_avoids_false_positives(self):
    """Ordinary jit/vmap/grad/pmap usage must not trip the leak checker."""
    with jax.checking_leaks():
        @jit
        def f(x):
            return x
        f(3)
        api.vmap(f)(np.arange(3))  # doesn't crash
        api.grad(f)(3.)

        @api.pmap
        def f(x):
            return x
        f(np.ones(1))  # doesn't crash
        api.vmap(f)(np.ones((1, 1)))
def test_leak_checker_catches_a_scan_leak(self):
    """The leak checker flags a carry tracer leaked out of a scan body."""
    with jax.checking_leaks():
        leaked = []
        body = lambda c, x: (leaked.append(c) or jnp.sin(c), None)
        with self.assertRaisesRegex(Exception, r"Leaked trace"):
            lax.scan(body, 1., np.arange(3.))
def test_leak_checker_avoids_false_positives_scan(self):
    """A well-behaved scan body does not trip the leak checker."""
    with jax.checking_leaks():
        lax.scan(lambda c, x: (jnp.sin(c), None), 1., np.arange(3.))  # doesn't crash
def test_leak_checker_avoids_false_positives_scan_jvp(self):
    """jvp through a scan does not trip the leak checker."""
    with jax.checking_leaks():
        def f(x):
            lax.scan(lambda c, _: (c, None), x, None, length=1)
        api.jvp(f, (3.,), (1.,))  # doesn't crash
def test_leak_checker_avoids_false_positives_scan_vmap(self):
    """vmap of a scan with a constant body does not trip the leak checker."""
    with jax.checking_leaks():
        body = lambda c, _: (1., None)

        @api.vmap
        def f(x):
            lax.scan(body, x, None, length=1)

        f(np.arange(5.))  # doesn't crash
def test_leak_checker_avoids_false_positives_scan_vmap_2(self):
    """vmap of a scan with an identity carry does not trip the leak checker."""
    with jax.checking_leaks():
        body = lambda c, _: (c, None)

        @api.vmap
        def f(x):
            lax.scan(body, x, None, length=1)

        f(np.arange(5.))  # doesn't crash
def test_leak_checker_catches_a_sublevel_leak(self):
    """The leak checker flags a tracer leaked from an inner jit into an outer one."""
    with jax.checking_leaks():
        @jit
        def f(x):
            leaked = []

            @jit
            def g(x):
                leaked.append(x)  # deliberate sublevel leak
                return x

            return g(x)

        with self.assertRaisesRegex(Exception, r"Leaked sublevel"):
            f(3)
def test_leak_checker_avoids_false_positive_custom_jvp(self):
    """custom_jvp used under jit must not trip the leak checker."""
    # see https://github.com/google/jax/issues/5636
    with jax.checking_leaks():
        @api.custom_jvp
        def t(y):
            return y

        def t_jvp(p, t):
            pass

        t.defjvp(t_jvp)

        @jit
        def s(y):
            return t(y)

        s(3)  # doesn't crash
def test_default_backend(self):
    """default_backend matches the platform of the first local device."""
    self.assertEqual(api.local_devices()[0].platform, api.default_backend())
def test_dunder_jax_array(self):
    """Objects exposing __jax_array__ are accepted by jnp ops and transformations."""
    class AlexArray:
        def __init__(self, jax_val):
            self.jax_val = jax_val
        def __jax_array__(self):
            # Conversion hook: jnp functions call this to obtain the jax value.
            return self.jax_val
        dtype = property(lambda self: self.jax_val.dtype)
        shape = property(lambda self: self.jax_val.shape)

    x = AlexArray(jnp.array([1., 2., 3.]))
    y = jnp.sin(x)
    self.assertAllClose(y, jnp.sin(jnp.array([1., 2., 3.])))
    y = api.grad(api.jit(lambda x: jnp.sin(x).sum()))(x)
    self.assertAllClose(y, jnp.cos(jnp.array([1., 2., 3.])))

    x = AlexArray(jnp.array([[1., 2., 3.]]))
    y = api.pmap(jnp.sin)(x)
    self.assertAllClose(y, jnp.sin(jnp.array([[1., 2., 3.]])))

    x = jnp.array(1)
    a = AlexArray(x)
    # numpy-style introspection helpers agree for the wrapper and the array.
    for f in [jnp.isscalar, jnp.size, jnp.shape, jnp.dtype]:
        self.assertEqual(f(x), f(a))
def test_constant_handler_mro(self):
    """IntEnum constants returned from pmapped functions are converted correctly."""
    class Foo(enum.IntEnum):
        bar = 1

    ans = api.pmap(lambda _: Foo.bar)(jnp.arange(1))
    expected = jnp.arange(1) + 1
    self.assertAllClose(ans, expected)
def test_large_python_ints(self):
    """A huge Python int overflows in multiply but converts explicitly."""
    big = 2 ** 100
    with self.assertRaises(OverflowError):
        jnp.multiply(big, 3.)
    converted = lax.convert_element_type(big, jnp.float32)  # doesn't crash
    self.assertArraysEqual(converted, np.float32(big))
def test_dot_precision_context_manager(self):
    """jax.default_matmul_precision controls the precision recorded in jaxprs."""
    x = jnp.zeros((2, 2))

    with jax.default_matmul_precision(None):
        jnp.dot(x, x)  # doesn't crash
        jaxpr = jax.make_jaxpr(jnp.dot)(x, x)
    self.assertIn('precision=None', str(jaxpr))

    with jax.default_matmul_precision("bfloat16"):
        x @ x  # doesn't crash
        jaxpr = jax.make_jaxpr(op.matmul)(x, x)
    self.assertIn('Precision.DEFAULT', str(jaxpr))

    with jax.default_matmul_precision("tensorfloat32"):
        jnp.dot(x, x)  # doesn't crash
        jaxpr = jax.make_jaxpr(jnp.dot)(x, x)
    self.assertIn('Precision.HIGH', str(jaxpr))

    with jax.default_matmul_precision("float32"):
        jnp.dot(x, x)  # doesn't crash
        jaxpr = jax.make_jaxpr(jnp.dot)(x, x)
    self.assertIn('Precision.HIGHEST', str(jaxpr))

    # An explicit precision= argument overrides the context manager.
    dot = partial(jnp.dot, precision=lax.Precision.HIGHEST)
    with jax.default_matmul_precision("tensorfloat32"):
        dot(x, x)  # doesn't crash
        jaxpr = jax.make_jaxpr(dot)(x, x)
    self.assertIn('Precision.HIGHEST', str(jaxpr))
def test_dot_precision_flag(self):
    """The matmul-precision flag takes effect via direct assignment or config.update."""
    x = jnp.zeros((2, 2))

    # Set via raw FLAGS assignment, restoring the previous value afterwards.
    prev_val = config._read("jax_default_matmul_precision")
    try:
        config.FLAGS.jax_default_matmul_precision = "tensorfloat32"
        jnp.dot(x, x)  # doesn't crash
        jaxpr = jax.make_jaxpr(jnp.dot)(x, x)
    finally:
        config.FLAGS.jax_default_matmul_precision = prev_val
    self.assertIn('Precision.HIGH', str(jaxpr))
    self.assertEqual(prev_val, config._read("jax_default_matmul_precision"))

    # Set via config.update, restoring the previous value afterwards.
    prev_val = config._read("jax_default_matmul_precision")
    try:
        config.update('jax_default_matmul_precision','tensorfloat32')
        jnp.dot(x, x)  # doesn't crash
        jaxpr = jax.make_jaxpr(jnp.dot)(x, x)
    finally:
        config.update('jax_default_matmul_precision', prev_val)
    self.assertIn('Precision.HIGH', str(jaxpr))
    self.assertEqual(prev_val, config._read("jax_default_matmul_precision"))
def test_dot_precision_forces_retrace(self):
    """Changing default matmul precision invalidates jit/cond tracing caches."""
    num_traces = 0

    def g(x):
        nonlocal num_traces
        num_traces += 1
        return jnp.dot(x, x)

    def f_cond(x):
        return lax.cond(True, g, g, x)

    @jax.jit
    def f_jit(x):
        nonlocal num_traces
        num_traces += 1
        return jnp.dot(x, x)

    for f in [f_jit, f_cond]:
        precision = config.jax_default_matmul_precision
        try:
            num_traces = 0
            x = jnp.zeros((2, 2))
            f(x)
            self.assertEqual(num_traces, 1)
            f(x)  # cache hit: no retrace
            self.assertEqual(num_traces, 1)
            with jax.default_matmul_precision("tensorfloat32"):
                f(x)  # precision change forces a retrace
                self.assertEqual(num_traces, 2)
                FLAGS.jax_default_matmul_precision = "float32"
                f(x)  # flag change forces another retrace
                self.assertGreaterEqual(num_traces, 2)
            nt = num_traces
            f(x)
            self.assertEqual(num_traces, nt + 1)
            f(x)  # cache hit again
            self.assertEqual(num_traces, nt + 1)
        finally:
            FLAGS.jax_default_matmul_precision = precision
def test_rank_promotion_forces_retrace(self):
    """Changing jax_numpy_rank_promotion invalidates jit/cond tracing caches.

    Mirrors test_dot_precision_forces_retrace, but for the rank-promotion
    config. A trace counter is bumped inside the traced function; each
    config change must force a retrace, while repeated calls under an
    unchanged config must hit the cache.

    Bug fix: the original loop body redefined ``f`` with a fresh
    ``@jax.jit`` function, shadowing the loop variable, so ``f_jit`` and
    ``f_cond`` were never actually exercised. The shadowing definition is
    removed so the loop tests both callables, matching the sibling
    test_dot_precision_forces_retrace.
    """
    num_traces = 0

    def g(x):
        nonlocal num_traces
        num_traces += 1
        return x + x

    def f_cond(x):
        return lax.cond(True, g, g, x)

    @jax.jit
    def f_jit(x):
        nonlocal num_traces
        num_traces += 1
        return x + x

    for f in [f_jit, f_cond]:
        allow_promotion = config.jax_numpy_rank_promotion
        try:
            num_traces = 0
            x = jnp.zeros((2, 2))
            f(x)
            self.assertEqual(num_traces, 1)
            f(x)  # cache hit: no retrace
            self.assertEqual(num_traces, 1)
            with jax.numpy_rank_promotion("warn"):
                f(x)  # config change forces a retrace
                self.assertEqual(num_traces, 2)
            FLAGS.jax_numpy_rank_promotion = "raise"
            f(x)  # flag change forces another retrace
            self.assertGreaterEqual(num_traces, 2)
            nt = num_traces
            f(x)
            self.assertEqual(num_traces, nt + 1)
            f(x)  # cache hit again
            self.assertEqual(num_traces, nt + 1)
        finally:
            # Always restore the process-wide flag.
            FLAGS.jax_numpy_rank_promotion = allow_promotion
def test_backward_pass_ref_dropping(self):
    """The backward pass drops references to cotangents it has consumed."""
    refs = []

    @api.custom_vjp
    def f(x):
        return x
    def f_fwd(x):
        return x, None
    def f_rev(_, g):
        # By the time the third rule invocation runs, the first cotangent
        # must already have been garbage-collected (its weakref is dead).
        assert len(refs) != 2 or refs[0]() is None
        zero = np.zeros(())
        refs.append(weakref.ref(zero))
        return (zero,)
    f.defvjp(f_fwd, f_rev)

    api.grad(lambda x: f(f(f(x))))(1.)
def test_custom_vjp_scan_batching_edge_case(self):
    """vjp of a vmapped scan over a custom_vjp function with an unbatched arg."""
    # https://github.com/google/jax/issues/5832
    @jax.custom_vjp
    def mul(x, coeff): return x * coeff
    def mul_fwd(x, coeff): return mul(x, coeff), (x, coeff)
    def mul_bwd(res, g):
        x, coeff = res
        g_x = g * coeff
        # coeff is shared across the batch (in_axes=None), so sum its cotangent.
        g_coeff = (x * g).sum()
        return g_x, g_coeff
    mul.defvjp(mul_fwd, mul_bwd)

    def scan_over_mul(x, coeff):
        def f_(x, t):
            return mul(x, coeff), None
        y, _ = jax.lax.scan(f_, x, jnp.arange(3))
        return y

    key = jax.random.PRNGKey(0)
    key1, key2 = jax.random.split(key, 2)
    x_batch = jax.random.normal(key1, (3, 2))
    covector_batch = jax.random.normal(key2, (3, 2))
    coeff = jnp.array(1.)

    batched_scan_over_mul = jax.vmap(scan_over_mul, in_axes=(0, None), out_axes=0)
    res, vjp_fun = jax.vjp(batched_scan_over_mul, x_batch, coeff)
    vjp_fun(covector_batch)  # doesn't crash

    jtu.check_grads(batched_scan_over_mul, (x_batch, coeff), order=2,
                    modes=['rev'])
def test_jit_inline(self):
    """inline=True elides the xla_call wrapper in the jaxpr; inline=False keeps it."""
    double = api.jit(lambda x: x * 2, inline=False)
    self.assertIn('xla_call', str(api.make_jaxpr(double)(3)))

    double_inlined = api.jit(lambda x: x * 2, inline=True)
    self.assertNotIn('xla_call', str(api.make_jaxpr(double_inlined)(3)))
def test_compute_with_large_transfer(self):
    """jit matches eager results when large arrays are transferred per call."""
    def add_delta(x, delta):
        return x + jnp.asarray(delta, x.dtype)

    batches = np.random.uniform(0., 1., size=(10, 131, 111, 3)).astype(np.float32)
    for x in batches:
        delta = np.random.uniform(-0.5, 0.5, size=())
        jitted = api.jit(add_delta)
        np.testing.assert_allclose(jitted(x, delta), add_delta(x, delta))
def test_vjp_fun_jit(self):
    """A vjp function produced inside a jitted helper can itself be jitted."""
    double = lambda x: 2. * x

    @partial(jit, static_argnums=0)
    def make_vjp(fn, x):
        _, vjp_fn = api.vjp(fn, x)
        return vjp_fn

    vjp_fn = make_vjp(double, 1.)
    result = jit(lambda fn, x: fn(x))(vjp_fn, 3.)
    self.assertEqual(result, (6.,))
def test_linearize_fun_jit(self):
    """A jvp function produced inside a jitted helper can itself be jitted."""
    double = lambda x: 2. * x

    @partial(jit, static_argnums=0)
    def make_jvp(fn, x):
        _, jvp_fn = api.linearize(fn, x)
        return jvp_fn

    jvp_fn = make_jvp(double, 1.)
    result = jit(lambda fn, x: fn(x))(jvp_fn, 3.)
    self.assertEqual(result, 6.)
def test_linear_transpose_fun_jit(self):
    """A transposed function produced inside a jitted helper can itself be jitted."""
    double = lambda x: 2. * x

    @partial(jit, static_argnums=0)
    def make_transpose(fn, x):
        return api.linear_transpose(fn, x)

    transposed = make_transpose(double, 1.)
    result = jit(lambda fn, x: fn(x))(transposed, 3.)
    self.assertEqual(result, (6.,))
def test_leaked_tracer_issue_7613(self):
import numpy.random as npr
def sigmoid(x):
return 1. / (1. + jnp.exp(-x))
x = jnp.ones((50,))
A = jnp.array(npr.randn(50, 50))
@jax.jit
def loss(A, x):
h = jax.nn.sigmoid(A * x)
return jnp.sum((h - x)**2)
with jax.checking_leaks():
_ = jax.grad(loss)(A, x)
def test_vmap_caching(self):
    """Repeated hessian-of-jit calls must not recompile after a warmup phase."""
    # https://github.com/google/jax/issues/7621

    f = lambda x: jnp.square(x).mean()
    jf = jax.jit(f)
    x = jax.random.uniform(jax.random.PRNGKey(0), shape=(8, 4))

    with jtu.count_jit_and_pmap_compiles() as count:  # noqa: F841
        for _ in range(5):
            jax.hessian(jf)(x).block_until_ready()

        n = count[0]
        # The exact number of compilations may vary depending on the number of
        # jit decorators in the function above, but it should not grow after an
        # initial warmup phase.
        for _ in range(5):
            jax.hessian(jf)(x).block_until_ready()

    self.assertEqual(count[0], n)
def test_jnp_array_doesnt_device_put(self):
    """Tracing jnp.array of a Python scalar performs no device transfer."""
    with jtu.count_device_put() as transfers:
        api.make_jaxpr(lambda: jnp.array(3))()
    self.assertEqual(transfers[0], 0)
class RematTest(jtu.JaxTestCase):
@parameterized.named_parameters(
    {"testcase_name": f"{suffix}", "remat": remat}
    for suffix, remat in [
        ('', api.remat),
        ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
        ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
    ])
def test_remat_basic(self, remat):
    """remat preserves values and derivatives and recomputes primals on the backward pass."""
    @remat
    def g(x):
        return lax.sin(lax.sin(x)), 3.

    def f(x):
        x, _ = g(x)
        return x

    ans = f(2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans, f_lin = api.linearize(f, 2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = f_lin(3.)
    expected = np.cos(np.sin(2.)) * np.cos(2.) * 3.
    self.assertAllClose(ans, expected, check_dtypes=False)

    # Swap in counting impl rules to observe evaluations during f_lin.
    sin_calls = []
    cos_calls = []
    sin_impl = lax.sin_p.impl
    cos_impl = lax.cos_p.impl
    try:
        lax.sin_p.def_impl(lambda x: sin_calls.append(1) or sin_impl(x))
        lax.cos_p.def_impl(lambda x: cos_calls.append(1) or cos_impl(x))
        f_lin(3.)
    finally:
        # Always restore the real impl rules.
        lax.sin_p.def_impl(sin_impl)
        lax.cos_p.def_impl(cos_impl)
    self.assertEqual(len(sin_calls), 1)
    self.assertEqual(len(cos_calls), 2)
@parameterized.named_parameters(
    {"testcase_name": f"{suffix}", "remat": remat}
    for suffix, remat in [
        ('', api.remat),
        ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
        ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
    ])
def test_remat_freevars(self, remat):
    """remat of a function closing over a traced free variable matches no-remat."""
    def f1(x):
        y = 2 * jnp.sin(x)
        z = jnp.cos(x) * jnp.sin(y)
        return z

    def f2(x):
        y = 2 * jnp.sin(x)
        # y is a free variable of the rematted lambda.
        z = remat(lambda x: jnp.cos(x) * jnp.sin(y))(x)
        return z

    ans, f_lin = api.linearize(f2, 2.)
    expected, f_lin_expected = api.linearize(f1, 2.)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = f_lin(3.)
    expected = f_lin_expected(3.)
    self.assertAllClose(ans, expected, check_dtypes=False)
def test_remat_grad_python_control_flow(self):
    """remat(concrete=True) permits Python branching on argument values."""
    @partial(api.remat, concrete=True)
    def g(x):
        return (lax.sin(x), 3.) if x > 0 else (lax.cos(x), 4.)

    def f(x):
        return g(x)[0]

    self.assertAllClose(f(2.), np.sin(2.), check_dtypes=False)
    self.assertAllClose(api.grad(f)(2.), np.cos(2.), check_dtypes=False)
@parameterized.named_parameters(
    {"testcase_name": f"{suffix}", "remat": remat}
    for suffix, remat in [
        ('', api.remat),
        ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
        ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
    ])
def test_remat_jit(self, remat):
    """remat composes with jit, grad-of-jit, and jit-of-grad."""
    @remat
    def g(x):
        return lax.sin(lax.sin(x))

    def f_(x):
        return g(x)
    f = api.jit(f_)

    ans = f(2.)
    expected = np.sin(np.sin(2.))
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(f)(2.)
    expected = np.cos(np.sin(2.)) * np.cos(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jit(api.grad(f_))(2.)
    expected = np.cos(np.sin(2.)) * np.cos(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
    {"testcase_name": f"{suffix}", "remat": remat}
    for suffix, remat in [
        ('', api.remat),
        ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
        ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
    ])
def test_remat_vmap(self, remat):
    """remat composes with vmap, jacfwd, and jacrev."""
    @remat
    def g(x):
        return lax.sin(lax.sin(x))

    x = np.arange(3.)

    ans = api.vmap(g)(x)
    expected = np.sin(np.sin(x))
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jacfwd(g)(x)
    expected = np.diag(np.cos(np.sin(x)) * np.cos(x))
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jacrev(g)(x)
    expected = np.diag(np.cos(np.sin(x)) * np.cos(x))
    self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
    {"testcase_name": f"{suffix}", "remat": remat}
    for suffix, remat in [
        ('', api.remat),
        ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
        ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
    ])
def test_remat_higher_order_autodiff(self, remat):
    """Second-order gradients agree with and without rematerialization."""
    def f(x):
        return lax.cos(lax.sin(x))

    self.assertAllClose(api.grad(api.grad(remat(f)))(3.),
                        api.grad(api.grad(f))(3.),
                        check_dtypes=False)
def test_remat_scan(self):
    """remat of a scan body matches no-remat and stages cos into the scan jaxpr."""
    to_scan = lambda c, x: (jnp.sin(c), None)

    def f_noremat(x):
        y, _ = lax.scan(to_scan, x, np.arange(3.))
        return y

    def f_yesremat(x):
        y, _ = lax.scan(api.remat(to_scan), x, np.arange(3.))
        return y

    ans = f_yesremat(4.)
    expected = f_noremat(4.)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(f_yesremat)(4.)
    expected = api.grad(f_noremat)(4.)
    self.assertAllClose(ans, expected, check_dtypes=False)

    # The derivative work (cos) appears inside the scan's jaxpr.
    jaxpr = api.make_jaxpr(api.linearize(f_yesremat, 4.)[1])(1.)
    scan_eqn, = jaxpr.jaxpr.eqns
    self.assertIn(' cos ', str(scan_eqn.params['jaxpr']))

    jaxpr = api.make_jaxpr(api.vjp(f_yesremat, 4.)[1])(1.)
    scan_eqn, = jaxpr.jaxpr.eqns
    self.assertIn(' cos ', str(scan_eqn.params['jaxpr']))
@parameterized.named_parameters(
    {"testcase_name": f"{suffix}", "remat": remat}
    for suffix, remat in [
        ('', api.remat),
        ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
        ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
    ])
def test_remat_no_redundant_flops(self, remat):
    """grad of jit-of-remat must not evaluate the rematted primal redundantly."""
    # see https://github.com/google/jax/pull/1749#issuecomment-558267584
    @api.jit
    def g(x):
        return f(2., x)

    @remat
    def f(x, y):
        return jnp.sin(x) * y

    # We swap out sin_p's impl rule to count how many times it's invoked
    called = []
    sin_impl = lax.sin_p.impl
    try:
        lax.sin_p.def_impl(lambda x: called.append(1) or sin_impl(x))
        api.grad(g)(3.)
    finally:
        # Always restore the real impl rule.
        lax.sin_p.def_impl(sin_impl)
    num_calls = len(called)
    self.assertLessEqual(num_calls, 1)
@parameterized.named_parameters(
    {"testcase_name": f"{suffix}", "remat": remat}
    for suffix, remat in [
        ('', api.remat),
        ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
        ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
    ])
def test_remat_binomial_checkpointing(self, remat):
    """Recursive binomial checkpointing matches direct function composition."""
    def binom_checkpoint(funs):
        if len(funs) == 1:
            return funs[0]
        half = len(funs) // 2
        left = binom_checkpoint(funs[:half])
        right = binom_checkpoint(funs[half:])
        return remat(lambda x: left(right(x)))

    checkpointed = binom_checkpoint([jnp.sin, jnp.sin, jnp.sin, jnp.sin])
    direct = lambda x: jnp.sin(jnp.sin(jnp.sin(jnp.sin(x))))
    x = 4.
    self.assertAllClose(checkpointed(x), direct(x), check_dtypes=False)
    self.assertAllClose(api.grad(checkpointed)(x), api.grad(direct)(x),
                        check_dtypes=False)
def test_remat_symbolic_zeros(self):
    """grad through remat-in-scan with symbolic-zero tangents must not crash."""
    # code from https://github.com/google/jax/issues/1907

    key = jax.random.PRNGKey(0)
    key, split = jax.random.split(key)
    n = 5

    def func(D0):
        def shift(R, dR, **unused_kwargs):
            return R + dR

        def apply_fn(R):
            return D0 * R

        Rinit = jax.random.uniform(split, (n,3), minval=0.0, maxval=5.0,
                                   dtype=jnp.float32)

        def move(R,i):
            F = apply_fn(R)
            # The second output is a constant, so its tangent is a symbolic zero.
            return shift(R, 0.001 * F), jnp.array([0.])

        move = api.remat(move)
        R, temp = lax.scan(move, Rinit, jnp.arange(2))
        return R[0, 0]

    api.grad(func)(5.0)  # doesn't crash
@parameterized.named_parameters(
    {"testcase_name": f"{suffix}", "remat": remat}
    for suffix, remat in [
        ('', api.remat),
        ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
        ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
    ])
def test_remat_jit2(self, remat):
    """remat of a zero-argument closure over a jit-traced value works."""
    @api.jit
    def f(x):
        y = 2 * x
        return remat(lambda: y)()

    self.assertAllClose(f(3), 6, check_dtypes=False)
def test_remat_nontrivial_env(self):
    """remat of a stepper with default-argument parameters under jit+scan works."""
    @api.remat
    def foo(state, dt=0.5, c=1):
        u, u_t = state
        u_tt = c**2 * u
        u_t = u_t + u_tt * dt
        return (u, u_t)

    @partial(api.jit, static_argnums=(1,))
    def _multi_step(state, count, dt, c):
        f = lambda s, _: (foo(s, dt, c), _)
        return lax.scan(f, state, None, count)

    def multi_step(state, count, dt=1/jnp.sqrt(2), c=1):
        return _multi_step(state, count, dt, c)

    def loss(u0, target, steps, dt=1/jnp.sqrt(2), c=1):
        init = (u0, jnp.zeros_like(u0))
        (uf, _), _ = multi_step(init, steps, dt, c)
        return ((uf - target) ** 2).mean()

    target = jnp.zeros((128, 128))
    u0 = jnp.ones_like(target)
    loss(u0, target, 10)  # doesn't crash
@parameterized.named_parameters(
    {"testcase_name": f"{suffix}", "remat": remat}
    for suffix, remat in [
        ('', api.remat),
        ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
        ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
    ])
def test_remat_jit3(self, remat):
    """grad of remat over einsum and nested-jit code must not crash."""
    # https://github.com/google/jax/issues/2180
    def f(w, x):
        a = jnp.dot(x, w)
        b = jnp.einsum("btd,bTd->btT", a, a)
        c = jnp.einsum("btT,btd->btd", b, a)
        return jnp.sum(c)

    w = jnp.ones([1, 1])
    x = jnp.ones([1, 1, 1])
    f = remat(f)
    api.grad(f)(w, x)  # doesn't crash

    @api.jit
    def mul(a, b):
        return a * b

    def f(w, x):
        a = mul(w, x)
        b = mul(a, a)
        return b

    w = 1.
    x = 1.
    f = remat(f)
    api.grad(f)(w, x)  # doesn't crash
def test_remat_scan2(self):
    """grad through remat of a nested scan must not crash."""
    # https://github.com/google/jax/issues/1963
    def scan_bug(x0):
        step = lambda x, _: (x + 1, None)

        def scanned(x, _):
            return lax.scan(step, x, xs=None, length=1)[0], None

        return jax.remat(scanned)(x0, None)[0]

    jax.grad(scan_bug)(1.0)  # doesn't crash
def test_remat_jit_static_argnum_omnistaging(self):
):
f_ = lu.wrap_init(lambda: (f(*args),))
out, = core.call_p.bind(f_)
return out
return named_f
def f(a_bool, y):
if a_bool:
return y + 1
else:
return y
api.jit(named_call(f), static_argnums=0)(True, 1) # no crash
@parameterized.named_parameters(
    {"testcase_name": f"{suffix}", "remat": remat}
    for suffix, remat in [
        ('', api.remat),
        ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
        ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
    ])
def test_remat_eval_counter(self, remat):
    """Counts primitive evaluations to pin down remat's recomputation behavior."""
    # https://github.com/google/jax/issues/2737
    add_one_p = Primitive('add_one')
    add_one = add_one_p.bind

    num_evals = 0

    @contextmanager
    def assertEvals(n):
        # Asserts exactly n impl-rule evaluations happen inside the block.
        start = num_evals
        yield
        assert num_evals - start == n

    def add_one_impl(x):
        nonlocal num_evals
        num_evals += 1
        return x + 1
    add_one_p.def_impl(add_one_impl)

    def add_one_jvp(pin, tin):
        pout = add_one(pin[0])
        return pout, pout * tin[0]
    ad.primitive_jvps[add_one_p] = add_one_jvp

    add_one_p.def_abstract_eval(lambda x: x)

    v = np.zeros((1,))

    f = remat(add_one)
    g = remat(lambda x: add_one(f(x)))

    # 2 calls needed to evaluate g
    with assertEvals(2):
        _, vjp = jax.vjp(g, v)
    # 2 calls made while transposing g, 1 call made while transposing f
    with assertEvals(3):
        vjp(v)

    @jax._src.util.curry
    def call(f, *args):
        return jax.core.call(
            jax.linear_util.wrap_init(lambda *args: [f(*args)]),
            *args, name='foo')[0]

    f = call(add_one)
    g = remat(lambda x: add_one(f(x)))

    # 2 calls needed to evaluate g
    with assertEvals(2):
        _, vjp = jax.vjp(g, v)
    # 2 calls made while transposing g, no reevaluation for transposition of f
    with assertEvals(2):
        vjp(v)
@parameterized.named_parameters(
    {"testcase_name": f"{suffix}", "remat": remat}
    for suffix, remat in [
        ('', api.remat),
        ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
        ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
    ])
def test_escaped_tracer_remat(self, remat):
    """Leaking a tracer out of a rematted function via global state is reported."""
    # b/169779185
    def f():
        seq = [jnp.zeros([])]
        def g():
            # Mutates the shared list, leaking a tracer across remat calls.
            seq[0] += 1  # this is line 7 btw
            return seq[0]

        remat(g)()
        remat(g)()

    with self.assertRaisesRegex(UnexpectedTracerError, "global state"):
        api.jit(f)()
@parameterized.named_parameters(
    {"testcase_name": f"{suffix}", "remat": remat}
    for suffix, remat in [
        ('', api.remat),
        ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
        ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
    ])
def test_no_cse_widget_on_primals(self, remat):
    """The CSE-blocking HLO (while/conditional) appears only when differentiating."""
    @remat
    def g(x):
        return lax.sin(lax.sin(x)), 3.

    def f(x):
        x, _ = g(x)
        return x

    # Primal-only lowering: no CSE-prevention constructs.
    c = api.xla_computation(f)(2.)
    self.assertNotIn('while', c.as_hlo_text())
    self.assertNotIn('conditional', c.as_hlo_text())

    # Gradient lowering: the CSE-prevention construct is present.
    c = api.xla_computation(grad(f))(2.)
    text = c.as_hlo_text()
    self.assertTrue('while' in text or 'conditional' in text)
def test_no_cse_widget_with_prevent_cse_false(self):
    """prevent_cse=False omits the CSE-blocking HLO even in gradients."""
    @partial(api.remat, prevent_cse=False)
    def g(x):
        return lax.sin(lax.sin(x)), 3.

    def f(x):
        x, _ = g(x)
        return x

    c = api.xla_computation(f)(2.)
    self.assertNotIn('while', c.as_hlo_text())
    self.assertNotIn('conditional', c.as_hlo_text())

    # Unlike the default, the gradient lowering also has no such constructs.
    c = api.xla_computation(grad(f))(2.)
    self.assertNotIn('while', c.as_hlo_text())
    self.assertNotIn('conditional', c.as_hlo_text())
@parameterized.named_parameters(
    {"testcase_name": f"_{policy_name}", "policy": policy,
     "in_jaxpr2": in_jaxpr2, "not_in_jaxpr2": not_in_jaxpr2}
    for policy_name, policy, in_jaxpr2, not_in_jaxpr2 in [
        ('save_anything', lambda *_, **__: True, [], [' sin ', ' cos ']),
        ('save_nothing', lambda *_, **__: False, [' sin ', ' cos '], []),
        ('save_sin', lambda p, *_, **__: str(p) == 'sin', [' cos '], [' sin ']),
    ])
def test_remat_custom_policy(self, policy, in_jaxpr2, not_in_jaxpr2):
    """A remat policy controls which primitives remain in the residual jaxpr."""
    for square in [lambda x: x * x, api.jit(lambda x: x * x)]:
        f = api.remat(lambda x: jnp.sin(square(jnp.sin(x))),
                      policy=policy)
        y, f_lin = api.linearize(f, 1.)
        ydot = f_lin(2.)
        # Inspect the jaxpr of the linearized (tangent) function.
        jaxpr_text = str(f_lin.func.args[0])
        for substr in in_jaxpr2:
            self.assertIn(substr, jaxpr_text)
        for substr in not_in_jaxpr2:
            self.assertNotIn(substr, jaxpr_text)
        y_expected, ydot_expected = api.jvp(lambda x: jnp.sin(square(jnp.sin(x))),
                                            [1.], [2.])
        self.assertAllClose(y, y_expected)
        self.assertAllClose(ydot, ydot_expected)
        jtu.check_grads(f, (3.,), order=2, modes=['fwd', 'rev'])
def test_remat_custom_policy_save_cos(self):
    """A save-'cos' policy leaves neither sin nor cos in the residual jaxpr here."""
    save_cos = lambda prim, *_, **__: str(prim) == 'cos'
    f = api.remat(lambda x: jnp.sin(jnp.sin(x)),  # different function
                  policy=save_cos)
    _, f_lin = api.linearize(f, 1.)
    jaxpr_text = str(f_lin.func.args[0])
    self.assertNotIn(' sin ', jaxpr_text)
    self.assertNotIn(' cos ', jaxpr_text)
    jtu.check_grads(f, (3.,), order=2, modes=['fwd', 'rev'])
def test_remat_checkpoint_dots(self):
    """checkpoint_dots policy: expected sin/dot counts in the linearized jaxpr."""
    @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(x):
        x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
        x = jnp.sin(x)
        x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
        x = jnp.sin(x)
        x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
        x = jnp.sin(x)
        return x

    _, f_lin = api.linearize(f, jnp.ones((2, 2)))
    jaxpr_text = str(f_lin.func.args[0])
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    self.assertEqual(jaxpr_text.count(' dot_'), 6)
    jtu.check_grads(f, (jnp.ones((2, 2)),), order=2, modes=['fwd', 'rev'])
def test_remat_checkpoint_dots_with_no_batch_dims(self):
    """checkpoint_dots_with_no_batch_dims policy on batch-free einsum contractions."""
    @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots_with_no_batch_dims)
    def f(x):
        x = jnp.einsum('ij,jk->ik', x, x, precision=lax.Precision.HIGHEST)
        x = jnp.sin(x)
        x = jnp.einsum('ij,jk->ik', x, x, precision=lax.Precision.HIGHEST)
        x = jnp.sin(x)
        x = jnp.einsum('ij,jk->ik', x, x, precision=lax.Precision.HIGHEST)
        x = jnp.sin(x)
        return x

    _, f_lin = api.linearize(f, jnp.ones((2, 2)))
    jaxpr_text = str(f_lin.func.args[0])
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    self.assertEqual(jaxpr_text.count(' dot_general'), 6)
    jtu.check_grads(f, (jnp.ones((2, 2)),), order=2, modes=['fwd', 'rev'])
  def test_remat_checkpoint_dots_with_no_batch_dims2(self):
    """Batched ('nij,njk') einsums are NOT saved by the no-batch-dims policy: 9 dots appear."""
    @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots_with_no_batch_dims)
    def f(x):
      x = jnp.einsum('nij,njk->nik', x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      x = jnp.einsum('nij,njk->nik', x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      x = jnp.einsum('nij,njk->nik', x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x)
      return x

    _, f_lin = api.linearize(f, jnp.ones((3, 2, 2)))
    jaxpr_text = str(f_lin.func.args[0])
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    # One extra set of 3 dot_generals vs. the unbatched case: the batched
    # contractions must be recomputed rather than saved.
    self.assertEqual(jaxpr_text.count(' dot_general'), 9)
    jtu.check_grads(f, (jnp.ones((3, 2, 2)),), order=2, modes=['fwd', 'rev'])
  def test_remat_checkpoint_dots_jit(self):
    """checkpoint_dots behaves the same when the remat'd function is wrapped in jit."""
    @api.jit
    @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(x):
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x * 1e-3)
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x * 1e-3)
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = jnp.sin(x * 1e-3)
      return x

    _, f_lin = api.linearize(f, jnp.ones((2, 2)))
    jaxpr_text = str(f_lin.func.args[0])
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    self.assertEqual(jaxpr_text.count(' dot_'), 6)
    jtu.check_grads(f, (jnp.ones((2, 2)),), order=2, modes=['fwd', 'rev'])
  def test_remat_checkpoint_dots_inside_scan(self):
    """checkpoint_dots composes with lax.scan; residual counts show sines are recomputed."""
    x = jnp.ones((5,))

    def f(W):
      @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)
      def f(x):
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        x = jnp.sin(jnp.dot(x, W, precision=lax.Precision.HIGHEST))
        return x

      def body(x, _): return f(x), None
      return lax.scan(body, x, None, length=2)[0]

    _, f_vjp = api.vjp(f, jnp.ones((5, 5)))
    jaxpr_text = str(f_vjp.args[0].func.args[1])

    # Sines are not saved under this policy, so the backward pass
    # rematerializes two of them; their cosine cotangents appear as well.
    self.assertEqual(jaxpr_text.count(' sin '), 2)
    self.assertEqual(jaxpr_text.count(' cos '), 3)
    self.assertEqual(jaxpr_text.count(' dot_'), 6)

    jtu.check_grads(api.jit(f), (jnp.ones((5, 5)),), order=2,
                    modes=['fwd', 'rev'])
  def test_remat_custom_jvp_policy(self):
    """A remat policy works across a custom_jvp call, directly and under scan."""
    @api.custom_jvp
    def sin(x):
      return jnp.sin(x)
    def sin_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return sin(x), jnp.cos(x) * g
    sin.defjvp(sin_jvp)

    @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(x):
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = sin(x * 1e-3)
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = sin(x * 1e-3)
      x = jnp.dot(x, x, precision=lax.Precision.HIGHEST)
      x = sin(x * 1e-3)
      return x

    jtu.check_grads(f, (3.,), order=2, modes=['fwd', 'rev'])

    def g(x):
      return lax.scan(lambda x, _: (f(x), None), x, None, length=2)[0]
    jtu.check_grads(g, (3.,), order=2, modes=['fwd', 'rev'])
  def test_remat_custom_vjp_policy(self):
    """A remat policy works across a custom_vjp call (rev mode only), incl. under scan."""
    @api.custom_vjp
    def sin(x):
      return jnp.sin(x)
    def sin_fwd(x):
      return sin(x), x
    def sin_bwd(x, y_bar):
      return (jnp.cos(x) * y_bar,)
    sin.defvjp(sin_fwd, sin_bwd)

    @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(x):
      # named_call wraps the dots to exercise the policy through call primitives.
      @partial(api.named_call, name="dot")
      def dot2(y, z):
        return jnp.dot(x, jnp.dot(y, z, precision=lax.Precision.HIGHEST),
                       precision=lax.Precision.HIGHEST)
      x = dot2(x, x)
      x = sin(x * 1e-3)
      x = dot2(x, x)
      x = sin(x * 1e-3)
      x = dot2(x, x)
      x = sin(x * 1e-3)
      return x

    jtu.check_grads(f, (3.,), order=2, modes=['rev'])

    def g(x):
      return lax.scan(lambda x, _: (f(x), None), x, None, length=2)[0]
    jtu.check_grads(g, (3.,), order=2, modes=['rev'])
  def test_remat_dropvar_policy(self):
    """grad-inside-remat with dropped output vars doesn't crash under a policy."""
    def f(x):
      return x, x

    @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def g(x):
      x = api.grad(lambda x: f(x)[0])(x)
      return x

    api.grad(g)(3.)
  def test_remat_custom_jvp_linear_policy(self):
    """A linear custom_jvp (tangent rule reuses the primal fn) works under a remat policy."""
    @api.custom_jvp
    def sum(x):
      return jnp.sum(x, axis=0)
    @sum.defjvp
    def sum_jvp(primals, tangents):
      (x,), (xdot,) = primals, tangents
      return sum(x), sum(xdot)

    @partial(api.remat, policy=jax.checkpoint_policies.checkpoint_dots)
    def f(x):
      return sum(x)
    jtu.check_grads(f, (jnp.ones(3),), order=2, modes=['fwd', 'rev'])

    def g(x):
      return lax.scan(lambda _, x: (None, f(x)), None, x)[1]
    jtu.check_grads(g, (jnp.ones((2, 3)),), order=2, modes=['fwd', 'rev'])
  def test_constants_not_hoisted(self):
    """Trace-time constants are not hoisted out as saved residuals."""
    # The old implementation of remat worked by data dependence, and so
    # (potentially large) constants would not be rematerialized.
    @partial(new_checkpoint, policy=lambda *_, **__: False)
    def f(x):
      return jnp.einsum('ii->i', x)
    res_avals = saved_residuals(f, jnp.ones((2, 2)))
    self.assertLen(res_avals, 0)

    @partial(new_checkpoint, policy=lambda *_, **__: False)
    def f(x):
      return jnp.zeros_like(x) * x
    res_avals = saved_residuals(f, jnp.ones((2, 2)))
    self.assertLen(res_avals, 0)

    # Here the sin's cotangent needs a residual, so exactly one is saved.
    @partial(new_checkpoint, policy=lambda *_, **__: False)
    def f(x):
      return jnp.zeros_like(x) * jnp.sin(x)
    res_avals = saved_residuals(f, jnp.ones((2, 2)))
    self.assertLen(res_avals, 1)
  def test_name_denylist(self):
    """save_any_names_but_these: named values on the denylist are not saved as residuals."""
    def f(x):
      y = checkpoint_name(jnp.multiply(2., 2.), 'y')
      z = checkpoint_name(jnp.multiply(2., 2.), 'z')
      w = checkpoint_name(jnp.multiply(2., 2.), 'w')
      u = jnp.multiply(2., 2.)  # unnamed, never saveable via names
      return (((x * y) * z) * w) * u

    policy = jax.checkpoint_policies.save_any_names_but_these('y', 'z', 'w')
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 0)

    policy = jax.checkpoint_policies.save_any_names_but_these('z', 'w')
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 1)  # can save only y

    policy = jax.checkpoint_policies.save_any_names_but_these('w')
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 2)  # can save y and z

    policy = jax.checkpoint_policies.save_any_names_but_these()
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 3)  # can save y, z, and w
  def test_name_allowlist(self):
    """save_only_these_names: only named values on the allowlist are saved as residuals."""
    def f(x):
      y = checkpoint_name(jnp.multiply(2., 2.), 'y')
      z = checkpoint_name(jnp.multiply(2., 2.), 'z')
      w = checkpoint_name(jnp.multiply(2., 2.), 'w')
      u = jnp.multiply(2., 2.)  # unnamed, never on the allowlist
      return (((x * y) * z) * w) * u

    policy = jax.checkpoint_policies.save_only_these_names('y', 'z', 'w')
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 3)  # can save y, z, and w

    policy = jax.checkpoint_policies.save_only_these_names('z', 'w')
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 2)  # can save z and w

    policy = jax.checkpoint_policies.save_only_these_names('w')
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 1)  # can save w

    policy = jax.checkpoint_policies.save_only_these_names()
    res = saved_residuals(new_checkpoint(f, policy=policy), 1.)
    self.assertLen(res, 0)  # can't save anything!
  def test_saved_residuals_utility(self):
    """saved_residuals reports each residual's aval and a human-readable provenance string."""
    def f(x, y):
      x1, x2 = x
      z = checkpoint_name(jnp.sin(3.), 'z')
      return z * ((x1 * x2) * y) * np.array([3.])

    res = saved_residuals(f, (2., 3.), y=4.)
    self.assertLen(res, 6)
    # Provenance strings: constant, the two components of argument x,
    # argument y, the named value 'z', and one unattributed residual.
    self.assertEqual(res[0][0].shape, (1,))
    self.assertEqual(res[0][1], "from a constant")
    self.assertEqual(res[1][0].shape, ())
    self.assertEqual(res[1][1], "from the argument 'x'")
    self.assertEqual(res[2][0].shape, ())
    self.assertEqual(res[2][1], "from the argument 'x'")
    self.assertEqual(res[3][0].shape, ())
    self.assertEqual(res[3][1], "from the argument 'y'")
    self.assertEqual(res[4][0].shape, ())
    self.assertStartsWith(res[4][1], "named 'z'")
    self.assertEqual(res[5][0].shape, ())
  def test_saved_residuals_utility_literals(self):
    """A scalar multiply by a literal saves exactly one scalar residual."""
    res = saved_residuals(lambda x: x * 2., 3.)
    self.assertLen(res, 1)
    self.assertEqual(res[0][0].shape, ())
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', api.remat),
          ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_checkpoint_dropvars(self, remat):
    """grad of a remat'd function with a dropped jit output doesn't crash."""
    @remat
    def f(x):
      _, x = api.jit(lambda: (x, x))()
      return x

    _ = api.grad(f)(3.)
  def test_dce_keeps_eqns_with_used_outputs_but_no_used_inputs(self):
    """DCE must keep an equation whose output is used even if it consumes no inputs."""
    @new_checkpoint
    def f(x):
      c = jax.jit(lambda: 3.)()  # no-input jit call producing a used constant
      return c * x

    _ = jax.grad(f)(3.)  # doesn't crash
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', api.remat),
          ('_policy', partial(api.remat, policy=lambda *_, **__: False)),
          ('_new', partial(new_checkpoint, policy=lambda *_, **__: False)),
      ])
  def test_unit_dropvar_consistency_regression(self, remat):
    """Regression: linearize through remat with a unit arg and dropped jit output."""
    @partial(remat, policy=lambda *_, **__: False)
    def f(u, x):
      x, _ = jax.jit(lambda x: (x, u))(x)
      return x

    _ = api.linearize(partial(f, core.unit), 3.)
class JaxprTest(jtu.JaxTestCase):
  """Tests for api.make_jaxpr and the pretty-printed jaxpr representation."""

  def test_scalar_literals(self):
    """Scalar literals stay inline rather than becoming constvars."""
    jaxpr = api.make_jaxpr(lambda x: x + 2)(42)
    self.assertLen(jaxpr.jaxpr.constvars, 0)

  def test_abstract_inputs(self):
    """make_jaxpr accepts duck-typed objects exposing shape/dtype."""
    jaxpr = api.make_jaxpr(lambda x: x + 2.)(
        types.SimpleNamespace(shape=(), dtype=np.dtype(np.float32)))
    self.assertEqual(jaxpr.in_avals[0].shape, ())
    self.assertEqual(jaxpr.in_avals[0].dtype, np.float32)

  def test_const(self):
    """Array constants become constvars; scalar literals print inline."""
    def fun(x):
      return (x, 1., np.zeros(1, dtype=jnp.float32))
    expected = "{ lambda a:f32[1]; b:f32[]. let  in (b, 1.0, a) }"
    jaxpr = api.make_jaxpr(fun)(jnp.float32(0.))
    self.assertMultiLineStrippedEqual(expected, str(jaxpr))

  def test_cond(self):
    """lax.cond pretty-prints with its branch jaxprs and linearity info."""
    def f(x):
      return lax.cond(x >= 0.,
                      x + 1.,
                      lambda xt: xt + x,
                      x + 2.,
                      lambda xf: xf - x)
    expected = """{ lambda ; a:f32[]. let
    b:bool[] = ge a 0.0
    c:f32[] = add a 1.0
    d:f32[] = add a 2.0
    e:i32[] = convert_element_type[new_dtype=int32 weak_type=False] b
    f:f32[] = cond[
      branches=(
        { lambda ; g_:f32[] h:f32[] i:f32[] j:f32[]. let
            k:f32[] = sub j h
          in (k,) }
        { lambda ; l:f32[] m_:f32[] n:f32[] o:f32[]. let
            p:f32[] = add n l
          in (p,) }
      )
      linear=(False, False, False, False)
    ] e a a c d
  in (f,) }"""
    jaxpr = api.make_jaxpr(f)(jnp.float32(3.))
    self.assertMultiLineStrippedEqual(expected, str(jaxpr))

  def test_make_jaxpr_static_argnums(self):
    """Static args are baked into the jaxpr as constants."""
    def f(x, y):
      return x + y

    jaxpr = api.make_jaxpr(f, static_argnums=(1,))(2, 3)
    self.assertIn('3', str(jaxpr))

  def test_make_jaxpr_return_shape(self):
    """return_shape=True also yields a pytree of ShapeDtypeStructs."""
    _, shape_tree = api.make_jaxpr(lambda x: (x + 1, jnp.zeros(2, jnp.float32)),
                                   return_shape=True)(np.int32(1))
    expected = (api.ShapeDtypeStruct(shape=(), dtype=jnp.int32),
                api.ShapeDtypeStruct(shape=(2,), dtype=jnp.float32))
    self.assertEqual(shape_tree, expected)

  def test_make_jaxpr_axis_env(self):
    """axis_env lets collectives like psum trace outside pmap."""
    def f(x):
      return x - lax.psum(x, 'i')
    jaxpr = api.make_jaxpr(f, axis_env=[('i', 4)])(2)
    self.assertIn('psum', str(jaxpr))

  def test_make_jaxpr_named(self):
    """Named shapes on inputs propagate into equation invar avals."""
    def f(x):
      return x - lax.psum(x, 'i')

    x = api.ShapeDtypeStruct(
        shape=(2, 3), dtype=jnp.dtype(jnp.float32), named_shape={'i': 10})
    jaxpr = api.make_jaxpr(f, axis_env=[('i', 10)])(x)
    named_shapes = [v.aval.named_shape for v in jaxpr.jaxpr.eqns[1].invars]
    self.assertEqual(named_shapes, [{'i': 10}, {}])

  @parameterized.parameters(True, False)
  def test_vjp_reduce_axes_jaxpr(self, gy_batched):
    """vjp with reduce_axes inserts psums and drops the named batch axis from grads."""
    def f(w, x):
      return jnp.sin(jnp.dot(x, w))

    w = api.ShapeDtypeStruct(
        shape=(3, 4), dtype=jnp.float32, named_shape={})
    x = api.ShapeDtypeStruct(
        shape=(3,), dtype=jnp.float32, named_shape={'batch': 2})
    gy = api.ShapeDtypeStruct(
        shape=(4,), dtype=jnp.float32,
        named_shape={'batch': 2} if gy_batched else {})

    # per-example, i.e. not reduced: w's cotangent keeps the batch axis, no psum.
    jaxpr, shapes = api.make_jaxpr(
        lambda w, x, gy: api.vjp(f, w, x)[1](gy), axis_env=[('batch', 2)],
        return_shape=True)(w, x, gy)
    expected = (api.ShapeDtypeStruct(
        shape=(3, 4), dtype=jnp.float32, named_shape={'batch': 2}), x)
    self.assertEqual(shapes, expected)
    self.assertNotIn('psum', str(jaxpr))

    # reduced over the batch axis: cotangents match the inputs, psum appears.
    jaxpr, shapes = api.make_jaxpr(
        lambda w, x, gy: api.vjp(f, w, x, reduce_axes=('batch',))[1](gy),
        axis_env=[('batch', 2)],
        return_shape=True)(w, x, gy)
    expected = (w, x)
    self.assertEqual(shapes, expected)
    self.assertIn('psum', str(jaxpr))
class CustomJVPTest(jtu.JaxTestCase):
def test_basic(self):
@api.custom_jvp
def f(x):
return jnp.sin(x)
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), 2 * jnp.cos(x) * g
f.defjvp(f_jvp)
x = 3.
self.assertAllClose(f(x), jnp.sin(x))
self.assertAllClose(api.jvp(f, (x,), (1.,)),
(jnp.sin(x), 2 * jnp.cos(x)))
self.assertAllClose(api.grad(f)(x), 2 * jnp.cos(x))
  def test_invariance(self):
    """The custom rule's output is invariant under repeated jvp wrapping."""
    @api.custom_jvp
    def f(x):
      return jnp.cos(2 * x) / 2.
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      # Tangent rule intentionally unrelated to the true derivative.
      return (f(x), 3 * g)
    f.defjvp(f_jvp)
    def f2(x):
      y, _ = api.jvp(f, (x,), (x,))
      return y
    def f3(x):
      y, _ = api.jvp(f2, (x,), (x,))
      return y
    x = 1.
    self.assertAllClose(api.jvp(f, (x,), (x,)),
                        api.jvp(f2, (x,), (x,)),
                        check_dtypes=False)
    self.assertAllClose(api.jvp(f, (x,), (x,)),
                        api.jvp(f3, (x,), (x,)),
                        check_dtypes=False)
  def test_python_control_flow(self):
    """Python-level branching on the primal value works in both f and its rule."""
    @api.custom_jvp
    def f(x):
      if x > 0:
        return jnp.sin(x)
      else:
        return jnp.cos(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      if x > 0:
        return f(x), 2 * g
      else:
        return f(x), 3 * g
    f.defjvp(f_jvp)
    x = 2.
    self.assertAllClose(f(x), jnp.sin(x))
    self.assertAllClose(f(-x), jnp.cos(-x))
    self.assertAllClose(api.jvp(f, (x,), (1.,)),
                        (jnp.sin(x), 2.),
                        check_dtypes=False)
    self.assertAllClose(api.jvp(f, (-x,), (1.,)),
                        (jnp.cos(-x), 3.),
                        check_dtypes=False)
    self.assertAllClose(api.grad(f)(x), 2., check_dtypes=False)
    self.assertAllClose(api.grad(f)(-x), 3., check_dtypes=False)
  def test_vmap(self):
    """vmap (and nested vmap) of a custom_jvp function, in every jvp/vmap nesting order."""
    @api.custom_jvp
    def f(x):
      assert jnp.ndim(x) == 0  # the rule only ever sees unbatched scalars
      return jnp.sin(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      assert jnp.ndim(x) == jnp.ndim(g) == 0
      return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)

    x = jnp.arange(3.)
    xx = jnp.arange(6.).reshape(2, 3)

    # vmap of f
    self.assertAllClose(api.vmap(f)(x), jnp.sin(x))
    self.assertAllClose(api.vmap(api.vmap(f))(xx), jnp.sin(xx))

    # vmap of jvp of f
    self.assertAllClose(api.vmap(lambda x: api.jvp(f, (x,), (x,)))(x),
                        (jnp.sin(x), 2 * jnp.cos(x) * x))
    self.assertAllClose(api.vmap(api.vmap(lambda x: api.jvp(f, (x,), (x,))))(xx),
                        (jnp.sin(xx), 2 * jnp.cos(xx) * xx))

    # jvp of vmap of f
    self.assertAllClose(api.jvp(api.vmap(f), (x,), (x,)),
                        (jnp.sin(x), 2 * jnp.cos(x) * x))
    self.assertAllClose(api.jvp(api.vmap(api.vmap(f)), (xx,), (xx,)),
                        (jnp.sin(xx), 2 * jnp.cos(xx) * xx))

    # vmap of jvp of vmap of f
    self.assertAllClose(api.vmap(lambda x: api.jvp(api.vmap(f), (x,), (x,)))(xx),
                        (jnp.sin(xx), 2 * jnp.cos(xx) * xx))
def test_jit(self):
@api.custom_jvp
def f(x):
return jnp.sin(x)
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), 2 * jnp.cos(x) * g
f.defjvp(f_jvp)
x = 3.
self.assertAllClose(api.jit(f)(x), jnp.sin(x))
self.assertAllClose(api.jit(api.jit(f))(x), jnp.sin(x))
self.assertAllClose(api.jit(lambda x: api.jvp(f, (x,), (x,)))(x),
(jnp.sin(x), 2 * jnp.cos(x) * x),
check_dtypes=False)
self.assertAllClose(api.jvp(api.jit(f), (x,), (x,)),
(jnp.sin(x), 2 * jnp.cos(x) * x),
check_dtypes=False)
def test_pytrees(self):
@api.custom_jvp
def f(x):
return {'b': jnp.sin(x['a'])}
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), {'b': 2 * jnp.cos(x['a']) * g['a']}
f.defjvp(f_jvp)
x = {'a': 3.}
self.assertAllClose(f(x)['b'], jnp.sin(x['a']))
self.assertAllClose(api.jvp(f, (x,), (x,)),
({'b': jnp.sin(x['a'])},
{'b': 2 * jnp.cos(x['a']) * x['a']}),
check_dtypes=False)
def test_kwargs(self):
@api.custom_jvp
def my_fun(x, y, c=1.):
return c * (x + y)
def my_jvp(primals, tangents):
x, y, c = primals
t_x, t_y, t_c = tangents
return my_fun(x, y, c), t_c
my_fun.defjvp(my_jvp)
f = lambda x, y: jnp.square(my_fun(x, y, c=2.)).sum()
f(10., 5.)
api.jvp(f, (10., 5.), (1., 1.)) # doesn't crash
  def test_initial_style(self):
    """custom_jvp inside lax.scan (initial-style) under all grad/jit nestings."""
    @api.custom_jvp
    def f(x):
      return 3 * x
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * g  # custom rule: first derivative is 2, second is 0
    f.defjvp(f_jvp)

    def foo(x):
      out, _  = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
      return out

    ans = api.grad(foo)(3.)
    expected = 2.
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(api.jit(foo))(3.)
    expected = 2.
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jit(api.grad(foo))(3.)
    expected = 2.
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(api.grad(foo))(3.)
    expected = 0.
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(api.grad(api.jit(foo)))(3.)
    expected = 0.
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(api.jit(api.grad(foo)))(3.)
    expected = 0.
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jit(api.grad(api.grad(foo)))(3.)
    expected = 0.
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_initial_style_vmap(self):
    """custom_jvp inside lax.scan under vmap, with all jit/grad nestings."""
    @api.custom_jvp
    def f(x):
      assert jnp.ndim(x) == 0  # the rule sees only unbatched scalars
      return 3 * x
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * g
    f.defjvp(f_jvp)

    def foo(x):
      out, _  = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
      return out

    # Primal values use f itself (factor 3).
    ans = api.vmap(foo)(jnp.ones(3))
    expected = 3. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.vmap(api.jit(foo))(jnp.ones(3))
    expected = 3. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jit(api.vmap(foo))(jnp.ones(3))
    expected = 3. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    # Gradients use the custom rule (factor 2).
    ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(lambda x: api.vmap(api.jit(foo))(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(lambda x: api.jit(api.vmap(foo))(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(api.jit(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jit(api.grad(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_initial_style_vmap_with_collective(self):
    """A custom_jvp containing a psum collective works under vmap over that axis."""
    @api.custom_jvp
    def f(x):
      return lax.psum(x, 'foo')

    @f.defjvp
    def f_jvp(xs, ts):
      x, = xs
      t, = ts
      return lax.psum(x, 'foo'), t

    def g(x):
      # Round-trip through a jaxpr to exercise initial-style handling.
      jaxpr = api.make_jaxpr(f)(x)
      return core.eval_jaxpr(jaxpr.jaxpr, [], x)[0]

    v = api.vmap(lambda _, x: g(x), axis_name='foo', in_axes=(0, None),
                 out_axes=None)(jnp.arange(4.), 2.)
    self.assertAllClose(v, 8.)  # psum of 2. over an axis of size 4
  def test_closed_over_tracers_error_message(self):
    """Closing over differentiation tracers in a custom_jvp raises CustomJVPException."""
    def f(x):
      @api.custom_jvp
      def g(y):
        return x + y  # closes over the tracer x
      def g_jvp(primals, tangents):
        return g(x), 2 * primals[0]
      g.defjvp(g_jvp)
      return g(1.)

    self.assertRaises(ad.CustomJVPException, lambda: api.jvp(f, (3.,), (1.,)))
    self.assertRaises(ad.CustomJVPException, lambda: api.grad(f)(3.))
def test_nondiff_arg(self):
@partial(api.custom_jvp, nondiff_argnums=(0,))
def app(f, x):
return f(x)
def app_jvp(f, primals, tangents):
(x,), (t,) = primals, tangents
return app(f, x), 3 * t
app.defjvp(app_jvp)
ans = app(lambda x: 2 * x, 1)
expected = 2
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jvp(lambda x: app(lambda y: 2 * y, x), (1.,), (1.,))
expected = (2., 3.)
self.assertAllClose(ans, expected, check_dtypes=False)
  def test_nondiff_arg_jit_tracer(self):
    """A jit tracer may flow through a nondiff argument position."""
    @partial(api.custom_jvp, nondiff_argnums=(0,))
    def f(x, y):
      return x * y
    def f_jvp(x, primals, tangents):
      (y,), (t_y,) = primals, tangents
      return f(x, y), 5 * t_y
    f.defjvp(f_jvp)

    @jit
    def g(x, y):
      return f(x, y)

    ans = api.jvp(lambda y: g(2., y), (3.,), (1.,))
    expected = (6., 5.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_nondiff_arg_hiding_jvp_tracer(self):
    """Hiding a jvp tracer inside a nondiff (callable) argument is detected and rejected."""
    def f(x):
      @partial(api.custom_jvp, nondiff_argnums=(0,))
      def g(h, x):
        return h(x)
      @g.defjvp
      def g_jvp(h, primals, tangents):
        x, = primals
        t, = tangents
        return g(h, x), 2. * t
      h = lambda y: x + y  # capture to make h a closure over a jvp tracer
      return g(h, x)

    with self.assertRaisesRegex(ad.CustomJVPException, "Detected differentiation"):
      api.jvp(f, (2.,), (1.,))
  def test_vmap_axes(self):
    """Placeholder: vmap axes handling for custom_jvp not yet tested."""
    raise unittest.SkipTest("TODO")
  def test_pmap(self):
    """Placeholder: pmap interaction with custom_jvp not yet tested."""
    raise unittest.SkipTest("TODO")
  def test_missing_jvp_rule_error_message(self):
    """Calling or differentiating a custom_jvp with no rule raises AttributeError."""
    @api.custom_jvp
    def foo(x):
      return x ** 2

    # NOTE: the message embeds the function name 'foo'.
    self.assertRaisesRegex(
        AttributeError,
        r"No JVP defined for custom_jvp function foo using defjvp.",
        lambda: foo(2))
    self.assertRaisesRegex(
        AttributeError,
        r"No JVP defined for custom_jvp function foo using defjvp.",
        lambda: api.jvp(foo, (2.,), (1.,)))
    self.assertRaisesRegex(
        AttributeError,
        r"No JVP defined for custom_jvp function foo using defjvp.",
        lambda: api.grad(foo)(2.))
  def test_jvp_rule_inconsistent_pytree_structures_error_message(self):
    """Primal/tangent pytree mismatch from the rule gives a precise TypeError."""
    @api.custom_jvp
    def f(x):
      return (x**2,)

    @f.defjvp
    def foo_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return f(x), [2 * x * t, x]  # tuple vs list: mismatched structures

    f(2.)  # doesn't crash
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom JVP rule must produce primal and tangent outputs "
            "with equal container (pytree) structures, but got "
            "{} and {} respectively.".format(
                tree_util.tree_structure((1,)),
                tree_util.tree_structure([1, 2]))
        ),
        lambda: api.jvp(f, (2.,), (1.,)))
  def test_primal_tangent_aval_disagreement_error_message(self):
    """Primal/tangent shape disagreement from the rule gives a precise TypeError."""
    @api.custom_jvp
    def f(x):
      return x ** 2

    @f.defjvp
    def foo_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return f(x), jnp.reshape(t, (1,))  # scalar primal vs shape-(1,) tangent

    f(2.)  # doesn't crash
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom JVP rule must produce primal and tangent outputs "
            "with equal shapes and dtypes, but got float32[] and float32[1] "
            "respectively."),
        lambda: api.jvp(f, (jnp.float32(2.),), (jnp.float32(1.),)))
  def test_jvp_rule_doesnt_return_pair_error_message(self):
    """A rule returning a single value (not a primal/tangent pair) raises TypeError."""
    @api.custom_jvp
    def f(x):
      return x ** 2

    @f.defjvp
    def foo_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return t  # should be (primal_out, tangent_out)

    f(2.)  # doesn't crash
    self.assertRaisesRegex(
        TypeError,
        re.escape(
            "Custom JVP rule must produce a pair (list or tuple of length two) "
            "representing primal and tangent outputs, got 1.0"),
        lambda: api.jvp(f, (2.,), (1.,)))
  def test_multiple_rule_invocations(self):
    """A custom rule invoked repeatedly through a scan works under grad and vmap."""
    @jax.custom_jvp
    def expit(x):
      return 1 / (1 + lax.exp(-x))

    @expit.defjvp
    def _expit_jvp(primals, tangents):
      (x,), (t,) = primals, tangents
      ans = expit(x)
      t_out = t * ans * (1 - ans)
      return ans, t_out

    def scanned_fun(c, _):
      return [expit(c[0])] + [c[i-1] + c[i] for i in range(1, len(c))], None

    def foo(x):
      c, _ = lax.scan(scanned_fun, [x, 0., 0., 0., 0.], None, length=10)
      return c[-1]

    # just make sure these don't crash
    foo(3.)
    grad(foo)(3.)
    grad(lambda x: jax.vmap(foo)(x).sum())(jnp.arange(3.))
  def test_hard_stuff(self):
    """jit+vmap of jnp.linalg.det (which uses custom differentiation internally) doesn't crash."""
    arr = jnp.ones((5, 2, 2))
    api.jit(jax.vmap(jnp.linalg.det))(arr)
  def test_hard_stuff2(self):
    """A custom_jvp whose primal uses tie_in/np composes with jit, vmap, grad, and jvp."""
    @jax.custom_jvp
    def f(x):
      return lax.tie_in(x, np.zeros(x.shape, x.dtype))

    @f.defjvp
    def f_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return f(x), t

    # don't crash
    jax.jit(jax.vmap(f))(jnp.arange(3.))
    jax.jit(jax.vmap(jax.grad(f)))(jnp.arange(3.))
    jax.jit(jax.grad(lambda x: jax.vmap(f)(x).sum()))(jnp.arange(3.))
    jax.grad(lambda x: jax.vmap(f)(x).sum())(jnp.arange(3.))
    jax.jvp(jax.vmap(f), (jnp.arange(3.),), (jnp.ones(3),))
  def test_hard_stuff3(self):
    """A custom relu inside a list-carry scan composes with jit, vmap, grad, and jvp."""
    @jax.custom_jvp
    def relu(x):
      return jnp.maximum(x, 0)

    @relu.defjvp
    def _relu_jvp(primals, tangents):
      x, = primals
      t, = tangents
      return relu(x), lax.select(x > 0, t, lax.full_like(t, 0))

    def scanned_fun(c, _):
      return [relu(c[0])] + [c[i-1] + c[i] for i in range(1, len(c))], None

    def f(x):
      c, _ = lax.scan(scanned_fun, [x, 0., 0., 0., 0.], None, length=10)
      return c[-1]

    # don't crash
    jax.jit(jax.vmap(f))(jnp.arange(3.))
    jax.jit(jax.vmap(jax.grad(f)))(jnp.arange(3.))
    jax.jit(jax.grad(lambda x: jax.vmap(f)(x).sum()))(jnp.arange(3.))
    jax.grad(lambda x: jax.vmap(f)(x).sum())(jnp.arange(3.))
    jax.jvp(jax.jit(jax.vmap(f)), (jnp.arange(3.),), (jnp.ones(3),))
  def test_eval_shape(self):
    """eval_shape works on a custom_jvp function and on its gradient."""
    @jax.custom_jvp
    def expit(x):
      return 1 / (1 + lax.exp(-x))

    @expit.defjvp
    def _expit_jvp(primals, tangents):
      (x,), (t,) = primals, tangents
      ans = expit(x)
      t_out = t * ans * (1 - ans)
      return ans, t_out

    # don't crash
    api.eval_shape(expit, jnp.ones((2, 3)))
    api.eval_shape(api.grad(lambda x: expit(x).sum()), jnp.ones((2, 3)))
  def test_jaxpr_zeros(self):
    """Symbolic-zero tangents through a custom_jvp inside scan don't break grad."""
    @api.custom_jvp
    def f(A, b):
      return A @ b

    def f_jvp(primals, tangents):
      A, b = primals
      dA, db = tangents
      z = f(A, b)
      dz = A @ db + dA @ b
      return z, dz

    f.defjvp(f_jvp)

    def experiment(theta):
      def step(q, _):
        z = f(jnp.eye(3), jnp.ones(3) * theta)  # dA is a symbolic zero
        q += z[0]
        return q, q

      q = 0.
      q, _ = lax.scan(step, q, None, 4)
      return q

    grad(experiment)(1.)  # doesn't crash
def test_linear_in_scan(self):
@api.custom_jvp
def f(x):
return -x
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
x_dot, = tangents
return f(x), f(x_dot)
def foo(x):
out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
return out
ans = api.grad(foo)(3.)
expected = -1.
self.assertAllClose(ans, expected, check_dtypes=False)
  def test_custom_jvps_first_rule_is_none(self):
    """defjvps accepts None for an argument's rule when only another arg is differentiated."""
    # https://github.com/google/jax/issues/3389
    @api.custom_jvp
    def f(x, y):
      return x ** 2 * y

    f.defjvps(None, lambda x_dot, primal_out, x, y: 2 * x * y * x_dot)
    ans = grad(f, 1)(2., 3.)  # doesn't crash
    expected = 12.
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_concurrent_initial_style(self):
    """Concurrent grad-of-scan calls from multiple threads all agree with a serial run."""
    def unroll(param, sequence):
      def scan_f(prev_state, inputs):
        return prev_state, jax.nn.sigmoid(param * inputs)
      return jnp.sum(jax.lax.scan(scan_f, None, sequence)[1])

    def run():
      return jax.grad(unroll)(jnp.array(1.0), jnp.array([1.0]))

    expected = run()

    # With two threads, tracing happens concurrently; results must match.
    n_workers = 2
    with concurrent.futures.ThreadPoolExecutor(max_workers=n_workers) as e:
      futures = []
      for _ in range(n_workers):
        futures.append(e.submit(run))
      results = [f.result() for f in futures]

    for ans in results:
      self.assertAllClose(ans, expected)
  def test_nondiff_argnums_vmap_tracer(self):
    """A vmap tracer (PRNG key) may occupy a nondiff argument position."""
    # https://github.com/google/jax/issues/3964
    @partial(jax.custom_jvp, nondiff_argnums=(0, 2))
    def sample(shape, param, seed):
      return jax.random.uniform(key=seed, shape=shape, minval=param)

    @sample.defjvp
    def sample_jvp(shape, seed, primals, tangents):
      param, = primals
      dparam, = tangents
      dparam = jnp.broadcast_to(dparam, shape)
      samples = sample(shape, param, seed)
      return samples, samples * dparam  # dummy jvp for proof of concept

    # check these don't crash
    jax.vmap(lambda seed: sample((2,3), 1., seed))(
        jax.random.split(jax.random.PRNGKey(1), 10))
    jax.jvp(lambda x: sample((2, 3), x, jax.random.PRNGKey(1)),
            (1.,), (1.,))
  def test_fun_with_nested_calls_2(self):
    """Deeply nested custom_jvp-wrapped closures agree under jit and work under vmap."""
    def call(f, *args):
      f = api.custom_jvp(f)
      f.defjvp(lambda primals, tangents: (f(*primals), sum(tangents)))
      return f(*args)

    def fun_with_nested_calls_2(x):
      def bar(y):
        def baz(w):
          q = call(lambda x: y, x)      # closure over y
          q = q + call(lambda: y)       # zero-arg closure
          q = q + call(lambda y: w + y, y)
          q = call(lambda w: call(jnp.sin, x) * y, 1.0) + q
          return q
        return api.jit(baz)(x)
      return call(bar, x)

    # test that this is numerically stable under jit and vmap
    self.assertAllClose(api.jit(fun_with_nested_calls_2)(3.),
                        fun_with_nested_calls_2(3.))
    api.vmap(fun_with_nested_calls_2)(jnp.arange(3.))
  def test_closure_with_vmap(self):
    """A custom_jvp closing over a vmapped PRNG key (gamma sampling) doesn't crash."""
    # https://github.com/google/jax/issues/3822
    alpha = np.float32(2.)

    def sample(seed):
      @api.custom_jvp
      def f(alpha):
        return jax.random.gamma(seed, alpha, shape=[])

      @f.defjvp
      def f_jvp(primal, tangent):
        alpha = primal
        dalpha = tangent
        sample = f(alpha)
        partial_alpha = lax.random_gamma_grad(alpha, sample)
        return sample, partial_alpha * dalpha
      return f(alpha)

    api.vmap(sample)(jax.random.split(jax.random.PRNGKey(1), 3))  # don't crash
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_float0(self):
    """Integer outputs get float0 tangents through a custom_jvp rule."""
    @api.custom_jvp
    def f(x, y):
      return x, y
    def f_jvp(primals, _):
      # we need a defined (non-float0) tangent to trigger the rule
      return primals, (2., 1)
    f.defjvp(f_jvp)

    primals = (2., 3)
    tangents = (np.ones(()), np.zeros((), float0),)
    expected_tangents = (2., np.zeros((), float0))
    self.assertArraysEqual(api.jvp(f, primals, tangents),
                           (primals, expected_tangents))
  @unittest.skipIf(numpy_version == (1, 21, 0),
                   "https://github.com/numpy/numpy/issues/19305")
  def test_float0_initial_style(self):
    """float0 tangents propagate correctly through a custom_jvp inside scan."""
    @api.custom_jvp
    def f(x, y):
      return x, y
    def f_jvp(primals, _):
      x, y = primals
      return (x, y), (2., 1)
    f.defjvp(f_jvp)

    def foo(x, y):
      out, _ = lax.scan(lambda c, _: (f(*c), None), (x, y), None, length=1)
      return out

    primals = (2., 3)
    tangents = (np.ones(()), np.zeros((), float0),)
    expected_tangents = (2., np.zeros((), float0))
    self.assertArraysEqual(api.jvp(foo, primals, tangents),
                           (primals, expected_tangents))
def test_remat(self):
@api.custom_jvp
def f(x):
return jnp.sin(x)
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), 2 * jnp.cos(x) * g
f.defjvp(f_jvp)
@api.remat
def g(x):
return f(f(x))
ans = g(2.)
expected = np.sin(np.sin(2.))
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(g)(2.)
expected = 4. * api.grad(lambda x: jnp.sin(jnp.sin(x)))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
  def test_remat_higher_order(self):
    """Higher-order grads of a custom_jvp agree with and without remat at each level."""
    @api.custom_jvp
    def f(x):
      return jnp.sin(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)

    def g(x):
      return f(f(x))

    ans = api.grad(api.grad(api.remat(g)))(2.)
    expected = api.grad(api.grad(g))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(api.remat(api.grad(g)))(2.)
    expected = api.grad(api.grad(g))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(api.grad(api.grad(api.remat(g))))(2.)
    expected = api.grad(api.grad(api.grad(g)))(2.)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_initial_style_vmap_2(self):
    """Like test_initial_style_vmap, but f closes over an array constant."""
    # This is like test_initial_style_vmap except the primal function closes over
    # an array constant.
    y = jnp.array([1., 2., 3.])

    @api.custom_jvp
    def f(x):
      assert jnp.ndim(x) == 0
      return 3 * x * jnp.sum(y)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * g
    f.defjvp(f_jvp)

    def foo(x):
      out, _  = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
      return out

    ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(lambda x: api.vmap(api.jit(foo))(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(lambda x: api.jit(api.vmap(foo))(x).sum())(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.grad(api.jit(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)

    ans = api.jit(api.grad(lambda x: api.vmap(foo)(x).sum()))(jnp.ones(3))
    expected = 2. * jnp.ones(3)
    self.assertAllClose(ans, expected, check_dtypes=False)
  def test_custom_jvp_vmap_broadcasting_interaction(self):
    """value_and_grad through a vmapped closure keeps scalar value and gradient shapes."""
    def f2(y, z):
      v1 = z
      v2 = jnp.sum(y) + z
      return jnp.logaddexp(v1, v2)

    def f1(y, z):
      v = api.vmap(lambda _y: f2(_y, z))(y)  # z broadcast into the mapped fn
      return jnp.sum(v)

    y = jnp.ones((3, 2))
    f = lambda z: f1(y, z)
    z = 0.1
    val, g = api.value_and_grad(f)(z)
    self.assertEqual(val.shape, ())
    self.assertEqual(g.shape, ())
  def test_custom_jvp_vmap_broadcasting_interaction_2(self):
    """A scalar grad through nested vmaps of a custom_jvp stays scalar-shaped."""
    @api.custom_jvp
    def transform(box, R):
      if jnp.isscalar(box) or box.size == 1:
        return R * box
      elif box.ndim == 2:
        return jnp.einsum('ij,j->i', box, R)
      raise ValueError()

    @transform.defjvp
    def transform_jvp(primals, tangents):
      box, R = primals
      dbox, dR = tangents
      return (transform(box, R), dR + transform(dbox, R))

    def periodic_general(box):
      def displacement_fn(Ra, Rb, **kwargs):
        _box = kwargs.get('box', box)
        return transform(_box, Ra - Rb)
      return displacement_fn

    N = 250

    scalar_box = 1.0
    displacement = periodic_general(scalar_box)

    key = jax.random.PRNGKey(0)
    R = jax.random.uniform(key, (N, 2))

    def energy_fn(box):
      d = partial(displacement, box=box)
      d = api.vmap(api.vmap(d, (None, 0)), (0, None))  # all-pairs displacements
      return jnp.sum(d(R, R) ** 2)

    self.assertEqual(grad(energy_fn)(scalar_box).shape, ())
  def test_custom_jvp_implicit_broadcasting(self):
    """Simplex-projection custom rule: jacfwd/jacrev agree with the closed form, and vmap+grad matches finite differences."""
    if config.x64_enabled:
      raise unittest.SkipTest("test only applies when x64 is disabled")

    @jax.custom_jvp
    def projection_unit_simplex(x: jnp.ndarray) -> jnp.ndarray:
      """Projection onto the unit simplex."""
      s = 1.0
      n_features = x.shape[0]
      u = jnp.sort(x)[::-1]
      cssv = jnp.cumsum(u) - s
      ind = jnp.arange(n_features) + 1
      cond = u - cssv / ind > 0
      idx = jnp.count_nonzero(cond)
      threshold = cssv[idx - 1] / idx.astype(x.dtype)
      return jax.nn.relu(x - threshold)

    @projection_unit_simplex.defjvp
    def projection_unit_simplex_jvp(primals, tangents):
      x, = primals
      x_dot, = tangents
      primal_out = projection_unit_simplex(x)
      supp = primal_out > 0  # support of the projection
      card = jnp.count_nonzero(supp)
      tangent_out = supp * x_dot - (jnp.dot(supp, x_dot) / card) * supp
      return primal_out, tangent_out

    rng = np.random.RandomState(0)
    x = rng.rand(5).astype(np.float32)

    J_rev = jax.jacrev(projection_unit_simplex)(x)
    J_fwd = jax.jacfwd(projection_unit_simplex)(x)

    # Closed-form Jacobian: diag(support) - outer(support, support) / |support|.
    p = projection_unit_simplex(x)
    support = (p > 0).astype(jnp.int32)
    cardinality = jnp.count_nonzero(support)
    J_true = jnp.diag(support) - jnp.outer(support, support) / cardinality
    self.assertAllClose(J_true, J_fwd)
    self.assertAllClose(J_true, J_rev)

    proj = jax.vmap(projection_unit_simplex)

    def fun(X):
      return jnp.sum(proj(X) ** 2)

    rng = np.random.RandomState(0)
    X = rng.rand(4, 5).astype(np.float32)
    U = rng.rand(4, 5)
    U /= np.sqrt(np.sum(U ** 2))
    U = U.astype(np.float32)

    # Central finite difference along direction U vs. the analytic directional derivative.
    eps = 1e-3
    dir_deriv_num = (fun(X + eps * U) - fun(X - eps * U)) / (2 * eps)
    dir_deriv = jnp.vdot(jax.grad(fun)(X), U)
    self.assertAllClose(dir_deriv, dir_deriv_num, atol=1e-3)
  def test_vmap_inside_defjvp(self):
    # Regression test: a custom JVP rule that itself uses jax.vmap must
    # compose with jax.grad of the surrounding computation (no tracer
    # leaks from the rule's inner vmap).
    seed = 47  # fixed seed: deterministic test data
    key = jax.random.PRNGKey(seed)
    mat = jax.random.normal(key, (2, 3))
    @jax.custom_jvp
    def f(mat, aux):
      num_rows, num_cols = mat.shape
      return jnp.ones((num_rows, 1)) / num_cols
    @f.defjvp
    def f_jvp(primals, tangents):
      mat, aux = primals
      vec, _ = tangents
      output = f(*primals)
      num_rows, num_cols = mat.shape
      size = num_rows * num_cols
      # replicate the matrix once per entry -> shape (size, num_rows, num_cols)
      bd_mat = mat.reshape(1, 1, num_rows, num_cols)
      bd_mat = jnp.tile(bd_mat, reps=(num_rows, num_cols))
      bd_mat = bd_mat.reshape(size, num_rows, num_cols)
      rowsum = jnp.sum(mat, axis=1, keepdims=True)
      colsum = jnp.sum(mat, axis=0, keepdims=True)
      bd_rowsum = jnp.tile(rowsum, reps=(1, num_rows))
      bd_colsum = jnp.tile(colsum, reps=(num_cols, 1))
      bd_vec = vec.reshape(size, 1)
      def operate(mx, val):
        buf = 0
        for i in range(2):
          buf = buf + jnp.matmul(mx, bd_colsum) / jnp.power(aux, i)
        buf = jnp.matmul(bd_rowsum, buf)
        return buf * val
      # the vmap under test: batched over the leading `size` axis, *inside*
      # the custom JVP rule
      bd_buf = jax.vmap(operate, in_axes=(0, 0), out_axes=0)(bd_mat, bd_vec)
      bd_buf = bd_buf / aux
      jvp = jnp.sum(bd_buf, axis=0)
      jvp = jnp.mean(jvp, axis=1, keepdims=True)
      return (output, jvp)
    # must not raise; the numerical value itself is not checked here
    jax.grad(lambda mat, aux: jnp.sum(f(mat, aux)))(mat, 0.5)
  def test_custom_jvp_unbroadcasting(self):
    # https://github.com/google/jax/issues/3056
    # The custom rule broadcasts a scalar input against the closed-over
    # array `a` of shape (2,); reverse-mode must "unbroadcast" so the
    # gradient w.r.t. the scalar input is a scalar again.
    a = jnp.array([1., 1.])
    @jax.custom_jvp
    def f(x):
      return a * x
    @f.defjvp
    def f_jvp(primals, tangents):
      x, = primals
      dx, = tangents
      return a * x, a * dx
    shape = grad(lambda x: jnp.sum(f(x)))(jnp.array(1.)).shape
    self.assertEqual(shape, ())
class CustomVJPTest(jtu.JaxTestCase):
  """Tests for api.custom_vjp: user-defined reverse-mode (VJP) rules."""
  def test_basic(self):
    @api.custom_vjp
    def f(x):
      return jnp.sin(x)
    def f_fwd(x):
      # forward pass saves cos(x) as the residual for the backward pass
      return f(x), jnp.cos(x)
    def f_rev(cos_x, g):
      # deliberately 2x the true derivative, so the gradient checks below
      # prove the custom rule (not autodiff of sin) was used
      return (2 * cos_x * g,)
    f.defvjp(f_fwd, f_rev)
    x = 3.
    self.assertAllClose(f(x), jnp.sin(x))
    self.assertAllClose(api.grad(f)(x), 2 * jnp.cos(x))
    self.assertAllClose(api.value_and_grad(f)(x),
                        (jnp.sin(x), 2 * jnp.cos(x)))
def test_invariance(self):
@api.custom_vjp
def f(x):
return jnp.cos(2 * x) / 2.
def f_fwd(x):
return (f(x), x)
def f_rev(x, g):
return (g * 3,)
f.defvjp(f_fwd, f_rev)
def f2(x):
y, _ = api.value_and_grad(f)(x)
return y
def f3(x):
y, _ = api.value_and_grad(f2)(x)
return y
x = 1.
self.assertAllClose(f(x), f2(x), check_dtypes=False)
self.assertAllClose(f(x), f3(x), check_dtypes=False)
self.assertAllClose(api.grad(f)(x), api.grad(f2)(x),
check_dtypes=False)
self.assertAllClose(api.grad(f)(x), api.grad(f3)(x),
check_dtypes=False)
def test_python_control_flow(self):
@api.custom_vjp
def f(x):
if x > 0:
return jnp.sin(x)
else:
return jnp.cos(x)
def f_fwd(x):
if x > 0:
return f(x), x
else:
return f(x), x
def f_rev(x, g):
if x > 0:
return (2 * g,)
else:
return (3 * g,)
f.defvjp(f_fwd, f_rev)
x = 2.
self.assertAllClose(f(x), jnp.sin(x))
self.assertAllClose(f(-x), jnp.cos(-x))
self.assertAllClose(api.value_and_grad(f)(x), (jnp.sin(x), 2.),
check_dtypes=False)
self.assertAllClose(api.value_and_grad(f)(-x), (jnp.cos(-x), 3.),
check_dtypes=False)
def test_vmap(self):
@api.custom_vjp
def f(x):
assert jnp.ndim(x) == 0
return jnp.sin(x)
def f_fwd(x):
assert jnp.ndim(x) == 0
return f(x), jnp.cos(x)
def f_rev(cos_x, g):
return (2 * cos_x * g,)
f.defvjp(f_fwd, f_rev)
x = jnp.arange(3.)
xx = jnp.arange(6.).reshape(2, 3)
# vmap of f
self.assertAllClose(api.vmap(f)(x), jnp.sin(x))
self.assertAllClose(api.vmap(api.vmap(f))(xx), jnp.sin(xx))
# vmap of grad of f
self.assertAllClose(api.vmap(api.grad(f))(x), 2 * jnp.cos(x))
self.assertAllClose(api.vmap(api.value_and_grad(f))(x),
(jnp.sin(x), 2 * jnp.cos(x)))
self.assertAllClose(api.vmap(api.vmap(api.grad(f)))(xx), 2 * jnp.cos(xx))
self.assertAllClose(api.vmap(api.vmap(api.value_and_grad(f)))(xx),
(jnp.sin(xx), 2 * jnp.cos(xx)))
# grad of vmap of f
self.assertAllClose(api.grad(lambda x: api.vmap(f)(x).sum())(x),
2 * jnp.cos(x))
self.assertAllClose(api.grad(lambda x: api.vmap(api.vmap(f))(x).sum())(xx),
2 * jnp.cos(xx))
# vmap of grad of vmap of f
self.assertAllClose(api.vmap(api.grad(lambda x: api.vmap(f)(x).sum()))(xx),
2 * jnp.cos(xx))
def test_jit(self):
@api.custom_vjp
def f(x):
return jnp.sin(x)
def f_fwd(x):
return f(x), jnp.cos(x)
def f_rev(cos_x, g):
return (2 * cos_x * g,)
f.defvjp(f_fwd, f_rev)
x = 3.
# jit
self.assertAllClose(api.jit(f)(x), jnp.sin(x))
self.assertAllClose(api.jit(api.jit(f))(x), jnp.sin(x))
# jit of grad
self.assertAllClose(api.jit(api.grad(f))(x), 2 * jnp.cos(x),
check_dtypes=False)
# grad of jit
self.assertAllClose(api.grad(api.jit(f))(x), 2 * jnp.cos(x),
check_dtypes=False)
def test_pytrees(self):
@api.custom_vjp
def f(x):
return {'b': jnp.sin(x['a'])}
def f_fwd(x):
return f(x), {'r': jnp.cos(x['a'])}
def f_bwd(res, g):
cos_x = res['r']
return ({'a': 2 * cos_x * g['b']},)
f.defvjp(f_fwd, f_bwd)
x = {'a': 3.}
self.assertAllClose(f(x)['b'], jnp.sin(x['a']))
self.assertAllClose(api.grad(lambda x: f(x)['b'])(x),
{'a': 2 * jnp.cos(x['a'])})
def test_jvp_error(self):
@api.custom_vjp
def f(x):
return jnp.sin(x)
def f_fwd(x):
return f(x), jnp.cos(x)
def f_rev(cos_x, g):
return (2 * cos_x * g,)
f.defvjp(f_fwd, f_rev)
self.assertRaisesRegex(
TypeError,
r"can't apply forward-mode autodiff \(jvp\) to a custom_vjp function.",
lambda: api.jvp(f, (3.,), (1.,)))
self.assertRaisesRegex(
TypeError,
r"can't apply forward-mode autodiff \(jvp\) to a custom_vjp function.",
lambda: api.jvp(api.vmap(f), (jnp.arange(3.),), (jnp.ones(3),)))
self.assertRaisesRegex(
TypeError,
r"can't apply forward-mode autodiff \(jvp\) to a custom_vjp function.",
lambda: api.jvp(jit(f), (3.,), (1.,)))
def test_kwargs(self):
@api.custom_vjp
def my_fun(x, y, c=1.):
return c * (x + y)
my_fun.defvjp(lambda x, y, c=1.: (my_fun(c, y, c), None),
lambda _, g: (g, g, g))
f = lambda x, y: jnp.square(my_fun(x, y, c=2.)).sum()
f(10., 5.)
api.grad(f)(10., 5.) # doesn't crash
def test_initial_style(self):
@api.custom_vjp
def f(x):
return jnp.sin(x)
def f_fwd(x):
return f(x), jnp.cos(x)
def f_rev(cos_x, g):
return (2 * cos_x * g,)
f.defvjp(f_fwd, f_rev)
def foo(x):
out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
return out
ans = api.grad(foo)(3.)
expected = 2. * jnp.cos(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.grad(foo))(3.)
expected = -2. * jnp.sin(3.)
self.assertAllClose(ans, expected)
def test_initial_style_vmap(self):
@api.custom_vjp
def f(x):
assert jnp.ndim(x) == 0
return 3 * x
def f_fwd(x):
return f(x), jnp.cos(x)
def f_rev(cos_x, g):
return (2 * cos_x * g,)
f.defvjp(f_fwd, f_rev)
def foo(x):
out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
return out
ans = api.vmap(foo)(jnp.arange(3.))
expected = 3. * jnp.arange(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.arange(3.))
expected = 2. * jnp.cos(jnp.arange(3.))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_nondiff_arg(self):
@partial(api.custom_vjp, nondiff_argnums=(0,))
def app(f, x):
return f(x)
def app_fwd(f, x):
return app(f, x), jnp.cos(x)
def app_rev(f, cos_x, g):
return (cos_x * g,)
app.defvjp(app_fwd, app_rev)
ans = app(lambda x: 2 * x, 1)
expected = 2
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.value_and_grad(lambda x: app(lambda y: 2 * y, x))(1.)
expected = (2., jnp.cos(1.))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_closed_over_tracer(self):
def outer(x):
@api.custom_vjp
def f(y):
return x * y
def f_fwd(y):
return f(y), jnp.cos(y)
def f_rev(cos_y, g):
return (cos_y * g,)
f.defvjp(f_fwd, f_rev)
return f
@jit
def g(x, y):
return outer(x)(y)
ans = g(2, 3.)
expected = 6.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(g, 1)(2., 3.)
expected = jnp.cos(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_closed_over_tracer2(self):
def outer(x):
@api.custom_vjp
def f(y):
return x * y
def f_fwd(y):
return f(y), jnp.cos(y)
def f_rev(cos_y, g):
return (cos_y * g,)
f.defvjp(f_fwd, f_rev)
return f
@api.vmap
def g(x):
return outer(x)(3.)
ans = g(np.arange(3.))
expected = np.arange(3.) * 3
self.assertAllClose(ans, expected, check_dtypes=False)
def test_closed_over_tracer3(self):
def outer(x):
@api.custom_vjp
def f(y):
return x * y
def f_fwd(y):
return f(y), (x, jnp.cos(y))
def f_rev(res, g):
x, cos_y = res
return (cos_y * g * x,)
f.defvjp(f_fwd, f_rev)
return api.grad(f)
@api.vmap
def g(x):
return outer(x)(3.)
ans = g(np.arange(3.))
expected = np.cos(3.) * np.arange(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_nondiff_arg_tracer_error(self):
@partial(api.custom_vjp, nondiff_argnums=(0,))
def f(x, y):
return x * y
def f_fwd(x, y):
return f(x, y), jnp.cos(y)
def f_rev(x, cos_y, g):
return (cos_y * g,)
f.defvjp(f_fwd, f_rev)
@jit
def g(x, y):
return f(x, y)
with self.assertRaisesRegex(UnexpectedTracerError, "custom_vjp"):
_ = g(2, 3.)
with self.assertRaisesRegex(UnexpectedTracerError, "custom_vjp"):
_ = api.grad(g, 1)(2., 3.)
def test_vmap_axes(self):
raise unittest.SkipTest("TODO") # TODO(mattjj): write test
def test_pmap(self):
raise unittest.SkipTest("TODO") # TODO(mattjj): write test
def test_missing_vjp_rule_error(self):
@api.custom_vjp
def foo(x):
return x ** 2
self.assertRaisesRegex(
AttributeError,
r"No VJP defined for custom_vjp function foo using defvjp.",
lambda: foo(2))
self.assertRaisesRegex(
AttributeError,
r"No VJP defined for custom_vjp function foo using defvjp.",
lambda: api.grad(foo)(2.))
def test_vjp_rule_inconsistent_pytree_structures_error(self):
@api.custom_vjp
def f(x):
return x
def foo_fwd(x):
return x, None
def foo_bwd(_, g):
return (g, g)
f.defvjp(foo_fwd, foo_bwd)
f(2) # doesn't crash
self.assertRaisesRegex(
TypeError,
re.escape(
"Custom VJP rule must produce an output with the same container "
"(pytree) structure as the args tuple of the primal function, "
"and in particular must produce a tuple of length equal to the "
"number of arguments to the primal function, but got VJP output "
"structure {} for primal input structure {}.".format(
tree_util.tree_structure((1, 1)),
tree_util.tree_structure((1,)))
),
lambda: api.grad(f)(2.))
def test_vjp_bwd_returns_non_tuple_error(self):
@api.custom_vjp
def f(x):
return x
def foo_fwd(x):
return x, None
def foo_bwd(_, g):
return 2. * g
f.defvjp(foo_fwd, foo_bwd)
with self.assertRaisesRegex(TypeError, "Custom VJP rule .* must produce a tuple"):
api.grad(f)(3.)
def test_issue2511(self):
arr = jnp.ones((5, 2, 2))
foo = lambda x: api.vmap(jnp.linalg.det, (0,))(x)
api.jit(foo)(arr)
def test_lowering_out_of_traces(self):
# https://github.com/google/jax/issues/2578
class F(collections.namedtuple("F", ["a"])):
def __call__(self, x):
return jax.nn.relu(self.a) * x
@jax.jit
def g(f, x):
return f(x)
jax.grad(g, argnums=(1,))(F(2.0), 0.) # doesn't crash
def test_clip_gradient(self):
@api.custom_vjp
def _clip_gradient(lo, hi, x):
return x
def clip_gradient_fwd(lo, hi, x):
return x, (lo, hi,)
def clip_gradient_bwd(res, g):
lo, hi = res
return (None, None, jnp.clip(g, lo, hi),)
_clip_gradient.defvjp(clip_gradient_fwd, clip_gradient_bwd)
def clip_gradient(x):
lo = -0.1
hi = x + 0.1
return _clip_gradient(lo, hi, x)
g = jax.grad(clip_gradient)(0.1)
self.assertAllClose(g, jnp.array(0.2))
def test_nestable_vjp(self):
# Verify that https://github.com/google/jax/issues/3667 is resolved.
def f(x):
return x ** 2
@api.custom_vjp
def g(x):
return f(x)
def g_fwd(x):
y, f_vjp = api.vjp(f, x)
return y, f_vjp
def g_bwd(f_vjp, y_bar):
return f_vjp(y_bar)
g.defvjp(g_fwd, g_bwd)
# Check that VJP can be nested in simple situations. For this to pass,
# vjp has to return a PyTree.
_, g_vjp = api.vjp(g, 1.0)
y, = g_vjp(1.0)
self.assertAllClose(y, jnp.array(2.0))
# Check that VJP can be nested in complex situations. For this to pass,
# vjp can't treat the closed-over tracer x as a static argument.
@jit
def z(x):
_, g_vjp = api.vjp(g, x)
return g_vjp
y, = z(1.0)(3.0)
self.assertAllClose(y, jnp.array(6.0))
def test_initial_style_vmap_2(self):
x = jnp.ones((10, 3))
@api.custom_vjp
def custom_fun(x):
return x.sum()
def forward(x):
return x.sum(), (jnp.ones_like(x),)
def backward(res, g):
return g * res[0],
custom_fun.defvjp(forward, backward)
def train_fun(x):
def summed_fun(x):
return api.vmap(custom_fun)(x).sum()
return api.grad(summed_fun)(x)
def scan_body(carry, inputs):
x = carry
return carry, train_fun(x)
scan_range = jnp.arange(4)
lax.scan(scan_body, x, scan_range)
def test_initial_style_vmap_3(self):
# This is like test_initial_style_vmap except the primal function closes
# over an array constant.
y = jnp.array([1., 2., 3.])
@api.custom_vjp
def f(x):
assert jnp.ndim(x) == 0
return 3 * x * jnp.sum(y)
def f_fwd(x):
return f(x), jnp.cos(x)
def f_rev(cos_x, g):
return (2 * cos_x * g,)
f.defvjp(f_fwd, f_rev)
def foo(x):
out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
return out
ans = api.vmap(foo)(jnp.arange(3.))
expected = 3. * jnp.arange(3.) * 6
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(lambda x: api.vmap(foo)(x).sum())(jnp.arange(3.))
expected = 2. * jnp.cos(jnp.arange(3.))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_initial_style_vmap_with_collective(self):
@api.custom_vjp
def f(x):
return lax.psum(x, 'foo')
def f_fwd(x):
return lax.psum(x, 'foo'), None
def f_bwd(res, dx):
return dx
f.defvjp(f_fwd, f_bwd)
def g(x):
jaxpr = api.make_jaxpr(f)(x)
return core.eval_jaxpr(jaxpr.jaxpr, [], x)[0]
out = api.vmap(lambda _, x: g(x), axis_name='foo', in_axes=(0, None),
out_axes=None)(jnp.arange(4.), 2.)
self.assertAllClose(out, 8.)
def test_bwd_closes_over_tracer(self):
def f(y):
@jax.custom_vjp
def f(x):
return 2. * jnp.sin(x)
def fwd(x):
return f(x), ()
def bwd(_, g):
return (2. * jnp.cos(y) * g,) # capture!
f.defvjp(fwd, bwd)
return jax.grad(f)(1.)
ans = jax.jit(f)(2.)
self.assertAllClose(ans, 2. * jnp.cos(2.))
ans = jax.vmap(f)(jnp.arange(3.))
self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
ans = jax.jit(jax.vmap(f))(jnp.arange(3.))
self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
ans = jax.vmap(jax.jit(f))(jnp.arange(3.))
self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
ans = jax.grad(f)(4.)
self.assertAllClose(ans, -2. * jnp.sin(4.))
def test_fwd_closes_over_tracer(self):
def f(y):
@jax.custom_vjp
def f(x):
return 2. * jnp.sin(x)
def fwd(x):
return f(x), y
def bwd(y, g):
return (2. * jnp.cos(y) * g,) # capture!
f.defvjp(fwd, bwd)
return jax.grad(f)(1.)
ans = jax.jit(f)(2.)
self.assertAllClose(ans, 2. * jnp.cos(2.))
ans = jax.vmap(f)(jnp.arange(3.))
self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
ans = jax.jit(jax.vmap(f))(jnp.arange(3.))
self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
ans = jax.vmap(jax.jit(f))(jnp.arange(3.))
self.assertAllClose(ans, 2. * jnp.cos(jnp.arange(3.)))
ans = jax.grad(f)(4.)
self.assertAllClose(ans, -2. * jnp.sin(4.))
@unittest.skipIf(numpy_version == (1, 21, 0),
"https://github.com/numpy/numpy/issues/19305")
def test_float0(self):
@api.custom_vjp
def f(x, _):
return x
def f_fwd(x, _):
# we need a defined (non-float0) tangent to trigger the rule
return x, (2., 1)
def f_rev(*_):
return (2., 1)
f.defvjp(f_fwd, f_rev)
x = 2.
y = 3
self.assertEqual(api.grad(f, allow_int=True, argnums=(0, 1))(x, y),
(2., np.zeros(shape=(), dtype=float0)))
@unittest.skipIf(numpy_version == (1, 21, 0),
"https://github.com/numpy/numpy/issues/19305")
def test_float0_initial_style(self):
@api.custom_vjp
def f(x):
return x
def f_fwd(x):
return x, (2., x)
def f_rev(*_):
return ((2., 1),)
f.defvjp(f_fwd, f_rev)
def foo(x, y):
out, _ = lax.scan(lambda c, _: (f(c), None), (x, y), None, length=1)
return out[0]
x = 2.
y = 3
self.assertEqual(api.grad(foo, allow_int=True, argnums=(0, 1))(x, y),
(2., np.zeros(shape=(), dtype=float0)))
def test_remat(self):
@api.custom_vjp
def f(x):
return jnp.sin(x)
def f_fwd(x):
return f(x), jnp.cos(x)
def f_rev(cos_x, g):
return (2 * cos_x * g,)
f.defvjp(f_fwd, f_rev)
@api.remat
def g(x):
return f(f(x))
ans = g(2.)
expected = np.sin(np.sin(2.))
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(g)(2.)
expected = 4. * api.grad(lambda x: jnp.sin(jnp.sin(x)))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_remat_higher_order(self):
@api.custom_vjp
def f(x):
return jnp.sin(x)
def f_fwd(x):
return f(x), jnp.cos(x)
def f_rev(cos_x, g):
return (2 * cos_x * g,)
f.defvjp(f_fwd, f_rev)
def g(x):
return f(f(x))
ans = api.grad(api.grad(api.remat(g)))(2.)
expected = api.grad(api.grad(g))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.remat(api.grad(g)))(2.)
expected = api.grad(api.grad(g))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.grad(api.grad(api.remat(g))))(2.)
expected = api.grad(api.grad(api.grad(g)))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_bwd_nones(self):
@api.custom_vjp
def f(x, y):
return x * jnp.sin(y)
def f_fwd(x, y):
return f(x, y), jnp.cos(y)
def f_rev(cos, g):
return (None, 2 * cos * g)
f.defvjp(f_fwd, f_rev)
ans = api.grad(lambda x: f(x, x))(3.)
expected = 2 * jnp.cos(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_bwd_nones_vmap(self):
@api.custom_vjp
def f(x, y):
return x * jnp.sin(y)
def f_fwd(x, y):
return f(x, y), jnp.cos(y)
def f_rev(cos, g):
return (None, 2 * cos * g)
f.defvjp(f_fwd, f_rev)
ans = api.grad(lambda x: api.vmap(f)(x, x).sum())(jnp.arange(3.))
expected = 2 * jnp.cos(jnp.arange(3.))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_bwd_nones_pytree(self):
@api.custom_vjp
def f(xs, y):
x1, x2 = xs
return x1 * x2 * jnp.sin(y)
def f_fwd(xs, y):
return f(xs, y), jnp.cos(y)
def f_rev(cos, g):
return (None, 2 * cos * g)
f.defvjp(f_fwd, f_rev)
ans = api.grad(lambda x: f((x, x), x))(3.)
expected = 2 * jnp.cos(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_custom_vjp_closure_4521(self):
# https://github.com/google/jax/issues/4521
@api.custom_vjp
def g(x, y):
return None
def g_fwd(x, y):
return None, y
def g_bwd(residuals, z_bar):
assert False
g.defvjp(g_fwd, g_bwd)
def f(xs, y):
v_g = api.vmap(g, in_axes=(0, None), out_axes=None)
v_g(xs, y)
def scan_body(xs, _):
y = jnp.zeros(1)
_, vjp_f = api.vjp(f, xs, y)
vjp_f(None)
return xs, None
lax.scan(scan_body, jnp.ones(5), None, 100) # doesn't crash
def test_float0_bwd_none(self):
@api.custom_vjp
def f(i, x):
return jnp.sin(x)
def f_fwd(i, x):
return f(i, x), jnp.cos(x)
def f_rev(cos_x, g):
return (None, 2 * cos_x * g)
f.defvjp(f_fwd, f_rev)
ans = api.grad(f, 1)(jnp.array([1, 2]), 3.)
expected = 2 * jnp.cos(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_custom_gradient(self):
@api.custom_gradient
def f(x):
return x ** 2, lambda g: (g * x,)
self.assertAllClose(f(3.), 9., check_dtypes=False)
self.assertAllClose(api.grad(f)(3.), 3., check_dtypes=False)
self.assertAllClose(api.grad(api.grad(f))(3.), 1., check_dtypes=False)
def test_custom_gradient_2(self):
@api.custom_gradient
def f(x, y):
return x * y, lambda g: (y, x)
self.assertAllClose(f(3., 4.), 12., check_dtypes=False)
self.assertAllClose(api.grad(f, argnums=(0, 1))(3., 4.), (4., 3.),
check_dtypes=False)
def test_custom_gradient_3(self):
@api.custom_gradient
def f(x):
vjp = lambda g: (jnp.cos(x) * jnp.array([3., 4., 5.]),)
return jnp.sum(jnp.sin(x)), vjp
self.assertAllClose(f(jnp.arange(3)), jnp.sum(jnp.sin(jnp.arange(3.))),
check_dtypes=False)
self.assertAllClose(
api.grad(f)(jnp.arange(3.)),
api.grad(lambda x: jnp.sum(jnp.sin(x)))(jnp.arange(3.)) * jnp.array([3., 4., 5.]),
check_dtypes=False)
def test_custom_gradient_can_return_singleton_value_in_vjp(self):
@api.custom_gradient
def f(x):
return x ** 2, lambda g: g * x
self.assertAllClose(f(3.), 9., check_dtypes=False)
self.assertAllClose(api.grad(f)(3.), 3., check_dtypes=False)
self.assertAllClose(api.grad(api.grad(f))(3.), 1., check_dtypes=False)
def test_closure_convert(self):
def cos_after(fn, x):
converted_fn, aux_args = api.closure_convert(fn, x)
self.assertLessEqual(len(aux_args), 1)
return _cos_after(converted_fn, x, *aux_args)
@partial(api.custom_vjp, nondiff_argnums=(0,))
def _cos_after(fn, x, *args):
return jnp.cos(fn(x, *args))
def fwd(fn, x, *args):
y = _cos_after(fn, x, *args)
return y, (x, args)
def rev(fn, res, g):
x, args = res
x_bar = 17. * x
args_bars = [42. * a for a in args]
return (x_bar, *args_bars)
_cos_after.defvjp(fwd, rev)
def dist(c, x):
return jnp.sum((x - c) ** 2.)
def solve(c, x):
def closure(x):
return dist(c, x)
return cos_after(closure, x)
c, x = 2. * jnp.ones(2), jnp.ones(2)
expected = jnp.cos(dist(c, x))
self.assertAllClose(solve(c, x), expected, check_dtypes=False)
g_c, g_x = api.grad(solve, argnums=(0, 1))(c, x)
self.assertAllClose(g_c, 42. * c, check_dtypes=False)
self.assertAllClose(g_x, 17. * x, check_dtypes=False)
def test_closure_convert_mixed_consts(self):
# Like test_closure_convert, but close over values that
# participate in AD as well as values that do not.
# See https://github.com/google/jax/issues/6415
def cos_after(fn, x):
converted_fn, aux_args = api.closure_convert(fn, x)
self.assertLessEqual(len(aux_args), 1)
return _cos_after(converted_fn, x, *aux_args)
@partial(api.custom_vjp, nondiff_argnums=(0,))
def _cos_after(fn, x, *args):
return jnp.cos(fn(x, *args))
def fwd(fn, x, *args):
y = _cos_after(fn, x, *args)
return y, (x, args)
def rev(fn, res, g):
x, args = res
x_bar = 17. * x
args_bars = [42. * a for a in args]
return (x_bar, *args_bars)
_cos_after.defvjp(fwd, rev)
def dist(c, s, x):
return jnp.sum(s * (x - c) ** 2.)
def solve(c, s, x):
def closure(x):
return dist(c, s, x)
return cos_after(closure, x)
c, s, x = 2. * jnp.ones(2), 3. * jnp.ones(2), jnp.ones(2)
expected = jnp.cos(dist(c, s, x))
self.assertAllClose(solve(c, s, x), expected, check_dtypes=False)
g_c, g_x = api.grad(solve, argnums=(0, 2))(c, s, x)
self.assertAllClose(g_c, 42. * c, check_dtypes=False)
self.assertAllClose(g_x, 17. * x, check_dtypes=False)
def test_float0_cotangents_automatically_handled(self):
@jax.custom_vjp
def f(x, y):
return x
def f_fwd(x, y):
return x, None
def f_bwd(_, zbar):
return (0., 1)
f.defvjp(f_fwd, f_bwd)
jax.jit(lambda x: jax.vjp(f, 0., x)[1](1.))(1) # doesn't crash
class CustomTransposeTest(jtu.JaxTestCase):
  """Tests for api.linear_call: pairing a linear function with a
  user-supplied transpose rule."""
  def transpose(self, f, x_example):
    # Helper: return the linear transpose of f; x_example is used only for
    # its shape/dtype information.
    def transposed(y):
      x, = api.linear_transpose(f, x_example)(y)
      return x
    return transposed
  def test_linear_call(self):
    def f(x, y):
      def fn(r, x): return x / r
      def tp(r, t): return t / r   # the true transpose of fn (linear in x)
      return x + api.linear_call(fn, tp, y, x)
    def f_ref(x, y):
      return x + x / y
    x = jnp.ones(2) * 6.
    y = jnp.ones(2) * 3.
    self.assertAllClose(f(x, y), f_ref(x, y))
    f1 = lambda x: f(x, y)
    f1_ref = lambda x: f_ref(x, y)
    self.assertAllClose(self.transpose(f1, x)(x),
                        self.transpose(f1_ref, x)(x))
  def test_linear_call_incorrect_transpose(self):
    # linear_call must *use* the supplied transpose rule -- even one that is
    # deliberately wrong (off by a factor of 2) -- rather than transposing
    # the forward function itself.
    def f(x, y):
      def fn(r, x): return x / r
      def tp(r, t): return t / (2. * r)  # intentionally not the true transpose
      return x + api.linear_call(fn, tp, y, x)
    def f_ref(x, y):
      return x + x / y
    x = jnp.ones(2) * 6.
    y = jnp.ones(2) * 3.
    self.assertAllClose(f(x, y), f_ref(x, y))
    f1 = lambda x: f(x, y)
    f1_ref = lambda x: f_ref(x, 2. * y)  # reference matching the wrong rule
    self.assertAllClose(self.transpose(f1, x)(x),
                        self.transpose(f1_ref, x)(x))
  def test_linear_call_transpose_transpose_transpose(self):
    # Transposing twice must recover the forward behavior; a third
    # transpose must again give the (custom) transpose.
    def fn(r, x): return x / r
    def tp(r, t): return t / (2. * r)
    def f_(x, y):
      return x + api.linear_call(fn, tp, y, x)
    x = jnp.ones(2) * 6.
    y = jnp.ones(2) * 3.
    f = lambda x: f_(x, y)
    ft = self.transpose(f, x)
    ftt = self.transpose(ft, x)
    fttt = self.transpose(ftt, x)
    self.assertAllClose(ft(x), x + tp(y, x))
    self.assertAllClose(f(x), ftt(x))
    self.assertAllClose(ft(x), fttt(x))
  def test_linear_call_scalar_to_vector(self):
    # Output pytree (a list of two arrays) differs from the input; the
    # transpose rule maps the list cotangent back to a scalar.
    def f(c, x):
      def fn(_, x):
        return [x, x]
      def tp(_, t):
        t1, t2 = t
        return t1 + t2
      return api.linear_call(fn, tp, (), c * x)
    def f_ref(c, x):
      return [c * x, c * x]
    c, x = 2., 3.
    t = [4., 5.]
    self.assertAllClose(f(c, x), f_ref(c, x))
    self.assertAllClose(self.transpose(partial(f, c), x)(t),
                        self.transpose(partial(f_ref, c), x)(t))
  def test_linear_call_nested(self):
    # A linear_call whose transpose rule itself invokes a linear_call;
    # repeated transposition must alternate between the two behaviors.
    def id_(x):
      def f(_, x): return x
      def t(_, t): return 0.   # transpose deliberately returns 0.
      return api.linear_call(f, t, (), x)
    def f(x):
      def f_(_, x): return id_(x)
      def t_(_, t): return id_(7.)  # transpose deliberately returns 7.
      return api.linear_call(f_, t_, (), x)
    x = 5.
    id_t = self.transpose(id_, x)
    id_tt = self.transpose(id_t, x)
    ft = self.transpose(f, x)
    ftt = self.transpose(ft, x)
    fttt = self.transpose(ftt, x)
    self.assertAllClose(id_(x), x)
    self.assertAllClose(id_t(x), 0.)
    self.assertAllClose(id_tt(x), x)
    self.assertAllClose(f(x), x)
    self.assertAllClose(ft(x), 7.)
    self.assertAllClose(ftt(x), x)
    self.assertAllClose(fttt(x), 7.)
  def test_linear_call_jit(self):
    # linear_call and its transpose must both work under jit.
    def f(x, y):
      def fn(r, x): return x / r
      def tp(r, t): return t / r
      return x + api.linear_call(fn, tp, y, x)
    x = jnp.ones(2) * 6.
    y = jnp.ones(2) * 3.
    self.assertAllClose(f(x, y), jax.jit(f)(x, y))
    f1 = lambda x: f(x, y)
    self.assertAllClose(self.transpose(f1, x)(x),
                        jax.jit(self.transpose(f1, x))(x))
class InvertibleADTest(jtu.JaxTestCase):
  """Tests for jax.invertible: memory-saving reverse-mode AD that
  reconstructs intermediates by inverting the forward computation instead
  of storing residuals."""

  @jtu.ignore_warning(message="Values that an @invertible function closes")
  def test_invertible_basic(self):
    def f(x):
      return lax.mul(lax.mul(lax.exp(x), 4.), x)
    finv = jax.invertible(f)
    x = jnp.ones((5,))
    jaxpr = jax.make_jaxpr(lambda p, ct: jax.vjp(finv, p)[1](ct))(x, x)
    # Expected jaxpr, kept for reference:
    # { lambda ; a b.
    #   let c = exp a
    #       d = mul c 4.0
    #       e = mul d a
    #       f = mul b a
    #       g = div e a
    #       h = mul b g
    #       i = mul f 4.0
    #       j = div g 4.0
    #       k = mul f j
    #       _ = reduce_sum[ axes=(0,) ] k
    #       _ = log j
    #       l = mul i j
    #       m = add_any h l
    #   in (m,) }
    # The presence of `div` and `log` in the jaxpr shows the backward pass
    # inverts the forward computation rather than replaying saved residuals.
    # BUG FIX: this line previously read `rtIn('div', str(jaxpr))`, which is
    # a NameError; the intent, matching the `log` check below, is assertIn.
    self.assertIn('div', str(jaxpr))
    self.assertIn('log', str(jaxpr))
    # Values and gradients must agree with the non-invertible version.
    self.assertAllClose(jax.value_and_grad(lambda x: np.sum(f(x)))(x),
                        jax.value_and_grad(lambda x: np.sum(finv(x)))(x),
                        check_dtypes=True)

  def test_invertible_blocks(self):
    # RevNet-style reversible blocks via custom_ivjp: the inverse-VJP rule
    # reconstructs the block inputs from its outputs.
    def mk_reversible_block(f, g):
      @jax.custom_ivjp
      def rev_block(x1, x2):
        y1 = f(x2) + x1
        y2 = g(y1) + x2
        return y1, y2
      @rev_block.defivjp
      def rev_block_ivjp(xs, ys, dys):
        (y1, y2) = ys
        (dy1, dy2) = dys
        dgo, dx2 = dy2, dy2
        go, gvjp = jax.vjp(g, y1)
        dy1 += gvjp(dgo)[0]
        del gvjp
        x2 = y2 - go  # invert the block: recover x2 from the outputs
        dfo, dx1 = dy1, dy1
        fo, fvjp = jax.vjp(f, x2)
        dx2 += fvjp(dfo)[0]
        del fvjp
        x1 = y1 - fo  # invert the block: recover x1
        return (x1, x2), (dx1, dx2)
      return rev_block
    rev_block = mk_reversible_block(jnp.sin, jnp.cos)
    def g(x1, x2):
      for i in range(2):
        x1, x2 = rev_block(x1, x2)
      return x1, x2
    def reduce(f, x1, x2):
      y1, y2 = f(x1, x2)
      return np.sum(y1) + np.sum(y2)
    x = np.ones((1,))
    # jax.invertible(g) must agree with plain g in both value and gradients.
    self.assertAllClose(jax.value_and_grad(partial(reduce, jax.invertible(g)), argnums=(0, 1))(x, x + 2),
                        jax.value_and_grad(partial(reduce, g), argnums=(0, 1))(x, x + 2),
                        check_dtypes=True)

  def test_invertible_partial_diff(self):
    # Check that we can differentiate with respect to only a subset of inputs
    # of the invertible function.
    def f(x, y):
      return lax.mul(lax.mul(lax.exp(x), 4.), x), lax.add(y, 4.)
    finv = jax.invertible(f)
    o = np.ones((5,))
    self.assertAllClose(jax.value_and_grad(lambda x: np.sum(f(x, o)[0]))(o),
                        jax.value_and_grad(lambda x: np.sum(finv(x, o)[0]))(o),
                        check_dtypes=True)

  def test_invertible_pytree(self):
    # Invertible functions whose arguments are pytrees (a tuple here).
    def f(x, y):
      return lax.add(lax.mul(lax.exp(x[0]), x[1]), y)
    finv = jax.invertible(f)
    o = np.ones((5,))
    self.assertAllClose(jax.value_and_grad(lambda x: np.sum(f((x, x), x)[0]))(o),
                        jax.value_and_grad(lambda x: np.sum(finv((x, x), x)[0]))(o),
                        check_dtypes=True)
class BufferDonationTest(jtu.BufferDonationTestCase):
  """Tests for donate_argnums buffer donation under pmap."""
  @jtu.skip_on_devices("cpu")  # In/out aliasing not supported on CPU.
  def test_pmap_donate_argnums_invalidates_input(self):
    # Donating an argument allows its buffer to be reused for the output,
    # which deletes (invalidates) the donated input array.
    move = api.pmap(lambda x: x + x - x, donate_argnums=0)
    n = jax.local_device_count()
    x = api.pmap(lambda x: x)(jnp.ones([n]))
    y = move(x)
    self.assertDeleted(x)
    np.testing.assert_allclose(y, [1.] * n)
  def test_pmap_nested_donate_ignored(self):
    # Donation requested on a pmap nested inside jit is silently ignored.
    pmap_fun = jit(lambda x: api.pmap(lambda y: y ** 2, donate_argnums=0)(x))
    a = api.pmap(lambda x: x)(jnp.array([1]))
    # NOTE(mattjj): stopped raising error here and instead just ignored
    # with self.assertRaisesRegex(ValueError, "nested.*not supported"):
    #   pmap_fun(a)
    pmap_fun(a)  # doesn't crash
class NamedCallTest(jtu.JaxTestCase):
  """Tests for api.named_call, which attaches a name scope to a function
  so it appears in the lowered computation."""
  def test_default_name(self):
    @api.named_call
    def my_test_function(x):
      return x**2
    @jax.jit
    def f(x):
      return my_test_function(x)
    # With no explicit name=, the wrapped function's own name is used and
    # must show up in the HLO text.
    c = jax.xla_computation(f)(2)
    self.assertIn("my_test_function", c.as_hlo_text())
  def test_non_jaxtype_arg(self):
    # named_call must tolerate a non-jaxtype argument as long as the
    # surrounding jit marks it static.
    def f(not_a_jaxtype, a_jaxtype):
      if not_a_jaxtype:
        return a_jaxtype
      return 0
    f = api.named_call(f, name="test")
    out = jax.jit(f, static_argnums=(0,))("not a Jaxtype", 1)
    self.assertEqual(out, 1)
  @parameterized.parameters(jax.jit, jax.grad, jax.vmap, jax.remat)
  def test_jax_transforms(self, transform):
    # named_call must be semantically transparent under each transform.
    f = jnp.sum
    x = jnp.array([1.])
    unnamed_out = transform(f)(x)
    named_out = transform(api.named_call(f, name="test"))(x)
    self.assertEqual(unnamed_out, named_out)
  def test_static_argnums(self):
    f = api.named_call(lambda x, y: y if x else None, name="test")
    f = jax.jit(f, static_argnums=(0,))
    out = f(True, 5)
    self.assertEqual(out, 5)
  def test_partial_eval(self):
    # functools.partial applied over a named_call-wrapped function.
    f = api.named_call(lambda x, y: y if x else None, name="test")
    f = jax.jit(functools.partial(f, True))
    out = f(5)
    self.assertEqual(out, 5)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_jit_type={}_func={}".format(jit_type, func),
       "jit_type": jit_type, "func": func}
      for func in ['identity', 'asarray', 'device_put']
      for jit_type in [None, "python", "cpp"]
      if not (jit_type is None and func == 'identity')))
  def test_integer_overflow(self, jit_type, func):
    # Integers outside the canonical int dtype's range must raise
    # OverflowError rather than silently wrapping, under the python jit,
    # the cpp jit, and with no jit at all.
    funcdict = {
      'identity': lambda x: x,
      'asarray': jnp.asarray,
      'device_put': api.device_put,
    }
    jit = {
      'python': api._python_jit,
      'cpp': api._cpp_jit,
      None: lambda x: x,   # no jit
    }
    f = jit[jit_type](funcdict[func])
    int_dtype = dtypes.canonicalize_dtype(jnp.int_)
    int_max = np.iinfo(int_dtype).max
    int_min = np.iinfo(int_dtype).min
    self.assertEqual(f(int_max).dtype, int_dtype)
    self.assertEqual(f(int_min).dtype, int_dtype)
    self.assertRaises(OverflowError, f, int_max + 1)
    self.assertRaises(OverflowError, f, int_min - 1)
class BackendsTest(jtu.JaxTestCase):
  @unittest.skipIf(not sys.executable, "test requires sys.executable")
  @jtu.skip_on_devices("gpu", "tpu")
  def test_cpu_warning_suppression(self):
    # The "No GPU/TPU found" warning is emitted on first use of jax.numpy,
    # but must be suppressed when the user explicitly selects the cpu
    # platform before first use. Each snippet runs in a fresh interpreter
    # so warning state is not shared between the two cases.
    warning_expected = (
      "import jax; "
      "jax.numpy.arange(10)")
    warning_not_expected = (
      "import jax; "
      "jax.config.update('jax_platform_name', 'cpu'); "
      "jax.numpy.arange(10)")
    result = subprocess.run([sys.executable, '-c', warning_expected],
                            check=True, capture_output=True)
    assert "No GPU/TPU found" in result.stderr.decode()
    result = subprocess.run([sys.executable, '-c', warning_not_expected],
                            check=True, capture_output=True)
    assert "No GPU/TPU found" not in result.stderr.decode()
# Script entry point: run the suite under absltest with JAX's test loader.
if __name__ == '__main__':
  absltest.main(testLoader=jtu.JaxTestLoader())
| true | true |
1c379c56877c1c8f51cd44e18e3eba0986f7c3d1 | 907 | py | Python | tmp/wswp/crawler.py | godontop/python | a33391304e3396d2f208dfc8cec3c200e4f18136 | [
"MIT"
] | null | null | null | tmp/wswp/crawler.py | godontop/python | a33391304e3396d2f208dfc8cec3c200e4f18136 | [
"MIT"
] | null | null | null | tmp/wswp/crawler.py | godontop/python | a33391304e3396d2f208dfc8cec3c200e4f18136 | [
"MIT"
] | null | null | null | # coding=utf-8
import datetime
import re
import time
import urllib.error
import urllib.parse
import urllib.request
import urllib.robotparser
import sys
from downloader import Downloader
def crawler(seed_url, delay=1, max_depth=2, user_agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36', proxies=None, num_retries=2, scrape_callback=None, cache=None):
    """Crawl from the given seed URL, downloading each queued page.

    :param seed_url: first URL placed on the crawl queue.
    :param delay: seconds the Downloader waits between requests to one domain.
    :param max_depth: declared crawl-depth limit (not enforced in this excerpt).
    :param user_agent: User-Agent header sent with every request.
    :param proxies: optional proxies passed through to the Downloader.
    :param num_retries: download retry count on transient failures.
    :param scrape_callback: if given, it is called first with a fixed URL and
        its downloaded body, and its return value replaces the crawl queue.
    :param cache: optional page cache handed to the Downloader.

    NOTE(review): this excerpt ends immediately after ``html = D(url)`` --
    the loop body that processes the downloaded page is not visible here.
    """
    crawl_queue = [seed_url]
    # D is callable: D(url) downloads and returns the page body.
    D = Downloader(delay=delay, user_agent=user_agent, proxies=proxies,
                   num_retries=num_retries, cache=cache)
    if scrape_callback:
        # Seed the queue from the Alexa top-1m zip hosted at this fixed URL.
        crawl_queue = scrape_callback('http://python.ticp.net:2018/top-1m.csv.zip', D('http://python.ticp.net:2018/top-1m.csv.zip'))
    while crawl_queue:
        url = crawl_queue.pop()
        html = D(url)
| 37.791667 | 243 | 0.724366 |
import datetime
import re
import time
import urllib.error
import urllib.parse
import urllib.request
import urllib.robotparser
import sys
from downloader import Downloader
def crawler(seed_url, delay=1, max_depth=2, user_agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36', proxies=None, num_retries=2, scrape_callback=None, cache=None):
crawl_queue = [seed_url]
D = Downloader(delay=delay, user_agent=user_agent, proxies=proxies,
num_retries=num_retries, cache=cache)
if scrape_callback:
crawl_queue = scrape_callback('http://python.ticp.net:2018/top-1m.csv.zip', D('http://python.ticp.net:2018/top-1m.csv.zip'))
while crawl_queue:
url = crawl_queue.pop()
html = D(url)
| true | true |
1c379db5bb9eb9cc51e5d447527f55bc6ea4f4f7 | 4,676 | py | Python | flink-ai-flow/ai_flow/application_master/master.py | MarvinMiao/flink-ai-extended | e45eecf2deea6976ba3d7ba821ffb8d9ce0a17f4 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-12-12T15:21:05.000Z | 2020-12-12T15:21:05.000Z | flink-ai-flow/ai_flow/application_master/master.py | MarvinMiao/flink-ai-extended | e45eecf2deea6976ba3d7ba821ffb8d9ce0a17f4 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-01-30T11:28:37.000Z | 2021-01-30T11:28:37.000Z | flink-ai-flow/ai_flow/application_master/master.py | MarvinMiao/flink-ai-extended | e45eecf2deea6976ba3d7ba821ffb8d9ce0a17f4 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
from typing import Text
from ai_flow.rest_endpoint.service.server import AIFlowServer, HighAvailableAIFlowServer
from ai_flow.store.db.base_model import base
from ai_flow.store.sqlalchemy_store import SqlAlchemyStore
from ai_flow.store.mongo_store import MongoStoreConnManager
from ai_flow.application_master.master_config import MasterConfig, DBType
from ai_flow.client.ai_flow_client import get_ai_flow_client
import logging
_SQLITE_DB_FILE = 'aiflow.db'
_SQLITE_DB_URI = '%s%s' % ('sqlite:///', _SQLITE_DB_FILE)
_MYSQL_DB_URI = 'mysql+pymysql://root:aliyunmysql@localhost:3306/aiflow'
_PORT = '50051'
GLOBAL_MASTER_CONFIG = {}
class AIFlowMaster(object):
    """Lifecycle manager for an AI Flow master server.

    Loads the master configuration, starts either a plain or a
    high-availability gRPC server, and can stop it and clean the backing
    store afterwards.
    """

    def __init__(self, config_file: Text = None, enable_ha=False, server_uri: str = None, ttl_ms=10000) -> None:
        """Record the configuration source; nothing is started here.

        :param config_file: master configuration file.
        """
        super().__init__()
        self.config_file = config_file
        self.server = None
        self.master_config = MasterConfig()
        # NOTE(review): start() reads HA settings from master_config, not from
        # these three attributes -- confirm whether they are still needed.
        self.enable_ha = enable_ha
        self.server_uri = server_uri
        self.ttl_ms = ttl_ms

    def start(self,
              is_block=False) -> None:
        """Start the AI flow master.

        :param is_block: AI flow master will run non-stop if True.
        """
        cfg = self.master_config
        if self.config_file is None:
            cfg.set_master_port(str(_PORT))
        else:
            cfg.load_from_file(self.config_file)
        # Publish the effective config module-wide before the server starts.
        global GLOBAL_MASTER_CONFIG
        GLOBAL_MASTER_CONFIG = cfg
        logging.info("AI Flow Master Config {}".format(GLOBAL_MASTER_CONFIG))
        port = str(cfg.get_master_port())
        if cfg.get_enable_ha():
            self.server = HighAvailableAIFlowServer(
                store_uri=cfg.get_db_uri(),
                port=port,
                start_default_notification=cfg.start_default_notification(),
                notification_uri=cfg.get_notification_uri(),
                server_uri=cfg.get_master_ip() + ":" + port,
                ttl_ms=cfg.get_ha_ttl_ms())
        else:
            self.server = AIFlowServer(
                store_uri=cfg.get_db_uri(),
                port=port,
                start_default_notification=cfg.start_default_notification(),
                notification_uri=cfg.get_notification_uri())
        self.server.run(is_block=is_block)

    def stop(self, clear_sql_lite_db_file=True) -> None:
        """Stop the AI flow master.

        :param clear_sql_lite_db_file: If True, the sqlite database files will
            be deleted when the server stops working.
        """
        self.server.stop()
        db_type = self.master_config.get_db_type()
        if db_type == DBType.SQLITE and clear_sql_lite_db_file:
            # Drop the schema, then remove the sqlite file itself.
            store = SqlAlchemyStore(self.master_config.get_db_uri())
            base.metadata.drop_all(store.db_engine)
            os.remove(self.master_config.get_sql_lite_db_file())
        elif db_type == DBType.MONGODB:
            MongoStoreConnManager().disconnect_all()

    def _clear_db(self):
        # Reset the backing store to a pristine schema (keeps the sqlite file).
        db_type = self.master_config.get_db_type()
        if db_type == DBType.SQLITE:
            store = SqlAlchemyStore(self.master_config.get_db_uri())
            base.metadata.drop_all(store.db_engine)
            base.metadata.create_all(store.db_engine)
        elif db_type == DBType.MONGODB:
            MongoStoreConnManager().drop_all()
def set_master_config():
    """Fetch the remote master configuration over RPC and copy every entry
    into GLOBAL_MASTER_CONFIG (status code and message are ignored)."""
    _code, remote_config, _message = get_ai_flow_client().get_master_config()
    for key, value in remote_config.items():
        GLOBAL_MASTER_CONFIG[key] = value
| 42.126126 | 120 | 0.695252 |
import os
from typing import Text
from ai_flow.rest_endpoint.service.server import AIFlowServer, HighAvailableAIFlowServer
from ai_flow.store.db.base_model import base
from ai_flow.store.sqlalchemy_store import SqlAlchemyStore
from ai_flow.store.mongo_store import MongoStoreConnManager
from ai_flow.application_master.master_config import MasterConfig, DBType
from ai_flow.client.ai_flow_client import get_ai_flow_client
import logging
_SQLITE_DB_FILE = 'aiflow.db'
_SQLITE_DB_URI = '%s%s' % ('sqlite:///', _SQLITE_DB_FILE)
_MYSQL_DB_URI = 'mysql+pymysql://root:aliyunmysql@localhost:3306/aiflow'
_PORT = '50051'
GLOBAL_MASTER_CONFIG = {}
class AIFlowMaster(object):
def __init__(self, config_file: Text = None, enable_ha=False, server_uri: str = None, ttl_ms=10000) -> None:
super().__init__()
self.config_file = config_file
self.server = None
self.master_config = MasterConfig()
self.enable_ha = enable_ha
self.server_uri = server_uri
self.ttl_ms = ttl_ms
def start(self,
is_block=False) -> None:
if self.config_file is not None:
self.master_config.load_from_file(self.config_file)
else:
self.master_config.set_master_port(str(_PORT))
global GLOBAL_MASTER_CONFIG
GLOBAL_MASTER_CONFIG = self.master_config
logging.info("AI Flow Master Config {}".format(GLOBAL_MASTER_CONFIG))
if not self.master_config.get_enable_ha():
self.server = AIFlowServer(
store_uri=self.master_config.get_db_uri(),
port=str(self.master_config.get_master_port()),
start_default_notification=self.master_config.start_default_notification(),
notification_uri=self.master_config.get_notification_uri())
else:
self.server = HighAvailableAIFlowServer(
store_uri=self.master_config.get_db_uri(),
port=str(self.master_config.get_master_port()),
start_default_notification=self.master_config.start_default_notification(),
notification_uri=self.master_config.get_notification_uri(),
server_uri=self.master_config.get_master_ip() + ":" + str(self.master_config.get_master_port()),
ttl_ms=self.master_config.get_ha_ttl_ms())
self.server.run(is_block=is_block)
def stop(self, clear_sql_lite_db_file=True) -> None:
self.server.stop()
if self.master_config.get_db_type() == DBType.SQLITE and clear_sql_lite_db_file:
store = SqlAlchemyStore(self.master_config.get_db_uri())
base.metadata.drop_all(store.db_engine)
os.remove(self.master_config.get_sql_lite_db_file())
elif self.master_config.get_db_type() == DBType.MONGODB:
MongoStoreConnManager().disconnect_all()
def _clear_db(self):
if self.master_config.get_db_type() == DBType.SQLITE:
store = SqlAlchemyStore(self.master_config.get_db_uri())
base.metadata.drop_all(store.db_engine)
base.metadata.create_all(store.db_engine)
elif self.master_config.get_db_type() == DBType.MONGODB:
MongoStoreConnManager().drop_all()
def set_master_config():
code, config, message = get_ai_flow_client().get_master_config()
for k, v in config.items():
GLOBAL_MASTER_CONFIG[k] = v
| true | true |
1c379de15770b17dc2ba1bf16a65246f51b2f12e | 136 | py | Python | abc/abc143/abc143c-1.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | 1 | 2019-08-21T00:49:34.000Z | 2019-08-21T00:49:34.000Z | abc/abc143/abc143c-1.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | abc/abc143/abc143c-1.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | N = int(input())
S = input()
# Count maximal runs of equal consecutive characters among the first N chars:
# a run starts wherever a character differs from its predecessor.
run_count = 0
previous = ''
for index in range(N):
    current = S[index]
    if current != previous:
        run_count += 1
        previous = current
print(run_count)
| 12.363636 | 19 | 0.448529 | N = int(input())
S = input()
p = ''
result = 0
for i in range(N):
if p != S[i]:
result += 1
p = S[i]
print(result)
| true | true |
1c379e95efb13865d25b98a431b6010d16bbe638 | 11,345 | py | Python | es_test_data.py | unfor19/elasticsearch-test-data | e79be946aee74fb4f4cc77cf9209ac3a62f710be | [
"MIT"
] | 1 | 2021-09-18T06:50:04.000Z | 2021-09-18T06:50:04.000Z | es_test_data.py | unfor19/elasticsearch-test-data | e79be946aee74fb4f4cc77cf9209ac3a62f710be | [
"MIT"
] | null | null | null | es_test_data.py | unfor19/elasticsearch-test-data | e79be946aee74fb4f4cc77cf9209ac3a62f710be | [
"MIT"
] | null | null | null | #!/usr/bin/python
import json
import time
import logging
import random
import string
import uuid
import datetime
import tornado.gen
import tornado.httpclient
import tornado.ioloop
import tornado.options
try:
    # Python 2 compatibility: alias range to the lazy xrange when it exists.
    xrange
    range = xrange
except NameError:
    # Python 3: built-in range is already lazy.
    pass
# Shared Tornado async client used by upload_batch.
async_http_client = tornado.httpclient.AsyncHTTPClient()
# Default headers for every Elasticsearch request.
headers = tornado.httputil.HTTPHeaders({"content-type": "application/json"})
# Monotonic counter used for --id_type=int document ids.
id_counter = 0
# Running total of docs handed to the bulk uploader.
upload_data_count = 0
# Word list loaded from --dict_file; used by the "dict" field type.
_dict_data = None
def delete_index(idx_name):
    """Delete index ``idx_name``; a missing index (HTTPError) is ignored."""
    try:
        url = "%s/%s?refresh=true" % (tornado.options.options.es_url, idx_name)
        req = tornado.httpclient.HTTPRequest(
            url, headers=headers, method="DELETE", request_timeout=240,
            auth_username=tornado.options.options.username,
            auth_password=tornado.options.options.password,
            validate_cert=tornado.options.options.validate_cert)
        resp = tornado.httpclient.HTTPClient().fetch(req)
        logging.info('Deleting index "%s" done %s' % (idx_name, resp.body))
    except tornado.httpclient.HTTPError:
        # Index did not exist (or delete was rejected) -- treat as a no-op.
        pass
def create_index(idx_name):
    """Create index ``idx_name`` with the configured shard/replica counts.

    An HTTPError (typically "index already exists") is logged and ignored.
    """
    opts = tornado.options.options
    settings = {
        "settings": {
            "number_of_shards": opts.num_of_shards,
            "number_of_replicas": opts.num_of_replicas
        },
        "refresh": True
    }
    url = "%s/%s" % (opts.es_url, idx_name)
    try:
        logging.info('Trying to create index %s' % (url))
        req = tornado.httpclient.HTTPRequest(
            url, headers=headers, method="PUT", body=json.dumps(settings),
            request_timeout=240, auth_username=opts.username,
            auth_password=opts.password, validate_cert=opts.validate_cert)
        resp = tornado.httpclient.HTTPClient().fetch(req)
        logging.info('Creating index "%s" done %s' % (idx_name, resp.body))
    except tornado.httpclient.HTTPError:
        logging.info('Looks like the index exists already')
@tornado.gen.coroutine
def upload_batch(upload_data_txt):
    """POST one bulk payload to Elasticsearch and log the outcome.

    Upload failures are logged and swallowed so the generator keeps going.
    """
    opts = tornado.options.options
    try:
        req = tornado.httpclient.HTTPRequest(
            opts.es_url + "/_bulk",
            method="POST",
            body=upload_data_txt,
            headers=headers,
            request_timeout=opts.http_upload_timeout,
            auth_username=opts.username,
            auth_password=opts.password,
            validate_cert=opts.validate_cert)
        response = yield async_http_client.fetch(req)
    except Exception as ex:
        logging.error("upload failed, error: %s" % ex)
        return
    result = json.loads(response.body.decode('utf-8'))
    status = "FAILED" if result['errors'] else "OK"
    took_ms = int(result['took'])
    logging.info("Upload: %s - upload took: %5dms, total docs uploaded: %7d" % (status, took_ms, upload_data_count))
def get_data_for_format(format):
    """Produce one (field_name, value) pair from a field spec string.

    Specs look like ``name:type[:p1[:p2]]`` where the optional numeric
    components bound the generated size or value.  Supported types: bool,
    str, int, ipv4, ts, tstxt, words, dict, text.  Unknown types yield ''.
    """
    parts = format.split(":")
    if not parts:
        return None, None
    field_name = parts[0]
    field_type = parts[1]

    def spec_int(idx, fallback):
        # Optional numeric component of the spec, defaulting when absent.
        return int(parts[idx]) if len(parts) > idx else fallback

    alphabet = string.ascii_letters + string.digits
    return_val = ''
    if field_type == "bool":
        return_val = random.choice([True, False])
    elif field_type == "str":
        low = spec_int(2, 3)
        high = spec_int(3, low + 7)
        size = generate_count(low, high)
        return_val = "".join(random.choice(alphabet) for _ in range(size))
    elif field_type == "int":
        low = spec_int(2, 0)
        high = spec_int(3, low + 100000)
        return_val = generate_count(low, high)
    elif field_type == "ipv4":
        return_val = "{0}.{1}.{2}.{3}".format(
            generate_count(0, 245), generate_count(0, 245),
            generate_count(0, 245), generate_count(0, 245))
    elif field_type in ["ts", "tstxt"]:
        # Default window: +/- 30 days around "now", in epoch seconds.
        now = int(time.time())
        per_day = 24 * 60 * 60
        low = spec_int(2, now - 30 * per_day)
        high = spec_int(3, now + 30 * per_day)
        ts = generate_count(low, high)
        if field_type == "ts":
            return_val = int(ts * 1000)
        else:
            return_val = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%dT%H:%M:%S.000-0000")
    elif field_type == "words":
        low = spec_int(2, 2)
        high = spec_int(3, low + 8)
        pieces = []
        for _ in range(generate_count(low, high)):
            size = random.randrange(3, 10)
            pieces.append("".join(random.choice(alphabet) for _ in range(size)))
        return_val = " ".join(pieces)
    elif field_type == "dict":
        # Sample words from the --dict_file word list loaded at startup.
        global _dict_data
        low = spec_int(2, 2)
        high = spec_int(3, low + 8)
        return_val = " ".join(random.choice(_dict_data).strip()
                              for _ in range(generate_count(low, high)))
    elif field_type == "text":
        vocab = parts[2].split("-") if len(parts) > 2 else ["text1", "text2", "text3"]
        low = spec_int(3, 1)
        high = spec_int(4, low + 1)
        return_val = " ".join("" + random.choice(vocab)
                              for _ in range(generate_count(low, high)))
    return field_name, return_val


def generate_count(min, max):
    """Return ``max`` when the bounds coincide, otherwise a random integer
    from the half-open interval between the smaller and the larger bound."""
    if min == max:
        return max
    lo, hi = (max, min) if min > max else (min, max)
    return random.randrange(lo, hi)
def generate_random_doc(format):
    """Build one random document from the parsed --format field specs,
    attaching an ``_id`` when --id_type is configured ('int' or 'uuid4')."""
    global id_counter
    doc = {}
    for spec in format:
        key, value = get_data_for_format(spec)
        if key:
            doc[key] = value
    id_type = tornado.options.options.id_type
    if not id_type:
        return doc
    if id_type == 'int':
        # Sequential ids from the module-level counter.
        doc['_id'] = id_counter
        id_counter += 1
    elif id_type == 'uuid4':
        doc['_id'] = str(uuid.uuid4())
    return doc
def set_index_refresh(val):
    """PUT the index ``refresh_interval`` setting to ``val``, logging failures."""
    payload = json.dumps({"index": {"refresh_interval": val}})
    url = "%s/%s/_settings" % (tornado.options.options.es_url, tornado.options.options.index_name)
    try:
        req = tornado.httpclient.HTTPRequest(
            url, headers=headers, method="PUT", body=payload, request_timeout=240,
            auth_username=tornado.options.options.username,
            auth_password=tornado.options.options.password,
            validate_cert=tornado.options.options.validate_cert)
        tornado.httpclient.HTTPClient().fetch(req)
        logging.info('Set index refresh to %s' % val)
    except Exception as ex:
        # Best effort: a failed settings change should not abort the run.
        logging.exception(ex)
@tornado.gen.coroutine
def generate_test_data():
    """Generate --count random docs per --format and bulk-upload them.

    Honors --force_init_index, --set_refresh, --out_file and --dict_file;
    uploads in batches of --batch_size via upload_batch().
    """
    global upload_data_count
    if tornado.options.options.force_init_index:
        delete_index(tornado.options.options.index_name)
    create_index(tornado.options.options.index_name)
    # todo: query what refresh is set to, then restore later
    if tornado.options.options.set_refresh:
        # Disable refresh during the bulk load for faster indexing.
        set_index_refresh("-1")
    if tornado.options.options.out_file:
        out_file = open(tornado.options.options.out_file, "w")
    else:
        out_file = None
    if tornado.options.options.dict_file:
        # Load the word list used by the "dict" field type.
        global _dict_data
        with open(tornado.options.options.dict_file, 'r') as f:
            _dict_data = f.readlines()
        logging.info("Loaded %d words from the %s" % (len(_dict_data), tornado.options.options.dict_file))
    format = tornado.options.options.format.split(',')
    if not format:
        logging.error('invalid format')
        exit(1)
    ts_start = int(time.time())
    upload_data_txt = ""
    logging.info("Generating %d docs, upload batch size is %d" % (tornado.options.options.count,
                                                                  tornado.options.options.batch_size))
    for num in range(0, tornado.options.options.count):
        item = generate_random_doc(format)
        if out_file:
            # Mirror each generated doc to the output file, one JSON per line.
            out_file.write("%s\n" % json.dumps(item))
        # Bulk API action line, optionally carrying the doc's explicit _id.
        cmd = {'index': {'_index': tornado.options.options.index_name,
                         '_type': tornado.options.options.index_type}}
        if '_id' in item:
            cmd['index']['_id'] = item['_id']
        upload_data_txt += json.dumps(cmd) + "\n"
        upload_data_txt += json.dumps(item) + "\n"
        upload_data_count += 1
        if upload_data_count % tornado.options.options.batch_size == 0:
            yield upload_batch(upload_data_txt)
            upload_data_txt = ""
    # upload remaining items in `upload_data_txt`
    if upload_data_txt:
        yield upload_batch(upload_data_txt)
    if tornado.options.options.set_refresh:
        # Restore a normal refresh interval after the bulk load.
        set_index_refresh("1s")
    if out_file:
        out_file.close()
    took_secs = int(time.time() - ts_start)
    logging.info("Done - total docs uploaded: %d, took %d seconds" % (tornado.options.options.count, took_secs))
if __name__ == '__main__':
    # CLI entry point: declare all options, parse argv, then drive the
    # generator coroutine to completion on the IOLoop.
    tornado.options.define("es_url", type=str, default='http://localhost:9200/', help="URL of your Elasticsearch node")
    tornado.options.define("index_name", type=str, default='test_data', help="Name of the index to store your messages")
    tornado.options.define("index_type", type=str, default='test_type', help="Type")
    tornado.options.define("batch_size", type=int, default=1000, help="Elasticsearch bulk index batch size")
    tornado.options.define("num_of_shards", type=int, default=2, help="Number of shards for ES index")
    tornado.options.define("http_upload_timeout", type=int, default=3, help="Timeout in seconds when uploading data")
    tornado.options.define("count", type=int, default=100000, help="Number of docs to generate")
    tornado.options.define("format", type=str, default='name:str,age:int,last_updated:ts', help="message format")
    tornado.options.define("num_of_replicas", type=int, default=0, help="Number of replicas for ES index")
    tornado.options.define("force_init_index", type=bool, default=False, help="Force deleting and re-initializing the Elasticsearch index")
    tornado.options.define("set_refresh", type=bool, default=False, help="Set refresh rate to -1 before starting the upload")
    tornado.options.define("out_file", type=str, default=False, help="If set, write test data to out_file as well.")
    tornado.options.define("id_type", type=str, default=None, help="Type of 'id' to use for the docs, valid settings are int and uuid4, None is default")
    tornado.options.define("dict_file", type=str, default=None, help="Name of dictionary file to use")
    tornado.options.define("username", type=str, default=None, help="Username for elasticsearch")
    tornado.options.define("password", type=str, default=None, help="Password for elasticsearch")
    tornado.options.define("validate_cert", type=bool, default=True, help="SSL validate_cert for requests. Use false for self-signed certificates.")
    tornado.options.parse_command_line()
    tornado.ioloop.IOLoop.instance().run_sync(generate_test_data)
| 40.230496 | 265 | 0.65509 |
import json
import time
import logging
import random
import string
import uuid
import datetime
import tornado.gen
import tornado.httpclient
import tornado.ioloop
import tornado.options
try:
xrange
range = xrange
except NameError:
pass
async_http_client = tornado.httpclient.AsyncHTTPClient()
headers = tornado.httputil.HTTPHeaders({"content-type": "application/json"})
id_counter = 0
upload_data_count = 0
_dict_data = None
def delete_index(idx_name):
try:
url = "%s/%s?refresh=true" % (tornado.options.options.es_url, idx_name)
request = tornado.httpclient.HTTPRequest(url, headers=headers, method="DELETE", request_timeout=240, auth_username=tornado.options.options.username, auth_password=tornado.options.options.password, validate_cert=tornado.options.options.validate_cert)
response = tornado.httpclient.HTTPClient().fetch(request)
logging.info('Deleting index "%s" done %s' % (idx_name, response.body))
except tornado.httpclient.HTTPError:
pass
def create_index(idx_name):
schema = {
"settings": {
"number_of_shards": tornado.options.options.num_of_shards,
"number_of_replicas": tornado.options.options.num_of_replicas
},
"refresh": True
}
body = json.dumps(schema)
url = "%s/%s" % (tornado.options.options.es_url, idx_name)
try:
logging.info('Trying to create index %s' % (url))
request = tornado.httpclient.HTTPRequest(url, headers=headers, method="PUT", body=body, request_timeout=240, auth_username=tornado.options.options.username, auth_password=tornado.options.options.password, validate_cert=tornado.options.options.validate_cert)
response = tornado.httpclient.HTTPClient().fetch(request)
logging.info('Creating index "%s" done %s' % (idx_name, response.body))
except tornado.httpclient.HTTPError:
logging.info('Looks like the index exists already')
pass
@tornado.gen.coroutine
def upload_batch(upload_data_txt):
try:
request = tornado.httpclient.HTTPRequest(tornado.options.options.es_url + "/_bulk",
method="POST",
body=upload_data_txt,
headers=headers,
request_timeout=tornado.options.options.http_upload_timeout,
auth_username=tornado.options.options.username, auth_password=tornado.options.options.password, validate_cert=tornado.options.options.validate_cert)
response = yield async_http_client.fetch(request)
except Exception as ex:
logging.error("upload failed, error: %s" % ex)
return
result = json.loads(response.body.decode('utf-8'))
res_txt = "OK" if not result['errors'] else "FAILED"
took = int(result['took'])
logging.info("Upload: %s - upload took: %5dms, total docs uploaded: %7d" % (res_txt, took, upload_data_count))
def get_data_for_format(format):
split_f = format.split(":")
if not split_f:
return None, None
field_name = split_f[0]
field_type = split_f[1]
return_val = ''
if field_type == "bool":
return_val = random.choice([True, False])
elif field_type == "str":
min = 3 if len(split_f) < 3 else int(split_f[2])
max = min + 7 if len(split_f) < 4 else int(split_f[3])
length = generate_count(min, max)
return_val = "".join([random.choice(string.ascii_letters + string.digits) for x in range(length)])
elif field_type == "int":
min = 0 if len(split_f) < 3 else int(split_f[2])
max = min + 100000 if len(split_f) < 4 else int(split_f[3])
return_val = generate_count(min, max)
elif field_type == "ipv4":
return_val = "{0}.{1}.{2}.{3}".format(generate_count(0, 245),generate_count(0, 245),generate_count(0, 245),generate_count(0, 245))
elif field_type in ["ts", "tstxt"]:
now = int(time.time())
per_day = 24 * 60 * 60
min = now - 30 * per_day if len(split_f) < 3 else int(split_f[2])
max = now + 30 * per_day if len(split_f) < 4 else int(split_f[3])
ts = generate_count(min, max)
return_val = int(ts * 1000) if field_type == "ts" else datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%dT%H:%M:%S.000-0000")
elif field_type == "words":
min = 2 if len(split_f) < 3 else int(split_f[2])
max = min + 8 if len(split_f) < 4 else int(split_f[3])
count = generate_count(min, max)
words = []
for _ in range(count):
word_len = random.randrange(3, 10)
words.append("".join([random.choice(string.ascii_letters + string.digits) for x in range(word_len)]))
return_val = " ".join(words)
elif field_type == "dict":
global _dict_data
min = 2 if len(split_f) < 3 else int(split_f[2])
max = min + 8 if len(split_f) < 4 else int(split_f[3])
count = generate_count(min, max)
return_val = " ".join([random.choice(_dict_data).strip() for _ in range(count)])
elif field_type == "text":
text = ["text1", "text2", "text3"] if len(split_f) < 3 else split_f[2].split("-")
min = 1 if len(split_f) < 4 else int(split_f[3])
max = min + 1 if len(split_f) < 5 else int(split_f[4])
count = generate_count(min, max)
words = []
for _ in range(count):
words.append(""+random.choice(text))
return_val = " ".join(words)
return field_name, return_val
def generate_count(min, max):
if min == max:
return max
elif min > max:
return random.randrange(max, min);
else:
return random.randrange(min, max);
def generate_random_doc(format):
global id_counter
res = {}
for f in format:
f_key, f_val = get_data_for_format(f)
if f_key:
res[f_key] = f_val
if not tornado.options.options.id_type:
return res
if tornado.options.options.id_type == 'int':
res['_id'] = id_counter
id_counter += 1
elif tornado.options.options.id_type == 'uuid4':
res['_id'] = str(uuid.uuid4())
return res
def set_index_refresh(val):
params = {"index": {"refresh_interval": val}}
body = json.dumps(params)
url = "%s/%s/_settings" % (tornado.options.options.es_url, tornado.options.options.index_name)
try:
request = tornado.httpclient.HTTPRequest(url, headers=headers, method="PUT", body=body, request_timeout=240, auth_username=tornado.options.options.username, auth_password=tornado.options.options.password, validate_cert=tornado.options.options.validate_cert)
http_client = tornado.httpclient.HTTPClient()
http_client.fetch(request)
logging.info('Set index refresh to %s' % val)
except Exception as ex:
logging.exception(ex)
@tornado.gen.coroutine
def generate_test_data():
global upload_data_count
if tornado.options.options.force_init_index:
delete_index(tornado.options.options.index_name)
create_index(tornado.options.options.index_name)
if tornado.options.options.set_refresh:
set_index_refresh("-1")
if tornado.options.options.out_file:
out_file = open(tornado.options.options.out_file, "w")
else:
out_file = None
if tornado.options.options.dict_file:
global _dict_data
with open(tornado.options.options.dict_file, 'r') as f:
_dict_data = f.readlines()
logging.info("Loaded %d words from the %s" % (len(_dict_data), tornado.options.options.dict_file))
format = tornado.options.options.format.split(',')
if not format:
logging.error('invalid format')
exit(1)
ts_start = int(time.time())
upload_data_txt = ""
logging.info("Generating %d docs, upload batch size is %d" % (tornado.options.options.count,
tornado.options.options.batch_size))
for num in range(0, tornado.options.options.count):
item = generate_random_doc(format)
if out_file:
out_file.write("%s\n" % json.dumps(item))
cmd = {'index': {'_index': tornado.options.options.index_name,
'_type': tornado.options.options.index_type}}
if '_id' in item:
cmd['index']['_id'] = item['_id']
upload_data_txt += json.dumps(cmd) + "\n"
upload_data_txt += json.dumps(item) + "\n"
upload_data_count += 1
if upload_data_count % tornado.options.options.batch_size == 0:
yield upload_batch(upload_data_txt)
upload_data_txt = ""
if upload_data_txt:
yield upload_batch(upload_data_txt)
if tornado.options.options.set_refresh:
set_index_refresh("1s")
if out_file:
out_file.close()
took_secs = int(time.time() - ts_start)
logging.info("Done - total docs uploaded: %d, took %d seconds" % (tornado.options.options.count, took_secs))
if __name__ == '__main__':
tornado.options.define("es_url", type=str, default='http://localhost:9200/', help="URL of your Elasticsearch node")
tornado.options.define("index_name", type=str, default='test_data', help="Name of the index to store your messages")
tornado.options.define("index_type", type=str, default='test_type', help="Type")
tornado.options.define("batch_size", type=int, default=1000, help="Elasticsearch bulk index batch size")
tornado.options.define("num_of_shards", type=int, default=2, help="Number of shards for ES index")
tornado.options.define("http_upload_timeout", type=int, default=3, help="Timeout in seconds when uploading data")
tornado.options.define("count", type=int, default=100000, help="Number of docs to generate")
tornado.options.define("format", type=str, default='name:str,age:int,last_updated:ts', help="message format")
tornado.options.define("num_of_replicas", type=int, default=0, help="Number of replicas for ES index")
tornado.options.define("force_init_index", type=bool, default=False, help="Force deleting and re-initializing the Elasticsearch index")
tornado.options.define("set_refresh", type=bool, default=False, help="Set refresh rate to -1 before starting the upload")
tornado.options.define("out_file", type=str, default=False, help="If set, write test data to out_file as well.")
tornado.options.define("id_type", type=str, default=None, help="Type of 'id' to use for the docs, valid settings are int and uuid4, None is default")
tornado.options.define("dict_file", type=str, default=None, help="Name of dictionary file to use")
tornado.options.define("username", type=str, default=None, help="Username for elasticsearch")
tornado.options.define("password", type=str, default=None, help="Password for elasticsearch")
tornado.options.define("validate_cert", type=bool, default=True, help="SSL validate_cert for requests. Use false for self-signed certificates.")
tornado.options.parse_command_line()
tornado.ioloop.IOLoop.instance().run_sync(generate_test_data)
| true | true |
1c379ecbd15812a9820a9f9718e729fc0870bf76 | 2,178 | py | Python | test/lib/git.py | morgante/cnrm-blueprints | 34453c4acde2cd321f71b76b3e6c6b086bc8ada1 | [
"Apache-2.0"
] | 9 | 2020-07-10T18:20:19.000Z | 2021-10-08T23:58:06.000Z | test/lib/git.py | morgante/cnrm-blueprints | 34453c4acde2cd321f71b76b3e6c6b086bc8ada1 | [
"Apache-2.0"
] | 1 | 2021-03-17T19:20:27.000Z | 2021-03-17T19:20:27.000Z | test/lib/git.py | isabella232/cnrm-blueprints | 19d7c459c4f71198208282da17bcade53d28cc9c | [
"Apache-2.0"
] | 4 | 2020-07-10T23:22:20.000Z | 2021-09-27T19:27:02.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import downloads
import os
class Git(object):
    """Thin wrapper around the ``git`` CLI executed via ``downloads.exec``.

    The instance is bound to a working tree once :meth:`clone` has run;
    every later command executes with that directory as its cwd.
    """

    def __init__(self, user_email, user_name, env=None):
        """Tighten the SSH key permissions and set the global git identity.

        :param user_email: value for ``user.email`` in the global git config.
        :param user_name: value for ``user.name`` in the global git config.
        :param env: environment for git subprocesses; defaults to a copy of
            the current process environment.
        """
        if env is None:
            env = os.environ.copy()
        self.bin = "git"
        self.env = env
        # No working tree yet; clone() must run before other git commands.
        self.statedir = None
        # SSH refuses private keys with loose permissions.
        downloads.exec(["chmod", "600", "/root/.ssh/id_rsa"])
        downloads.exec([self.bin, "config", "--global", "user.email", user_email])
        downloads.exec([self.bin, "config", "--global", "user.name", user_name])

    def __repr__(self):
        return "Git:" + downloads.exec(["which", "git"])

    def clone(self, repo, directory):
        """Recursively clone ``repo`` into ``directory`` and bind to it."""
        downloads.exec([self.bin, "clone", "--recursive", repo, directory])
        self.statedir = directory

    def checkout(self, branch):
        """Check out ``branch`` in the bound working tree."""
        self.exec(["checkout", branch])

    def commit_and_push(self, branch, file, msg):
        """Stage ``file``, commit it with ``msg`` and push HEAD to ``branch``."""
        self.exec(["add", file])
        self.exec(["commit", "-m", msg])
        self.exec(["push", "origin", "HEAD:%s" % branch])

    def create_remote_tag(self, tag):
        """Create ``tag`` locally and push it to origin."""
        self.exec(["tag", tag])
        self.exec(["push", "origin", tag])

    def get_commit_message(self, commit_hash):
        """Return the one-line subject of ``commit_hash``."""
        return self.exec(["show", "--pretty=format:%s", "-s", commit_hash])

    def get_last_commit_hash(self):
        """Return the full hash of HEAD."""
        return self.exec(["rev-parse", "HEAD"])

    # return a list of changed file paths
    def get_changed_files(self, revision):
        """Return the paths touched by ``revision`` as a list of strings.

        NOTE(review): a revision with no files yields [''] because
        ``"".split("\\n")`` is [''] -- callers appear to tolerate this.
        """
        files = self.exec(
            ["show", "--pretty=", "--name-only", revision]).strip()
        return files.split("\n")

    def exec(self, args):
        """Run ``git <args>`` in the bound working tree; return stripped stdout.

        :raises RuntimeError: if called before :meth:`clone` bound a tree
            (previously this surfaced as an opaque AttributeError).
        """
        if self.statedir is None:
            raise RuntimeError("Git.clone() must be called before running git commands")
        return downloads.exec(
            [self.bin] + args, cwd=self.statedir, env=self.env
        ).strip()
| 34.571429 | 79 | 0.624885 |
import downloads
import os
class Git(object):
    """Wrapper over the git CLI; commands are executed via downloads.exec."""

    def __init__(self, user_email, user_name, env=None):
        """Set up the git identity and SSH key permissions."""
        self.bin = "git"
        self.env = os.environ.copy() if env is None else env
        downloads.exec(["chmod", "600", "/root/.ssh/id_rsa"])
        downloads.exec(["git", "config", "--global", "user.email", user_email])
        downloads.exec(["git", "config", "--global", "user.name", user_name])

    def __repr__(self):
        return f"Git:{downloads.exec(['which', 'git'])}"

    def clone(self, repo, directory):
        """Clone *repo* recursively into *directory* and remember it."""
        downloads.exec(["git", "clone", "--recursive", repo, directory])
        self.statedir = directory

    def checkout(self, branch):
        """Check out *branch* in the cloned repository."""
        self.exec(["checkout", branch])

    def commit_and_push(self, branch, file, msg):
        """Stage *file*, commit with *msg*, then push HEAD to *branch*."""
        for command in (["add", file],
                        ["commit", "-m", msg],
                        ["push", "origin", f"HEAD:{branch}"]):
            self.exec(command)

    def create_remote_tag(self, tag):
        """Create *tag* and push it to origin."""
        self.exec(["tag", tag])
        self.exec(["push", "origin", tag])

    def get_commit_message(self, commit_hash):
        """Return the one-line subject of *commit_hash*."""
        return self.exec(["show", "--pretty=format:%s", "-s", commit_hash])

    def get_last_commit_hash(self):
        """Return the commit hash of HEAD."""
        return self.exec(["rev-parse", "HEAD"])

    def get_changed_files(self, revision):
        """Return the file paths touched by *revision*."""
        listing = self.exec(
            ["show", "--pretty=", "--name-only", revision]).strip()
        return listing.split("\n")

    def exec(self, args):
        """Run git with *args* inside the cloned repository."""
        output = downloads.exec([self.bin] + args,
                                cwd=self.statedir, env=self.env)
        return output.strip()
| true | true |
1c379f073a5a5d7623ac8b55b3bcc6bed55eeb70 | 8,763 | py | Python | maple/maple/spiders/news.py | honmaple/maple-spider | b9b6b295114149436974f4fe82f75dc7f2797129 | [
"MIT"
] | null | null | null | maple/maple/spiders/news.py | honmaple/maple-spider | b9b6b295114149436974f4fe82f75dc7f2797129 | [
"MIT"
] | null | null | null | maple/maple/spiders/news.py | honmaple/maple-spider | b9b6b295114149436974f4fe82f75dc7f2797129 | [
"MIT"
] | 1 | 2019-04-20T03:22:26.000Z | 2019-04-20T03:22:26.000Z | #!/usr/bin/env python
# -*- coding=UTF-8 -*-
#*************************************************************************
# Copyright © 2015 JiangLin. All rights reserved.
# File Name: news.py
# Author:JiangLin
# Mail:xiyang0807@gmail.com
# Created Time: 2016-04-03 23:02:32
# Last Update: Thursday 2016-04-07 12:57:08 (CST)
# By: jianglin
# Description: Crawl school news (爬取学校新闻)
#*************************************************************************
import scrapy
from maple.items import NewsItem
from scrapy.http import Request
from scrapy.selector import Selector
from datetime import datetime
from maple.models import News, DBSession
# Module-level DB session shared by all spiders in this file.
session = DBSession()


def exsit_session(url):
    """Return True if a News row with this *url* is already stored.

    NOTE: the name keeps the original 'exsit' typo because the spiders
    below call it by this exact name.
    """
    return session.query(News.url).filter_by(url=url).first() is not None
class NewsSpider(scrapy.spiders.Spider):
    """Crawl news articles from the main school news site (202.119.112.75)."""

    name = "news"
    allowed_domains = ["202.119.112.75"]
    # Listing pages 1..2 of the news column.
    start_urls = [
        'http://202.119.112.75/s/2001/t/2016/p/5/i/%d/list.htm' % page
        for page in range(1, 3)
    ]

    @staticmethod
    def _node_text(node):
        # Collect text from the node itself and up to three levels of
        # nested tags, preserving the original per-node extraction order.
        pieces = []
        for xpath in ('text()', '*/text()', '*/*/text()', '*/*/*/text()'):
            pieces.extend(node.xpath(xpath).extract())
        return pieces

    def parse_item(self, response):
        """Fill in the article body of the item built by parse()."""
        # Body text lives in <p> tags; some pages use <div> instead.
        nodes = (response.xpath('//td[contains(@class, "content")]/p')
                 or response.xpath('//td[contains(@class, "content")]/div'))
        item = response.meta['item']
        # One trailing newline per extracted chunk, as before; join avoids
        # the quadratic string += accumulation.
        item['content'] = ''.join(
            chunk + '\n' for node in nodes for chunk in self._node_text(node))
        item['category'] = 'hhuc'
        return item

    def parse(self, response):
        """Build NewsItem stubs from listing rows and follow unseen links."""
        for row in response.xpath('//table[contains(@class, "columnStyle")]/tr'):
            item = NewsItem()
            title = row.xpath('td[1]/a/font/text()').extract()
            url = row.xpath('td[1]/a/@href').extract()
            time = row.xpath('td[2]/text()').extract()
            if len(title) == 1:
                item['title'] = title[0]
            if len(url) == 1:
                item['url'] = 'http://202.119.112.75' + url[0]
            if len(time) == 1:
                item['time'] = datetime.strptime(time[0], '%Y-%m-%d')
            # Skip header/malformed rows (no link) instead of raising
            # KeyError, and only fetch articles not yet in the database.
            if 'url' in item and not exsit_session(item['url']):
                yield Request(item['url'],
                              meta={'item': item},
                              callback=self.parse_item)
class BsSpider(scrapy.spiders.Spider):
    """Spider for the graduate school news site (bs.hhuc.edu.cn)."""

    name = "bs"
    allowed_domains = ["bs.hhuc.edu.cn"]
    start_urls = []
    for page in range(1, 3):
        url = 'http://bs.hhuc.edu.cn/s/2039/t/2371/p/3/i/%d/list.htm' % page
        start_urls.append(url)

    def parse_item(self, response):
        """Assemble the article body for a previously scheduled item."""
        nodes = response.xpath('//td[contains(@class, "content")]/p')
        if not nodes:
            # Fall back to <div> when the page has no <p> paragraphs.
            nodes = response.xpath('//td[contains(@class, "content")]/div')
        item = response.meta['item']
        chunks = []
        for node in nodes:
            # Text from the node plus up to three nesting levels, in order.
            for depth in ('text()', '*/text()', '*/*/text()', '*/*/*/text()'):
                chunks.extend(node.xpath(depth).extract())
        # Each chunk is followed by a newline, exactly as before.
        item['content'] = ''.join(chunk + '\n' for chunk in chunks)
        item['category'] = 'bs'
        return item

    def parse(self, response):
        """Turn listing rows into NewsItems and request unseen articles."""
        collected = []
        for row in response.xpath('//table[contains(@class, "columnStyle")]/tr'):
            item = NewsItem()
            titles = row.xpath('td[1]/a/font/text()').extract()
            links = row.xpath('td[1]/a/@href').extract()
            dates = row.xpath('td[2]/text()').extract()
            if len(titles) == 1:
                item['title'] = titles[0]
            if len(links) == 1:
                item['url'] = 'http://bs.hhuc.edu.cn' + links[0]
            if len(dates) == 1:
                item['time'] = datetime.strptime(dates[0], '%Y-%m-%d')
            collected.append(item)
        for item in collected:
            if not exsit_session(item['url']):
                yield Request(item['url'],
                              meta={'item': item},
                              callback=self.parse_item)
class WulwxySpider(scrapy.spiders.Spider):
    """Crawl news from the IoT engineering college site (wulwxy.hhuc.edu.cn)."""

    name = "wulwxy"
    allowed_domains = ["wulwxy.hhuc.edu.cn"]
    start_urls = [
        'http://wulwxy.hhuc.edu.cn/s/2059/t/2561/p/4/i/%d/list.htm' % page
        for page in range(1, 3)
    ]

    def parse_item(self, response):
        """Extract the article body for the item built in parse()."""
        nodes = (response.xpath('//td[contains(@height, "400")]/p')
                 or response.xpath('//td[contains(@height, "400")]/div'))
        item = response.meta['item']
        pieces = []
        for node in nodes:
            # Text from the node plus up to three nesting levels, in order.
            for xpath in ('text()', '*/text()', '*/*/text()', '*/*/*/text()'):
                pieces.extend(node.xpath(xpath).extract())
        # NOTE: unlike the other spiders, chunks are concatenated without a
        # newline separator (matches the original); a leftover debug
        # print() of the content was removed.
        item['content'] = ''.join(pieces)
        item['category'] = 'wulwxy'
        return item

    def parse(self, response):
        """Build items from the listing page and follow unseen article links."""
        for row in response.xpath('//table[contains(@class, "columnStyle")]/tr'):
            item = NewsItem()
            title = row.xpath('td[1]/a/font/text()').extract()
            url = row.xpath('td[1]/a/@href').extract()
            time = row.xpath('td[2]/text()').extract()
            if len(title) == 1:
                item['title'] = title[0]
            if len(url) == 1:
                item['url'] = 'http://wulwxy.hhuc.edu.cn' + url[0]
            if len(time) == 1:
                item['time'] = datetime.strptime(time[0], '%Y-%m-%d')
            # Skip malformed rows instead of raising KeyError on item['url'].
            if 'url' in item and not exsit_session(item['url']):
                yield Request(item['url'],
                              meta={'item': item},
                              callback=self.parse_item)
class JidianSpider(scrapy.spiders.Spider):
    """Crawl news from the mechanical engineering college (jidian.hhuc.edu.cn)."""

    name = "jidian"
    allowed_domains = ["jidian.hhuc.edu.cn"]
    start_urls = [
        'http://jidian.hhuc.edu.cn/s/2029/t/2608/p/3/i/%d/list.htm' % page
        for page in range(1, 3)
    ]

    # Content xpaths shared by the normal and fallback extraction paths.
    _P_XPATH = ('//table[contains(@width, "98%")]'
                '/tr/td[contains(@valign,"top")]/p')
    _DIV_XPATH = ('//table[contains(@width, "98%")]'
                  '/tr/td[contains(@valign,"top")]/div')

    def parse_item(self, response):
        """Extract the article body; falls back to re-parsing the raw body."""
        try:
            p1 = response.xpath(self._P_XPATH)
            p2 = response.xpath(self._DIV_XPATH)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate.  Some pages break
            # response.xpath(); re-parse the raw bytes instead.
            hxs = Selector(text=response.body)
            p1 = hxs.xpath(self._P_XPATH)
            p2 = hxs.xpath(self._DIV_XPATH)
        nodes = p1 or p2
        item = response.meta['item']
        chunks = []
        for node in nodes:
            # Text from the node plus up to three nesting levels, in order.
            for xpath in ('text()', '*/text()', '*/*/text()', '*/*/*/text()'):
                chunks.extend(node.xpath(xpath).extract())
        # One newline after each chunk (as before); the leftover debug
        # print() of the content was removed.
        item['content'] = ''.join(chunk + '\n' for chunk in chunks)
        item['category'] = 'jidian'
        return item

    def parse(self, response):
        """Build items from the listing page and follow unseen article links."""
        for row in response.xpath('//table[contains(@class, "columnStyle")]/tr'):
            item = NewsItem()
            title = row.xpath('td[1]/a/font/text()').extract()
            url = row.xpath('td[1]/a/@href').extract()
            time = row.xpath('td[2]/text()').extract()
            if len(title) == 1:
                item['title'] = title[0]
            if len(url) == 1:
                item['url'] = 'http://jidian.hhuc.edu.cn' + url[0]
            if len(time) == 1:
                item['time'] = datetime.strptime(time[0], '%Y-%m-%d')
            # Skip malformed rows instead of raising KeyError on item['url'].
            if 'url' in item and not exsit_session(item['url']):
                yield Request(item['url'],
                              meta={'item': item},
                              callback=self.parse_item)
| 36.665272 | 80 | 0.479516 |
import scrapy
from maple.items import NewsItem
from scrapy.http import Request
from scrapy.selector import Selector
from datetime import datetime
from maple.models import News, DBSession
# Module-level DB session shared by all spiders in this file.
session = DBSession()


def exsit_session(url):
    """Return True if a News row with this *url* is already stored.

    NOTE: the name keeps the original 'exsit' typo because the spiders
    below call it by this exact name.
    """
    return session.query(News.url).filter_by(url=url).first() is not None
class NewsSpider(scrapy.spiders.Spider):
    """Spider for the main school news site (202.119.112.75)."""
    name = "news"
    allowed_domains = ["202.119.112.75"]
    # Listing pages 1..2 of the news column.
    start_urls = []
    for page in range(1, 3):
        url = 'http://202.119.112.75/s/2001/t/2016/p/5/i/%d/list.htm' % page
        start_urls.append(url)
    def parse_item(self, response):
        """Fill item['content'] from the article page and return the item."""
        # Body text lives in <p> tags; some pages use <div> instead.
        p1 = response.xpath('//td[contains(@class, "content")]/p')
        p2 = response.xpath('//td[contains(@class, "content")]/div')
        p = p1 or p2
        item = response.meta['item']
        content = ''
        for text in p:
            # Gather text from the node and up to three nesting levels.
            c1 = text.xpath('text()').extract()
            c2 = text.xpath('*/text()').extract()
            c3 = text.xpath('*/*/text()').extract()
            c4 = text.xpath('*/*/*/text()').extract()
            c = c1 + c2 + c3 + c4
            for i in c:
                # One newline appended after every extracted chunk.
                con = i + '\n'
                content += con
        item['content'] = content
        item['category'] = 'hhuc'
        return item
    def parse(self, response):
        """Build NewsItem stubs from listing rows and follow unseen links."""
        sites = response.xpath('//table[contains(@class, "columnStyle")]/tr')
        items = []
        for site in sites:
            item = NewsItem()
            title = site.xpath('td[1]/a/font/text()').extract()
            url = site.xpath('td[1]/a/@href').extract()
            time = site.xpath('td[2]/text()').extract()
            if len(title) == 1:
                item['title'] = title[0]
            if len(url) == 1:
                item['url'] = 'http://202.119.112.75' + url[0]
            if len(time) == 1:
                date_time = datetime.strptime(time[0], '%Y-%m-%d')
                item['time'] = date_time
            items.append(item)
        for item in items:
            # Only fetch articles not already stored in the database.
            # NOTE(review): a row without td[1]/a/@href would raise KeyError
            # on item['url'] here - confirm against real listing markup.
            if not exsit_session(item['url']):
                yield Request(item['url'],
                              meta={'item': item},
                              callback=self.parse_item)
class BsSpider(scrapy.spiders.Spider):
    """Spider for the graduate school news site (bs.hhuc.edu.cn)."""
    name = "bs"
    allowed_domains = ["bs.hhuc.edu.cn"]
    # Listing pages 1..2 of the news column.
    start_urls = []
    for page in range(1, 3):
        url = 'http://bs.hhuc.edu.cn/s/2039/t/2371/p/3/i/%d/list.htm' % page
        start_urls.append(url)
    def parse_item(self, response):
        """Fill item['content'] from the article page and return the item."""
        # Body text lives in <p> tags; some pages use <div> instead.
        p1 = response.xpath('//td[contains(@class, "content")]/p')
        p2 = response.xpath('//td[contains(@class, "content")]/div')
        p = p1 or p2
        item = response.meta['item']
        content = ''
        for text in p:
            # Gather text from the node and up to three nesting levels.
            c1 = text.xpath('text()').extract()
            c2 = text.xpath('*/text()').extract()
            c3 = text.xpath('*/*/text()').extract()
            c4 = text.xpath('*/*/*/text()').extract()
            c = c1 + c2 + c3 + c4
            for i in c:
                # One newline appended after every extracted chunk.
                con = i + '\n'
                content += con
        item['content'] = content
        item['category'] = 'bs'
        return item
    def parse(self, response):
        """Build NewsItem stubs from listing rows and follow unseen links."""
        sites = response.xpath('//table[contains(@class, "columnStyle")]/tr')
        items = []
        for site in sites:
            item = NewsItem()
            title = site.xpath('td[1]/a/font/text()').extract()
            url = site.xpath('td[1]/a/@href').extract()
            time = site.xpath('td[2]/text()').extract()
            if len(title) == 1:
                item['title'] = title[0]
            if len(url) == 1:
                item['url'] = 'http://bs.hhuc.edu.cn' + url[0]
            if len(time) == 1:
                date_time = datetime.strptime(time[0], '%Y-%m-%d')
                item['time'] = date_time
            items.append(item)
        for item in items:
            # Only fetch articles not already stored in the database.
            if not exsit_session(item['url']):
                yield Request(item['url'],
                              meta={'item': item},
                              callback=self.parse_item)
class WulwxySpider(scrapy.spiders.Spider):
    """Spider for the IoT engineering college site (wulwxy.hhuc.edu.cn)."""
    name = "wulwxy"
    allowed_domains = ["wulwxy.hhuc.edu.cn"]
    # Listing pages 1..2 of the news column.
    start_urls = []
    for page in range(1, 3):
        url = 'http://wulwxy.hhuc.edu.cn/s/2059/t/2561/p/4/i/%d/list.htm' % page
        start_urls.append(url)
    def parse_item(self, response):
        """Fill item['content'] from the article page and return the item."""
        # Body cell is matched by its fixed height attribute here.
        p1 = response.xpath('//td[contains(@height, "400")]/p')
        p2 = response.xpath('//td[contains(@height, "400")]/div')
        p = p1 or p2
        item = response.meta['item']
        content = ''
        for text in p:
            # Gather text from the node and up to three nesting levels.
            c1 = text.xpath('text()').extract()
            c2 = text.xpath('*/text()').extract()
            c3 = text.xpath('*/*/text()').extract()
            c4 = text.xpath('*/*/*/text()').extract()
            c = c1 + c2 + c3 + c4
            for i in c:
                # Unlike the other spiders, no newline is inserted here.
                content += i
        # NOTE(review): looks like a leftover debug print - confirm whether
        # stdout output is intentional.
        print(content)
        item['content'] = content
        item['category'] = 'wulwxy'
        return item
    def parse(self, response):
        """Build NewsItem stubs from listing rows and follow unseen links."""
        sites = response.xpath('//table[contains(@class, "columnStyle")]/tr')
        items = []
        for site in sites:
            item = NewsItem()
            title = site.xpath('td[1]/a/font/text()').extract()
            url = site.xpath('td[1]/a/@href').extract()
            time = site.xpath('td[2]/text()').extract()
            if len(title) == 1:
                item['title'] = title[0]
            if len(url) == 1:
                item['url'] = 'http://wulwxy.hhuc.edu.cn' + url[0]
            if len(time) == 1:
                date_time = datetime.strptime(time[0], '%Y-%m-%d')
                item['time'] = date_time
            items.append(item)
        for item in items:
            # Only fetch articles not already stored in the database.
            if not exsit_session(item['url']):
                yield Request(item['url'],
                              meta={'item': item},
                              callback=self.parse_item)
class JidianSpider(scrapy.spiders.Spider):
    """Crawl news from the mechanical engineering college (jidian.hhuc.edu.cn)."""

    name = "jidian"
    allowed_domains = ["jidian.hhuc.edu.cn"]
    start_urls = [
        'http://jidian.hhuc.edu.cn/s/2029/t/2608/p/3/i/%d/list.htm' % page
        for page in range(1, 3)
    ]

    # Content xpaths shared by the normal and fallback extraction paths.
    _P_XPATH = ('//table[contains(@width, "98%")]'
                '/tr/td[contains(@valign,"top")]/p')
    _DIV_XPATH = ('//table[contains(@width, "98%")]'
                  '/tr/td[contains(@valign,"top")]/div')

    def parse_item(self, response):
        """Extract the article body; falls back to re-parsing the raw body."""
        try:
            p1 = response.xpath(self._P_XPATH)
            p2 = response.xpath(self._DIV_XPATH)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate.  Some pages break
            # response.xpath(); re-parse the raw bytes instead.
            hxs = Selector(text=response.body)
            p1 = hxs.xpath(self._P_XPATH)
            p2 = hxs.xpath(self._DIV_XPATH)
        nodes = p1 or p2
        item = response.meta['item']
        chunks = []
        for node in nodes:
            # Text from the node plus up to three nesting levels, in order.
            for xpath in ('text()', '*/text()', '*/*/text()', '*/*/*/text()'):
                chunks.extend(node.xpath(xpath).extract())
        # One newline after each chunk (as before); the leftover debug
        # print() of the content was removed.
        item['content'] = ''.join(chunk + '\n' for chunk in chunks)
        item['category'] = 'jidian'
        return item

    def parse(self, response):
        """Build items from the listing page and follow unseen article links."""
        for row in response.xpath('//table[contains(@class, "columnStyle")]/tr'):
            item = NewsItem()
            title = row.xpath('td[1]/a/font/text()').extract()
            url = row.xpath('td[1]/a/@href').extract()
            time = row.xpath('td[2]/text()').extract()
            if len(title) == 1:
                item['title'] = title[0]
            if len(url) == 1:
                item['url'] = 'http://jidian.hhuc.edu.cn' + url[0]
            if len(time) == 1:
                item['time'] = datetime.strptime(time[0], '%Y-%m-%d')
            # Skip malformed rows instead of raising KeyError on item['url'].
            if 'url' in item and not exsit_session(item['url']):
                yield Request(item['url'],
                              meta={'item': item},
                              callback=self.parse_item)
| true | true |
1c37a06c7473a70641c7ae6eeef966e4e43240bc | 16,122 | py | Python | src/TestRailAPIClient.py | ezywebs/robotframework-testrail-extended | 7797905257a590e9764c07a915de2dcbbde1e850 | [
"Apache-2.0"
] | null | null | null | src/TestRailAPIClient.py | ezywebs/robotframework-testrail-extended | 7797905257a590e9764c07a915de2dcbbde1e850 | [
"Apache-2.0"
] | null | null | null | src/TestRailAPIClient.py | ezywebs/robotframework-testrail-extended | 7797905257a590e9764c07a915de2dcbbde1e850 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from requests import post, get
from typing import Any, cast, Dict, List, Optional, Sequence, Union
# Headers sent with every TestRail API request.
DEFAULT_TESTRAIL_HEADERS = {'Content-Type': 'application/json'}
# TestRail's built-in status id for a passed test.
TESTRAIL_STATUS_ID_PASSED = 1
# custom types
JsonDict = Dict[str, Any]  # noqa: E993  (one JSON object from the API)
JsonList = List[JsonDict]  # noqa: E993  (list of JSON objects)
Id = Union[str, int]  # noqa: E993  (TestRail entity id, numeric or string)
class TestRailAPIClient(object):
    """Library for working with [http://www.gurock.com/testrail/ | TestRail].

    == Dependencies ==
    | requests | https://pypi.python.org/pypi/requests |

    == Preconditions ==
    1. [ http://docs.gurock.com/testrail-api2/introduction | Enable TestRail API]
    """

    def __init__(self, server: str, user: str, password: str, run_id: Optional[Id] = None,
                 protocol: str = 'http') -> None:
        """Create TestRailAPIClient instance.

        *Args:*\n
        _server_ - name of TestRail server;\n
        _user_ - name of TestRail user;\n
        _password_ - password of TestRail user;\n
        _run_id_ - ID of the test run (optional);\n
        _protocol_ - connecting protocol to TestRail server: http or https.
        """
        self._url = '{protocol}://{server}/testrail/index.php?/api/v2/'.format(protocol=protocol, server=server)
        self._user = user
        self._password = password
        # Kept as-is for backward compatibility: the attribute only exists
        # when a run_id is supplied (or after add_test_run).
        if run_id is not None:
            self.run_id = run_id

    def _send_post(self, uri: str, data: Dict[str, Any]) -> Union[JsonList, JsonDict]:
        """Perform a POST request to TestRail and return the parsed JSON.

        *Args:* \n
        _uri_ - API endpoint;\n
        _data_ - json payload.

        *Raises:* HTTPError on any non-2xx response.
        """
        url = self._url + uri
        # NOTE(security): verify=False disables TLS certificate checks;
        # acceptable only for trusted in-house TestRail servers.
        response = post(url, json=data, auth=(self._user, self._password), verify=False)
        response.raise_for_status()
        return response.json()

    def _send_get(self, uri: str, headers: Dict[str, str] = None,
                  params: Dict[str, Any] = None) -> Union[JsonList, JsonDict]:
        """Perform a GET request to TestRail and return the parsed JSON.

        *Args:* \n
        _uri_ - API endpoint;\n
        _headers_ - http headers;\n
        _params_ - query parameters.

        *Raises:* HTTPError on any non-2xx response.
        """
        url = self._url + uri
        # NOTE(security): verify=False disables TLS certificate checks.
        response = get(url, headers=headers, params=params, auth=(self._user, self._password), verify=False)
        response.raise_for_status()
        return response.json()

    def get_tests(self, run_id: Id, status_ids: Union[str, Sequence[int]] = None) -> JsonList:
        """Get tests from a TestRail test run.

        *Args:* \n
        _run_id_ - ID of the test run;\n
        _status_ids_ - required test statuses, either a ready
        comma-separated string or a sequence of ints.
        """
        uri = 'get_tests/{run_id}'.format(run_id=run_id)
        if status_ids and not isinstance(status_ids, str):
            # A plain string is passed through unchanged; joining it would
            # iterate over its characters and corrupt the filter.
            status_ids = ','.join(str(status_id) for status_id in status_ids)
        params = {
            'status_id': status_ids
        }
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS, params=params)
        return cast(JsonList, response)

    def get_results_for_case(self, run_id: Id, case_id: Id, limit: int = None) -> JsonList:
        """Get results for a case, most recent first.

        *Args:* \n
        _run_id_ - ID of the test run;\n
        _case_id_ - ID of the test case;\n
        _limit_ - maximum number of results to return.
        """
        uri = 'get_results_for_case/{run_id}/{case_id}'.format(run_id=run_id, case_id=case_id)
        params = {
            'limit': limit
        }
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS, params=params)
        return cast(JsonList, response)

    def add_result_for_case(self, run_id: Id, case_id: Id,
                            test_result_fields: Dict[str, Union[str, int]]) -> None:
        """Add a result for a case in a TestRail test run.

        Supported fields include status_id, comment, version, elapsed,
        defects, assignedto_id and any custom_* field.

        *Example:*\n
        | Add Result For Case | run_id=321 | case_id=123 | test_result={'status_id': 3, 'comment': 'Untested', 'defects': 'DEF-123'} |
        """
        uri = 'add_result_for_case/{run_id}/{case_id}'.format(run_id=run_id, case_id=case_id)
        self._send_post(uri, test_result_fields)

    def get_statuses(self) -> JsonList:
        """Get all test status definitions from TestRail."""
        uri = 'get_statuses'
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS)
        return cast(JsonList, response)

    def update_case(self, case_id: Id, request_fields: Dict[str, Union[str, int, None]]) -> JsonDict:
        """Update an existing test case.

        Supported fields include title, template_id, type_id, priority_id,
        estimate, milestone_id, refs and any custom_* field.

        *Example:*\n
        | Update Case | case_id=213 | request_fields={'title': name, 'type_id': 1, 'refs': references} |
        """
        uri = 'update_case/{case_id}'.format(case_id=case_id)
        response = self._send_post(uri, request_fields)
        return cast(JsonDict, response)

    def get_status_id_by_status_label(self, status_label: str) -> int:
        """Return the status id for a (case-insensitive) status label.

        *Raises:* Exception when no status with that label exists.
        """
        statuses_info = self.get_statuses()
        for status in statuses_info:
            if status['label'].lower() == status_label.lower():
                return status['id']
        raise Exception(u"There is no status with label \'{}\' in TestRail".format(status_label))

    def get_test_status_id_by_case_id(self, run_id: Id, case_id: Id) -> Optional[int]:
        """Return the latest test status id for a case, or None if the
        case has no results yet."""
        last_case_result = self.get_results_for_case(run_id=run_id, case_id=case_id, limit=1)
        return last_case_result[0]['status_id'] if last_case_result else None

    def get_project(self, project_id: Id) -> JsonDict:
        """Get project info by project id."""
        uri = 'get_project/{project_id}'.format(project_id=project_id)
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS)
        return cast(JsonDict, response)

    def get_suite(self, suite_id: Id) -> JsonDict:
        """Get suite info by suite id."""
        uri = 'get_suite/{suite_id}'.format(suite_id=suite_id)
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS)
        return cast(JsonDict, response)

    def get_section(self, section_id: Id) -> JsonDict:
        """Get section info by section id."""
        uri = 'get_section/{section_id}'.format(section_id=section_id)
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS)
        return cast(JsonDict, response)

    def add_section(self, project_id: Id, name: str, suite_id: Id = None, parent_id: Id = None,
                    description: str = None) -> JsonDict:
        """Create a new section.

        *Args:* \n
        _project_id_ - ID of the project;\n
        _name_ - name of the section;\n
        _suite_id_ - ID of the test suite (ignored in single suite mode);\n
        _parent_id_ - ID of the parent section;\n
        _description_ - description of the section.
        """
        uri = 'add_section/{project_id}'.format(project_id=project_id)
        data: Dict[str, Union[int, str]] = {'name': name}
        if suite_id is not None:
            data['suite_id'] = suite_id
        if parent_id is not None:
            data['parent_id'] = parent_id
        if description is not None:
            data['description'] = description
        response = self._send_post(uri=uri, data=data)
        return cast(JsonDict, response)

    def get_sections(self, project_id: Id, suite_id: Id) -> JsonList:
        """Return existing sections of a suite."""
        # suite_id goes through query params (same request as the old
        # hand-built '...&suite_id=...' uri, consistent with get_cases).
        uri = 'get_sections/{project_id}'.format(project_id=project_id)
        params = {'suite_id': suite_id}
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS, params=params)
        return cast(JsonList, response)

    def get_case(self, case_id: Id) -> JsonDict:
        """Get case info by case id."""
        uri = 'get_case/{case_id}'.format(case_id=case_id)
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS)
        return cast(JsonDict, response)

    def get_cases(self, project_id: Id, suite_id: Id = None, section_id: Id = None) -> JsonList:
        """Return test cases for a suite or a specific section.

        *Args:* \n
        _project_id_ - ID of the project;\n
        _suite_id_ - ID of the test suite (optional in single suite mode);\n
        _section_id_ - ID of the section (optional).
        """
        uri = 'get_cases/{project_id}'.format(project_id=project_id)
        params = {'project_id': project_id}
        if suite_id is not None:
            params['suite_id'] = suite_id
        if section_id is not None:
            params['section_id'] = section_id
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS, params=params)
        return cast(JsonList, response)

    def add_case(self, section_id: Id, title: str, steps: List[Dict[str, str]], description: str, refs: str,
                 type_id: Id, priority_id: Id, **additional_data: Any) -> JsonDict:
        """Create a new test case.

        *Args:* \n
        _section_id_ - ID of the section;\n
        _title_ - title of the test case;\n
        _steps_ - test steps;\n
        _description_ - test description;\n
        _refs_ - comma-separated list of references;\n
        _type_id_ - ID of the case type;\n
        _priority_id_ - ID of the case priority;\n
        _additional_data_ - extra fields merged into the request.
        """
        uri = 'add_case/{section_id}'.format(section_id=section_id)
        data = {
            'title': title,
            'custom_case_description': description,
            'custom_steps_separated': steps,
            'refs': refs,
            'type_id': type_id,
            'priority_id': priority_id
        }
        # Extra keyword arguments may override the defaults above,
        # matching the original key-copy loop.
        data.update(additional_data)
        response = self._send_post(uri=uri, data=data)
        return cast(JsonDict, response)

    def add_test_run(self, project_id: Id, suite_id: Id = None, name: str = "Test run") -> JsonDict:
        """Add a test run to the specified project and remember its id.

        *Args:* \n
        _project_id_ - ID of the project;\n
        _suite_id_ - ID of the test suite (ignored in single suite mode);\n
        _name_ - name of the test run.

        *Returns:* \n
        Test run information; the new run id is also stored in self.run_id.
        """
        uri = 'add_run/{project_id}'.format(project_id=project_id)
        data: Dict[str, Union[int, str]] = {'name': name}
        if suite_id is not None:
            data['suite_id'] = suite_id
        response = cast(JsonDict, self._send_post(uri=uri, data=data))
        self.run_id = response['id']
        return response
| 42.426316 | 146 | 0.56823 |
from requests import post, get
from typing import Any, cast, Dict, List, Optional, Sequence, Union
# Headers sent with every TestRail API request.
DEFAULT_TESTRAIL_HEADERS = {'Content-Type': 'application/json'}
# TestRail's built-in status id for a passed test.
TESTRAIL_STATUS_ID_PASSED = 1
# Type aliases: one JSON object, a list of objects, and a TestRail id.
JsonDict = Dict[str, Any]
JsonList = List[JsonDict]
Id = Union[str, int]
class TestRailAPIClient(object):
    """Client for the TestRail API v2 (uses the `requests` package)."""
    def __init__(self, server: str, user: str, password: str, run_id: Id = None, protocol: str = 'http') -> None:
        """Store credentials and build the base API URL; run_id is optional."""
        self._url = '{protocol}://{server}/testrail/index.php?/api/v2/'.format(protocol=protocol, server=server)
        self._user = user
        self._password = password
        # run_id attribute only exists when supplied (or after add_test_run).
        if run_id is not None:
            self.run_id = run_id
    def _send_post(self, uri: str, data: Dict[str, Any]) -> Union[JsonList, JsonDict]:
        """POST *data* to *uri* and return parsed JSON; raises on non-2xx."""
        url = self._url + uri
        # NOTE(security): verify=False disables TLS certificate checks.
        response = post(url, json=data, auth=(self._user, self._password), verify=False)
        response.raise_for_status()
        return response.json()
    def _send_get(self, uri: str, headers: Dict[str, str] = None,
                  params: Dict[str, Any] = None) -> Union[JsonList, JsonDict]:
        """GET *uri* and return parsed JSON; raises on non-2xx."""
        url = self._url + uri
        # NOTE(security): verify=False disables TLS certificate checks.
        response = get(url, headers=headers, params=params, auth=(self._user, self._password), verify=False)
        response.raise_for_status()
        return response.json()
    def get_tests(self, run_id: Id, status_ids: Union[str, Sequence[int]] = None) -> JsonList:
        """Return tests of a run, optionally filtered by status ids."""
        uri = 'get_tests/{run_id}'.format(run_id=run_id)
        if status_ids:
            # NOTE(review): joining a plain str iterates its characters -
            # callers presumably pass a sequence of ints; confirm.
            status_ids = ','.join(str(status_id) for status_id in status_ids)
        params = {
            'status_id': status_ids
        }
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS, params=params)
        return cast(JsonList, response)
    def get_results_for_case(self, run_id: Id, case_id: Id, limit: int = None) -> JsonList:
        """Return results for a case, most recent first, up to *limit*."""
        uri = 'get_results_for_case/{run_id}/{case_id}'.format(run_id=run_id, case_id=case_id)
        params = {
            'limit': limit
        }
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS, params=params)
        return cast(JsonList, response)
    def add_result_for_case(self, run_id: Id, case_id: Id,
                            test_result_fields: Dict[str, Union[str, int]]) -> None:
        """Post a result (status_id, comment, defects, ...) for a case."""
        uri = 'add_result_for_case/{run_id}/{case_id}'.format(run_id=run_id, case_id=case_id)
        self._send_post(uri, test_result_fields)
    def get_statuses(self) -> JsonList:
        """Return all test status definitions."""
        uri = 'get_statuses'
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS)
        return cast(JsonList, response)
    def update_case(self, case_id: Id, request_fields: Dict[str, Union[str, int, None]]) -> JsonDict:
        """Update an existing test case with the given fields."""
        uri = 'update_case/{case_id}'.format(case_id=case_id)
        response = self._send_post(uri, request_fields)
        return cast(JsonDict, response)
    def get_status_id_by_status_label(self, status_label: str) -> int:
        """Return the status id for a label (case-insensitive); raises if absent."""
        statuses_info = self.get_statuses()
        for status in statuses_info:
            if status['label'].lower() == status_label.lower():
                return status['id']
        raise Exception(u"There is no status with label \'{}\' in TestRail".format(status_label))
    def get_test_status_id_by_case_id(self, run_id: Id, case_id: Id) -> Optional[int]:
        """Return the latest status id of a case, or None without results."""
        last_case_result = self.get_results_for_case(run_id=run_id, case_id=case_id, limit=1)
        return last_case_result[0]['status_id'] if last_case_result else None
    def get_project(self, project_id: Id) -> JsonDict:
        """Return project info."""
        uri = 'get_project/{project_id}'.format(project_id=project_id)
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS)
        return cast(JsonDict, response)
    def get_suite(self, suite_id: Id) -> JsonDict:
        """Return suite info."""
        uri = 'get_suite/{suite_id}'.format(suite_id=suite_id)
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS)
        return cast(JsonDict, response)
    def get_section(self, section_id: Id) -> JsonDict:
        """Return section info."""
        uri = 'get_section/{section_id}'.format(section_id=section_id)
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS)
        return cast(JsonDict, response)
    def add_section(self, project_id: Id, name: str, suite_id: Id = None, parent_id: Id = None,
                    description: str = None) -> JsonDict:
        """Create a new section; suite_id is ignored in single suite mode."""
        uri = 'add_section/{project_id}'.format(project_id=project_id)
        data: Dict[str, Union[int, str]] = {'name': name}
        if suite_id is not None:
            data['suite_id'] = suite_id
        if parent_id is not None:
            data['parent_id'] = parent_id
        if description is not None:
            data['description'] = description
        response = self._send_post(uri=uri, data=data)
        return cast(JsonDict, response)
    def get_sections(self, project_id: Id, suite_id: Id) -> JsonList:
        """Return existing sections of a suite."""
        # suite_id is appended to the already-query-style base URL.
        uri = 'get_sections/{project_id}&suite_id={suite_id}'.format(project_id=project_id, suite_id=suite_id)
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS)
        return cast(JsonList, response)
    def get_case(self, case_id: Id) -> JsonDict:
        """Return case info."""
        uri = 'get_case/{case_id}'.format(case_id=case_id)
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS)
        return cast(JsonDict, response)
    def get_cases(self, project_id: Id, suite_id: Id = None, section_id: Id = None) -> JsonList:
        """Return test cases for a suite or a specific section."""
        uri = 'get_cases/{project_id}'.format(project_id=project_id)
        params = {'project_id': project_id}
        if suite_id is not None:
            params['suite_id'] = suite_id
        if section_id is not None:
            params['section_id'] = section_id
        response = self._send_get(uri=uri, headers=DEFAULT_TESTRAIL_HEADERS, params=params)
        return cast(JsonList, response)
    def add_case(self, section_id: Id, title: str, steps: List[Dict[str, str]], description: str, refs: str,
                 type_id: Id, priority_id: Id, **additional_data: Any) -> JsonDict:
        """Create a new test case in the given section."""
        uri = 'add_case/{section_id}'.format(section_id=section_id)
        data = {
            'title': title,
            'custom_case_description': description,
            'custom_steps_separated': steps,
            'refs': refs,
            'type_id': type_id,
            'priority_id': priority_id
        }
        # Extra keyword arguments may override the defaults above.
        for key in additional_data:
            data[key] = additional_data[key]
        response = self._send_post(uri=uri, data=data)
        return cast(JsonDict, response)
    def add_test_run(self, project_id: Id, suite_id: Id = None, name: str = "Test run"):
        """Create a test run and remember its id in self.run_id."""
        uri = 'add_run/{project_id}'.format(project_id=project_id)
        data: Dict[str, Union[int, str]] = {'name': name}
        if suite_id is not None:
            data['suite_id'] = suite_id
        response = self._send_post(uri=uri, data=data)
        self.run_id = response['id']
        return cast(JsonDict, response)
| true | true |
1c37a07e0ed2366f0d8ed081f69ba7572ae5a7d7 | 1,108 | py | Python | python/paddle_fl/split_learning/core/layer_handler/layer_base.py | kaih70/PaddleFL | 515906e2c61ee90f8a1c3f8e8210aac2f4177a4a | [
"Apache-2.0"
] | 379 | 2019-09-27T14:26:42.000Z | 2022-03-29T14:28:12.000Z | python/paddle_fl/split_learning/core/layer_handler/layer_base.py | Sprate/PaddleFL | 583691acd5db0a7ca331cc9a72415017b18669b8 | [
"Apache-2.0"
] | 132 | 2019-10-16T03:22:03.000Z | 2022-03-23T08:54:29.000Z | python/paddle_fl/split_learning/core/layer_handler/layer_base.py | Sprate/PaddleFL | 583691acd5db0a7ca331cc9a72415017b18669b8 | [
"Apache-2.0"
] | 106 | 2019-09-27T12:47:18.000Z | 2022-03-29T09:07:25.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
import logging
_LOGGER = logging.getLogger(__name__)
class LayerBase(paddle.nn.Layer):
    """Abstract base class for split-learning model parts.

    Subclasses must override forward(), get_fetch_vars() and get_loss();
    the default implementations only raise NotImplementedError.
    """

    def __init__(self):
        """Initialise the underlying paddle Layer."""
        super().__init__()

    @paddle.jit.to_static
    def forward(self, **feed):
        """Run the forward pass on the given feed; must be overridden."""
        raise NotImplementedError("Failed to run forward")

    def get_fetch_vars(self):
        """Return the variables to fetch after forward; must be overridden."""
        raise NotImplementedError("Failed to get fetch vars")

    def get_loss(self, inputs, predict):
        """Compute the training loss from inputs and predictions; must be overridden."""
        raise NotImplementedError("Failed to get loss")
| 31.657143 | 74 | 0.73556 |
import paddle
import numpy as np
import logging
_LOGGER = logging.getLogger(__name__)
class LayerBase(paddle.nn.Layer):
def __init__(self):
super(LayerBase, self).__init__()
@paddle.jit.to_static
def forward(self, **feed):
raise NotImplementedError("Failed to run forward")
def get_fetch_vars(self):
raise NotImplementedError("Failed to get fetch vars")
def get_loss(self, inputs, predict):
raise NotImplementedError("Failed to get loss")
| true | true |
1c37a1b755ce6106a519f07b5dd18552e9e34701 | 2,776 | py | Python | test/datacenters/test_gcp.py | aexvir/the-zoo | 7816afb9a0a26c6058b030b4a987c73e952d92bd | [
"MIT"
] | 90 | 2018-11-20T10:58:24.000Z | 2022-02-19T16:12:46.000Z | test/datacenters/test_gcp.py | kiwicom/the-zoo | fee0108ea7b65112e5b572a146cff4b1c54033fd | [
"MIT"
] | 348 | 2018-11-21T09:22:31.000Z | 2021-11-03T13:45:08.000Z | test/datacenters/test_gcp.py | aexvir/the-zoo | 7816afb9a0a26c6058b030b4a987c73e952d92bd | [
"MIT"
] | 11 | 2018-12-08T18:42:07.000Z | 2021-02-21T06:27:58.000Z | from unittest.mock import MagicMock
import pytest
from zoo.datacenters import gcp as uut
from zoo.datacenters import models
pytestmark = pytest.mark.django_db
def test_gcp_map_to_nodes(mocker):
    """End-to-end check of gcp.map_to_nodes() against fully mocked GCP/Kubernetes clients.

    Verifies that the persisted InfraNode graph links
    root -> projects -> clusters / ip-rules -> workloads -> container images.
    """
    # --- GCP API stubs: two projects, one EXTERNAL and one INTERNAL forwarding rule ---
    mocker.patch("zoo.datacenters.utils.gcloud.GCPClient.__init__", return_value=None)
    mocker.patch(
        "zoo.datacenters.utils.gcloud.GCPClient.get_all_projects",
        return_value=[{"projectId": "pid1"}, {"projectId": "pid2"}],
    )
    mocker.patch(
        "zoo.datacenters.utils.gcloud.GCPClient.get_forwarding_rules",
        return_value=[
            {
                "id": "test1",
                "loadBalancingScheme": "EXTERNAL",
                "IPAddress": "1.1.1.1",
                "portRange": "443-443",
            },
            {
                "id": "test2",
                "loadBalancingScheme": "INTERNAL",
                "IPAddress": "2.2.2.2",
                "portRange": "443-443",
            },
        ],
    )
    mocker.patch(
        "zoo.datacenters.utils.GCPClient.get_all_clusters",
        return_value=[{"name": "test", "zone": "europe-test"}],
    )
    # --- Kubernetes stub: a single workload carrying two container images ---
    mocker.patch(
        "zoo.datacenters.utils.kube.KubernetesClient.__init__", return_value=None
    )
    workload = MagicMock()
    image1 = MagicMock()
    image2 = MagicMock()
    image1.image = "test/image:0.0.1"
    image2.image = "test/image2:0.0.2"
    workload.metadata.namespace = "namespace-test"
    workload.metadata.name = "resource-test"
    workload.spec.template.spec.containers = [image1, image2]
    mocker.patch(
        "zoo.datacenters.utils.kube.KubernetesClient.iter_workloads",
        return_value={"test-type": [workload]},
    )
    # Exercise the mapper under test.
    uut.map_to_nodes()
    # --- Assertions on the persisted InfraNode graph ---
    root = models.InfraNode.objects.get(kind=models.NodeKind.GCP_ROOT_PROJ)
    projects = {project.value: project for project in root.targets.all()}
    assert set(projects) == {"pid1", "pid2"}
    ctx = "gke_pid1_europe-test_test"
    clusters = {
        cluster.value: cluster
        for cluster in projects["pid1"].targets.filter(
            kind=models.NodeKind.GCP_CLUSTER_NAME
        )
    }
    assert set(clusters) == {ctx}
    # Only the EXTERNAL forwarding rule (test1) is expected among the ip-rule nodes.
    ip_rules = {
        cluster.value: cluster
        for cluster in projects["pid1"].targets.filter(
            kind=models.NodeKind.GCP_IP_RULE_NAME
        )
    }
    assert set(ip_rules) == {"test1:1.1.1.1:443-443"}
    # Workload node value is the cluster context plus "type:namespace/name".
    workloads = {
        workload.value: workload
        for workload in clusters["gke_pid1_europe-test_test"].targets.all()
    }
    full_name = "test-type:namespace-test/resource-test"
    assert set(workloads) == {f"{ctx}:{full_name}"}
    images = {
        image.value: image for image in workloads[f"{ctx}:{full_name}"].targets.all()
    }
    assert set(images) == {"test/image:0.0.1", "test/image2:0.0.2"}
| 30.173913 | 86 | 0.607709 | from unittest.mock import MagicMock
import pytest
from zoo.datacenters import gcp as uut
from zoo.datacenters import models
pytestmark = pytest.mark.django_db
def test_gcp_map_to_nodes(mocker):
mocker.patch("zoo.datacenters.utils.gcloud.GCPClient.__init__", return_value=None)
mocker.patch(
"zoo.datacenters.utils.gcloud.GCPClient.get_all_projects",
return_value=[{"projectId": "pid1"}, {"projectId": "pid2"}],
)
mocker.patch(
"zoo.datacenters.utils.gcloud.GCPClient.get_forwarding_rules",
return_value=[
{
"id": "test1",
"loadBalancingScheme": "EXTERNAL",
"IPAddress": "1.1.1.1",
"portRange": "443-443",
},
{
"id": "test2",
"loadBalancingScheme": "INTERNAL",
"IPAddress": "2.2.2.2",
"portRange": "443-443",
},
],
)
mocker.patch(
"zoo.datacenters.utils.GCPClient.get_all_clusters",
return_value=[{"name": "test", "zone": "europe-test"}],
)
mocker.patch(
"zoo.datacenters.utils.kube.KubernetesClient.__init__", return_value=None
)
workload = MagicMock()
image1 = MagicMock()
image2 = MagicMock()
image1.image = "test/image:0.0.1"
image2.image = "test/image2:0.0.2"
workload.metadata.namespace = "namespace-test"
workload.metadata.name = "resource-test"
workload.spec.template.spec.containers = [image1, image2]
mocker.patch(
"zoo.datacenters.utils.kube.KubernetesClient.iter_workloads",
return_value={"test-type": [workload]},
)
uut.map_to_nodes()
root = models.InfraNode.objects.get(kind=models.NodeKind.GCP_ROOT_PROJ)
projects = {project.value: project for project in root.targets.all()}
assert set(projects) == {"pid1", "pid2"}
ctx = "gke_pid1_europe-test_test"
clusters = {
cluster.value: cluster
for cluster in projects["pid1"].targets.filter(
kind=models.NodeKind.GCP_CLUSTER_NAME
)
}
assert set(clusters) == {ctx}
ip_rules = {
cluster.value: cluster
for cluster in projects["pid1"].targets.filter(
kind=models.NodeKind.GCP_IP_RULE_NAME
)
}
assert set(ip_rules) == {"test1:1.1.1.1:443-443"}
workloads = {
workload.value: workload
for workload in clusters["gke_pid1_europe-test_test"].targets.all()
}
full_name = "test-type:namespace-test/resource-test"
assert set(workloads) == {f"{ctx}:{full_name}"}
images = {
image.value: image for image in workloads[f"{ctx}:{full_name}"].targets.all()
}
assert set(images) == {"test/image:0.0.1", "test/image2:0.0.2"}
| true | true |
1c37a1c346f1ed4454020911795eff358c3de77d | 2,559 | py | Python | plugins/tff_backend/bizz/todo/investor.py | threefoldfoundation/app_backend | b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | [
"Apache-2.0"
] | null | null | null | plugins/tff_backend/bizz/todo/investor.py | threefoldfoundation/app_backend | b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | [
"Apache-2.0"
] | 178 | 2017-08-02T12:58:06.000Z | 2017-12-20T15:01:12.000Z | plugins/tff_backend/bizz/todo/investor.py | threefoldfoundation/app_backend | b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | [
"Apache-2.0"
] | 2 | 2018-01-10T10:43:12.000Z | 2018-03-18T10:42:23.000Z | # -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import logging
class InvestorSteps(object):
    """Ordered onboarding steps for an investor, with progress reporting."""

    DOWNLOAD = 'DOWNLOAD'
    ITO_INVITES = 'ITO_INVITES'
    FLOW_INIT = 'FLOW_INIT'
    FLOW_AMOUNT = 'FLOW_AMOUNT'
    FLOW_SIGN = 'FLOW_SIGN'
    PAY = 'PAY'
    PAY_PROCESS = 'PAY_PROCESS'
    ASSIGN_TOKENS = 'ASSIGN_TOKENS'

    # Human-readable description per step id; keys must match the constants above.
    DESCRIPTIONS = {
        DOWNLOAD: 'Download the ThreeFold app',
        ITO_INVITES: 'Register using an invitation code',
        FLOW_INIT: 'Initiate “purchase iTokens” in the TF app',
        FLOW_AMOUNT: 'Select currency and how much you want to invest',
        FLOW_SIGN: 'Sign the purchase agreement',
        PAY: 'We send you payment information',
        PAY_PROCESS: 'We process the payment',
        ASSIGN_TOKENS: 'Tokens are assigned',
    }

    @classmethod
    def all(cls):
        """Return every step id, in chronological order."""
        return [cls.DOWNLOAD,
                cls.ITO_INVITES,
                cls.FLOW_INIT,
                cls.FLOW_AMOUNT,
                cls.FLOW_SIGN,
                cls.PAY,
                cls.PAY_PROCESS,
                cls.ASSIGN_TOKENS]

    @classmethod
    def should_archive(cls, step):
        """Whether the flow is finished (last step reached) or unknown (None)."""
        return cls.ASSIGN_TOKENS == step or step is None

    @classmethod
    def get_name_for_step(cls, step):
        """Return the description for *step*; logs and falls back to the raw id when unknown."""
        if step not in cls.DESCRIPTIONS:
            logging.error("Investor description for step '%s' not set", step)
        return cls.DESCRIPTIONS.get(step, step)

    @classmethod
    def get_progress(cls, last_checked_step):
        """Build the progress card for the investor flow.

        Every step up to and including *last_checked_step* is marked checked;
        a step that is not part of the flow (e.g. None) marks nothing.
        Replaces the previous double-reversal scan with a single forward pass
        producing identical output.
        """
        steps = cls.all()
        try:
            last_index = steps.index(last_checked_step)
        except ValueError:
            last_index = -1  # step not in the flow: nothing is checked
        items = [{
            'id': step,
            'name': cls.get_name_for_step(step),
            'checked': index <= last_index,
        } for index, step in enumerate(steps)]
        return {
            'id': 'investor',
            'name': 'Become a token holder',
            'items': items
        }
| 30.831325 | 79 | 0.609222 |
import logging
class InvestorSteps(object):
DOWNLOAD = 'DOWNLOAD'
ITO_INVITES = 'ITO_INVITES'
FLOW_INIT = 'FLOW_INIT'
FLOW_AMOUNT = 'FLOW_AMOUNT'
FLOW_SIGN = 'FLOW_SIGN'
PAY = 'PAY'
PAY_PROCESS = 'PAY_PROCESS'
ASSIGN_TOKENS = 'ASSIGN_TOKENS'
DESCRIPTIONS = {
DOWNLOAD: 'Download the ThreeFold app',
ITO_INVITES: 'Register using an invitation code',
FLOW_INIT: 'Initiate “purchase iTokens” in the TF app',
FLOW_AMOUNT: 'Select currency and how much you want to invest',
FLOW_SIGN: 'Sign the purchase agreement',
PAY: 'We send you payment information',
PAY_PROCESS: 'We process the payment',
ASSIGN_TOKENS: 'Tokens are assigned',
}
@classmethod
def all(cls):
return [cls.DOWNLOAD,
cls.ITO_INVITES,
cls.FLOW_INIT,
cls.FLOW_AMOUNT,
cls.FLOW_SIGN,
cls.PAY,
cls.PAY_PROCESS,
cls.ASSIGN_TOKENS]
@classmethod
def should_archive(cls, step):
return cls.ASSIGN_TOKENS == step or step is None
@classmethod
def get_name_for_step(cls, step):
if step not in cls.DESCRIPTIONS:
logging.error('Investor description for step \'%s\' not set', step)
return cls.DESCRIPTIONS.get(step, step)
@classmethod
def get_progress(cls, last_checked_step):
checked = False
items = []
for step in reversed(cls.all()):
if not checked and step == last_checked_step:
checked = True
item = {
'id': step,
'name': cls.get_name_for_step(step),
'checked': checked
}
items.append(item)
return {
'id': 'investor',
'name': 'Become a token holder',
'items': list(reversed(items))
}
| true | true |
1c37a20652cba8d9ec66c21d85f77b65e8fdd40d | 2,837 | py | Python | edna2/tasks/test/H5ToCBFTask/H5ToCBF_exec_test.py | shibom/edna2 | 31e39b887be88a47bca775cd91310f5a17841bdd | [
"CC0-1.0",
"MIT"
] | null | null | null | edna2/tasks/test/H5ToCBFTask/H5ToCBF_exec_test.py | shibom/edna2 | 31e39b887be88a47bca775cd91310f5a17841bdd | [
"CC0-1.0",
"MIT"
] | 2 | 2020-04-06T10:39:50.000Z | 2021-04-14T19:24:37.000Z | edna2/tasks/test/H5ToCBFTask/H5ToCBF_exec_test.py | shibom/edna2 | 31e39b887be88a47bca775cd91310f5a17841bdd | [
"CC0-1.0",
"MIT"
] | 5 | 2019-06-14T07:28:38.000Z | 2021-04-28T13:10:39.000Z | #
# Copyright (c) European Synchrotron Radiation Facility (ESRF)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__authors__ = ["O. Svensson"]
__license__ = "MIT"
__date__ = "21/04/2019"
import os
import unittest
from edna2.tasks.H5ToCBFTask import H5ToCBFTask
from edna2.utils import UtilsTest
from edna2.utils import UtilsConfig
class H5ToCBFExecTest(unittest.TestCase):
    """Execution tests for H5ToCBFTask; skipped when running with the default site config."""

    def setUp(self):
        # Directory holding the JSON reference data for these tests.
        self.dataPath = UtilsTest.prepareTestDataPath(__file__)

    @unittest.skipIf(UtilsConfig.getSite() == 'Default',
                     'Cannot run h5ToCbf test with default config')
    def test_execute_withImageNumber(self):
        """Single-image conversion: the task succeeds and produces one CBF file."""
        in_data = UtilsTest.loadAndSubstitueTestData(
            self.dataPath / 'H5ToCBF_withImageNumber.json', loadTestImages=False)
        task = H5ToCBFTask(inData=in_data)
        task.execute()
        self.assertTrue(task.isSuccess())
        self.assertTrue(os.path.exists(task.outData['outputCBFFile']))

    @unittest.skipIf(UtilsConfig.getSite() == 'Default',
                     'Cannot run h5ToCbf test with default config')
    def test_execute_withImageRange(self):
        """Image-range conversion: files 000001..000010 from the template must all exist."""
        in_data = UtilsTest.loadAndSubstitueTestData(
            self.dataPath / 'H5ToCBF_withImageRange.json', loadTestImages=False)
        task = H5ToCBFTask(inData=in_data)
        task.execute()
        self.assertTrue(task.isSuccess())
        template = task.outData['outputCBFFileTemplate']
        for index in range(1, 11):
            cbf_path = template.replace('######', '{0:06d}').format(index)
            self.assertTrue(os.path.exists(cbf_path))
| 42.984848 | 82 | 0.701445 |
__authors__ = ["O. Svensson"]
__license__ = "MIT"
__date__ = "21/04/2019"
import os
import unittest
from edna2.tasks.H5ToCBFTask import H5ToCBFTask
from edna2.utils import UtilsTest
from edna2.utils import UtilsConfig
class H5ToCBFExecTest(unittest.TestCase):
def setUp(self):
self.dataPath = UtilsTest.prepareTestDataPath(__file__)
@unittest.skipIf(UtilsConfig.getSite() == 'Default',
'Cannot run h5ToCbf test with default config')
def test_execute_withImageNumber(self):
referenceDataPath = self.dataPath / 'H5ToCBF_withImageNumber.json'
inData = UtilsTest.loadAndSubstitueTestData(referenceDataPath,
loadTestImages=False)
h5ToCBF = H5ToCBFTask(inData=inData)
h5ToCBF.execute()
self.assertTrue(h5ToCBF.isSuccess())
outData = h5ToCBF.outData
self.assertTrue(os.path.exists(outData['outputCBFFile']))
@unittest.skipIf(UtilsConfig.getSite() == 'Default',
'Cannot run h5ToCbf test with default config')
def test_execute_withImageRange(self):
referenceDataPath = self.dataPath / 'H5ToCBF_withImageRange.json'
inData = UtilsTest.loadAndSubstitueTestData(referenceDataPath,
loadTestImages=False)
h5ToCBF = H5ToCBFTask(inData=inData)
h5ToCBF.execute()
self.assertTrue(h5ToCBF.isSuccess())
outData = h5ToCBF.outData
for index in range(1,11):
template = outData['outputCBFFileTemplate']
filePath = template.replace('######', '{0:06d}').format(index)
self.assertTrue(os.path.exists(filePath))
| true | true |
1c37a23f90a79b0e103ea206f96f45072c69bd92 | 8,150 | py | Python | contrib/devtools/update-translations.py | nuyulcore/NuyulX | ccf82925dc72966e911f5c5613558b654dce7d96 | [
"MIT"
] | null | null | null | contrib/devtools/update-translations.py | nuyulcore/NuyulX | ccf82925dc72966e911f5c5613558b654dce7d96 | [
"MIT"
] | null | null | null | contrib/devtools/update-translations.py | nuyulcore/NuyulX | ccf82925dc72966e911f5c5613558b654dce7d96 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of the transifex command-line tool (must be on PATH)
TX = 'tx'
# Name of source language file (excluded from processing by all_ts_files)
SOURCE_LANG = 'nuyul_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
    '''Exit with status 1 unless the current working directory is the repository root.

    The heuristic is the presence of a .git entry. Both diagnostics now go to
    stderr (previously the first message leaked to stdout), and sys.exit is
    used instead of the site-provided exit() builtin.
    '''
    if not os.path.exists('.git'):
        print('No .git directory found', file=sys.stderr)
        print('Execute this script at the root of the repository', file=sys.stderr)
        sys.exit(1)
def fetch_all_translations():
    '''Pull every translation from transifex via the tx tool; abort the script on failure.'''
    exit_code = subprocess.call([TX, 'pull', '-f', '-a'])
    if exit_code != 0:
        print('Error while fetching translations', file=sys.stderr)
        exit(1)
def find_format_specifiers(s):
    '''Find all format specifiers in a string.

    Returns the characters that directly follow each '%' in *s*, in order of
    appearance (e.g. ['s', 'd'] for '%s: %d', ['1'] for '%1').
    '''
    pos = 0
    specifiers = []
    while True:
        percent = s.find('%', pos)
        if percent < 0:
            break
        try:
            specifiers.append(s[percent + 1])
        except IndexError:
            # A trailing '%' has no specifier character after it. Previously a
            # bare `except:` swallowed *every* exception here; only IndexError
            # can legitimately occur.
            print('Failed to get specifier')
        pos = percent + 2
    return specifiers
def split_format_specifiers(specifiers):
    '''Split format specifiers between numeric (Qt) and others (strprintf)'''
    qt_digits = {'1', '2', '3', '4', '5', '6', '7', '8', '9'}
    numeric = [s for s in specifiers if s in qt_digits]
    other = [s for s in specifiers if s not in qt_digits]
    # A message mixing numeric (Qt) specifiers with anything else is treated as
    # Qt-formatted: QString::arg() (https://doc.qt.io/qt-5/qstring.html#arg)
    # only substitutes the numeric %N placeholders, so e.g. "(percentage: %1%)"
    # is valid and the stray '%' must not be counted as a strprintf specifier.
    if numeric:
        other = []
    # Numeric (Qt) specifiers may appear in any order, hence a set; strprintf
    # specifiers are positional, hence an ordered list.
    return set(numeric), other
def sanitize_string(s):
    '''Sanitize string for printing'''
    # Collapse every newline to a space so the message stays on one line.
    return ' '.join(s.split('\n'))
def check_format_specifiers(source, translation, errors, numerus):
    '''Check that *translation* uses the same format specifiers as *source*.

    Appends a human-readable message to *errors* for each problem found and
    returns False; returns True when the translation is acceptable.
    *numerus* marks plural-form (numerusform) translations, which get one
    relaxation (see below).
    '''
    source_f = split_format_specifiers(find_format_specifiers(source))
    # assert that no source messages contain both Qt and strprintf format specifiers
    # if this fails, go change the source as this is hacky and confusing!
    assert(not(source_f[0] and source_f[1]))
    try:
        translation_f = split_format_specifiers(find_format_specifiers(translation))
    except IndexError:
        # NOTE(review): find_format_specifiers catches its own IndexError, so it
        # is unclear this branch can still trigger — verify before removing.
        errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
        return False
    else:
        if source_f != translation_f:
            if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
                # Allow numerus translations to omit %n specifier (usually when it only has one possible value)
                return True
            errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
            return False
    return True
def all_ts_files(suffix=''):
    '''Yield (filename, filepath) for every language .ts file in LOCALE_DIR.

    The source language file is never yielded. When *suffix* is given, only
    files ending in '.ts' + suffix match, and the suffix is stripped from the
    yielded name and path again.
    '''
    wanted_ending = '.ts' + suffix
    source_file = SOURCE_LANG + suffix
    for entry in os.listdir(LOCALE_DIR):
        # process only language files, and do not process the source language
        if not entry.endswith(wanted_ending) or entry == source_file:
            continue
        if suffix:  # remove the provided suffix again
            entry = entry[:-len(suffix)]
        yield entry, os.path.join(LOCALE_DIR, entry)
# Control characters to strip from raw .ts bytes: 0x00-0x09, 0x0B, 0x0C and
# 0x0E-0x1F (newline 0x0A and carriage return 0x0D are deliberately kept).
# These bytes are invalid in XML 1.0 and would make the parser fail.
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
    '''Remove invalid characters from translation string'''
    return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just
# for cleaner diffs for comparison; disabled by default).
_orig_escape_cdata = None  # bound to ET._escape_cdata by postprocess_translations()
def escape_cdata(text):
    '''Escape CDATA like Qt does: additionally entity-encode single and double quotes.'''
    escaped = _orig_escape_cdata(text)
    for raw, entity in (("'", '&apos;'), ('"', '&quot;')):
        escaped = escaped.replace(raw, entity)
    return escaped
def postprocess_translations(reduce_diff_hacks=False):
    '''Validate, clean and rewrite all fetched .ts files in place.

    Each language file is renamed to <file>.orig, stripped of invalid control
    bytes, parsed, checked for format-specifier mismatches, stripped of
    location tags and unfinished messages, and written back. Files with fewer
    than MIN_NUM_MESSAGES messages are skipped (only the .orig copy remains).
    Returns True when at least one translation had format-specifier errors.
    '''
    print('Checking and postprocessing...')
    if reduce_diff_hacks:
        # Install the Qt-compatible cdata escaping (see escape_cdata above).
        global _orig_escape_cdata
        _orig_escape_cdata = ET._escape_cdata
        ET._escape_cdata = escape_cdata
    for (filename,filepath) in all_ts_files():
        os.rename(filepath, filepath+'.orig')
    have_errors = False
    for (filename,filepath) in all_ts_files('.orig'):
        # pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
        with open(filepath + '.orig', 'rb') as f:
            data = f.read()
        # remove control characters; this must be done over the entire file otherwise the XML parser will fail
        data = remove_invalid_characters(data)
        tree = ET.parse(io.BytesIO(data), parser=parser)
        # iterate over all messages in file
        root = tree.getroot()
        for context in root.findall('context'):
            for message in context.findall('message'):
                numerus = message.get('numerus') == 'yes'
                source = message.find('source').text
                translation_node = message.find('translation')
                # pick all numerusforms
                if numerus:
                    translations = [i.text for i in translation_node.findall('numerusform')]
                else:
                    translations = [translation_node.text]
                for translation in translations:
                    if translation is None:
                        continue
                    errors = []
                    valid = check_format_specifiers(source, translation, errors, numerus)
                    for error in errors:
                        print('%s: %s' % (filename, error))
                    if not valid: # set type to unfinished and clear string if invalid
                        translation_node.clear()
                        translation_node.set('type', 'unfinished')
                        have_errors = True
                # Remove location tags (makes diffs less noisy)
                for location in message.findall('location'):
                    message.remove(location)
                # Remove entire message if it is an unfinished translation
                if translation_node.get('type') == 'unfinished':
                    context.remove(message)
        # check if document is (virtually) empty, and remove it if so
        num_messages = 0
        for context in root.findall('context'):
            for message in context.findall('message'):
                num_messages += 1
        if num_messages < MIN_NUM_MESSAGES:
            # NOTE(review): "Removing" here means the cleaned file is simply not
            # written back; the renamed .orig copy is left on disk.
            print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
            continue
        # write fixed-up tree
        # if diff reduction requested, replace some XML to 'sanitize' to qt formatting
        if reduce_diff_hacks:
            out = io.BytesIO()
            tree.write(out, encoding='utf-8')
            out = out.getvalue()
            out = out.replace(b' />', b'/>')
            with open(filepath, 'wb') as f:
                f.write(out)
        else:
            tree.write(filepath, encoding='utf-8')
    return have_errors
if __name__ == '__main__':
    # Entry point: verify we run from the repo root, pull translations from
    # transifex, then validate and rewrite them.
    check_at_repository_root()
    fetch_all_translations()
    postprocess_translations()
| 38.625592 | 124 | 0.633865 |
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
TX = 'tx'
SOURCE_LANG = 'nuyul_en.ts'
LOCALE_DIR = 'src/qt/locale'
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
def sanitize_string(s):
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| true | true |
1c37a34a195f3289c67ff26374a903d2e2c87e3b | 11,126 | py | Python | code/python/OverviewReportBuilder/v1/fds/sdk/OverviewReportBuilder/model/description_description.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 6 | 2022-02-07T16:34:18.000Z | 2022-03-30T08:04:57.000Z | code/python/OverviewReportBuilder/v1/fds/sdk/OverviewReportBuilder/model/description_description.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 2 | 2022-02-07T05:25:57.000Z | 2022-03-07T14:18:04.000Z | code/python/OverviewReportBuilder/v1/fds/sdk/OverviewReportBuilder/model/description_description.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | null | null | null | """
FactSet Overview Report Builder API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.OverviewReportBuilder.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.OverviewReportBuilder.exceptions import ApiAttributeError
class DescriptionDescription(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # This schema declares no enum-restricted and no validated properties.
    allowed_values = {
    }
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'value': (str,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # No discriminator: this schema is not polymorphic.
        return None
    attribute_map = {
        'value': 'value',  # noqa: E501
    }
    read_only_vars = {
    }
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """DescriptionDescription - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            value (str): [optional] # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Bypass __init__ so read-only attributes can be populated from server data.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """DescriptionDescription - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            value (str): [optional] # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
| 43.460938 | 124 | 0.571994 |
import re
import sys
from fds.sdk.OverviewReportBuilder.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.OverviewReportBuilder.exceptions import ApiAttributeError
class DescriptionDescription(ModelNormal):
    """Auto-generated OpenAPI model (OpenAPI Generator). Do not edit manually.

    Holds a single optional string property ``value`` (see ``openapi_types`` /
    ``attribute_map`` below).
    """
    # No enum-restricted properties in this schema.
    allowed_values = {
    }
    # No validation rules (max_length, regex, ...) for this schema.
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """Types accepted for properties not declared in the schema.

        Defined as a method because a model may reference its own type; it must
        run after the class is fully loaded.
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)
    _nullable = False
    @cached_property
    def openapi_types():
        """Map attribute name -> tuple of accepted types (post class-load)."""
        return {
            'value': (str,),
        }
    @cached_property
    def discriminator():
        """No discriminator: this model is not polymorphic."""
        return None
    # Attribute name -> JSON key in the wire format (identical here).
    attribute_map = {
        'value': 'value',
    }
    read_only_vars = {
    }
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Deserialize server data into a DescriptionDescription instance.

        Accepts the generator's private keyword knobs (``_check_type``,
        ``_path_to_item``, ``_spec_property_naming``, ``_configuration``,
        ``_visited_composed_classes``) plus the schema property ``value (str)``.
        Positional arguments are rejected.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Bypass __init__ so read-only attributes may be set from server data.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # Configuration asks to silently drop unknown keys.
                continue
            setattr(self, var_name, var_value)
        return self
    # Internal attribute names that bypass the model's property machinery.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Build a DescriptionDescription from user input.

        Same keyword knobs as ``_from_openapi_data``; unlike it, assigning a
        read-only attribute raises ApiAttributeError.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # Configuration asks to silently drop unknown keys.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
| true | true |
1c37a46965905b69ae23131d9faa5c47d8f12d9c | 27,228 | py | Python | src/datasets/utils/file_utils.py | borisdayma/datasets | ab6d9759b8b15c0109947159ff1cb6cb3486fdb8 | [
"Apache-2.0"
] | 1 | 2020-09-09T00:44:49.000Z | 2020-09-09T00:44:49.000Z | src/datasets/utils/file_utils.py | borisdayma/datasets | ab6d9759b8b15c0109947159ff1cb6cb3486fdb8 | [
"Apache-2.0"
] | null | null | null | src/datasets/utils/file_utils.py | borisdayma/datasets | ab6d9759b8b15c0109947159ff1cb6cb3486fdb8 | [
"Apache-2.0"
] | 1 | 2020-09-04T02:33:51.000Z | 2020-09-04T02:33:51.000Z | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import copy
import gzip
import json
import lzma
import os
import re
import shutil
import sys
import tarfile
import tempfile
import time
import urllib
from contextlib import closing, contextmanager
from dataclasses import dataclass
from functools import partial
from hashlib import sha256
from pathlib import Path
from typing import Dict, Optional, Union
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import numpy as np
import posixpath
import pyarrow as pa
import requests
from tqdm.auto import tqdm
from .. import __version__, config
from .filelock import FileLock
from .logging import WARNING, get_logger
logger = get_logger(__name__) # pylint: disable=invalid-name
INCOMPLETE_SUFFIX = ".incomplete"
def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str:
    """
    Add hf_modules_cache to the python path.
    By default hf_modules_cache='~/.cache/huggingface/modules'.
    It can also be set with the environment variable HF_MODULES_CACHE.
    This is used to add modules such as `datasets_modules`
    """
    if hf_modules_cache is None:
        hf_modules_cache = config.HF_MODULES_CACHE
    cache_str = str(hf_modules_cache)
    if cache_str not in sys.path:
        sys.path.append(cache_str)
        os.makedirs(cache_str, exist_ok=True)
        # Make the cache importable as a package by creating an empty __init__.py.
        init_file = os.path.join(cache_str, "__init__.py")
        if not os.path.exists(init_file):
            with open(init_file, "w"):
                pass
    return cache_str
@contextmanager
def temp_seed(seed: int, set_pytorch=False, set_tensorflow=False):
    """Temporarily set the random seed. This works for python numpy, pytorch and tensorflow."""
    # Save the current numpy RNG state so it can be restored on exit.
    np_state = np.random.get_state()
    np.random.seed(seed)
    if set_pytorch and config.TORCH_AVAILABLE:
        import torch
        # Save the CPU RNG state (and all CUDA states if available) before reseeding.
        torch_state = torch.random.get_rng_state()
        torch.random.manual_seed(seed)
        if torch.cuda.is_available():
            torch_cuda_states = torch.cuda.get_rng_state_all()
            torch.cuda.manual_seed_all(seed)
    if set_tensorflow and config.TF_AVAILABLE:
        import tensorflow as tf
        from tensorflow.python import context as tfpycontext
        # Swap in a fresh generator seeded with `seed`; keep the old one for restore.
        tf_state = tf.random.get_global_generator()
        temp_gen = tf.random.Generator.from_seed(seed)
        tf.random.set_global_generator(temp_gen)
        if not tf.executing_eagerly():
            raise ValueError("Setting random seed for TensorFlow is only available in eager mode")
        tf_context = tfpycontext.context()  # eager mode context
        tf_seed = tf_context._seed
        # NOTE(review): `_seed` / `_rng` are private TF eager-context attributes and may
        # break across TF versions — verify when upgrading TensorFlow.
        tf_rng_initialized = hasattr(tf_context, "_rng")
        if tf_rng_initialized:
            tf_rng = tf_context._rng
        tf_context._set_global_seed(seed)
    try:
        yield
    finally:
        # Restore every RNG state that was saved above.
        np.random.set_state(np_state)
        if set_pytorch and config.TORCH_AVAILABLE:
            torch.random.set_rng_state(torch_state)
            if torch.cuda.is_available():
                torch.cuda.set_rng_state_all(torch_cuda_states)
        if set_tensorflow and config.TF_AVAILABLE:
            tf.random.set_global_generator(tf_state)
            tf_context._seed = tf_seed
            if tf_rng_initialized:
                tf_context._rng = tf_rng
            else:
                delattr(tf_context, "_rng")
def is_remote_url(url_or_filename: str) -> bool:
    """Return True if the string has a scheme of a remote protocol (http(s), s3, gs, hdfs, ftp)."""
    return urlparse(url_or_filename).scheme in ("http", "https", "s3", "gs", "hdfs", "ftp")
def is_local_path(url_or_filename: str) -> bool:
    """Return True if the string looks like a local filesystem path.

    On unix the scheme of a local path is empty (for both absolute and relative),
    while on windows the scheme is the drive name (ex: "c") for absolute paths.
    For details on the windows behavior, see https://bugs.python.org/issue42215
    """
    scheme = urlparse(url_or_filename).scheme
    return scheme == "" or os.path.ismount(scheme + ":/")
def is_relative_path(url_or_filename: str) -> bool:
    """Return True for a scheme-less, non-absolute path (i.e. a relative local path)."""
    parsed = urlparse(url_or_filename)
    return parsed.scheme == "" and not os.path.isabs(url_or_filename)
def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str:
    """Build the URL of `filename` for `identifier` on the HF S3 bucket (or CDN if `use_cdn`).

    `dataset=True` targets the datasets bucket, `dataset=False` the metrics bucket.
    """
    if dataset:
        cdn_prefix, bucket_prefix = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX, config.S3_DATASETS_BUCKET_PREFIX
    else:
        cdn_prefix, bucket_prefix = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX, config.S3_METRICS_BUCKET_PREFIX
    endpoint = cdn_prefix if use_cdn else bucket_prefix
    return f"{endpoint}/{identifier}/{filename}"
def head_hf_s3(
    identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0
) -> Union[requests.Response, Exception]:
    """HEAD the given file on the HF bucket/CDN; return the response, or the raised exception on failure."""
    try:
        target_url = hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset)
        return http_head(target_url, max_retries=max_retries)
    except Exception as e:
        # Best-effort probe: hand the error back to the caller instead of raising.
        return e
def hf_github_url(path: str, name: str, dataset=True, version: Optional[str] = None) -> str:
    """Build the GitHub raw URL of a dataset (or metric) loading script for the given version."""
    from .. import SCRIPTS_VERSION
    # Fall back to the env var / packaged scripts version when no version is given.
    version = version or os.getenv("HF_SCRIPTS_VERSION", SCRIPTS_VERSION)
    template = config.REPO_DATASETS_URL if dataset else config.REPO_METRICS_URL
    return template.format(version=version, path=path, name=name)
def hf_hub_url(path: str, name: str, version: Optional[str] = None) -> str:
    """Build the Datasets Hub URL of a file, defaulting to the hub's default revision."""
    if version is None:
        version = config.HUB_DEFAULT_VERSION
    return config.HUB_DATASETS_URL.format(path=path, name=name, version=version)
def url_or_path_join(base_name: str, *pathnames: str) -> str:
    """Join path components, using URL separators for remote URLs and posix form for local paths."""
    if not is_remote_url(base_name):
        return Path(base_name, *pathnames).as_posix()
    return posixpath.join(base_name, *pathnames)
def url_or_path_parent(url_or_path: str) -> str:
    """Return the parent of a URL (text before its last "/") or of a local path (dirname)."""
    if not is_remote_url(url_or_path):
        return os.path.dirname(url_or_path)
    return url_or_path[: url_or_path.rindex("/")]
def hash_url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    If the url ends with .py, '.py' is appended to the name so that the
    cached file keeps a python extension (dataset/metric loading scripts
    remain importable as modules).
    """
    # Fixed docstring: it previously described a Keras '.h5' special case
    # (copied from another cache utility) that this code never implemented —
    # the only suffix preserved here is '.py'.
    filename = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        filename += "." + sha256(etag.encode("utf-8")).hexdigest()
    if url.endswith(".py"):
        filename += ".py"
    return filename
@dataclass
class DownloadConfig:
    """Configuration for our cached path manager.
    Attributes:
        cache_dir (:obj:`str` or :obj:`Path`, optional): Specify a cache directory to save the file to (overwrite the
            default cache dir).
        force_download (:obj:`bool`, default ``False``): If True, re-download the file even if it's already cached in
            the cache dir.
        resume_download (:obj:`bool`, default ``False``): If True, resume the download if an incompletely received
            file is found.
        local_files_only (:obj:`bool`, default ``False``): If True, only look at the local cache and never go online.
        proxies (:obj:`dict`, optional): Proxy configuration forwarded to the underlying HTTP requests.
        user_agent (:obj:`str`, optional): Optional string or dict that will be appended to the user-agent on remote
            requests.
        extract_compressed_file (:obj:`bool`, default ``False``): If True and the path point to a zip or tar file,
            extract the compressed file in a folder along the archive.
        force_extract (:obj:`bool`, default ``False``): If True when extract_compressed_file is True and the archive
            was already extracted, re-extract the archive and override the folder where it was extracted.
        use_etag (:obj:`bool`, default ``True``): Whether to take the server ETag into account when naming/validating
            cached files.
        num_proc (:obj:`int`, optional): Presumably the number of processes for parallel downloads; not read in this
            module — verify against the download manager.
        max_retries (:obj:`int`, default ``1``): The number of times to retry an HTTP request if it fails.
        use_auth_token (:obj:`str` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token
            for remote files on the Datasets Hub. If True, will get token from ~/.huggingface.
    """
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    def copy(self) -> "DownloadConfig":
        """Return an independent deep copy of this configuration."""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def cached_path(
    url_or_filename,
    download_config=None,
    **download_kwargs,
) -> str:
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    Extra ``download_kwargs`` are used to build a :class:`DownloadConfig`
    when ``download_config`` is not provided.
    Return:
        Local path (string)
    Raises:
        FileNotFoundError: in case of non-recoverable file
            (non-existent or no cache on disk)
        ConnectionError: in case of unreachable url
            and no cache on disk
        ValueError: if it couldn't parse the url or filename correctly
        requests.exceptions.ConnectionError: in case of internet connection issue
    """
    if download_config is None:
        download_config = DownloadConfig(**download_kwargs)
    cache_dir = download_config.cache_dir or os.path.join(config.HF_DATASETS_CACHE, "downloads")
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=download_config.force_download,
            proxies=download_config.proxies,
            resume_download=download_config.resume_download,
            user_agent=download_config.user_agent,
            local_files_only=download_config.local_files_only,
            use_etag=download_config.use_etag,
            max_retries=download_config.max_retries,
            use_auth_token=download_config.use_auth_token,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif is_local_path(url_or_filename):
        # File, but it doesn't exist.
        raise FileNotFoundError("Local file {} doesn't exist".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if download_config.extract_compressed_file and output_path is not None:
        # Nothing to do if the file is not a recognized archive format.
        if (
            not is_zipfile(output_path)
            and not tarfile.is_tarfile(output_path)
            and not is_gzip(output_path)
            and not is_xz(output_path)
            and not is_rarfile(output_path)
        ):
            return output_path
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_output_path = os.path.abspath(output_path)
        output_path_extracted = os.path.join(cache_dir, "extracted", hash_url_to_filename(abs_output_path))
        # Reuse a previous extraction (dir or single file) unless force_extract is set.
        if (
            os.path.isdir(output_path_extracted)
            and os.listdir(output_path_extracted)
            and not download_config.force_extract
        ) or (os.path.isfile(output_path_extracted) and not download_config.force_extract):
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted, exist_ok=True)
            if tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            elif is_gzip(output_path):
                # gzip holds a single file: extract to a file, not a directory.
                os.rmdir(output_path_extracted)
                with gzip.open(output_path, "rb") as gzip_file:
                    with open(output_path_extracted, "wb") as extracted_file:
                        shutil.copyfileobj(gzip_file, extracted_file)
            elif is_zipfile(output_path):  # put zip file to the last, b/c it is possible wrongly detected as zip
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif is_xz(output_path):
                # xz also holds a single stream: extract to a file.
                os.rmdir(output_path_extracted)
                with lzma.open(output_path) as compressed_file:
                    with open(output_path_extracted, "wb") as extracted_file:
                        shutil.copyfileobj(compressed_file, extracted_file)
            elif is_rarfile(output_path):
                if config.RARFILE_AVAILABLE:
                    import rarfile
                    rf = rarfile.RarFile(output_path)
                    rf.extractall(output_path_extracted)
                    rf.close()
                else:
                    raise EnvironmentError("Please pip install rarfile")
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str:
    """Build the user-agent string sent by datasets, optionally extended with caller-provided info."""
    parts = [
        "datasets/{}".format(__version__),
        "python/{}".format(config.PY_VERSION),
        "pyarrow/{}".format(pa.__version__),
    ]
    if config.TORCH_AVAILABLE:
        parts.append("torch/{}".format(config.TORCH_VERSION))
    if config.TF_AVAILABLE:
        parts.append("tensorflow/{}".format(config.TF_VERSION))
    if config.BEAM_AVAILABLE:
        parts.append("apache_beam/{}".format(config.BEAM_VERSION))
    if isinstance(user_agent, dict):
        parts.extend("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        parts.append(user_agent)
    return "; ".join(parts)
def get_authentication_headers_for_url(url: str, use_auth_token: Optional[Union[str, bool]] = None) -> dict:
    """Handle the HF authentication: build a Bearer authorization header for huggingface.co URLs."""
    if not url.startswith("https://huggingface.co/"):
        return {}
    token = None
    if isinstance(use_auth_token, str):
        # An explicit token string wins over the stored one.
        token = use_auth_token
    elif bool(use_auth_token):
        from huggingface_hub import hf_api
        token = hf_api.HfFolder.get_token()
    if not token:
        return {}
    return {"authorization": "Bearer {}".format(token)}
class OfflineModeIsEnabled(ConnectionError):
    """Raised when a network access is attempted while HF_DATASETS_OFFLINE mode is enabled."""
    pass
def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None):
    """Raise a OfflineModeIsEnabled error (subclass of ConnectionError) if HF_DATASETS_OFFLINE is True."""
    if not config.HF_DATASETS_OFFLINE:
        return
    error_message = "Offline mode is enabled."
    if msg is not None:
        error_message = error_message + " " + str(msg)
    raise OfflineModeIsEnabled(error_message)
def _request_with_retry(
    method: str,
    url: str,
    max_retries: int = 0,
    base_wait_time: float = 0.5,
    max_wait_time: float = 2,
    timeout: float = 10.0,
    **params,
) -> requests.Response:
    """Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff.
    Note that if the environment variable HF_DATASETS_OFFLINE is set to 1, then a OfflineModeIsEnabled error is raised.
    Args:
        method (str): HTTP method, such as 'GET' or 'HEAD'
        url (str): The URL of the ressource to fetch
        max_retries (int): Maximum number of retries, defaults to 0 (no retries)
        base_wait_time (float): Duration (in seconds) to wait before retrying the first time. Wait time between
            retries then grows exponentially, capped by max_wait_time.
        max_wait_time (float): Maximum amount of time between two retries, in seconds
        timeout (float): Per-request timeout in seconds
        **params: Params to pass to `requests.request`
    Raises:
        requests.exceptions.ConnectTimeout: once the request has timed out more than `max_retries` times
    """
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    tries, success = 0, False
    while not success:
        tries += 1
        try:
            response = requests.request(method=method.upper(), url=url, timeout=timeout, **params)
            success = True
        except requests.exceptions.ConnectTimeout as err:
            if tries > max_retries:
                raise err
            else:
                # Bug fix: the log previously interpolated `tries/max_retries` as a float
                # division (e.g. "[0.5]") instead of the intended attempt counter "i/n".
                logger.info(f"{method} request to {url} timed out, retrying... [{tries}/{max_retries}]")
                sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1))  # Exponential backoff
                time.sleep(sleep_time)
    return response
def ftp_head(url, timeout=10.0):
    """Return True if the FTP resource at `url` is reachable (by reading one byte), else False."""
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    try:
        with closing(urllib.request.urlopen(url, timeout=timeout)) as resource:
            resource.read(1)
    except Exception:
        # Any failure (DNS, connect, read) is treated as "not reachable".
        return False
    else:
        return True
def ftp_get(url, temp_file, timeout=10.0):
    """Download the FTP resource at `url` into the open file object `temp_file`."""
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    try:
        logger.info(f"Getting through FTP {url} into {temp_file.name}")
        with closing(urllib.request.urlopen(url, timeout=timeout)) as resource:
            shutil.copyfileobj(resource, temp_file)
    except urllib.error.URLError as e:
        # Normalize URL errors to the ConnectionError callers expect.
        raise ConnectionError(e)
def http_get(url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=10.0, max_retries=0):
    """Stream-download `url` into the open file object `temp_file`, with a tqdm progress bar.

    If `resume_size` > 0, a Range header is sent to resume a partial download.
    """
    # Copy the caller's headers so the Range/user-agent mutations stay local.
    headers = copy.deepcopy(headers) or {}
    headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = _request_with_retry(
        method="GET",
        url=url,
        stream=True,
        proxies=proxies,
        headers=headers,
        cookies=cookies,
        max_retries=max_retries,
        timeout=timeout,
    )
    if response.status_code == 416:  # Range not satisfiable: nothing left to download
        return
    content_length = response.headers.get("Content-Length")
    # Content-Length only covers the remaining bytes when resuming, hence the addition.
    total = resume_size + int(content_length) if content_length is not None else None
    not_verbose = bool(logger.getEffectiveLevel() > WARNING)
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
        disable=not_verbose,
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def http_head(
    url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0
) -> requests.Response:
    """Send a HEAD request to `url` with the datasets user-agent, retrying on connect timeouts."""
    # Copy the caller's headers so the user-agent mutation stays local.
    request_headers = copy.deepcopy(headers) or {}
    request_headers["user-agent"] = get_datasets_user_agent(user_agent=request_headers.get("user-agent"))
    return _request_with_retry(
        method="HEAD",
        url=url,
        proxies=proxies,
        headers=request_headers,
        cookies=cookies,
        allow_redirects=allow_redirects,
        timeout=timeout,
        max_retries=max_retries,
    )
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
    use_etag=True,
    max_retries=0,
    use_auth_token=None,
) -> str:
    """
    Given a URL, look for the corresponding file in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    The cached filename is a hash of the URL (and of the server ETag when `use_etag`);
    a `.json` metadata file and a `.lock` file live next to each cache entry.
    Return:
        Local path (string)
    Raises:
        FileNotFoundError: in case of non-recoverable file
            (non-existent or no cache on disk)
        ConnectionError: in case of unreachable url
            and no cache on disk
    """
    if cache_dir is None:
        cache_dir = config.HF_DATASETS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    original_url = url  # Some parameters may be added
    connected = False
    response = None
    cookies = None
    etag = None
    # Try a first time to find the file on the local file system without eTag (None)
    # if we don't ask for 'force_download' then we spare a request
    filename = hash_url_to_filename(original_url, etag=None)
    cache_path = os.path.join(cache_dir, filename)
    if os.path.exists(cache_path) and not force_download and not use_etag:
        return cache_path
    # Prepare headers for authentication
    headers = get_authentication_headers_for_url(url, use_auth_token=use_auth_token)
    if user_agent is not None:
        headers["user-agent"] = user_agent
    # We don't have the file locally or we need an eTag
    if not local_files_only:
        if url.startswith("ftp://"):
            connected = ftp_head(url)
        try:
            response = http_head(
                url,
                allow_redirects=True,
                proxies=proxies,
                timeout=etag_timeout,
                max_retries=max_retries,
                headers=headers,
            )
            if response.status_code == 200:  # ok
                etag = response.headers.get("ETag") if use_etag else None
                for k, v in response.cookies.items():
                    # In some edge cases, we need to get a confirmation token
                    if k.startswith("download_warning") and "drive.google.com" in url:
                        url += "&confirm=" + v
                        cookies = response.cookies
                connected = True
            # In some edge cases, head request returns 400 but the connection is actually ok
            elif (
                (response.status_code == 400 and "firebasestorage.googleapis.com" in url)
                or (response.status_code == 405 and "drive.google.com" in url)
                or (
                    response.status_code == 403
                    and re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url)
                )
            ):
                connected = True
                logger.info("Couldn't get ETag version for url {}".format(url))
        except (EnvironmentError, requests.exceptions.Timeout):
            # not connected
            pass
    # connected == False = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if not connected:
        if os.path.exists(cache_path):
            return cache_path
        if local_files_only:
            raise FileNotFoundError(
                f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been"
                " disabled. To enable file online look-ups, set 'local_files_only' to False."
            )
        elif response is not None and response.status_code == 404:
            raise FileNotFoundError("Couldn't find file at {}".format(url))
        _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
        raise ConnectionError("Couldn't reach {}".format(url))
    # Try a second time with the filename that includes the ETag hash
    filename = hash_url_to_filename(original_url, etag)
    cache_path = os.path.join(cache_dir, filename)
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # From now on, connected is True.
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        if resume_download:
            # Append to the partial '.incomplete' file and pick up where the last try stopped.
            incomplete_path = cache_path + ".incomplete"
            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)
            # GET file object
            if url.startswith("ftp://"):
                ftp_get(url, temp_file)
            else:
                http_get(
                    url,
                    temp_file,
                    proxies=proxies,
                    resume_size=resume_size,
                    headers=headers,
                    cookies=cookies,
                    max_retries=max_retries,
                )
        logger.info("storing %s in cache at %s", url, cache_path)
        shutil.move(temp_file.name, cache_path)
        logger.info("creating metadata file for %s", cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w", encoding="utf-8") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def is_gzip(path: str) -> bool:
    """Return True if the file at ``path`` is valid gzip data.

    Detection works by trying to decompress a single byte; an ``OSError``
    raised by the gzip codec means the file is not gzip-compressed.
    (Adapted from https://stackoverflow.com/a/60634210)
    """
    with gzip.open(path, "r") as stream:
        try:
            stream.read(1)
        except OSError:
            return False
        return True
def is_xz(path: str) -> bool:
    """Return True if ``path`` starts with the 6-byte XZ stream header magic.

    See https://tukaani.org/xz/xz-file-format-1.0.4.txt for the format.
    """
    xz_magic = b"\xfd7zXZ\x00"
    with open(path, "rb") as stream:
        try:
            prefix = stream.read(len(xz_magic))
        except OSError:
            return False
    return prefix == xz_magic
def is_rarfile(path: str) -> bool:
    """Return True if ``path`` begins with a RAR (v4 or v5) archive signature.

    Signature bytes taken from
    https://github.com/markokr/rarfile/blob/master/rarfile.py
    """
    rar4_sig = b"Rar!\x1a\x07\x00"
    rar5_sig = b"Rar!\x1a\x07\x01\x00"
    with open(path, "rb", 1024) as stream:
        head = stream.read(len(rar5_sig))
    return head.startswith((rar4_sig, rar5_sig))
def add_start_docstrings(*docstr):
    """Return a decorator that prepends the given strings to ``fn.__doc__``."""
    def docstring_decorator(fn):
        existing = fn.__doc__ if fn.__doc__ is not None else ""
        fn.__doc__ = "".join(docstr) + existing
        return fn
    return docstring_decorator
def add_end_docstrings(*docstr):
    """Return a decorator that appends the given strings to ``fn.__doc__``."""
    def docstring_decorator(fn):
        existing = fn.__doc__ if fn.__doc__ is not None else ""
        fn.__doc__ = existing + "".join(docstr)
        return fn
    return docstring_decorator
def estimate_dataset_size(paths):
    """Return the combined on-disk size in bytes of the files at ``paths``.

    Each element must expose a ``.stat()`` method (e.g. ``pathlib.Path``).
    """
    total_bytes = 0
    for file_path in paths:
        total_bytes += file_path.stat().st_size
    return total_bytes
| 36.844384 | 144 | 0.648046 |
import copy
import gzip
import json
import lzma
import os
import re
import shutil
import sys
import tarfile
import tempfile
import time
import urllib
from contextlib import closing, contextmanager
from dataclasses import dataclass
from functools import partial
from hashlib import sha256
from pathlib import Path
from typing import Dict, Optional, Union
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import numpy as np
import posixpath
import pyarrow as pa
import requests
from tqdm.auto import tqdm
from .. import __version__, config
from .filelock import FileLock
from .logging import WARNING, get_logger
# Module-level logger shared by the download/cache helpers below.
logger = get_logger(__name__)
# Suffix marking a partially-downloaded file (mirrors the ".incomplete"
# literal used by get_from_cache's resumable-download path).
INCOMPLETE_SUFFIX = ".incomplete"
def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str:
    """Create the dynamic-modules cache directory and put it on ``sys.path``.

    Args:
        hf_modules_cache: Directory to use; defaults to ``config.HF_MODULES_CACHE``.

    Returns:
        The cache directory path as a string.
    """
    hf_modules_cache = hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE
    hf_modules_cache = str(hf_modules_cache)
    if hf_modules_cache not in sys.path:
        sys.path.append(hf_modules_cache)
        os.makedirs(hf_modules_cache, exist_ok=True)
        # An __init__.py makes the cache directory importable as a package.
        if not os.path.exists(os.path.join(hf_modules_cache, "__init__.py")):
            with open(os.path.join(hf_modules_cache, "__init__.py"), "w"):
                pass
    return hf_modules_cache
@contextmanager
def temp_seed(seed: int, set_pytorch=False, set_tensorflow=False):
    """Context manager that temporarily seeds the global RNGs.

    NumPy is always seeded; PyTorch (CPU and, if available, all CUDA devices)
    and TensorFlow are seeded only when requested AND installed.  On exit the
    previous RNG states are restored, even if the body raises.

    Args:
        seed: Seed applied to every selected framework.
        set_pytorch: Also seed ``torch`` if ``config.TORCH_AVAILABLE``.
        set_tensorflow: Also seed ``tf`` if ``config.TF_AVAILABLE``
            (eager mode only).
    """
    np_state = np.random.get_state()
    np.random.seed(seed)
    if set_pytorch and config.TORCH_AVAILABLE:
        import torch
        # Save CPU (and CUDA) RNG state before overwriting it.
        torch_state = torch.random.get_rng_state()
        torch.random.manual_seed(seed)
        if torch.cuda.is_available():
            torch_cuda_states = torch.cuda.get_rng_state_all()
            torch.cuda.manual_seed_all(seed)
    if set_tensorflow and config.TF_AVAILABLE:
        import tensorflow as tf
        from tensorflow.python import context as tfpycontext
        # Swap in a fresh seeded global generator, keeping the old one.
        tf_state = tf.random.get_global_generator()
        temp_gen = tf.random.Generator.from_seed(seed)
        tf.random.set_global_generator(temp_gen)
        if not tf.executing_eagerly():
            raise ValueError("Setting random seed for TensorFlow is only available in eager mode")
        # NOTE: pokes TF's private eager-context seed/_rng attributes so that
        # op-level seeding is also affected; restored verbatim in finally.
        tf_context = tfpycontext.context()
        tf_seed = tf_context._seed
        tf_rng_initialized = hasattr(tf_context, "_rng")
        if tf_rng_initialized:
            tf_rng = tf_context._rng
        tf_context._set_global_seed(seed)
    try:
        yield
    finally:
        # Restore everything in the same order it was saved.
        np.random.set_state(np_state)
        if set_pytorch and config.TORCH_AVAILABLE:
            torch.random.set_rng_state(torch_state)
            if torch.cuda.is_available():
                torch.cuda.set_rng_state_all(torch_cuda_states)
        if set_tensorflow and config.TF_AVAILABLE:
            tf.random.set_global_generator(tf_state)
            tf_context._seed = tf_seed
            if tf_rng_initialized:
                tf_context._rng = tf_rng
            else:
                delattr(tf_context, "_rng")
def is_remote_url(url_or_filename: str) -> bool:
    """Return True when the string is a URL with a known remote scheme."""
    scheme = urlparse(url_or_filename).scheme
    return scheme in ("http", "https", "s3", "gs", "hdfs", "ftp")
def is_local_path(url_or_filename: str) -> bool:
    """Return True for plain filesystem paths (no URL scheme).

    A path like ``C:\\foo`` parses with scheme ``"c"``; the mount check is
    presumably there to accept such drive-letter paths — confirm on Windows.
    """
    scheme = urlparse(url_or_filename).scheme
    return scheme == "" or os.path.ismount(scheme + ":/")
def is_relative_path(url_or_filename: str) -> bool:
    """Return True for scheme-less, non-absolute filesystem paths."""
    if urlparse(url_or_filename).scheme != "":
        return False
    return not os.path.isabs(url_or_filename)
def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str:
    """Build the storage URL for a dataset/metric file hosted by HF.

    Args:
        identifier: Dataset or metric identifier.
        filename: File name inside the bucket.
        use_cdn: Use the CloudFront CDN prefix instead of the S3 bucket.
        dataset: True for the datasets bucket, False for the metrics bucket.
    """
    if dataset:
        endpoint = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else config.S3_DATASETS_BUCKET_PREFIX
    else:
        endpoint = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else config.S3_METRICS_BUCKET_PREFIX
    return "/".join((endpoint, identifier, filename))
def head_hf_s3(
    identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0
) -> Union[requests.Response, Exception]:
    """HEAD the bucket URL for ``identifier/filename``.

    Returns the ``requests.Response`` on success; on ANY failure the
    exception object itself is returned rather than raised, so callers
    must check the returned type.
    """
    try:
        return http_head(
            hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset),
            max_retries=max_retries,
        )
    except Exception as e:
        return e
def hf_github_url(path: str, name: str, dataset=True, version: Optional[str] = None) -> str:
    """Build the GitHub raw URL of a dataset/metric script.

    The version defaults to the ``HF_SCRIPTS_VERSION`` environment variable,
    falling back to the library's ``SCRIPTS_VERSION``.
    """
    from .. import SCRIPTS_VERSION
    version = version or os.getenv("HF_SCRIPTS_VERSION", SCRIPTS_VERSION)
    if dataset:
        return config.REPO_DATASETS_URL.format(version=version, path=path, name=name)
    else:
        return config.REPO_METRICS_URL.format(version=version, path=path, name=name)
def hf_hub_url(path: str, name: str, version: Optional[str] = None) -> str:
    """Build the Hugging Face Hub URL for a dataset file at a given revision."""
    version = version or config.HUB_DEFAULT_VERSION
    return config.HUB_DATASETS_URL.format(path=path, name=name, version=version)
def url_or_path_join(base_name: str, *pathnames: str) -> str:
    """Join path components, always using POSIX separators for remote URLs."""
    if not is_remote_url(base_name):
        # Local path: normalize through pathlib, then emit POSIX separators.
        return Path(base_name, *pathnames).as_posix()
    return posixpath.join(base_name, *pathnames)
def url_or_path_parent(url_or_path: str) -> str:
    """Return the parent of a URL (text before the last '/') or local path."""
    if not is_remote_url(url_or_path):
        return os.path.dirname(url_or_path)
    return url_or_path[: url_or_path.rindex("/")]
def hash_url_to_filename(url, etag=None):
    """Build a deterministic cache filename for ``url`` (and optional ``etag``).

    The name is the sha256 hex digest of the URL; when an etag is given its
    digest is appended after a dot.  A trailing ``.py`` is preserved so
    cached scripts remain importable.
    """
    parts = [sha256(url.encode("utf-8")).hexdigest()]
    if etag:
        parts.append(sha256(etag.encode("utf-8")).hexdigest())
    filename = ".".join(parts)
    if url.endswith(".py"):
        filename += ".py"
    return filename
@dataclass
class DownloadConfig:
    """Configuration bundle consumed by ``cached_path`` and the download helpers."""
    cache_dir: Optional[Union[str, Path]] = None  # defaults to HF_DATASETS_CACHE/downloads
    force_download: bool = False  # re-download even if already cached
    resume_download: bool = False  # resume from a partial ".incomplete" file
    local_files_only: bool = False  # never hit the network; fail if not cached
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None  # appended to the datasets user-agent
    extract_compressed_file: bool = False  # also extract archives after download
    force_extract: bool = False  # re-extract even if an extraction exists
    use_etag: bool = True  # key the cache entry on the server ETag
    num_proc: Optional[int] = None
    max_retries: int = 1  # retry budget for connect timeouts
    use_auth_token: Optional[Union[str, bool]] = None  # HF Hub token (str) or True to read stored token
    def copy(self) -> "DownloadConfig":
        """Return a deep copy of this configuration."""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def cached_path(
    url_or_filename,
    download_config=None,
    **download_kwargs,
) -> str:
    """Resolve a URL or local path to a local file, downloading/extracting as needed.

    Remote URLs are downloaded into the cache via ``get_from_cache``; existing
    local paths are returned as-is.  When ``extract_compressed_file`` is set,
    recognized archives (zip/tar/gzip/xz/rar) are extracted into the cache and
    the extraction directory/file is returned instead.

    Raises:
        FileNotFoundError: For a local path that does not exist.
        ValueError: For strings that are neither URLs nor local paths.
    """
    if download_config is None:
        download_config = DownloadConfig(**download_kwargs)
    cache_dir = download_config.cache_dir or os.path.join(config.HF_DATASETS_CACHE, "downloads")
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if is_remote_url(url_or_filename):
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=download_config.force_download,
            proxies=download_config.proxies,
            resume_download=download_config.resume_download,
            user_agent=download_config.user_agent,
            local_files_only=download_config.local_files_only,
            use_etag=download_config.use_etag,
            max_retries=download_config.max_retries,
            use_auth_token=download_config.use_auth_token,
        )
    elif os.path.exists(url_or_filename):
        output_path = url_or_filename
    elif is_local_path(url_or_filename):
        raise FileNotFoundError("Local file {} doesn't exist".format(url_or_filename))
    else:
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if download_config.extract_compressed_file and output_path is not None:
        # Not a recognized archive: nothing to extract.
        if (
            not is_zipfile(output_path)
            and not tarfile.is_tarfile(output_path)
            and not is_gzip(output_path)
            and not is_xz(output_path)
            and not is_rarfile(output_path)
        ):
            return output_path
        abs_output_path = os.path.abspath(output_path)
        output_path_extracted = os.path.join(cache_dir, "extracted", hash_url_to_filename(abs_output_path))
        # Reuse a previous extraction unless force_extract is set.
        if (
            os.path.isdir(output_path_extracted)
            and os.listdir(output_path_extracted)
            and not download_config.force_extract
        ) or (os.path.isfile(output_path_extracted) and not download_config.force_extract):
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted, exist_ok=True)
            if tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            elif is_gzip(output_path):
                # Single-file stream: replace the directory with a plain file.
                os.rmdir(output_path_extracted)
                with gzip.open(output_path, "rb") as gzip_file:
                    with open(output_path_extracted, "wb") as extracted_file:
                        shutil.copyfileobj(gzip_file, extracted_file)
            elif is_zipfile(output_path):  # put zip file to the last, b/c it is possible wrongly detected as zip
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif is_xz(output_path):
                # Single-file stream: replace the directory with a plain file.
                os.rmdir(output_path_extracted)
                with lzma.open(output_path) as compressed_file:
                    with open(output_path_extracted, "wb") as extracted_file:
                        shutil.copyfileobj(compressed_file, extracted_file)
            elif is_rarfile(output_path):
                if config.RARFILE_AVAILABLE:
                    import rarfile
                    rf = rarfile.RarFile(output_path)
                    rf.extractall(output_path_extracted)
                    rf.close()
                else:
                    raise EnvironmentError("Please pip install rarfile")
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str:
    """Build the ``user-agent`` string sent with datasets HTTP requests.

    Includes the datasets/python/pyarrow versions, any available ML framework
    versions, and the caller-provided ``user_agent`` (str or key/value dict)
    appended at the end.
    """
    ua = "datasets/{}; python/{}".format(__version__, config.PY_VERSION)
    ua += "; pyarrow/{}".format(pa.__version__)
    if config.TORCH_AVAILABLE:
        ua += "; torch/{}".format(config.TORCH_VERSION)
    if config.TF_AVAILABLE:
        ua += "; tensorflow/{}".format(config.TF_VERSION)
    if config.BEAM_AVAILABLE:
        ua += "; apache_beam/{}".format(config.BEAM_VERSION)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_authentication_headers_for_url(url: str, use_auth_token: Optional[Union[str, bool]] = None) -> dict:
    """Build the ``authorization`` header for Hugging Face URLs.

    Non-huggingface.co URLs never get a token.  ``use_auth_token`` may be the
    token itself (str) or a truthy value meaning "read the stored Hub token".
    """
    if not url.startswith("https://huggingface.co/"):
        return {}
    if isinstance(use_auth_token, str):
        token = use_auth_token
    elif bool(use_auth_token):
        from huggingface_hub import hf_api
        token = hf_api.HfFolder.get_token()
    else:
        token = None
    if not token:
        return {}
    return {"authorization": "Bearer {}".format(token)}
class OfflineModeIsEnabled(ConnectionError):
    """Raised when a network call is attempted while offline mode is enabled."""
    pass
def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None):
    """Raise ``OfflineModeIsEnabled`` when ``config.HF_DATASETS_OFFLINE`` is set.

    ``msg`` (e.g. the URL being reached) is appended to the error message.
    """
    if config.HF_DATASETS_OFFLINE:
        raise OfflineModeIsEnabled(
            "Offline mode is enabled." if msg is None else "Offline mode is enabled. " + str(msg)
        )
def _request_with_retry(
    method: str,
    url: str,
    max_retries: int = 0,
    base_wait_time: float = 0.5,
    max_wait_time: float = 2,
    timeout: float = 10.0,
    **params,
) -> requests.Response:
    """Send an HTTP request, retrying connect timeouts with exponential backoff.

    Args:
        method: HTTP verb ("GET", "HEAD", ...); upper-cased before sending.
        url: Target URL.
        max_retries: Number of retries after the first attempt (0 disables retrying).
        base_wait_time: Initial sleep between retries, in seconds.
        max_wait_time: Cap on the sleep between retries, in seconds.
        timeout: Per-request timeout passed to ``requests``.
        **params: Extra keyword arguments forwarded to ``requests.request``.

    Returns:
        The ``requests.Response`` of the first successful attempt.

    Raises:
        OfflineModeIsEnabled: If offline mode is active.
        requests.exceptions.ConnectTimeout: Once the retry budget is exhausted.
    """
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    tries, success = 0, False
    while not success:
        tries += 1
        try:
            response = requests.request(method=method.upper(), url=url, timeout=timeout, **params)
            success = True
        except requests.exceptions.ConnectTimeout as err:
            if tries > max_retries:
                raise err
            else:
                # BUG FIX: the message used to interpolate ``{tries/max_retries}``,
                # which f-strings evaluate as a *division* (logging e.g. "[0.5]").
                # Log the attempt counter as "tries/max_retries" instead.
                logger.info(f"{method} request to {url} timed out, retrying... [{tries}/{max_retries}]")
                sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1))  # Exponential backoff
                time.sleep(sleep_time)
    return response
def ftp_head(url, timeout=10.0):
    """Probe an FTP URL; return True when at least one byte can be read."""
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    try:
        with closing(urllib.request.urlopen(url, timeout=timeout)) as remote:
            remote.read(1)
    except Exception:
        return False
    else:
        return True
def ftp_get(url, temp_file, timeout=10.0):
    """Download an FTP URL into the open, writable ``temp_file``.

    Raises:
        OfflineModeIsEnabled: If offline mode is active.
        ConnectionError: Wrapping any ``urllib.error.URLError``.
    """
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    try:
        logger.info(f"Getting through FTP {url} into {temp_file.name}")
        with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
            shutil.copyfileobj(r, temp_file)
    except urllib.error.URLError as e:
        raise ConnectionError(e)
def http_get(url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=10.0, max_retries=0):
    """Stream an HTTP GET into ``temp_file``, with optional byte-range resume.

    A ``Range`` header is sent when ``resume_size > 0``; a 416 response
    (range not satisfiable, i.e. the file is already complete) returns
    silently.  Progress is shown with tqdm unless logging is quieter
    than WARNING.
    """
    headers = copy.deepcopy(headers) or {}
    headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = _request_with_retry(
        method="GET",
        url=url,
        stream=True,
        proxies=proxies,
        headers=headers,
        cookies=cookies,
        max_retries=max_retries,
        timeout=timeout,
    )
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    not_verbose = bool(logger.getEffectiveLevel() > WARNING)
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
        disable=not_verbose,
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def http_head(
    url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0
) -> requests.Response:
    """Issue a HEAD request through ``_request_with_retry`` with the datasets user-agent."""
    request_headers = copy.deepcopy(headers) or {}
    request_headers["user-agent"] = get_datasets_user_agent(user_agent=request_headers.get("user-agent"))
    return _request_with_retry(
        method="HEAD",
        url=url,
        proxies=proxies,
        headers=request_headers,
        cookies=cookies,
        allow_redirects=allow_redirects,
        timeout=timeout,
        max_retries=max_retries,
    )
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
    use_etag=True,
    max_retries=0,
    use_auth_token=None,
) -> str:
    """Return the local cache path for ``url``, downloading it if necessary.

    The cache key is a hash of the URL (plus the server ETag when
    ``use_etag`` is True).  Behavior in short:
    - cached + not force_download (+ no etag needed): return the cached path;
    - unreachable: fall back to the cached copy, else raise
      ``FileNotFoundError`` / ``ConnectionError``;
    - otherwise download under a file lock, optionally resuming a partial
      ``.incomplete`` file, then write a ``<path>.json`` metadata sidecar.
    """
    if cache_dir is None:
        cache_dir = config.HF_DATASETS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    original_url = url  # Some parameters may be added
    connected = False
    response = None
    cookies = None
    etag = None
    # Try a first time to file the file on the local file system without eTag (None)
    # if we don't ask for 'force_download' then we spare a request
    filename = hash_url_to_filename(original_url, etag=None)
    cache_path = os.path.join(cache_dir, filename)
    if os.path.exists(cache_path) and not force_download and not use_etag:
        return cache_path
    # Prepare headers for authentication
    headers = get_authentication_headers_for_url(url, use_auth_token=use_auth_token)
    if user_agent is not None:
        headers["user-agent"] = user_agent
    # We don't have the file locally or we need an eTag
    if not local_files_only:
        if url.startswith("ftp://"):
            connected = ftp_head(url)
        try:
            response = http_head(
                url,
                allow_redirects=True,
                proxies=proxies,
                timeout=etag_timeout,
                max_retries=max_retries,
                headers=headers,
            )
            if response.status_code == 200:  # ok
                etag = response.headers.get("ETag") if use_etag else None
                for k, v in response.cookies.items():
                    # In some edge cases, we need to get a confirmation token
                    if k.startswith("download_warning") and "drive.google.com" in url:
                        url += "&confirm=" + v
                        cookies = response.cookies
                connected = True
            # In some edge cases, head request returns 400 but the connection is actually ok
            elif (
                (response.status_code == 400 and "firebasestorage.googleapis.com" in url)
                or (response.status_code == 405 and "drive.google.com" in url)
                or (
                    response.status_code == 403
                    and re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url)
                )
            ):
                connected = True
                logger.info("Couldn't get ETag version for url {}".format(url))
        except (EnvironmentError, requests.exceptions.Timeout):
            # not connected
            pass
    # connected == False = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if not connected:
        if os.path.exists(cache_path):
            return cache_path
        if local_files_only:
            raise FileNotFoundError(
                f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been"
                " disabled. To enable file online look-ups, set 'local_files_only' to False."
            )
        elif response is not None and response.status_code == 404:
            raise FileNotFoundError("Couldn't find file at {}".format(url))
        _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
        raise ConnectionError("Couldn't reach {}".format(url))
    # Try a second time
    filename = hash_url_to_filename(original_url, etag)
    cache_path = os.path.join(cache_dir, filename)
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # From now on, connected is True.
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        if resume_download:
            incomplete_path = cache_path + ".incomplete"
            @contextmanager
            def _resumable_file_manager():
                # Append mode so a partially-downloaded file is continued.
                with open(incomplete_path, "a+b") as f:
                    yield f
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)
            # GET file object
            if url.startswith("ftp://"):
                ftp_get(url, temp_file)
            else:
                http_get(
                    url,
                    temp_file,
                    proxies=proxies,
                    resume_size=resume_size,
                    headers=headers,
                    cookies=cookies,
                    max_retries=max_retries,
                )
        logger.info("storing %s in cache at %s", url, cache_path)
        shutil.move(temp_file.name, cache_path)
        logger.info("creating metadata file for %s", cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w", encoding="utf-8") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def is_gzip(path: str) -> bool:
    """True if ``path`` holds gzip-compressed data (probed by decompressing one byte)."""
    readable = True
    with gzip.open(path, "r") as probe:
        try:
            probe.read(1)
        except OSError:
            readable = False
    return readable
def is_xz(path: str) -> bool:
    """True if ``path`` starts with the 6-byte XZ stream header magic bytes."""
    XZ_MAGIC = b"\xfd7zXZ\x00"
    with open(path, "rb") as stream:
        try:
            head = stream.read(len(XZ_MAGIC))
        except OSError:
            return False
    return head == XZ_MAGIC
def is_rarfile(path: str) -> bool:
    """True if ``path`` starts with a RAR v4 or v5 archive signature."""
    signatures = (b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00")
    longest = max(len(sig) for sig in signatures)
    with open(path, "rb", 1024) as fd:
        head = fd.read(longest)
    return any(head.startswith(sig) for sig in signatures)
def add_start_docstrings(*docstr):
    """Decorator factory: prepend the given strings to the decorated function's docstring."""
    def docstring_decorator(fn):
        fn.__doc__ = "".join(docstr) + ("" if fn.__doc__ is None else fn.__doc__)
        return fn
    return docstring_decorator
def add_end_docstrings(*docstr):
    """Decorator factory: append the given strings to the decorated function's docstring."""
    def docstring_decorator(fn):
        fn.__doc__ = ("" if fn.__doc__ is None else fn.__doc__) + "".join(docstr)
        return fn
    return docstring_decorator
def estimate_dataset_size(paths):
    """Total size in bytes of the files at ``paths`` (objects exposing ``.stat()``)."""
    sizes = [entry.stat().st_size for entry in paths]
    return sum(sizes)
| true | true |
1c37a7a27277e84f157ef1a0c27c525b20bb63fe | 614 | py | Python | tests/test_sox.py | ayoubBouziane/TrainingSpeech | 799e95d644d69890fa69e488712f10e662827c10 | [
"MIT"
] | 9 | 2022-01-24T00:42:31.000Z | 2022-03-23T06:32:43.000Z | tests/test_sox.py | wasertech/TrainingSpeech | 2a0a7674aa41b3526aeafde58d820a16397923f4 | [
"MIT"
] | null | null | null | tests/test_sox.py | wasertech/TrainingSpeech | 2a0a7674aa41b3526aeafde58d820a16397923f4 | [
"MIT"
] | 3 | 2020-05-05T21:17:12.000Z | 2022-01-30T09:20:28.000Z | import subprocess
import pytest
from training_speech import sox
@pytest.mark.parametrize('kwargs, expected_call', [
    (dict(path_to_file='/path/to/foo.mp3'), 'play -q /path/to/foo.mp3'),
    (dict(path_to_file='/path/to/foo.mp3', speed=1.2), 'play -q /path/to/foo.mp3 tempo 1.2'),
])
def test_convert(kwargs, expected_call, mocker):
    """Check sox.play builds the expected `play` command and waits only on exit."""
    # Patch Popen.wait so no real process is awaited during the test.
    wait_mock = mocker.patch('subprocess.Popen.wait')
    with sox.play(**kwargs) as player:
        assert isinstance(player, subprocess.Popen)
        # wait() must not have been called while still inside the context.
        assert wait_mock.call_count == 0
        assert ' '.join(player.args) == expected_call
    wait_mock.assert_called_once()
| 32.315789 | 93 | 0.69544 | import subprocess
import pytest
from training_speech import sox
@pytest.mark.parametrize('kwargs, expected_call', [
    (dict(path_to_file='/path/to/foo.mp3'), 'play -q /path/to/foo.mp3'),
    (dict(path_to_file='/path/to/foo.mp3', speed=1.2), 'play -q /path/to/foo.mp3 tempo 1.2'),
])
def test_convert(kwargs, expected_call, mocker):
    """Check sox.play builds the expected `play` command and waits only on exit."""
    # Patch Popen.wait so no real process is awaited during the test.
    wait_mock = mocker.patch('subprocess.Popen.wait')
    with sox.play(**kwargs) as player:
        assert isinstance(player, subprocess.Popen)
        # wait() must not have been called while still inside the context.
        assert wait_mock.call_count == 0
        assert ' '.join(player.args) == expected_call
    wait_mock.assert_called_once()
| true | true |
1c37a83c830cb55ad1916a0c80361da3be597d97 | 2,801 | py | Python | BagModules/bag_digital_ec/tinv.py | xyabc/bag_digital_ec | 71b982fc0fbe275fc3901db2e25ab7ca62fb319f | [
"BSD-3-Clause"
] | null | null | null | BagModules/bag_digital_ec/tinv.py | xyabc/bag_digital_ec | 71b982fc0fbe275fc3901db2e25ab7ca62fb319f | [
"BSD-3-Clause"
] | null | null | null | BagModules/bag_digital_ec/tinv.py | xyabc/bag_digital_ec | 71b982fc0fbe275fc3901db2e25ab7ca62fb319f | [
"BSD-3-Clause"
] | 2 | 2019-06-30T07:03:02.000Z | 2020-01-07T04:55:21.000Z | # -*- coding: utf-8 -*-
from typing import Dict, Any
import os
import pkg_resources
from bag.design import Module
yaml_file = pkg_resources.resource_filename(__name__, os.path.join('netlist_info', 'tinv.yaml'))
# noinspection PyPep8Naming
class bag_digital_ec__tinv(Module):
    """Module for library bag_digital_ec cell tinv.
    Schematic generator for the ``tinv`` cell: an NMOS/PMOS inverter pair with
    enable transistors (XNEN / optional XPEN) — presumably a tri-state
    inverter; confirm against the cell's schematic.
    """
    def __init__(self, bag_config, parent=None, prj=None, **kwargs):
        Module.__init__(self, bag_config, yaml_file, parent=parent, prj=prj, **kwargs)
    @classmethod
    def get_params_info(cls):
        # type: () -> Dict[str, str]
        """Return the design parameter names mapped to their descriptions."""
        return dict(
            lch='channel length.',
            wp='PMOS width.',
            wn='NMOS width.',
            thp='PMOS threshold.',
            thn='NMOS threshold.',
            segp='PMOS segments.',
            segn='NMOS segments.',
            pmos_switch='True to add PMOS enable switch.',
            wpen='PMOS enable width.',
            wnen='NMOS enable width.',
            thpen='PMOS enable threshold.',
            thnen='NMOS enable threshold.',
        )
    @classmethod
    def get_default_param_values(cls):
        # type: () -> Dict[str, Any]
        """Return default parameter values (None means "same as main device")."""
        return dict(
            pmos_switch=True,
            wpen=None,
            wnen=None,
            thpen=None,
            thnen=None,
        )
    def get_master_basename(self):
        # type: () -> str
        """Return the master cell name depending on whether the PMOS switch exists."""
        if self.params['pmos_switch']:
            return 'tinv'
        else:
            return 'tinv_pass0'
    def design(self, lch, wp, wn, thp, thn, segp, segn, pmos_switch, wpen, wnen, thpen, thnen):
        """Size the transistors; remove the PMOS enable branch when disabled.

        Raises:
            ValueError: If either segment count is less than 1.
        """
        if segp < 1 or segn < 1:
            raise ValueError('number of segments must be >= 1.')
        self._set_segments('XN', 'XNEN', 'mn', lch, wn, thn, wnen, thnen, segn)
        if pmos_switch:
            self._set_segments('XP', 'XPEN', 'mp', lch, wp, thp, wpen, thpen, segp)
        else:
            # No PMOS switch: delete the enable device and its pin, and wire
            # the PMOS drain straight to the output.
            self.delete_instance('XPEN')
            self.remove_pin('enb')
            self.instances['XP'].design(w=wp, l=lch, nf=segp, intent=thp)
            self.reconnect_instance_terminal('XP', 'D', 'out')
    def _set_segments(self, bot_name, top_name, mid_name, lch, w, th, wen, then, seg):
        """Size a stacked main/enable transistor pair; array them when seg > 1.

        Enable width/threshold default to the main device values when None.
        """
        if wen is None:
            wen = w
        if then is None:
            then = th
        self.instances[bot_name].design(w=w, l=lch, nf=1, intent=th)
        self.instances[top_name].design(w=wen, l=lch, nf=1, intent=then)
        if seg > 1:
            # Array the pair, connecting them through a shared mid-net bus.
            suffix = '<%d:0>' % (seg - 1)
            self.array_instance(bot_name, [bot_name + suffix],
                                term_list=[dict(D=mid_name + suffix)])
            self.array_instance(top_name, [top_name + suffix],
                                term_list=[dict(S=mid_name + suffix)])
| 32.569767 | 96 | 0.555873 |
from typing import Dict, Any
import os
import pkg_resources
from bag.design import Module
yaml_file = pkg_resources.resource_filename(__name__, os.path.join('netlist_info', 'tinv.yaml'))
class bag_digital_ec__tinv(Module):
    """Schematic generator for the ``tinv`` cell.

    An NMOS/PMOS inverter pair with enable transistors (XNEN / optional
    XPEN) — presumably a tri-state inverter; confirm against the schematic.
    """
    def __init__(self, bag_config, parent=None, prj=None, **kwargs):
        Module.__init__(self, bag_config, yaml_file, parent=parent, prj=prj, **kwargs)
    @classmethod
    def get_params_info(cls):
        """Return the design parameter names mapped to their descriptions."""
        return dict(
            lch='channel length.',
            wp='PMOS width.',
            wn='NMOS width.',
            thp='PMOS threshold.',
            thn='NMOS threshold.',
            segp='PMOS segments.',
            segn='NMOS segments.',
            pmos_switch='True to add PMOS enable switch.',
            wpen='PMOS enable width.',
            wnen='NMOS enable width.',
            thpen='PMOS enable threshold.',
            thnen='NMOS enable threshold.',
        )
    @classmethod
    def get_default_param_values(cls):
        """Return default parameter values (None means "same as main device")."""
        return dict(
            pmos_switch=True,
            wpen=None,
            wnen=None,
            thpen=None,
            thnen=None,
        )
    def get_master_basename(self):
        """Return the master cell name depending on whether the PMOS switch exists."""
        if self.params['pmos_switch']:
            return 'tinv'
        else:
            return 'tinv_pass0'
    def design(self, lch, wp, wn, thp, thn, segp, segn, pmos_switch, wpen, wnen, thpen, thnen):
        """Size the transistors; remove the PMOS enable branch when disabled."""
        if segp < 1 or segn < 1:
            raise ValueError('number of segments must be >= 1.')
        self._set_segments('XN', 'XNEN', 'mn', lch, wn, thn, wnen, thnen, segn)
        if pmos_switch:
            self._set_segments('XP', 'XPEN', 'mp', lch, wp, thp, wpen, thpen, segp)
        else:
            # No PMOS switch: delete the enable device and its pin, and wire
            # the PMOS drain straight to the output.
            self.delete_instance('XPEN')
            self.remove_pin('enb')
            self.instances['XP'].design(w=wp, l=lch, nf=segp, intent=thp)
            self.reconnect_instance_terminal('XP', 'D', 'out')
    def _set_segments(self, bot_name, top_name, mid_name, lch, w, th, wen, then, seg):
        """Size a stacked main/enable transistor pair; array them when seg > 1."""
        if wen is None:
            wen = w
        if then is None:
            then = th
        self.instances[bot_name].design(w=w, l=lch, nf=1, intent=th)
        self.instances[top_name].design(w=wen, l=lch, nf=1, intent=then)
        if seg > 1:
            # Array the pair, connecting them through a shared mid-net bus.
            suffix = '<%d:0>' % (seg - 1)
            self.array_instance(bot_name, [bot_name + suffix],
                                term_list=[dict(D=mid_name + suffix)])
            self.array_instance(top_name, [top_name + suffix],
                                term_list=[dict(S=mid_name + suffix)])
| true | true |
1c37a872ef16d6404b68cac4763c8943fe47e889 | 387 | py | Python | tests/dump/graph.full.py | noabauma/Mirheo | bf7979bfbbf402d33c26ac5dc879f880e78e7017 | [
"MIT"
] | null | null | null | tests/dump/graph.full.py | noabauma/Mirheo | bf7979bfbbf402d33c26ac5dc879f880e78e7017 | [
"MIT"
] | null | null | null | tests/dump/graph.full.py | noabauma/Mirheo | bf7979bfbbf402d33c26ac5dc879f880e78e7017 | [
"MIT"
] | 1 | 2021-07-14T13:24:05.000Z | 2021-07-14T13:24:05.000Z | #!/usr/bin/env python
import mirheo as mir
# Simulation setup: time step and a single-rank 4x4x4 domain.
dt = 0.001
ranks = (1, 1, 1)
domain = (4, 4, 4)
u = mir.Mirheo(ranks, domain, dt, debug_level=3, log_filename='log', no_splash=True)
# Dump the task dependency graph to "tasks.full.graphml"; current=False —
# presumably the full graph rather than only the current one (confirm in Mirheo docs).
u.save_dependency_graph_graphml("tasks.full", current=False)
# sTEST: dump.graph.full
# cd dump
# rm -rf tasks.graphml
# mir.run --runargs "-n 1" ./graph.full.py
# cat tasks.full.graphml > tasks.out.txt
| 19.35 | 84 | 0.684755 |
import mirheo as mir
# Simulation setup: time step and a single-rank 4x4x4 domain.
dt = 0.001
ranks = (1, 1, 1)
domain = (4, 4, 4)
u = mir.Mirheo(ranks, domain, dt, debug_level=3, log_filename='log', no_splash=True)
# Dump the task dependency graph to "tasks.full.graphml"; current=False —
# presumably the full graph rather than only the current one (confirm in Mirheo docs).
u.save_dependency_graph_graphml("tasks.full", current=False)
| true | true |
1c37a8f2f8562f4283ccad4cc4a0bd62e91558d2 | 4,788 | py | Python | src/m9b_summing_again.py | frazeedj/03-AccumulatorsAndFunctionsWithParameters | 1b83b3b33da7ec855563182478526469f682fe51 | [
"MIT"
] | null | null | null | src/m9b_summing_again.py | frazeedj/03-AccumulatorsAndFunctionsWithParameters | 1b83b3b33da7ec855563182478526469f682fe51 | [
"MIT"
] | null | null | null | src/m9b_summing_again.py | frazeedj/03-AccumulatorsAndFunctionsWithParameters | 1b83b3b33da7ec855563182478526469f682fe51 | [
"MIT"
] | null | null | null | """
This module lets you practice the ACCUMULATOR pattern
in its simplest classic forms:
SUMMING: total = total + number
Authors: David Mutchler, Dave Fisher, Vibha Alangar, Mark Hays, Amanda Stouder,
their colleagues and Dylan Frazee.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
    """ Calls the TEST functions in this module. """
    # Run each test suite in the order the functions are defined.
    run_test_sum_powers()
    run_test_sum_powers_in_range()
def run_test_sum_powers():
    """ Tests the sum_powers function. """
    # ------------------------------------------------------------------
    # DONE: 2. Implement this function.
    #   It TESTS the  sum_powers  function defined below.
    #   Include at least ** 3 ** tests.
    #
    # Use the same 4-step process as in implementing previous
    # TEST functions, including the same way to print expected/actual.
    # ------------------------------------------------------------------
    print()
    print('--------------------------------------------------')
    print('Testing the   sum_powers   function:')
    print('--------------------------------------------------')
    # Expected values come from the examples in sum_powers' docstring.
    expected = 3.80826
    answer = sum_powers(5,-0.3)
    print('Test 1 expected:', expected)
    print('         actual: ', answer)
    expected = 144.45655
    answer = sum_powers(100,0.1)
    print('Test 2 expected:', expected)
    print('         actual: ', answer)
    expected = 1025
    answer = sum_powers(2,10)
    print('Test 3 expected:', expected)
    print('         actual: ', answer)
def sum_powers(n, p):
    """
    What comes in: A non-negative integer n
       and a number p.
    What goes out: The sum   1**p + 2**p + 3**p + ... + n**p
       for the given numbers n and p.  The latter may be any number
       (possibly a floating point number, and possibly negative).
    Side effects: None.
    Examples:
      -- sum_powers(5, -0.3) returns about 3.80826
      -- sum_powers(100, 0.1) returns about 144.45655
    """
    # Accumulate base**p for each base from 1 through n inclusive.
    return sum(base ** p for base in range(1, n + 1))
def run_test_sum_powers_in_range():
    """ Tests the sum_powers_in_range function. """
    # ------------------------------------------------------------------
    # DONE: 4. Implement this function.
    #   It TESTS the  sum_powers_in_range  function defined below.
    #   Include at least ** 3 ** tests.
    #
    # Use the same 4-step process as in implementing previous
    # TEST functions, including the same way to print expected/actual.
    # ------------------------------------------------------------------
    print()
    print('--------------------------------------------------')
    print('Testing the   sum_powers_in_range   function:')
    print('--------------------------------------------------')
    expected = 142.384776
    answer = sum_powers_in_range(3,100,0.1)
    print('Test 1 expected:', expected)
    print('         actual: ', answer)
    # BUG FIX: the expected values below previously matched a faulty
    # implementation (2024 and 40) instead of the documented inclusive
    # sum m**p + ... + n**p.  Correct values: 2**3+...+10**3 = 3024 and
    # 6**1 + 7**1 = 13.
    expected = 3024
    answer = sum_powers_in_range(2,10,3)
    print('Test 2 expected:', expected)
    print('         actual: ', answer)
    expected = 13
    answer = sum_powers_in_range(6,7,1)
    print('Test 3 expected:', expected)
    print('         actual: ', answer)
def sum_powers_in_range(m, n, p):
    """
    What comes in: Non-negative integers m and n, with n >= m,
       and a number p.
    What goes out: the sum
          m**p + (m+1)**p + (m+2)**p + ... + n**p
       for the given numbers m, n and p.  The latter may be any number
       (possibly a floating point number, and possibly negative).
    Side effects: None.
    Example:
      -- sum_powers_in_range(3, 100, 0.1) returns about 142.384776
    """
    # BUG FIX: the loop previously ran ``for k in range(n - 2)`` and added
    # ``(m + k) ** p``, summing m..(m + n - 3).  That equals the specified
    # inclusive range m..n only in the coincidental case m == 3 (as in the
    # docstring example).  Iterate over the documented range instead.
    total = 0
    for base in range(m, n + 1):
        total = total + (base ** p)
    return total
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| 37.40625 | 79 | 0.496032 |
def main():
    """ Calls the TEST functions in this module. """
    run_test_sum_powers()
    run_test_sum_powers_in_range()
def run_test_sum_powers():
    """ Tests the sum_powers function. """
    print()
    print('--------------------------------------------------')
    print('Testing the sum_powers function:')
    print('--------------------------------------------------')
    # Each case: (arguments for sum_powers, expected result).
    cases = [((5, -0.3), 3.80826),
             ((100, 0.1), 144.45655),
             ((2, 10), 1025)]
    for test_number, (arguments, expected) in enumerate(cases, 1):
        actual = sum_powers(*arguments)
        print('Test', test_number, 'expected:', expected)
        print('         actual:  ', actual)
def sum_powers(n, p):
    """ Returns 1**p + 2**p + ... + n**p  (0 when n is 0). """
    return sum(base ** p for base in range(1, n + 1))
def run_test_sum_powers_in_range():
    """ Tests the sum_powers_in_range function. """
    print()
    print('--------------------------------------------------')
    print('Testing the sum_powers_in_range function:')
    print('--------------------------------------------------')
    # BUG FIX: the original expected values 2024 and 40 were generated by
    # running the (buggy) implementation, which defeats the point of
    # testing.  Correct values, computed from the definition
    # m**p + (m+1)**p + ... + n**p:
    #   sum of k**3 for k = 2..10  is 3024  (55**2 - 1)
    #   sum of k    for k = 6..7   is 13
    cases = [((3, 100, 0.1), 142.384776),
             ((2, 10, 3), 3024),
             ((6, 7, 1), 13)]
    for test_number, (arguments, expected) in enumerate(cases, 1):
        actual = sum_powers_in_range(*arguments)
        print('Test', test_number, 'expected:', expected)
        print('         actual:  ', actual)
def sum_powers_in_range(m, n, p):
    """
    Returns m**p + (m+1)**p + ... + n**p for non-negative integers
    m and n with n >= m; p may be any number (float, negative, ...).
    Example: sum_powers_in_range(3, 100, 0.1) returns about 142.384776.
    """
    # BUG FIX: was "for k in range(n - 2)" summing m .. m+n-3, which is
    # only correct when m == 3.  Sum k = m .. n inclusive instead.
    total = 0
    for k in range(m, n + 1):
        total = total + (k ** p)
    return total
main()
| true | true |
1c37aa413333e3d03690186888d312c16dded533 | 2,165 | py | Python | tavern/_plugins/mqtt/request.py | BangWork/tavern | 050308841461894a28b07bd2ece85a9b48ff2df4 | [
"MIT"
] | null | null | null | tavern/_plugins/mqtt/request.py | BangWork/tavern | 050308841461894a28b07bd2ece85a9b48ff2df4 | [
"MIT"
] | null | null | null | tavern/_plugins/mqtt/request.py | BangWork/tavern | 050308841461894a28b07bd2ece85a9b48ff2df4 | [
"MIT"
] | null | null | null | import logging
import json
import functools
from future.utils import raise_from
from box import Box
from tavern.util import exceptions
from tavern.util.dict_util import format_keys, check_expected_keys
from tavern.request.base import BaseRequest
logger = logging.getLogger(__name__)
def get_publish_args(rspec, test_block_config):
    """Format the raw mqtt request spec into kwargs for client.publish().

    Formats every key against the test variables, then converts a "json"
    key (if present) into a serialised "payload" string.  "json" and
    "payload" are mutually exclusive.

    Raises:
        BadSchemaError: if both "payload" and "json" are given.
    """
    publish_kwargs = format_keys(rspec, test_block_config["variables"])
    if "json" not in rspec:
        return publish_kwargs
    if "payload" in rspec:
        raise exceptions.BadSchemaError(
            "Can only specify one of 'payload' or 'json' in MQTT request")
    publish_kwargs["payload"] = json.dumps(publish_kwargs.pop("json"))
    return publish_kwargs
class MQTTRequest(BaseRequest):
    """Wrapper for a single mqtt request on a client

    Similar to RestRequest, publishes a single message.
    """
    def __init__(self, client, rspec, test_block_config):
        """Validate the request spec and pre-bind the publish call.

        Args:
            client: mqtt client exposing a publish() method
            rspec: raw request spec (dict) from the test stage
            test_block_config: config dict whose "variables" entry is used
                to format the spec
        """
        # Keys the schema allows in an mqtt request block; anything else is
        # rejected by check_expected_keys below.
        expected = {
            "topic",
            "payload",
            "json",
            "qos",
            # TODO retain?
        }
        check_expected_keys(expected, rspec)
        publish_args = get_publish_args(rspec, test_block_config)
        # Bind the formatted kwargs now so run() is a zero-argument call.
        self._prepared = functools.partial(client.publish, **publish_args)
        # Need to do this here because get_publish_args will modify the original
        # input, which we might want to use to format. No error handling because
        # all the error handling is done in the previous call
        self._original_publish_args = format_keys(
            rspec, test_block_config["variables"])
        # TODO
        # From paho:
        # > raise TypeError('payload must be a string, bytearray, int, float or None.')
        # Need to be able to take all of these somehow, and also match these
        # against any payload received on the topic
    def run(self):
        """Publish the message; wraps publish errors in MQTTRequestException."""
        try:
            return self._prepared()
        except ValueError as e:
            logger.exception("Error publishing")
            raise_from(exceptions.MQTTRequestException, e)
    @property
    def request_vars(self):
        """The formatted request spec, as a Box for attribute-style access."""
        return Box(self._original_publish_args)
| 27.405063 | 87 | 0.651732 | import logging
import json
import functools
from future.utils import raise_from
from box import Box
from tavern.util import exceptions
from tavern.util.dict_util import format_keys, check_expected_keys
from tavern.request.base import BaseRequest
logger = logging.getLogger(__name__)
def get_publish_args(rspec, test_block_config):
fspec = format_keys(rspec, test_block_config["variables"])
if "json" in rspec:
if "payload" in rspec:
raise exceptions.BadSchemaError(
"Can only specify one of 'payload' or 'json' in MQTT request")
fspec["payload"] = json.dumps(fspec.pop("json"))
return fspec
class MQTTRequest(BaseRequest):
def __init__(self, client, rspec, test_block_config):
expected = {
"topic",
"payload",
"json",
"qos",
}
check_expected_keys(expected, rspec)
publish_args = get_publish_args(rspec, test_block_config)
self._prepared = functools.partial(client.publish, **publish_args)
self._original_publish_args = format_keys(
rspec, test_block_config["variables"])
def run(self):
try:
return self._prepared()
except ValueError as e:
logger.exception("Error publishing")
raise_from(exceptions.MQTTRequestException, e)
@property
def request_vars(self):
return Box(self._original_publish_args)
| true | true |
1c37aaa572f430e147ce57c950d49214c220c7f1 | 13,738 | py | Python | game/data/scripts/quests/605_AllianceWithKetraOrcs/__init__.py | TheDemonLife/Lineage2Server-Interlude | d23d145db533fd899d4064026e4bc7ee45c6624a | [
"Apache-2.0"
] | 10 | 2019-07-27T13:12:11.000Z | 2022-01-15T19:13:26.000Z | game/data/scripts/quests/605_AllianceWithKetraOrcs/__init__.py | TheDemonLife/Lineage2Server-Interlude | d23d145db533fd899d4064026e4bc7ee45c6624a | [
"Apache-2.0"
] | 1 | 2021-08-06T12:15:01.000Z | 2021-08-09T10:18:47.000Z | game/data/scripts/quests/605_AllianceWithKetraOrcs/__init__.py | TheDemonLife/Lineage2Server-Interlude | d23d145db533fd899d4064026e4bc7ee45c6624a | [
"Apache-2.0"
] | 2 | 2020-02-20T23:02:26.000Z | 2020-11-22T09:27:51.000Z | # Made by Emperorc
# Rate Fix by Gnat
import sys
from ru.catssoftware import Config
from ru.catssoftware.gameserver.model.quest import State
from ru.catssoftware.gameserver.model.quest import QuestState
from ru.catssoftware.gameserver.model.quest.jython import QuestJython as JQuest
qn = "605_AllianceWithKetraOrcs"
#NPC
Wahkan = 31371
#MOB
#mobs for Alliance lvl 1:Varka Silenos- Recruit, Footman, Scout, Hunter, Shaman
Varka_One = [ 21350, 21351, 21353, 21354, 21355 ]
#mobs for Alliance lvl 2 SHOULD BE:Varka Silenos- priests, warriors, mediums, \
#magi, officers, legionnaire captains, and elite escorts
#AS LISTED in npc.sql: Varka Silenos-priests, warriors, mediums, magi, officers;\
#Varka's- Commander, Elite Guard
Varka_Two = [ 21357, 21358, 21360, 21361, 21362, 21369, 21370 ]
#mobs for Alliance lvl 3 and up SHOULD BE:Varka Silenos- great mystics, captains, \
#grand seers, prophets, prophet's disciples, prophet's royal guards, chief magi and chief escorts
#AS LISTED in npc.sql: Varka Silenos-Seer, Great Magus, General, Great Seer,
#Varka's - Head Magus, Head Guard, Prophet, Prophet Guard, and Disciple of Prophet
Varka_Three = [ 21364, 21365, 21366, 21368, 21371, 21372, 21373, 21374, 21375 ]
#All Ketra Orc mobs
Ketra_Orcs = [ 21324, 21325, 21327, 21328, 21329, 21331, 21332, 21334, 21335, \
21336, 21338, 21339, 21340, 21342, 21343, 21344, 21345, 21346, 21347, 21348, 21349 ]
Chance = {
21351:500,#Footman
21366:628,#General
21365:500,#Great Magus
21368:508,#Great Seer
21354:521,#Hunter
21361:518,#Magus
21360:509,#Medium
21362:500,#Officer
21357:500,#Priest
21350:500,#Recruit
21353:509,#Scout
21364:527,#Seer
21355:519,#Shaman
21358:500,#Warrior
21369:518,#Commander
21370:604,#Elite guard
21372:604,#Head guard
21371:627,#Head magus
21374:626,#Prophet Guard
21375:626,#Disciple of Prophet
21373:649#Prophet
}
Chance_mane = {
21366:664,#General
21365:568,#Great Magus
21368:568,#Great Seer
21354:522,#Hunter
21360:539,#Medium
21362:568,#Officer
21357:529,#Priest
21350:500,#Recruit
21353:510,#Scout
21364:558,#Seer
21355:519,#Shaman
21358:529,#Warrior
21369:548,#Commander
21371:713,#Head magus
21373:738#Prophet
}
#Quest Items
Varka_Badge_Soldier, Varka_Badge_Officer, Varka_Badge_Captain = [7216, 7217, 7218]
Ketra_Alliance_One, Ketra_Alliance_Two, Ketra_Alliance_Three, \
Ketra_Alliance_Four, Ketra_Alliance_Five = [7211, 7212, 7213, 7214, 7215]
Varka_Alliance_One, Varka_Alliance_Two, Varka_Alliance_Three, \
Varka_Alliance_Four, Varka_Alliance_Five = [7221, 7222, 7223, 7224, 7225]
Ketra_Badge_Soldier, Ketra_Badge_Officer, Ketra_Badge_Captain = [7226, 7227, 7228]
Valor_Totem, Wisdom_Totem = [ 7219,7220 ]
Mane = 7233
#drop system - cond:[item_id,max,drop_id]
One ={
1:[57,100,Varka_Badge_Soldier],
2:[Ketra_Alliance_One,200,Varka_Badge_Soldier],
3:[Ketra_Alliance_Two,300,Varka_Badge_Soldier],
4:[Ketra_Alliance_Three,300,Varka_Badge_Soldier],
5:[Ketra_Alliance_Four,400,Varka_Badge_Soldier]
}
Two ={
2:[Ketra_Alliance_One,100,Varka_Badge_Officer],
3:[Ketra_Alliance_Two,200,Varka_Badge_Officer],
4:[Ketra_Alliance_Three,300,Varka_Badge_Officer],
5:[Ketra_Alliance_Four,400,Varka_Badge_Officer]
}
Three ={
3:[Ketra_Alliance_Two,100,Varka_Badge_Captain],
4:[Ketra_Alliance_Three,200,Varka_Badge_Captain],
5:[Ketra_Alliance_Four,200,Varka_Badge_Captain]
}
def giveReward(st,item,chance,MAX,drop) :
    """Award quest drop items, scaled by the server quest drop rate.

    st     -- the player's QuestState
    item   -- prerequisite item id; nothing is given unless the player
              already holds at least one of it
    chance -- base drop chance expressed out of 1000
    MAX    -- cap on how many of 'drop' may be accumulated
    drop   -- item id to award (Mane is exempt from the MAX cap)
    """
    if st.getQuestItemsCount(item) > 0 :
        count = st.getQuestItemsCount(drop)
        if count < MAX or drop == Mane :
            # Whole part of the scaled chance = guaranteed items; the
            # remainder is rolled once as a probability out of 1000.
            numItems,chance = divmod(chance*Config.RATE_DROP_QUEST,1000)
            if st.getRandom(1000) < chance :
                numItems += 1
            numItems = int(numItems)
            if numItems != 0 :
                if count + numItems >= MAX and drop != Mane :
                    # Clamp to the cap and play the "quest progressed" sound.
                    numItems = MAX - count
                    st.playSound("ItemSound.quest_middle")
                elif drop == Mane and int((count+numItems)/100) > int(count/100) :
                    # Crossing a multiple of 100 manes also signals progress.
                    st.playSound("ItemSound.quest_middle")
                else :
                    st.playSound("ItemSound.quest_itemget")
                st.giveItems(drop,numItems)
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.questItemIds = [Varka_Badge_Soldier, Varka_Badge_Officer, Varka_Badge_Captain]
def onEvent (self,event,st) :
cond = st.getInt("cond")
id = st.getInt("id")
htmltext = event
player = st.getPlayer()
if event == "31371-03a.htm" :
if player.getLevel() >= 74 :
st.set("cond","1")
st.set("id","2")
st.setState(State.STARTED)
st.playSound("ItemSound.quest_accept")
htmltext = "31371-03a.htm"
else :
htmltext = "31371-02b.htm"
st.exitQuest(1)
player.setAllianceWithVarkaKetra(0)
elif event == "31371-10-1.htm" :
htmltext = "31371-10-1.htm"
st.set("id","3")
st.takeItems(Varka_Badge_Soldier, 100)
st.giveItems(Ketra_Alliance_One, 1)
player.setAllianceWithVarkaKetra(1)
st.playSound("ItemSound.quest_middle")
elif event == "31371-10-2.htm" :
htmltext = "31371-10-2.htm"
st.set("id","3")
st.takeItems(Varka_Badge_Soldier, 200)
st.takeItems(Varka_Badge_Officer, 100)
st.takeItems(Ketra_Alliance_One, -1)
st.giveItems(Ketra_Alliance_Two, 1)
player.setAllianceWithVarkaKetra(2)
st.playSound("ItemSound.quest_middle")
elif event == "31371-10-3.htm" :
htmltext = "31371-10-3.htm"
st.set("id","3")
st.takeItems(Varka_Badge_Soldier, 300)
st.takeItems(Varka_Badge_Officer, 200)
st.takeItems(Varka_Badge_Captain, 100)
st.takeItems(Ketra_Alliance_Two, -1)
st.giveItems(Ketra_Alliance_Three, 1)
player.setAllianceWithVarkaKetra(3)
st.playSound("ItemSound.quest_middle")
elif event == "31371-10-4.htm" :
htmltext = "31371-10-4.htm"
st.set("id","3")
st.takeItems(Varka_Badge_Soldier, 300)
st.takeItems(Varka_Badge_Officer, 300)
st.takeItems(Varka_Badge_Captain, 200)
st.takeItems(Ketra_Alliance_Three, -1)
st.takeItems(Valor_Totem,-1)
st.giveItems(Ketra_Alliance_Four, 1)
player.setAllianceWithVarkaKetra(4)
st.playSound("ItemSound.quest_middle")
elif event == "31371-11a.htm" :
htmltext = "31371-11a.htm"
elif event == "31371-19.htm" :
htmltext = "31371-19.htm"
elif event == "31371-11b.htm" :
htmltext = "31371-11b.htm"
elif event == "31371-20.htm" :
htmltext = "31371-20.htm"
st.takeItems(Varka_Badge_Soldier, -1)
st.takeItems(Varka_Badge_Officer, -1)
st.takeItems(Varka_Badge_Captain, -1)
st.takeItems(Ketra_Alliance_One, -1)
st.takeItems(Ketra_Alliance_Two, -1)
st.takeItems(Ketra_Alliance_Three, -1)
st.takeItems(Ketra_Alliance_Four, -1)
st.takeItems(Ketra_Alliance_Five, -1)
st.takeItems(Valor_Totem,-1)
st.takeItems(Wisdom_Totem,-1)
player.setAllianceWithVarkaKetra(0)
st.exitQuest(1)
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if st :
npcId = npc.getNpcId()
cond = st.getInt("cond")
id = st.getInt("id")
VBadgeS = st.getQuestItemsCount(Varka_Badge_Soldier)
VBadgeO = st.getQuestItemsCount(Varka_Badge_Officer)
VBadgeC = st.getQuestItemsCount(Varka_Badge_Captain)
KAlliance1 = st.getQuestItemsCount(Ketra_Alliance_One)
KAlliance2 = st.getQuestItemsCount(Ketra_Alliance_Two)
KAlliance3 = st.getQuestItemsCount(Ketra_Alliance_Three)
KAlliance4 = st.getQuestItemsCount(Ketra_Alliance_Four)
KAlliance5 = st.getQuestItemsCount(Ketra_Alliance_Five)
KAlliance = KAlliance1 + KAlliance2 + KAlliance3 + KAlliance4 + KAlliance5
VAlliance = st.getQuestItemsCount(Varka_Alliance_One) + \
st.getQuestItemsCount(Varka_Alliance_Two) + st.getQuestItemsCount(Varka_Alliance_Three) + \
st.getQuestItemsCount(Varka_Alliance_Four) + st.getQuestItemsCount(Varka_Alliance_Five)
Valor = st.getQuestItemsCount(Valor_Totem)
Wisdom = st.getQuestItemsCount(Wisdom_Totem)
if npcId == Wahkan :
st.set("id","1")
if player.isAlliedWithVarka() or VAlliance :
htmltext= "31371-02a.htm"
st.exitQuest(1)
elif KAlliance == 0 :
if cond != 1 :
htmltext = "31371-01.htm"
else :
st.set("id","2")
if VBadgeS < 100 :
htmltext= "31371-03b.htm"
elif VBadgeS >= 100 :
htmltext = "31371-09.htm"
elif KAlliance :
st.setState(State.STARTED)
st.set("id","2")
if KAlliance1 :
if cond != 2 :
htmltext = "31371-04.htm"
st.set("cond","2")
player.setAllianceWithVarkaKetra(1)
else :
if VBadgeS < 200 or VBadgeO < 100 :
htmltext = "31371-12.htm"
elif VBadgeS >= 200 and VBadgeO >= 100 :
htmltext = "31371-13.htm"
elif KAlliance2 :
if cond != 3 :
htmltext = "31371-05.htm"
st.set("cond","3")
player.setAllianceWithVarkaKetra(2)
else :
if VBadgeS < 300 or VBadgeO < 200 or VBadgeC < 100 :
htmltext = "31371-15.htm"
elif VBadgeS >= 300 and VBadgeO >= 200 and VBadgeC >= 100 :
htmltext = "31371-16.htm"
elif KAlliance3 :
if cond != 4 :
htmltext = "31371-06.htm"
st.set("cond","4")
player.setAllianceWithVarkaKetra(3)
else:
if VBadgeS < 300 or VBadgeO < 300 or VBadgeC < 200 or Valor == 0 :
htmltext = "31371-21.htm"
elif VBadgeS >= 300 and VBadgeO >= 300 and VBadgeC >= 200 and Valor > 0 :
htmltext = "31371-22.htm"
elif KAlliance4 :
if cond != 5 :
htmltext = "31371-07.htm"
st.set("cond","5")
player.setAllianceWithVarkaKetra(4)
else :
if VBadgeS < 400 or VBadgeO < 400 or VBadgeC < 200 or Wisdom == 0 :
htmltext = "31371-17.htm"
elif VBadgeS >= 400 and VBadgeO >= 400 and VBadgeC >= 200 and Wisdom > 0 :
htmltext = "31371-10-5.htm"
st.takeItems(Varka_Badge_Soldier, 400)
st.takeItems(Varka_Badge_Officer, 400)
st.takeItems(Varka_Badge_Captain, 200)
st.takeItems(Ketra_Alliance_Four, -1)
st.takeItems(Wisdom_Totem,-1)
st.giveItems(Ketra_Alliance_Five, 1)
player.setAllianceWithVarkaKetra(5)
st.set("id","3")
st.playSound("ItemSound.quest_middle")
elif KAlliance5 :
if cond != 6 :
htmltext = "31371-18.htm"
st.set("cond","6")
player.setAllianceWithVarkaKetra(5)
else:
htmltext = "31371-08.htm"
return htmltext
def onKill(self,npc,player,isPet):
partyMember = self.getRandomPartyMemberState(player,State.STARTED)
if not partyMember : return
st = partyMember.getQuestState(qn)
if st :
if st.getState() == State.STARTED :
npcId = npc.getNpcId()
cond = st.getInt("cond")
id = st.getInt("id")
st2 = partyMember.getQuestState("606_WarWithVarkaSilenos")
if not partyMember.isAlliedWithVarka() :
if (npcId in Varka_One) or (npcId in Varka_Two) or (npcId in Varka_Three) :
item = 0
if cond <= 5 :
if npcId in Varka_One :
item,MAX,drop = One[cond]
elif npcId in Varka_Two and cond > 1:
item,MAX,drop = Two[cond]
elif npcId in Varka_Three and cond > 2 :
item,MAX,drop = Three[cond]
if item != 0 :
if st.getQuestItemsCount(drop) == MAX :
item = 0
chance = Chance[npcId]
if st2 :
if (st.getRandom(2) == 1 or item == 0) and npcId in Chance_mane.keys() :
item = 57
MAX = 100
drop = Mane
chance = Chance_mane[npcId]
giveReward(st,item,chance,MAX,drop)
elif id == 2 and item != 0 :
giveReward(st,item,chance,MAX,drop)
elif id == 2 and item != 0 :
giveReward(st,item,chance,MAX,drop)
return
QUEST = Quest(605,qn,"Alliance With Ketra Orcs")
QUEST.addStartNpc(Wahkan)
QUEST.addTalkId(Wahkan)
for mobId in Chance.keys() :
QUEST.addKillId(mobId)
for mobId in Ketra_Orcs :
QUEST.addKillId(mobId) | 39.705202 | 153 | 0.59521 |
import sys
from ru.catssoftware import Config
from ru.catssoftware.gameserver.model.quest import State
from ru.catssoftware.gameserver.model.quest import QuestState
from ru.catssoftware.gameserver.model.quest.jython import QuestJython as JQuest
qn = "605_AllianceWithKetraOrcs"
Wahkan = 31371
Varka_One = [ 21350, 21351, 21353, 21354, 21355 ]
Varka_Two = [ 21357, 21358, 21360, 21361, 21362, 21369, 21370 ]
#mobs for Alliance lvl 3 and up SHOULD BE:Varka Silenos- great mystics, captains, \
#grand seers, prophets, prophet's disciples, prophet's royal guards, chief magi and chief escorts
#AS LISTED in npc.sql: Varka Silenos-Seer, Great Magus, General, Great Seer,
#Varka's - Head Magus, Head Guard, Prophet, Prophet Guard, and Disciple of Prophet
Varka_Three = [ 21364, 21365, 21366, 21368, 21371, 21372, 21373, 21374, 21375 ]
Ketra_Orcs = [ 21324, 21325, 21327, 21328, 21329, 21331, 21332, 21334, 21335, \
21336, 21338, 21339, 21340, 21342, 21343, 21344, 21345, 21346, 21347, 21348, 21349 ]
Chance = {
21351:500,
21366:628,
21365:500,
21368:508,
21354:521,
21361:518,
21360:509,
21362:500,
21357:500,
21350:500,
21353:509,
21364:527,
21355:519,
21358:500,
21369:518,
21370:604,
21372:604,
21371:627,
21374:626,
21375:626,
21373:649
}
Chance_mane = {
21366:664,
21365:568,
21368:568,
21354:522,
21360:539,
21362:568,
21357:529,
21350:500,
21353:510,
21364:558,
21355:519,
21358:529,
21369:548,
21371:713,
21373:738
}
Varka_Badge_Soldier, Varka_Badge_Officer, Varka_Badge_Captain = [7216, 7217, 7218]
Ketra_Alliance_One, Ketra_Alliance_Two, Ketra_Alliance_Three, \
Ketra_Alliance_Four, Ketra_Alliance_Five = [7211, 7212, 7213, 7214, 7215]
Varka_Alliance_One, Varka_Alliance_Two, Varka_Alliance_Three, \
Varka_Alliance_Four, Varka_Alliance_Five = [7221, 7222, 7223, 7224, 7225]
Ketra_Badge_Soldier, Ketra_Badge_Officer, Ketra_Badge_Captain = [7226, 7227, 7228]
Valor_Totem, Wisdom_Totem = [ 7219,7220 ]
Mane = 7233
One ={
1:[57,100,Varka_Badge_Soldier],
2:[Ketra_Alliance_One,200,Varka_Badge_Soldier],
3:[Ketra_Alliance_Two,300,Varka_Badge_Soldier],
4:[Ketra_Alliance_Three,300,Varka_Badge_Soldier],
5:[Ketra_Alliance_Four,400,Varka_Badge_Soldier]
}
Two ={
2:[Ketra_Alliance_One,100,Varka_Badge_Officer],
3:[Ketra_Alliance_Two,200,Varka_Badge_Officer],
4:[Ketra_Alliance_Three,300,Varka_Badge_Officer],
5:[Ketra_Alliance_Four,400,Varka_Badge_Officer]
}
Three ={
3:[Ketra_Alliance_Two,100,Varka_Badge_Captain],
4:[Ketra_Alliance_Three,200,Varka_Badge_Captain],
5:[Ketra_Alliance_Four,200,Varka_Badge_Captain]
}
def giveReward(st,item,chance,MAX,drop) :
if st.getQuestItemsCount(item) > 0 :
count = st.getQuestItemsCount(drop)
if count < MAX or drop == Mane :
numItems,chance = divmod(chance*Config.RATE_DROP_QUEST,1000)
if st.getRandom(1000) < chance :
numItems += 1
numItems = int(numItems)
if numItems != 0 :
if count + numItems >= MAX and drop != Mane :
numItems = MAX - count
st.playSound("ItemSound.quest_middle")
elif drop == Mane and int((count+numItems)/100) > int(count/100) :
st.playSound("ItemSound.quest_middle")
else :
st.playSound("ItemSound.quest_itemget")
st.giveItems(drop,numItems)
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.questItemIds = [Varka_Badge_Soldier, Varka_Badge_Officer, Varka_Badge_Captain]
def onEvent (self,event,st) :
cond = st.getInt("cond")
id = st.getInt("id")
htmltext = event
player = st.getPlayer()
if event == "31371-03a.htm" :
if player.getLevel() >= 74 :
st.set("cond","1")
st.set("id","2")
st.setState(State.STARTED)
st.playSound("ItemSound.quest_accept")
htmltext = "31371-03a.htm"
else :
htmltext = "31371-02b.htm"
st.exitQuest(1)
player.setAllianceWithVarkaKetra(0)
elif event == "31371-10-1.htm" :
htmltext = "31371-10-1.htm"
st.set("id","3")
st.takeItems(Varka_Badge_Soldier, 100)
st.giveItems(Ketra_Alliance_One, 1)
player.setAllianceWithVarkaKetra(1)
st.playSound("ItemSound.quest_middle")
elif event == "31371-10-2.htm" :
htmltext = "31371-10-2.htm"
st.set("id","3")
st.takeItems(Varka_Badge_Soldier, 200)
st.takeItems(Varka_Badge_Officer, 100)
st.takeItems(Ketra_Alliance_One, -1)
st.giveItems(Ketra_Alliance_Two, 1)
player.setAllianceWithVarkaKetra(2)
st.playSound("ItemSound.quest_middle")
elif event == "31371-10-3.htm" :
htmltext = "31371-10-3.htm"
st.set("id","3")
st.takeItems(Varka_Badge_Soldier, 300)
st.takeItems(Varka_Badge_Officer, 200)
st.takeItems(Varka_Badge_Captain, 100)
st.takeItems(Ketra_Alliance_Two, -1)
st.giveItems(Ketra_Alliance_Three, 1)
player.setAllianceWithVarkaKetra(3)
st.playSound("ItemSound.quest_middle")
elif event == "31371-10-4.htm" :
htmltext = "31371-10-4.htm"
st.set("id","3")
st.takeItems(Varka_Badge_Soldier, 300)
st.takeItems(Varka_Badge_Officer, 300)
st.takeItems(Varka_Badge_Captain, 200)
st.takeItems(Ketra_Alliance_Three, -1)
st.takeItems(Valor_Totem,-1)
st.giveItems(Ketra_Alliance_Four, 1)
player.setAllianceWithVarkaKetra(4)
st.playSound("ItemSound.quest_middle")
elif event == "31371-11a.htm" :
htmltext = "31371-11a.htm"
elif event == "31371-19.htm" :
htmltext = "31371-19.htm"
elif event == "31371-11b.htm" :
htmltext = "31371-11b.htm"
elif event == "31371-20.htm" :
htmltext = "31371-20.htm"
st.takeItems(Varka_Badge_Soldier, -1)
st.takeItems(Varka_Badge_Officer, -1)
st.takeItems(Varka_Badge_Captain, -1)
st.takeItems(Ketra_Alliance_One, -1)
st.takeItems(Ketra_Alliance_Two, -1)
st.takeItems(Ketra_Alliance_Three, -1)
st.takeItems(Ketra_Alliance_Four, -1)
st.takeItems(Ketra_Alliance_Five, -1)
st.takeItems(Valor_Totem,-1)
st.takeItems(Wisdom_Totem,-1)
player.setAllianceWithVarkaKetra(0)
st.exitQuest(1)
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if st :
npcId = npc.getNpcId()
cond = st.getInt("cond")
id = st.getInt("id")
VBadgeS = st.getQuestItemsCount(Varka_Badge_Soldier)
VBadgeO = st.getQuestItemsCount(Varka_Badge_Officer)
VBadgeC = st.getQuestItemsCount(Varka_Badge_Captain)
KAlliance1 = st.getQuestItemsCount(Ketra_Alliance_One)
KAlliance2 = st.getQuestItemsCount(Ketra_Alliance_Two)
KAlliance3 = st.getQuestItemsCount(Ketra_Alliance_Three)
KAlliance4 = st.getQuestItemsCount(Ketra_Alliance_Four)
KAlliance5 = st.getQuestItemsCount(Ketra_Alliance_Five)
KAlliance = KAlliance1 + KAlliance2 + KAlliance3 + KAlliance4 + KAlliance5
VAlliance = st.getQuestItemsCount(Varka_Alliance_One) + \
st.getQuestItemsCount(Varka_Alliance_Two) + st.getQuestItemsCount(Varka_Alliance_Three) + \
st.getQuestItemsCount(Varka_Alliance_Four) + st.getQuestItemsCount(Varka_Alliance_Five)
Valor = st.getQuestItemsCount(Valor_Totem)
Wisdom = st.getQuestItemsCount(Wisdom_Totem)
if npcId == Wahkan :
st.set("id","1")
if player.isAlliedWithVarka() or VAlliance :
htmltext= "31371-02a.htm"
st.exitQuest(1)
elif KAlliance == 0 :
if cond != 1 :
htmltext = "31371-01.htm"
else :
st.set("id","2")
if VBadgeS < 100 :
htmltext= "31371-03b.htm"
elif VBadgeS >= 100 :
htmltext = "31371-09.htm"
elif KAlliance :
st.setState(State.STARTED)
st.set("id","2")
if KAlliance1 :
if cond != 2 :
htmltext = "31371-04.htm"
st.set("cond","2")
player.setAllianceWithVarkaKetra(1)
else :
if VBadgeS < 200 or VBadgeO < 100 :
htmltext = "31371-12.htm"
elif VBadgeS >= 200 and VBadgeO >= 100 :
htmltext = "31371-13.htm"
elif KAlliance2 :
if cond != 3 :
htmltext = "31371-05.htm"
st.set("cond","3")
player.setAllianceWithVarkaKetra(2)
else :
if VBadgeS < 300 or VBadgeO < 200 or VBadgeC < 100 :
htmltext = "31371-15.htm"
elif VBadgeS >= 300 and VBadgeO >= 200 and VBadgeC >= 100 :
htmltext = "31371-16.htm"
elif KAlliance3 :
if cond != 4 :
htmltext = "31371-06.htm"
st.set("cond","4")
player.setAllianceWithVarkaKetra(3)
else:
if VBadgeS < 300 or VBadgeO < 300 or VBadgeC < 200 or Valor == 0 :
htmltext = "31371-21.htm"
elif VBadgeS >= 300 and VBadgeO >= 300 and VBadgeC >= 200 and Valor > 0 :
htmltext = "31371-22.htm"
elif KAlliance4 :
if cond != 5 :
htmltext = "31371-07.htm"
st.set("cond","5")
player.setAllianceWithVarkaKetra(4)
else :
if VBadgeS < 400 or VBadgeO < 400 or VBadgeC < 200 or Wisdom == 0 :
htmltext = "31371-17.htm"
elif VBadgeS >= 400 and VBadgeO >= 400 and VBadgeC >= 200 and Wisdom > 0 :
htmltext = "31371-10-5.htm"
st.takeItems(Varka_Badge_Soldier, 400)
st.takeItems(Varka_Badge_Officer, 400)
st.takeItems(Varka_Badge_Captain, 200)
st.takeItems(Ketra_Alliance_Four, -1)
st.takeItems(Wisdom_Totem,-1)
st.giveItems(Ketra_Alliance_Five, 1)
player.setAllianceWithVarkaKetra(5)
st.set("id","3")
st.playSound("ItemSound.quest_middle")
elif KAlliance5 :
if cond != 6 :
htmltext = "31371-18.htm"
st.set("cond","6")
player.setAllianceWithVarkaKetra(5)
else:
htmltext = "31371-08.htm"
return htmltext
def onKill(self,npc,player,isPet):
partyMember = self.getRandomPartyMemberState(player,State.STARTED)
if not partyMember : return
st = partyMember.getQuestState(qn)
if st :
if st.getState() == State.STARTED :
npcId = npc.getNpcId()
cond = st.getInt("cond")
id = st.getInt("id")
st2 = partyMember.getQuestState("606_WarWithVarkaSilenos")
if not partyMember.isAlliedWithVarka() :
if (npcId in Varka_One) or (npcId in Varka_Two) or (npcId in Varka_Three) :
item = 0
if cond <= 5 :
if npcId in Varka_One :
item,MAX,drop = One[cond]
elif npcId in Varka_Two and cond > 1:
item,MAX,drop = Two[cond]
elif npcId in Varka_Three and cond > 2 :
item,MAX,drop = Three[cond]
if item != 0 :
if st.getQuestItemsCount(drop) == MAX :
item = 0
chance = Chance[npcId]
if st2 :
if (st.getRandom(2) == 1 or item == 0) and npcId in Chance_mane.keys() :
item = 57
MAX = 100
drop = Mane
chance = Chance_mane[npcId]
giveReward(st,item,chance,MAX,drop)
elif id == 2 and item != 0 :
giveReward(st,item,chance,MAX,drop)
elif id == 2 and item != 0 :
giveReward(st,item,chance,MAX,drop)
return
QUEST = Quest(605,qn,"Alliance With Ketra Orcs")
QUEST.addStartNpc(Wahkan)
QUEST.addTalkId(Wahkan)
for mobId in Chance.keys() :
QUEST.addKillId(mobId)
for mobId in Ketra_Orcs :
QUEST.addKillId(mobId) | true | true |
1c37ab078af008294ab1a5513be042ef121b6fe1 | 2,517 | py | Python | src/sim/power/PowerModelState.py | majid169/gem5-RFDB | a8950687b8cb6a701a387fca4409ff273facb459 | [
"BSD-3-Clause"
] | 8 | 2020-02-04T23:39:49.000Z | 2021-05-18T14:33:14.000Z | src/sim/power/PowerModelState.py | majid169/gem5-RFDB | a8950687b8cb6a701a387fca4409ff273facb459 | [
"BSD-3-Clause"
] | 2 | 2022-02-13T15:54:43.000Z | 2022-03-22T06:19:28.000Z | src/sim/power/PowerModelState.py | majid169/gem5-RFDB | a8950687b8cb6a701a387fca4409ff273facb459 | [
"BSD-3-Clause"
] | 5 | 2020-04-07T03:38:31.000Z | 2020-11-28T04:03:15.000Z | # Copyright (c) 2016 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: David Guillen Fandos
from m5.SimObject import SimObject
from m5.params import *
# Represents a power model for a simobj
class PowerModelState(SimObject):
    # Abstract base class: concrete power models subclass this and supply
    # the C++ implementation of the methods exported below.
    type = 'PowerModelState'
    cxx_header = "sim/power/power_model.hh"
    abstract = True
    cxx_class = 'PowerModelState'

    @classmethod
    def export_methods(cls, code):
        # Expose the C++ power-query methods to the Python wrapper.
        code('''
    double getDynamicPower() const;
    double getStaticPower() const;
''')
| 44.946429 | 72 | 0.779102 |
from m5.SimObject import SimObject
from m5.params import *
class PowerModelState(SimObject):
type = 'PowerModelState'
cxx_header = "sim/power/power_model.hh"
abstract = True
cxx_class = 'PowerModelState'
@classmethod
def export_methods(cls, code):
code('''
double getDynamicPower() const;
double getStaticPower() const;
''')
| true | true |
1c37ab451ed1882f94ed21d6cef55d1de5b3dfb2 | 732 | py | Python | app_one/migrations/0003_auto_20180906_1100.py | ngohoangyell/python-django-cbv-to-do-task | 325ddbacce44baa6b06f50edd93615eb6c281fb9 | [
"MIT"
] | 1 | 2020-03-28T05:41:23.000Z | 2020-03-28T05:41:23.000Z | app_one/migrations/0003_auto_20180906_1100.py | ngohoangyell/python-django-cbv-to-do-task | 325ddbacce44baa6b06f50edd93615eb6c281fb9 | [
"MIT"
] | null | null | null | app_one/migrations/0003_auto_20180906_1100.py | ngohoangyell/python-django-cbv-to-do-task | 325ddbacce44baa6b06f50edd93615eb6c281fb9 | [
"MIT"
] | null | null | null | # Generated by Django 2.1 on 2018-09-06 04:00
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated by Django 2.1 on 2018-09-06: alters the defaults of
    # Task.ending_date and Task.starting_date.

    dependencies = [
        ('app_one', '0002_auto_20180906_1100'),
    ]

    operations = [
        migrations.AlterField(
            model_name='task',
            name='ending_date',
            # NOTE(review): this default is the fixed timestamp captured when
            # makemigrations ran, not "now" at row-creation time.  That is
            # harmless inside a frozen migration, but it suggests the model
            # itself uses a non-callable default -- the model should probably
            # use a callable such as django.utils.timezone.now; confirm.
            field=models.DateField(default=datetime.datetime(2018, 9, 6, 4, 0, 56, 504423, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='task',
            name='starting_date',
            field=models.DateField(default=datetime.datetime(2018, 9, 6, 4, 0, 56, 504423, tzinfo=utc)),
        ),
    ]
| 28.153846 | 105 | 0.592896 |
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('app_one', '0002_auto_20180906_1100'),
]
operations = [
migrations.AlterField(
model_name='task',
name='ending_date',
field=models.DateField(default=datetime.datetime(2018, 9, 6, 4, 0, 56, 504423, tzinfo=utc)),
),
migrations.AlterField(
model_name='task',
name='starting_date',
field=models.DateField(default=datetime.datetime(2018, 9, 6, 4, 0, 56, 504423, tzinfo=utc)),
),
]
| true | true |
1c37ac4c8498272775d81d61df98f0c918460bcc | 618 | py | Python | molecule/default/tests/test_default.py | avinetworks/ansible-role-avise | cce7e4e1b741601aace902e7a28a8e2e9766df36 | [
"Apache-2.0"
] | 3 | 2016-10-11T16:43:04.000Z | 2016-11-21T16:59:15.000Z | molecule/default/tests/test_default.py | avinetworks/ansible-role-avise | cce7e4e1b741601aace902e7a28a8e2e9766df36 | [
"Apache-2.0"
] | 2 | 2019-09-20T05:52:14.000Z | 2020-11-26T13:56:33.000Z | molecule/default/tests/test_default.py | avinetworks/ansible-role-avise | cce7e4e1b741601aace902e7a28a8e2e9766df36 | [
"Apache-2.0"
] | 5 | 2016-10-11T19:48:37.000Z | 2021-09-26T16:17:10.000Z |
############################################################################
# ========================================================================
# Copyright 2021 VMware, Inc. All rights reserved. VMware Confidential
# ========================================================================
###
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hosts_file(host):
    """Verify /etc/hosts exists on the target host and is owned by root:root.

    ``host`` is the testinfra connection fixture, one per inventory host.
    """
    f = host.file('/etc/hosts')
    assert f.exists
    assert f.user == 'root'
    assert f.group == 'root'
| 28.090909 | 76 | 0.456311 | true | true | |
1c37ac78b3368b2cceda6e3f5b8fcb0fbd3a51ab | 4,500 | py | Python | openstack_dashboard/dashboards/admin/images/tests.py | ameoba/horizon | ff9e367c98a8bb79f10914abffaaa04b0a461819 | [
"Apache-2.0"
] | 2 | 2019-12-29T09:20:13.000Z | 2020-01-01T13:12:34.000Z | openstack_dashboard/dashboards/admin/images/tests.py | yongquanf/horizon | 9aad7fd6f66588fed7c27b720642e47a4a12854b | [
"Apache-2.0"
] | 1 | 2015-03-12T01:03:44.000Z | 2015-03-12T01:03:44.000Z | openstack_dashboard/dashboards/admin/images/tests.py | yongquanf/horizon | 9aad7fd6f66588fed7c27b720642e47a4a12854b | [
"Apache-2.0"
] | 4 | 2015-05-05T08:17:28.000Z | 2020-02-05T10:47:06.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings # noqa
from django.core.urlresolvers import reverse # noqa
from django import http
from django.test.utils import override_settings # noqa
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.admin.images import tables
class ImageCreateViewTest(test.BaseAdminViewTests):
    """Tests for the admin image-create view."""
    def test_admin_image_create_view_uses_admin_template(self):
        """The create page must render with the admin-specific template."""
        res = self.client.get(
            reverse('horizon:admin:images:create'))
        self.assertTemplateUsed(res, 'admin/images/create.html')
class ImagesViewTest(test.BaseAdminViewTests):
    """Tests for the admin images index view (listing and pagination)."""
    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_images_list(self):
        """Index view shows every image returned by the glance API."""
        # mox expectation: one unpaginated listing call returning all images.
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None,
                                       paginate=True) \
            .AndReturn([self.images.list(),
                        False])
        self.mox.ReplayAll()
        res = self.client.get(
            reverse('horizon:admin:images:index'))
        self.assertTemplateUsed(res, 'admin/images/index.html')
        self.assertEqual(len(res.context['images_table'].data),
                         len(self.images.list()))
    @override_settings(API_RESULT_PAGE_SIZE=2)
    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_images_list_get_pagination(self):
        """Paginated listing: 5 images, page size 2 -> pages of 2, 2 and 1.

        NOTE: the four AndReturn stubs below are consumed strictly in order
        by the four client.get() calls, so do not reorder them.
        """
        images = self.images.list()[:5]
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None,
                                       paginate=True) \
            .AndReturn([images,
                        True])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None,
                                       paginate=True) \
            .AndReturn([images[:2],
                        True])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=images[2].id,
                                       paginate=True) \
            .AndReturn([images[2:4],
                        True])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=images[4].id,
                                       paginate=True) \
            .AndReturn([images[4:],
                        True])
        self.mox.ReplayAll()
        url = reverse('horizon:admin:images:index')
        res = self.client.get(url)
        # get all
        self.assertEqual(len(res.context['images_table'].data),
                         len(images))
        self.assertTemplateUsed(res, 'admin/images/index.html')
        res = self.client.get(url)
        # get first page with 2 items
        self.assertEqual(len(res.context['images_table'].data),
                         settings.API_RESULT_PAGE_SIZE)
        url = "?".join([reverse('horizon:admin:images:index'),
                        "=".join([tables.AdminImagesTable._meta.pagination_param,
                                  images[2].id])])
        res = self.client.get(url)
        # get second page (items 2-4)
        self.assertEqual(len(res.context['images_table'].data),
                         settings.API_RESULT_PAGE_SIZE)
        url = "?".join([reverse('horizon:admin:images:index'),
                        "=".join([tables.AdminImagesTable._meta.pagination_param,
                                  images[4].id])])
        res = self.client.get(url)
        # get third page (item 5)
        self.assertEqual(len(res.context['images_table'].data),
                         1)
| 42.056075 | 78 | 0.555333 |
from django.conf import settings
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from mox import IsA
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.admin.images import tables
class ImageCreateViewTest(test.BaseAdminViewTests):
    """Tests for the admin image-create view."""
    def test_admin_image_create_view_uses_admin_template(self):
        """The create page must be rendered with the admin template."""
        res = self.client.get(
            reverse('horizon:admin:images:create'))
        self.assertTemplateUsed(res, 'admin/images/create.html')
class ImagesViewTest(test.BaseAdminViewTests):
    """Tests for the admin images index view (listing and pagination)."""
    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_images_list(self):
        """Index view shows every image returned by the glance API."""
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None,
                                       paginate=True) \
            .AndReturn([self.images.list(),
                        False])
        self.mox.ReplayAll()
        res = self.client.get(
            reverse('horizon:admin:images:index'))
        self.assertTemplateUsed(res, 'admin/images/index.html')
        self.assertEqual(len(res.context['images_table'].data),
                         len(self.images.list()))
    @override_settings(API_RESULT_PAGE_SIZE=2)
    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_images_list_get_pagination(self):
        """Paginated listing: 5 images with page size 2 -> pages of 2, 2, 1.

        NOTE: the four AndReturn stubs are consumed in order by the four
        client.get() calls below, so ordering matters.
        """
        images = self.images.list()[:5]
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None,
                                       paginate=True) \
            .AndReturn([images,
                        True])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None,
                                       paginate=True) \
            .AndReturn([images[:2],
                        True])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=images[2].id,
                                       paginate=True) \
            .AndReturn([images[2:4],
                        True])
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=images[4].id,
                                       paginate=True) \
            .AndReturn([images[4:],
                        True])
        self.mox.ReplayAll()
        url = reverse('horizon:admin:images:index')
        res = self.client.get(url)
        # full (unpaginated) listing
        self.assertEqual(len(res.context['images_table'].data),
                         len(images))
        self.assertTemplateUsed(res, 'admin/images/index.html')
        res = self.client.get(url)
        # first page: 2 items
        self.assertEqual(len(res.context['images_table'].data),
                         settings.API_RESULT_PAGE_SIZE)
        url = "?".join([reverse('horizon:admin:images:index'),
                        "=".join([tables.AdminImagesTable._meta.pagination_param,
                                  images[2].id])])
        res = self.client.get(url)
        # second page: 2 items
        self.assertEqual(len(res.context['images_table'].data),
                         settings.API_RESULT_PAGE_SIZE)
        url = "?".join([reverse('horizon:admin:images:index'),
                        "=".join([tables.AdminImagesTable._meta.pagination_param,
                                  images[4].id])])
        res = self.client.get(url)
        # third page: the single remaining item
        self.assertEqual(len(res.context['images_table'].data),
                         1)
| true | true |
1c37ad3c6e85968813e0b668ff632ccc6145eb04 | 1,292 | py | Python | examples/event_handling/legend_picking.py | jbbrokaw/matplotlib | 86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427 | [
"MIT",
"BSD-3-Clause"
] | 16 | 2016-06-14T19:45:35.000Z | 2020-11-30T19:02:58.000Z | lib/mpl_examples/event_handling/legend_picking.py | yingkailiang/matplotlib | 255a79b106c98c1904489afe6a754e4d943179d6 | [
"MIT",
"BSD-3-Clause"
] | 7 | 2015-05-08T19:36:25.000Z | 2015-06-30T15:32:17.000Z | lib/mpl_examples/event_handling/legend_picking.py | yingkailiang/matplotlib | 255a79b106c98c1904489afe6a754e4d943179d6 | [
"MIT",
"BSD-3-Clause"
] | 14 | 2015-10-05T04:15:46.000Z | 2020-06-11T18:06:02.000Z | """
Enable picking on the legend to toggle the legended line on and off
"""
import numpy as np
import matplotlib.pyplot as plt
# Two sine waves at different frequencies/amplitudes.
t = np.arange(0.0, 0.2, 0.1)
y1 = 2*np.sin(2*np.pi*t)
y2 = 4*np.sin(2*np.pi*2*t)
fig, ax = plt.subplots()
ax.set_title('Click on legend line to toggle line on/off')
line1, = ax.plot(t, y1, lw=2, color='red', label='1 HZ')
line2, = ax.plot(t, y2, lw=2, color='blue', label='2 HZ')
leg = ax.legend(loc='upper left', fancybox=True, shadow=True)
leg.get_frame().set_alpha(0.4)
# we will set up a dict mapping legend line to orig line, and enable
# picking on the legend line
lines = [line1, line2]
lined = dict()
for legline, origline in zip(leg.get_lines(), lines):
    legline.set_picker(5)  # 5 pts tolerance
    lined[legline] = origline
def onpick(event):
    """Toggle visibility of the data line whose legend entry was picked."""
    # on the pick event, find the orig line corresponding to the
    # legend proxy line, and toggle the visibility
    legline = event.artist
    origline = lined[legline]
    vis = not origline.get_visible()
    origline.set_visible(vis)
    # Change the alpha on the line in the legend so we can see what lines
    # have been toggled
    if vis:
        legline.set_alpha(1.0)
    else:
        legline.set_alpha(0.2)
    fig.canvas.draw()
fig.canvas.mpl_connect('pick_event', onpick)
plt.show()
| 28.086957 | 73 | 0.679567 | import numpy as np
import matplotlib.pyplot as plt
# Demo: clicking a legend entry toggles the matching data line on/off.
t = np.arange(0.0, 0.2, 0.1)
y1 = 2*np.sin(2*np.pi*t)
y2 = 4*np.sin(2*np.pi*2*t)
fig, ax = plt.subplots()
ax.set_title('Click on legend line to toggle line on/off')
line1, = ax.plot(t, y1, lw=2, color='red', label='1 HZ')
line2, = ax.plot(t, y2, lw=2, color='blue', label='2 HZ')
leg = ax.legend(loc='upper left', fancybox=True, shadow=True)
leg.get_frame().set_alpha(0.4)
# Map each legend proxy line to its data line and make it pickable.
lines = [line1, line2]
lined = dict()
for legline, origline in zip(leg.get_lines(), lines):
    legline.set_picker(5)  # pick tolerance in points
    lined[legline] = origline
def onpick(event):
    """Toggle visibility of the data line whose legend entry was picked."""
    legline = event.artist
    origline = lined[legline]
    vis = not origline.get_visible()
    origline.set_visible(vis)
    # Dim the legend entry of a hidden line so the state is visible.
    if vis:
        legline.set_alpha(1.0)
    else:
        legline.set_alpha(0.2)
    fig.canvas.draw()
fig.canvas.mpl_connect('pick_event', onpick)
plt.show()
| true | true |
1c37ae3f173da72ddbce0e44e1eaf2cc654095f9 | 305 | py | Python | 2018/11/graphics/bitcoin-nov-drop-20181128/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 14 | 2015-05-08T13:41:51.000Z | 2021-02-24T12:34:55.000Z | 2018/11/graphics/bitcoin-nov-drop-20181128/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | null | null | null | 2018/11/graphics/bitcoin-nov-drop-20181128/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 7 | 2015-04-04T04:45:54.000Z | 2021-02-18T11:12:48.000Z | #!/usr/bin/env python
import base_filters
# Key of the Google Doc holding this graphic's copy text -- presumably
# consumed by the shared graphics rig; confirm against the build tooling.
COPY_GOOGLE_DOC_KEY = '1FKqTvQtjYjN9LlhCtkO59hUqZ6SbvUHfYpgetAS5sek'
# This graphic ships no additional static assets.
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
# Jinja filters shared across graphics in this repository.
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| 21.785714 | 77 | 0.819672 |
import base_filters
# Google Doc key for this graphic's copy text (assumption -- verify usage).
COPY_GOOGLE_DOC_KEY = '1FKqTvQtjYjN9LlhCtkO59hUqZ6SbvUHfYpgetAS5sek'
# No extra static assets for this graphic.
USE_ASSETS = False
# Shared Jinja filters used when rendering templates.
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| true | true |
1c37ae41f6079eccf08a8ecad0c0187105559d92 | 388 | py | Python | test1/wsgi.py | arohom/test1 | 2f8c662fbb347c017aba986e1cd36e2a428bade7 | [
"MIT"
] | 1 | 2019-12-15T16:56:44.000Z | 2019-12-15T16:56:44.000Z | test1/test1/wsgi.py | 1923488289/myfisttwo | a4b30b6944407f3525787eea777c327615e0caa7 | [
"MIT"
] | 87 | 2018-01-06T10:18:31.000Z | 2022-03-11T23:32:30.000Z | test1/test1/wsgi.py | 1923488289/myfisttwo | a4b30b6944407f3525787eea777c327615e0caa7 | [
"MIT"
] | null | null | null | """
WSGI config for test1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before the app loads.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test1.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| 22.823529 | 78 | 0.783505 |
import os
from django.core.wsgi import get_wsgi_application
# Default the settings module for this project before building the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test1.settings")
# WSGI entry point used by application servers.
application = get_wsgi_application()
| true | true |
1c37aebef98dc918d1cecaf7e1721cdf921d976a | 3,045 | py | Python | source/main/transcribe.py | SN-18/scrivener | 76ca835b47f84ad231a6b4bf6ab9e212fc6b8724 | [
"MIT"
] | null | null | null | source/main/transcribe.py | SN-18/scrivener | 76ca835b47f84ad231a6b4bf6ab9e212fc6b8724 | [
"MIT"
] | 27 | 2021-10-21T18:39:01.000Z | 2021-11-05T14:17:29.000Z | source/main/transcribe.py | SN-18/scrivener | 76ca835b47f84ad231a6b4bf6ab9e212fc6b8724 | [
"MIT"
] | 2 | 2021-10-30T03:51:33.000Z | 2021-11-30T02:10:49.000Z | """
Copyright (c) 2021 Anshul Patel
This code is licensed under MIT license (see LICENSE.MD for details)
@author: Scrivener
"""
# Import Libraries
from source.main.summarize import Summary
import speech_recognition as sr
import moviepy.editor as mp
from source.helper.split_audio import splitwavaudio
import os
from source.helper.cleanup import Cleanup
from source.main.punctuation import Punctuation
class TranscribeVideo:
    """Summarize a video that has no Closed Captions.

    The video's audio track is extracted, split into small chunks,
    transcribed with Google speech recognition, punctuated, and summarized.

    Attributes
    ----------
    summary: str
        Summary of the most recently transcribed video.

    Methods
    -------
    transcribe_video:
        Generate summary from video
    split_init:
        Split audio file into multiple small chunks
    """

    def __init__(self):
        # Bug fix: this was previously named ``__init`` (a plain method that
        # was never called), so ``summary`` was not set on construction.
        self.summary = ""

    def transcribe_video(self, ip_path):
        """Transcribe the video at ``ip_path``, summarize it, and return the summary."""
        video = mp.VideoFileClip(ip_path)
        # Ensure a scratch directory exists for the intermediate audio files.
        if not os.path.exists(os.getcwd() + "/temp"):
            os.mkdir("temp")
        # Extract the full audio track from the video.
        video.audio.write_audiofile(os.getcwd() + "/temp/temp_audio.wav")
        # Split the audio into small chunks for recognition.
        num_of_files = self.split_init()
        transcript_text = ""
        for i in range(num_of_files):
            recognizer = sr.Recognizer()
            # Chunk files are named by their starting minute (2-minute splits).
            audio = sr.AudioFile("temp/" + str(i * 2) + "_temp_audio.wav")
            with audio as src:
                audio_data = recognizer.record(src)
            # NOTE(review): chunk texts are concatenated without a separator,
            # so words spanning a chunk boundary may be joined -- confirm.
            transcript_text += recognizer.recognize_google(audio_data)
        # Add punctuation before summarizing.
        punctuated_transcription = Punctuation.add_punctuation_transcript(
            transcript_text
        )
        # Summarize the punctuated transcript.
        transcript_summary = Summary(punctuated_transcription)
        summary = transcript_summary.summarize_text()
        for lines in summary:
            print(lines)
        self.summary = "\n".join(summary)
        # Remove the temporary audio files.
        clean_up = Cleanup()
        clean_up.delete_temp_files()
        return self.summary

    def split_init(self):
        """Split temp/temp_audio.wav into 2-minute chunks; return the chunk count."""
        folder = os.getcwd() + "/" + "temp"
        file = "temp_audio.wav"
        split_wav = splitwavaudio(folder, file)
        num_of_files = split_wav.multiple_split(min_per_split=2)
        return num_of_files
| 30.45 | 84 | 0.636453 |
from source.main.summarize import Summary
import speech_recognition as sr
import moviepy.editor as mp
from source.helper.split_audio import splitwavaudio
import os
from source.helper.cleanup import Cleanup
from source.main.punctuation import Punctuation
class TranscribeVideo:
    """Summarize a video without Closed Captions via speech recognition.

    Attributes
    ----------
    summary: str
        Summary of the most recently transcribed video.
    """

    def __init__(self):
        # Bug fix: originally spelled ``__init`` so Python never called it
        # as a constructor and ``summary`` was left undefined.
        self.summary = ""

    def transcribe_video(self, ip_path):
        """Transcribe the video at ``ip_path`` and return its text summary."""
        video = mp.VideoFileClip(ip_path)
        # Scratch directory for intermediate audio files.
        if not os.path.exists(os.getcwd() + "/temp"):
            os.mkdir("temp")
        video.audio.write_audiofile(os.getcwd() + "/temp/temp_audio.wav")
        num_of_files = self.split_init()
        transcript_text = ""
        for i in range(num_of_files):
            recognizer = sr.Recognizer()
            # Chunks are named by starting minute (2-minute splits).
            audio = sr.AudioFile("temp/" + str(i * 2) + "_temp_audio.wav")
            with audio as src:
                audio_data = recognizer.record(src)
            transcript_text += recognizer.recognize_google(audio_data)
        punctuated_transcription = Punctuation.add_punctuation_transcript(
            transcript_text
        )
        transcript_summary = Summary(punctuated_transcription)
        summary = transcript_summary.summarize_text()
        for lines in summary:
            print(lines)
        self.summary = "\n".join(summary)
        # Delete the temporary audio files.
        clean_up = Cleanup()
        clean_up.delete_temp_files()
        return self.summary

    def split_init(self):
        """Split temp/temp_audio.wav into 2-minute chunks; return the count."""
        folder = os.getcwd() + "/" + "temp"
        file = "temp_audio.wav"
        split_wav = splitwavaudio(folder, file)
        num_of_files = split_wav.multiple_split(min_per_split=2)
        return num_of_files
| true | true |
1c37afebb69ae9547131b21420229a0cdf1df93e | 5,438 | py | Python | SpeakerIdentification.py | LL03-Identity-Dowell/100054-dowellvoiceapp | 391df14aa4d438591bd7f9cb740d1f751b59e419 | [
"Apache-2.0"
] | null | null | null | SpeakerIdentification.py | LL03-Identity-Dowell/100054-dowellvoiceapp | 391df14aa4d438591bd7f9cb740d1f751b59e419 | [
"Apache-2.0"
] | null | null | null | SpeakerIdentification.py | LL03-Identity-Dowell/100054-dowellvoiceapp | 391df14aa4d438591bd7f9cb740d1f751b59e419 | [
"Apache-2.0"
] | 1 | 2021-09-16T09:19:38.000Z | 2021-09-16T09:19:38.000Z | import os
import wave
import time
import pickle
#import pyaudio
import warnings
import numpy as np
import sounddevice as sd
from scipy.io.wavfile import write
from sklearn import preprocessing
from scipy.io.wavfile import read
import python_speech_features as mfcc
from sklearn.mixture import GaussianMixture
warnings.filterwarnings("ignore")
def calculate_delta(array):
    """Compute delta (difference) features for a feature matrix.

    For each frame ``i`` the delta is a weighted difference of neighbouring
    frames at offsets +/-1 and +/-2, with indices clamped to the matrix edges:

        delta[i] = ((a[i+1] - a[i-1]) + 2 * (a[i+2] - a[i-2])) / 10

    Parameters
    ----------
    array : np.ndarray, shape (frames, coeffs)
        Feature matrix (e.g. scaled MFCCs).

    Returns
    -------
    np.ndarray with the same shape as ``array``.
    """
    rows, cols = array.shape
    # Generalized from a hard-coded width of 20 so any coefficient count
    # works; removed the leftover debug prints of rows/cols.
    deltas = np.zeros((rows, cols))
    for i in range(rows):
        # Clamp the +/-1 and +/-2 neighbour indices into [0, rows - 1].
        prev1 = max(i - 1, 0)
        next1 = min(i + 1, rows - 1)
        prev2 = max(i - 2, 0)
        next2 = min(i + 2, rows - 1)
        deltas[i] = ((array[next1] - array[prev1])
                     + 2 * (array[next2] - array[prev2])) / 10
    return deltas
def extract_features(audio, rate):
    """Extract a 40-column feature matrix: 20 scaled MFCCs plus their deltas.

    Parameters
    ----------
    audio : np.ndarray
        Raw audio samples.
    rate : int
        Sample rate in Hz.

    Returns
    -------
    np.ndarray, shape (frames, 40).
    """
    # 25 ms windows, 10 ms step, 20 cepstral coefficients, 1200-point FFT.
    mfcc_feature = mfcc.mfcc(audio, rate, 0.025, 0.01, 20, nfft=1200,
                             appendEnergy=True)
    # Standardize each coefficient (zero mean, unit variance).
    mfcc_feature = preprocessing.scale(mfcc_feature)
    # (Removed a leftover debug print of the full MFCC matrix.)
    delta = calculate_delta(mfcc_feature)
    combined = np.hstack((mfcc_feature, delta))
    return combined
def record_audio_train():
    """Record five 10-second training clips for one speaker.

    Prompts for the speaker's name, records five stereo clips at 44.1 kHz,
    writes them to ``training_set/<name>-sample<k>.wav`` and appends each
    filename to ``training_set_addition.txt``.
    """
    Name = (input("Please Enter Your Name:"))
    freq = 44100          # sample rate (Hz)
    duration = 10         # seconds per training clip
    for count in range(5):
        # Record audio for the given number of seconds (stereo).
        recording = sd.rec(int(duration * freq), samplerate=freq, channels=2)
        print ("recording started")
        sd.wait()  # block until the clip has finished recording
        print ("recording stopped")
        OUTPUT_FILENAME = Name + "-sample" + str(count) + ".wav"
        WAVE_OUTPUT_FILENAME = os.path.join("training_set", OUTPUT_FILENAME)
        # Context manager fixes the file-handle leak in the original code.
        with open("training_set_addition.txt", 'a') as trainedfilelist:
            trainedfilelist.write(OUTPUT_FILENAME + "\n")
        write(WAVE_OUTPUT_FILENAME, freq, recording)
def record_audio_test():
    """Record one 5-second test clip into ``testing_set/sample.wav``.

    The filename is also appended to ``testing_set_addition.txt`` so that
    ``test_model`` can find it.
    """
    freq = 44100   # sample rate (Hz)
    duration = 5   # seconds
    # Record audio for the given number of seconds (stereo).
    recording = sd.rec(int(duration * freq), samplerate=freq, channels=2)
    print ("recording started")
    sd.wait()  # block until recording completes
    print ("recording stopped")
    OUTPUT_FILENAME = "sample.wav"
    WAVE_OUTPUT_FILENAME = os.path.join("testing_set", OUTPUT_FILENAME)
    # Context manager fixes the file-handle leak in the original code.
    with open("testing_set_addition.txt", 'a') as trainedfilelist:
        trainedfilelist.write(OUTPUT_FILENAME + "\n")
    write(WAVE_OUTPUT_FILENAME, freq, recording)
def train_model(source="/home/sky_walker/Music/spkr2/training_set/",
                dest="/home/sky_walker/Music/spkr2/trained_models/",
                train_file="/home/sky_walker/Music/spkr2/training_set_addition.txt"):
    """Train one GMM per speaker from groups of five training recordings.

    Parameters (new, backward-compatible defaults match the old hard-coded
    paths):
    source: directory containing the training WAV files.
    dest: directory where the per-speaker ``<name>.gmm`` pickles are written.
    train_file: text file listing the training WAV filenames, five per speaker.
    """
    count = 1
    features = np.asarray(())
    # with-block fixes the unclosed list-file handle in the original.
    with open(train_file, 'r') as file_paths:
        for path in file_paths:
            path = path.strip()
            print(path)
            sr, audio = read(source + path)
            print(sr)
            vector = extract_features(audio, sr)
            # Accumulate feature rows across this speaker's samples.
            if features.size == 0:
                features = vector
            else:
                features = np.vstack((features, vector))
            # Every 5th file completes one speaker: fit and persist a model.
            if count == 5:
                gmm = GaussianMixture(n_components=6, max_iter=200,
                                      covariance_type='diag', n_init=3)
                gmm.fit(features)
                # Speaker name is the filename prefix before the first "-".
                picklefile = path.split("-")[0] + ".gmm"
                # with-block fixes the unclosed pickle handle in the original.
                with open(dest + picklefile, 'wb') as model_file:
                    pickle.dump(gmm, model_file)
                print('+ modeling completed for speaker:', picklefile,
                      " with data point = ", features.shape)
                features = np.asarray(())
                count = 0
            count = count + 1
def test_model(source="/home/sky_walker/Music/spkr2/testing_set/",
               modelpath="/home/sky_walker/Music/spkr2/trained_models/",
               test_file="/home/sky_walker/Music/spkr2/testing_set_addition.txt"):
    """Identify the speaker of each recording listed in ``test_file``.

    Loads every ``*.gmm`` model from ``modelpath``, scores each test WAV
    against all models, and prints the best-scoring speaker per file.
    Parameters are new with backward-compatible defaults matching the old
    hard-coded paths.
    """
    gmm_files = [os.path.join(modelpath, fname) for fname in
                 os.listdir(modelpath) if fname.endswith('.gmm')]
    # Load the trained speaker models; with-block fixes the unclosed
    # handles leaked by the original list comprehension.
    models = []
    for fname in gmm_files:
        with open(fname, 'rb') as model_file:
            models.append(pickle.load(model_file))
    # Speaker name is the model filename without path or ".gmm" suffix.
    speakers = [fname.split("/")[-1].split(".gmm")[0] for fname
                in gmm_files]
    with open(test_file, 'r') as file_paths:
        for path in file_paths:
            path = path.strip()
            print(path)
            sr, audio = read(source + path)
            vector = extract_features(audio, sr)
            # Score the recording against every speaker model.
            log_likelihood = np.zeros(len(models))
            for i in range(len(models)):
                scores = np.array(models[i].score(vector))
                log_likelihood[i] = scores.sum()
            winner = np.argmax(log_likelihood)
            print("\tdetected as - ", speakers[winner])
            time.sleep(1.0)
# Interactive menu loop: keeps prompting until the user enters a number > 4.
while True:
    # NOTE(review): int() raises ValueError on non-numeric input, and values
    # below 1 silently re-prompt -- confirm whether that is acceptable.
    choice=int(input("\n 1.Record audio for training \n 2.Train Model \n 3.Record audio for testing \n 4.Test Model\n"))
    if(choice==1):
        record_audio_train()
    elif(choice==2):
        train_model()
    elif(choice==3):
        record_audio_test()
    elif(choice==4):
        test_model()
    # Any number above 4 exits the program.
    if(choice>4):
        exit()
| 29.394595 | 118 | 0.624127 | import os
import wave
import time
import pickle
import warnings
import numpy as np
import sounddevice as sd
from scipy.io.wavfile import write
from sklearn import preprocessing
from scipy.io.wavfile import read
import python_speech_features as mfcc
from sklearn.mixture import GaussianMixture
warnings.filterwarnings("ignore")
def calculate_delta(array):
    """Compute delta features: delta[i] = ((a[i+1]-a[i-1]) + 2*(a[i+2]-a[i-2])) / 10.

    Neighbour indices are clamped to the matrix edges.
    NOTE(review): the prints are debug leftovers, and the output width is
    hard-coded to 20 columns (matches the 20 MFCC coefficients used here).
    """
    rows,cols = array.shape
    print(rows)
    print(cols)
    deltas = np.zeros((rows,20))
    N = 2
    for i in range(rows):
        # Build clamped (i+j, i-j) index pairs for j = 1..N.
        index = []
        j = 1
        while j <= N:
            if i-j < 0:
                first =0
            else:
                first = i-j
            if i+j > rows-1:
                second = rows-1
            else:
                second = i+j
            index.append((second,first))
            j+=1
        deltas[i] = ( array[index[0][0]]-array[index[0][1]] + (2 * (array[index[1][0]]-array[index[1][1]])) ) / 10
    return deltas
def extract_features(audio,rate):
    """Return scaled 20-dim MFCCs stacked with their deltas (40 columns)."""
    # 25 ms windows, 10 ms step, 20 coefficients, 1200-point FFT.
    mfcc_feature = mfcc.mfcc(audio,rate, 0.025, 0.01,20,nfft = 1200, appendEnergy = True)
    mfcc_feature = preprocessing.scale(mfcc_feature)
    # NOTE(review): debug print left in; consider removing for quiet runs.
    print(mfcc_feature)
    delta = calculate_delta(mfcc_feature)
    combined = np.hstack((mfcc_feature,delta))
    return combined
def record_audio_train():
    """Record five 10-second training clips for one speaker into training_set/."""
    Name =(input("Please Enter Your Name:"))
    for count in range(5):
        freq = 44100      # sample rate (Hz)
        duration = 10     # seconds per clip
        recording = sd.rec(int(duration * freq), samplerate=freq, channels=2)
        print ("recording started")
        sd.wait()  # block until the clip finishes
        print ("recording stopped")
        OUTPUT_FILENAME=Name+"-sample"+str(count)+".wav"
        WAVE_OUTPUT_FILENAME=os.path.join("training_set",OUTPUT_FILENAME)
        # NOTE(review): this handle is never closed -- should use a with-block.
        trainedfilelist = open("training_set_addition.txt", 'a')
        trainedfilelist.write(OUTPUT_FILENAME+"\n")
        write(WAVE_OUTPUT_FILENAME, freq, recording)
def record_audio_test():
    """Record one 5-second test clip into testing_set/sample.wav."""
    freq = 44100   # sample rate (Hz)
    duration = 5   # seconds
    recording = sd.rec(int(duration * freq), samplerate=freq, channels=2)
    print ("recording started")
    sd.wait()  # block until recording completes
    print ("recording stopped")
    OUTPUT_FILENAME="sample.wav"
    WAVE_OUTPUT_FILENAME=os.path.join("testing_set",OUTPUT_FILENAME)
    # NOTE(review): this handle is never closed -- should use a with-block.
    trainedfilelist = open("testing_set_addition.txt", 'a')
    trainedfilelist.write(OUTPUT_FILENAME+"\n")
    write(WAVE_OUTPUT_FILENAME, freq, recording)
def train_model():
    """Fit one diagonal-covariance GMM per speaker (5 files each) and pickle it.

    NOTE(review): paths are hard-coded to one machine, and neither the list
    file nor the pickle output handle is ever closed.
    """
    source = "/home/sky_walker/Music/spkr2/training_set/"
    dest = "/home/sky_walker/Music/spkr2/trained_models/"
    train_file = "/home/sky_walker/Music/spkr2/training_set_addition.txt"
    file_paths = open(train_file,'r')
    count = 1
    features = np.asarray(())
    for path in file_paths:
        path = path.strip()
        print(path)
        sr,audio = read(source + path)
        print(sr)
        vector = extract_features(audio,sr)
        # Accumulate the feature rows for the current speaker's samples.
        if features.size == 0:
            features = vector
        else:
            features = np.vstack((features, vector))
        # Every 5th file completes one speaker: fit and persist a model.
        if count == 5:
            gmm = GaussianMixture(n_components = 6, max_iter = 200, covariance_type='diag',n_init = 3)
            gmm.fit(features)
            # Speaker name is the filename prefix before the first "-".
            picklefile = path.split("-")[0]+".gmm"
            pickle.dump(gmm,open(dest + picklefile,'wb'))
            print('+ modeling completed for speaker:',picklefile," with data point = ",features.shape)
            features = np.asarray(())
            count = 0
        count = count + 1
def test_model():
    """Score each listed test recording against all trained GMMs and print the winner.

    NOTE(review): paths are hard-coded to one machine, and the list-file and
    model handles are never closed.
    """
    source = "/home/sky_walker/Music/spkr2/testing_set/"
    modelpath = "/home/sky_walker/Music/spkr2/trained_models/"
    test_file = "/home/sky_walker/Music/spkr2/testing_set_addition.txt"
    file_paths = open(test_file,'r')
    gmm_files = [os.path.join(modelpath,fname) for fname in
                 os.listdir(modelpath) if fname.endswith('.gmm')]
    # Load every pickled speaker model found in modelpath.
    models = [pickle.load(open(fname,'rb')) for fname in gmm_files]
    # Speaker name = model filename without path or ".gmm" suffix.
    speakers = [fname.split("/")[-1].split(".gmm")[0] for fname
                in gmm_files]
    for path in file_paths:
        path = path.strip()
        print(path)
        sr,audio = read(source + path)
        vector = extract_features(audio,sr)
        # Sum of per-frame log-likelihoods under each speaker model.
        log_likelihood = np.zeros(len(models))
        for i in range(len(models)):
            gmm = models[i]
            scores = np.array(gmm.score(vector))
            log_likelihood[i] = scores.sum()
        winner = np.argmax(log_likelihood)
        print("\tdetected as - ", speakers[winner])
        time.sleep(1.0)
# Interactive menu loop: repeats until the user enters a number above 4.
while True:
    # NOTE(review): non-numeric input raises ValueError; values < 1 re-prompt.
    choice=int(input("\n 1.Record audio for training \n 2.Train Model \n 3.Record audio for testing \n 4.Test Model\n"))
    if(choice==1):
        record_audio_train()
    elif(choice==2):
        train_model()
    elif(choice==3):
        record_audio_test()
    elif(choice==4):
        test_model()
    # Any number above 4 exits the program.
    if(choice>4):
        exit()
| true | true |
1c37b0d7ea55b6b89fcebfe22cb902e7179ecae3 | 2,733 | py | Python | shoppingtrends/data.py | jhooey/shopping-cart-trends | e2ee65c2cd1f95942000175479a6666459dff854 | [
"BSD-3-Clause"
] | 1 | 2015-01-04T17:02:43.000Z | 2015-01-04T17:02:43.000Z | shoppingtrends/data.py | jhooey/shopping-cart-trends | e2ee65c2cd1f95942000175479a6666459dff854 | [
"BSD-3-Clause"
] | null | null | null | shoppingtrends/data.py | jhooey/shopping-cart-trends | e2ee65c2cd1f95942000175479a6666459dff854 | [
"BSD-3-Clause"
] | null | null | null | from localization import Province, Country, Store
from user import User
from receipt import Receipt, Item, Category
import datetime
def populate_all_tables(session):
    """Seed every table with demo data (currently only the province seeder)."""
    populate_provinces_tbl(session)
def populate_provinces_tbl(session):
    """Seed Canada's provinces plus demo users, stores, items and receipts.

    NOTE(review): despite its name this also creates test data for several
    other tables and commits the session at the end.
    The numeric third argument to Province appears to be a sales-tax
    percentage -- confirm against the Province model.
    """
    canada = Country("CAN", "Canada")
    ontario = Province('Ontario','ON', 13)
    quebec = Province('Quebec','QC', 14.975)
    canada.provinces = [Province('Alberta','AB', 5),
                        Province('British Columbia','BC', 12),
                        Province('Manitoba','MB', 13),
                        Province('New Brunswick','NB', 13),
                        Province('Newfoundland and Labrador','NL', 13),
                        Province('Northwest Territories','NT', 5),
                        Province('Nova Scotia','NS', 15),
                        Province('Nunavut','NU', 5),
                        ontario,
                        Province('Prince Edward Island','PE', 14),
                        quebec,
                        Province('Saskatchewan','SK', 10),
                        Province('Yukon','YT', 5)
                        ]
    session.add(canada)
    #Create test user
    jhooey = User("Jacob", "Hooey", "jhooey", "password")
    #Create test Stores
    loblaws = Store("Loblaws", "Rideau and Nelson", ontario)
    Maxi = Store("Maxi", "Hull St. Joseph", quebec)
    herbspice = Store("Herb and Spice Shop", "375 Bank Street", ontario)
    #Create test Receipts (today, yesterday, and four days ago)
    loblaws_receipt1 = Receipt(loblaws)
    loblaws_receipt2 = Receipt(loblaws, datetime.date.fromordinal(datetime.date.today().toordinal()-1))
    loblaws_receipt3 = Receipt(loblaws, datetime.date.fromordinal(datetime.date.today().toordinal()-4))
    #Create Test Items
    bananas = Item('Bananas', 'yellow fruit', False)
    napkins = Item('Napkins', 'paper napkins', True)
    #Add items to receipts (quantity, unit price)
    loblaws_receipt1.add_item(session, bananas, 2, 0.79)
    loblaws_receipt1.add_item(session, napkins, 1, 2.99)
    loblaws_receipt2.add_item(session, bananas, 1.54, 0.79)
    loblaws_receipt3.add_item(session, bananas, 10.2, 0.59)
    loblaws_receipt3.add_item(session, napkins, 3, 1.99)
    #Add Receipts to test user
    jhooey.add_receipt(loblaws_receipt1)
    jhooey.add_receipt(loblaws_receipt2)
    jhooey.add_receipt(loblaws_receipt3)
    session.add_all([
        loblaws,
        Maxi,
        herbspice,
        jhooey,
        bananas,
        napkins,
        Category('Food', 'Stuff you eat'),
        Category('Household Supplies', "Stuff you don't eat")
        ],
        )
    session.commit()
| 34.594937 | 103 | 0.564581 | from localization import Province, Country, Store
from user import User
from receipt import Receipt, Item, Category
import datetime
def populate_all_tables(session):
    """Seed all tables with demo data; currently delegates to the province seeder."""
    populate_provinces_tbl(session)
def populate_provinces_tbl(session):
    """Seed provinces plus demo users, stores, items and receipts, then commit.

    NOTE(review): the third Province argument looks like a sales-tax
    percentage -- confirm against the Province model.
    """
    canada = Country("CAN", "Canada")
    ontario = Province('Ontario','ON', 13)
    quebec = Province('Quebec','QC', 14.975)
    canada.provinces = [Province('Alberta','AB', 5),
                        Province('British Columbia','BC', 12),
                        Province('Manitoba','MB', 13),
                        Province('New Brunswick','NB', 13),
                        Province('Newfoundland and Labrador','NL', 13),
                        Province('Northwest Territories','NT', 5),
                        Province('Nova Scotia','NS', 15),
                        Province('Nunavut','NU', 5),
                        ontario,
                        Province('Prince Edward Island','PE', 14),
                        quebec,
                        Province('Saskatchewan','SK', 10),
                        Province('Yukon','YT', 5)
                        ]
    session.add(canada)
    # Demo user.
    jhooey = User("Jacob", "Hooey", "jhooey", "password")
    # Demo stores.
    loblaws = Store("Loblaws", "Rideau and Nelson", ontario)
    Maxi = Store("Maxi", "Hull St. Joseph", quebec)
    herbspice = Store("Herb and Spice Shop", "375 Bank Street", ontario)
    # Demo receipts dated today, yesterday, and four days ago.
    loblaws_receipt1 = Receipt(loblaws)
    loblaws_receipt2 = Receipt(loblaws, datetime.date.fromordinal(datetime.date.today().toordinal()-1))
    loblaws_receipt3 = Receipt(loblaws, datetime.date.fromordinal(datetime.date.today().toordinal()-4))
    # Demo items.
    bananas = Item('Bananas', 'yellow fruit', False)
    napkins = Item('Napkins', 'paper napkins', True)
    # Line items: (session, item, quantity, unit price).
    loblaws_receipt1.add_item(session, bananas, 2, 0.79)
    loblaws_receipt1.add_item(session, napkins, 1, 2.99)
    loblaws_receipt2.add_item(session, bananas, 1.54, 0.79)
    loblaws_receipt3.add_item(session, bananas, 10.2, 0.59)
    loblaws_receipt3.add_item(session, napkins, 3, 1.99)
    # Attach receipts to the demo user.
    jhooey.add_receipt(loblaws_receipt1)
    jhooey.add_receipt(loblaws_receipt2)
    jhooey.add_receipt(loblaws_receipt3)
    session.add_all([
        loblaws,
        Maxi,
        herbspice,
        jhooey,
        bananas,
        napkins,
        Category('Food', 'Stuff you eat'),
        Category('Household Supplies', "Stuff you don't eat")
        ],
        )
    session.commit()
| true | true |
1c37b17225300688a6ee6cd36fb9cade85c157be | 2,612 | py | Python | server/tmparser.py | averyhiebert/groundstation | 6df5dbbe83c0621f1adfef1f04bbcf098bb30c79 | [
"MIT"
] | 3 | 2018-07-01T19:21:22.000Z | 2020-09-28T05:52:47.000Z | server/tmparser.py | averyhiebert/groundstation | 6df5dbbe83c0621f1adfef1f04bbcf098bb30c79 | [
"MIT"
] | null | null | null | server/tmparser.py | averyhiebert/groundstation | 6df5dbbe83c0621f1adfef1f04bbcf098bb30c79 | [
"MIT"
] | 1 | 2018-06-16T04:47:40.000Z | 2018-06-16T04:47:40.000Z | import re
import json
import time
def decode_ll(s, is_lat=True):
    """Decode a 4-character APRS compressed lat/lon field.

    ``s`` holds a base-91 number, most significant character first, with
    each character offset by ASCII 33 (see APRS101, p. 38).  Returns the
    position in decimal degrees, or None for the "NN!!" no-fix sentinel.
    """
    if s == "NN!!":  # APRS sentinel meaning "position unknown"
        return None
    # Horner evaluation of the big-endian base-91 value.
    value = 0
    for ch in s:
        value = value * 91 + (ord(ch) - 33)
    if is_lat:
        return 90 - value * 1.0 / 380926
    return -180 + value * 1.0 / 190463
def decode_alt(alt):
    """Decode a 2-character APRS compressed altitude into feet.

    The two characters encode ``cs = c*91 + s`` in base 91 (ASCII offset
    33); the altitude is ``1.002 ** cs`` feet (APRS101, p. 40).
    """
    high = ord(alt[0]) - 33
    low = ord(alt[1]) - 33
    exponent = high * 91 + low
    return 1.002 ** exponent
# Parse one line of APRS data from the TeleMetrum flight computer.
def parseTM(line):
    """Parse a TeleMetrum APRS position line into a JSON string.

    The line uses APRS "Compressed Position Data" with compressed altitude
    (see http://www.aprs.org/doc/APRS101.PDF).  Raises RuntimeError if the
    line does not match the expected format.  The returned JSON carries
    lat/lon/alt plus timing info and error flags (lat/lon are null with
    error=True when there is no GPS lock).
    """
    pattern = ":!/(?P<lat>....)(?P<lon>....)'(?P<alt>..)Q(?P<info>.*)"
    match = re.search(pattern, line)
    data = {
        "raw": line,
        "timestring": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
        "timestamp": time.time() * 1000,  # milliseconds, floating point
    }
    if not match:
        raise RuntimeError("Error parsing TeleMetrum data.")
    data["info"] = match.group("info")  # continuity, GPS lock, etc.
    data["error"] = False
    lat = decode_ll(match.group("lat"), is_lat=True)
    lon = decode_ll(match.group("lon"), is_lat=False)
    if lat is None or lon is None:
        data["error"] = True
        # A leading "U" in the info field indicates no GPS lock.
        if data["info"][0] == "U":
            data["errorMessage"] = "No GPS lock."
        else:
            data["errorMessage"] = "Unknown parsing error."
    data["latitude"] = lat
    data["longitude"] = lon
    data["altitude"] = decode_alt(match.group("alt"))
    return json.dumps(data)
| 36.788732 | 77 | 0.613323 | import re
import json
import time
def decode_ll(s, is_lat=True):
    """Decode a 4-character APRS compressed lat/lon field into degrees.

    ``"NN!!"`` is the sentinel for "no position" and decodes to ``None``.
    The characters form a base-91 number (most significant character
    first, each offset by 33); see page 38 of
    http://www.aprs.org/doc/APRS101.PDF.
    """
    if s == "NN!!":
        return None
    value = 0
    for ch in s:
        value = value * 91 + (ord(ch) - 33)
    if is_lat:
        return 90 - value / 380926.0
    return -180 + value / 190463.0
def decode_alt(alt):
    """Decode a 2-char APRS compressed-altitude field into feet.

    The two characters encode a base-91 integer n (each char offset by
    33); the altitude is 1.002 ** n.  See APRS101, page 40.
    """
    return 1.002**((ord(alt[0])-33)*91 + ord(alt[1]) - 33)
def parseTM(line):
    """Parse one APRS "Compressed Position Data" line from a TeleMetrum.

    Returns a JSON string containing raw/timestring/timestamp plus, when
    the regex matches, info/error/latitude/longitude/altitude.

    Raises:
        RuntimeError: if the line does not match the expected format.
    """
    # Only verified against the configurations we have tested, not every
    # possible APRS signal.
    data = {}
    data["raw"] = line
    data["timestring"] = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime())
    data["timestamp"] = time.time()*1000  # milliseconds, floating point
    regex = ":!/(?P<lat>....)(?P<lon>....)'(?P<alt>..)Q(?P<info>.*)"
    m = re.search(regex,line)
    if(m):
        data["info"] = m.group("info")  # Includes continuity, GPS lock, etc.
        data["error"] = False
        lat = decode_ll(m.group("lat"),is_lat=True)
        lon = decode_ll(m.group("lon"),is_lat=False)
        alt = decode_alt(m.group("alt"))
        # NOTE(review): `== None` should be `is None`.  Also
        # data["info"][0] raises IndexError when the info group is
        # empty -- confirm upstream never emits that.
        if lat == None or lon == None:
            data["error"] = True
            if data["info"][0] == "U":
                data["errorMessage"] = "No GPS lock."
            else:
                data["errorMessage"] = "Unknown parsing error."
        data["latitude"] = lat
        data["longitude"] = lon
        data["altitude"] = alt
        return json.dumps(data)
    else:
        raise RuntimeError("Error parsing TeleMetrum data.")
| true | true |
1c37b1be2c3811d36040ad7c8fa8645101838750 | 1,213 | py | Python | openstack_dashboard/test/integration_tests/tests/test_login.py | hemantsonawane95/horizon-apelby | 01a5e72219aeca8c1451701ee85e232ed0618751 | [
"Apache-2.0"
] | 930 | 2015-01-04T08:06:03.000Z | 2022-03-13T18:47:13.000Z | openstack_dashboard/test/integration_tests/tests/test_login.py | hemantsonawane95/horizon-apelby | 01a5e72219aeca8c1451701ee85e232ed0618751 | [
"Apache-2.0"
] | 106 | 2019-01-18T03:06:55.000Z | 2019-11-29T05:06:18.000Z | openstack_dashboard/test/integration_tests/tests/test_login.py | hemantsonawane95/horizon-apelby | 01a5e72219aeca8c1451701ee85e232ed0618751 | [
"Apache-2.0"
] | 1,040 | 2015-01-01T18:48:28.000Z | 2022-03-19T08:35:18.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.pages import loginpage
class TestLogin(helpers.BaseTestCase):
    """Basic login smoke test.

    Opens the login page, signs in as a regular user, verifies that the
    resulting home page reports a logged-in session, then logs out.
    """

    def test_login(self):
        login_page = loginpage.LoginPage(self.driver, self.CONFIG)
        login_page.go_to_login_page()
        home_page = login_page.login()
        if not home_page.is_logged_in:
            self.fail("Could not determine if logged in")
        home_page.log_out()
| 39.129032 | 78 | 0.716406 |
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.pages import loginpage
class TestLogin(helpers.BaseTestCase):
    """Basic login scenario: open the login page, sign in as a regular
    user, verify the home page reports a session, then log out."""

    def test_login(self):
        # Drive the login page object; fail loudly if the home page
        # cannot confirm the session before logging out again.
        login_pg = loginpage.LoginPage(self.driver, self.CONFIG)
        login_pg.go_to_login_page()
        home_pg = login_pg.login()
        if not home_pg.is_logged_in:
            self.fail("Could not determine if logged in")
        home_pg.log_out()
| true | true |
1c37b27ecc836f268bc8e919c2f06e85513de2ea | 3,804 | py | Python | carculator_truck/geomap.py | romainsacchi/carculator_truck | 2c709ac6a956570a56ad2778619aef457e8d42a2 | [
"BSD-3-Clause"
] | 7 | 2021-03-19T12:28:18.000Z | 2022-02-22T11:13:08.000Z | carculator_truck/geomap.py | romainsacchi/carculator_truck | 2c709ac6a956570a56ad2778619aef457e8d42a2 | [
"BSD-3-Clause"
] | 1 | 2021-05-21T09:14:53.000Z | 2021-05-27T09:23:29.000Z | carculator_truck/geomap.py | romainsacchi/carculator_truck | 2c709ac6a956570a56ad2778619aef457e8d42a2 | [
"BSD-3-Clause"
] | 1 | 2022-02-22T11:13:00.000Z | 2022-02-22T11:13:00.000Z | from wurst.geo import geomatcher
from . import DATA_DIR
REGION_MAPPING_FILEPATH = DATA_DIR / "regionmappingH12.csv"
class Geomap:
    """
    Map ecoinvent locations to IAM regions and vice-versa.
    """

    def __init__(self):
        # Build (and cache) the shared geomatcher with IMAGE/REMIND labels.
        self.geo = self.get_IAM_geomatcher()

    @staticmethod
    def get_IAM_geomatcher():
        """
        Return wurst's geomatcher with properly labelled IAM regions.

        Geographical boundaries for IMAGE regions are initially included in
        geomatcher, but keyed by their long names; they are re-keyed here to
        the short IMAGE region codes.  REMIND regions are added from the
        semicolon-separated region-mapping CSV.

        Note: this mutates the module-level ``geomatcher`` object in place
        (``geo`` below is just an alias for it).
        """

        # Short IMAGE region code -> long name as originally stored in
        # geomatcher.
        d_image_regions = {
            "BRA": "Brazil",
            "CAN": "Canada",
            "CEU": "Central Europe",
            "CHN": "China Region",
            "EAF": "Eastern Africa",
            "INDIA": "India",
            "INDO": "Indonesia Region",
            "JAP": "Japan",
            "KOR": "Korea Region",
            "ME": "Middle east",
            "MEX": "Mexico",
            "NAF": "Northern Africa",
            "OCE": "Oceania",
            "RCAM": "Central America",
            "RSAF": "Rest of Southern Africa",
            "RSAM": "Rest of South America",
            "RSAS": "Rest of South Asia",
            "RUS": "Russia Region",
            "SAF": "South Africa",
            "SEAS": "South Asia",
            "STAN": "Central Asia",
            "TUR": "Turkey",
            "UKR": "Ukraine region",
            "USA": "USA",
            "WAF": "Western Africa",
            "WEU": "Western Europe",
        }

        # ("IMAGE", long name) -> ("IMAGE", short code)
        d_map = {("IMAGE", v): ("IMAGE", k) for k, v in d_image_regions.items()}

        new_def = dict()

        # Collect existing IMAGE definitions under their short-code keys.
        for k, v in geomatcher.items():
            if isinstance(k, tuple):
                if k[0] == "IMAGE" and k[1] in list(d_image_regions.values()):
                    new_def[d_map[k]] = v

        geo = geomatcher

        # Drop the long-name keys, then insert the short-code ones.
        for k in list(geomatcher.keys()):
            if k[0] == "IMAGE" and k[1] in list(d_image_regions.values()):
                geomatcher.pop(k)
        geo.update(new_def)

        # CSV layout (semicolon-separated): column 1 = ISO country code,
        # column 2 = REMIND region; the first line is a header.
        with open(REGION_MAPPING_FILEPATH) as f:
            f.readline()
            csv_list = [[val.strip() for val in r.split(";")] for r in f.readlines()]
            split_row = [(x[1], x[2]) for x in csv_list]

        # ISO codes present in the CSV but unknown to the geomatcher.
        countries_not_found = ["CC", "CX", "GG", "JE", "BL"]

        rmnd_to_iso = {}
        iso_to_rmnd = {}

        # Build a dictionary that maps region names (used by REMIND) to ISO
        # country codes, and a reverse mapping.
        # NOTE(review): iso_to_rmnd is keyed by region here (overwriting one
        # ISO per region) and never used afterwards -- looks inverted;
        # verify intent.
        for ISO, region in split_row:
            if ISO not in countries_not_found:
                try:
                    rmnd_to_iso[region].append(ISO)
                except KeyError:
                    rmnd_to_iso[region] = [ISO]

                iso_to_rmnd[region] = ISO

        geo.add_definitions(rmnd_to_iso, "REMIND")

        return geo

    def iam_to_ecoinvent_location(self, location, contained=False):
        """
        Find the corresponding ecoinvent region(s) given an IAM region.

        :param location: name of an IAM region (REMIND or IMAGE)
        :type location: str
        :param contained: if True, only return regions fully contained in
            the IAM region; otherwise return all intersecting regions
        :type contained: bool
        :return: list of ecoinvent region names (may be empty)
        :rtype: list
        """

        if location == "World":
            return ["GLO"]

        ecoinvent_locations = []
        searchfunc = self.geo.contained if contained else self.geo.intersects

        for iam in ("REMIND", "IMAGE"):
            loc = (iam, location)

            try:
                # NOTE(review): searchfunc is invoked twice; the first call's
                # result is discarded -- confirm whether it is needed.
                searchfunc(loc)
                for r in searchfunc(loc):
                    # Tuples are other IAM-region keys, not ecoinvent names.
                    if not isinstance(r, tuple):
                        ecoinvent_locations.append(r)
            except KeyError:
                # The location is not defined for this IAM; try the next one.
                pass

        if len(ecoinvent_locations) == 0:
            print("Can't find location {} using the geomatcher.".format(location))

        return ecoinvent_locations
| 29.261538 | 89 | 0.52103 | from wurst.geo import geomatcher
from . import DATA_DIR
REGION_MAPPING_FILEPATH = DATA_DIR / "regionmappingH12.csv"
class Geomap:
def __init__(self):
self.geo = self.get_IAM_geomatcher()
@staticmethod
def get_IAM_geomatcher():
d_image_regions = {
"BRA": "Brazil",
"CAN": "Canada",
"CEU": "Central Europe",
"CHN": "China Region",
"EAF": "Eastern Africa",
"INDIA": "India",
"INDO": "Indonesia Region",
"JAP": "Japan",
"KOR": "Korea Region",
"ME": "Middle east",
"MEX": "Mexico",
"NAF": "Northern Africa",
"OCE": "Oceania",
"RCAM": "Central America",
"RSAF": "Rest of Southern Africa",
"RSAM": "Rest of South America",
"RSAS": "Rest of South Asia",
"RUS": "Russia Region",
"SAF": "South Africa",
"SEAS": "South Asia",
"STAN": "Central Asia",
"TUR": "Turkey",
"UKR": "Ukraine region",
"USA": "USA",
"WAF": "Western Africa",
"WEU": "Western Europe",
}
d_map = {("IMAGE", v): ("IMAGE", k) for k, v in d_image_regions.items()}
new_def = dict()
for k, v in geomatcher.items():
if isinstance(k, tuple):
if k[0] == "IMAGE" and k[1] in list(d_image_regions.values()):
new_def[d_map[k]] = v
geo = geomatcher
for k in list(geomatcher.keys()):
if k[0] == "IMAGE" and k[1] in list(d_image_regions.values()):
geomatcher.pop(k)
geo.update(new_def)
with open(REGION_MAPPING_FILEPATH) as f:
f.readline()
csv_list = [[val.strip() for val in r.split(";")] for r in f.readlines()]
split_row = [(x[1], x[2]) for x in csv_list]
countries_not_found = ["CC", "CX", "GG", "JE", "BL"]
rmnd_to_iso = {}
iso_to_rmnd = {}
for ISO, region in split_row:
if ISO not in countries_not_found:
try:
rmnd_to_iso[region].append(ISO)
except KeyError:
rmnd_to_iso[region] = [ISO]
iso_to_rmnd[region] = ISO
geo.add_definitions(rmnd_to_iso, "REMIND")
return geo
def iam_to_ecoinvent_location(self, location, contained=False):
if location == "World":
return ["GLO"]
ecoinvent_locations = []
searchfunc = self.geo.contained if contained else self.geo.intersects
for iam in ("REMIND", "IMAGE"):
loc = (iam, location)
try:
searchfunc(loc)
for r in searchfunc(loc):
if not isinstance(r, tuple):
ecoinvent_locations.append(r)
except KeyError:
pass
if len(ecoinvent_locations) == 0:
print("Can't find location {} using the geomatcher.".format(location))
return ecoinvent_locations
| true | true |
1c37b83833807df30f1f7fed748b6fe7b6d22bf5 | 1,321 | py | Python | plugins/explain.py | random-access7/corobo | 5e517e3ca677e1465a1003307dfe0a755fc48cfb | [
"MIT"
] | null | null | null | plugins/explain.py | random-access7/corobo | 5e517e3ca677e1465a1003307dfe0a755fc48cfb | [
"MIT"
] | null | null | null | plugins/explain.py | random-access7/corobo | 5e517e3ca677e1465a1003307dfe0a755fc48cfb | [
"MIT"
] | null | null | null | import re
import glob
import os.path
from errbot import BotPlugin, re_botcmd
from errbot.templating import tenv
class Explain(BotPlugin):
    """
    Explain various terms.

    errbot plugin: the ``explain`` command renders the Jinja2 template
    matching the requested term, optionally addressed to another user.
    """

    # Template discovery happens at import time with a CWD-relative glob.
    # NOTE(review): if the bot starts from another working directory the
    # glob finds nothing -- verify the CWD assumption.
    files = glob.glob('plugins/templates/explain/*.jinja2.md')
    KNOWN_KEYS = []
    for fname in files:
        KNOWN_KEYS.append(fname.replace(
            'plugins/templates/explain/', ''
        ).replace('.jinja2.md', ''))
    # Reply sent when the requested term has no matching template.
    ERROR_MSG = (
        'Sorry, I only know about these things:\n- ' +
        '\n- '.join(KNOWN_KEYS)
    )

    @re_botcmd(pattern=r'^explain\s+(\w+)(?:\s+to\s+@?([\w-]+))?$',
               re_cmd_name_help='explain <term>',
               flags=re.IGNORECASE)
    def explain(self, msg, match):
        """Explain various terms. Group 1 is the term; optional group 2
        is a user to address the reply to."""  # Ignore QuotesBear
        user = msg.frm.nick
        response = ''
        filename = 'explain/{}.jinja2.md'.format(match.group(1).lower())
        if match.group(1).lower() in self.KNOWN_KEYS:
            if match.group(2):
                # Mention the target user at the top of the reply.
                response += '@{}: \n'.format(match.group(2))
            response += tenv().get_template(filename).render(
                username=user,
                target=match.group(2),
                bot_prefix=self.bot_config.BOT_PREFIX,
            )
        else:
            response = self.ERROR_MSG
        return response
| 28.717391 | 72 | 0.557154 | import re
import glob
import os.path
from errbot import BotPlugin, re_botcmd
from errbot.templating import tenv
class Explain(BotPlugin):
files = glob.glob('plugins/templates/explain/*.jinja2.md')
KNOWN_KEYS = []
for fname in files:
KNOWN_KEYS.append(fname.replace(
'plugins/templates/explain/', ''
).replace('.jinja2.md', ''))
ERROR_MSG = (
'Sorry, I only know about these things:\n- ' +
'\n- '.join(KNOWN_KEYS)
)
@re_botcmd(pattern=r'^explain\s+(\w+)(?:\s+to\s+@?([\w-]+))?$',
re_cmd_name_help='explain <term>',
flags=re.IGNORECASE)
def explain(self, msg, match):
user = msg.frm.nick
response = ''
filename = 'explain/{}.jinja2.md'.format(match.group(1).lower())
if match.group(1).lower() in self.KNOWN_KEYS:
if match.group(2):
response += '@{}: \n'.format(match.group(2))
response += tenv().get_template(filename).render(
username=user,
target=match.group(2),
bot_prefix=self.bot_config.BOT_PREFIX,
)
else:
response = self.ERROR_MSG
return response
| true | true |
1c37b83b83f4feed583f6c83a2af67c37998c475 | 1,350 | py | Python | Cloud9.dev2/src/third_party/gyp/test/defines/gyptest-defines-env-regyp.py | twang15/BarrierFinder | c20ff99ffeeeabc1508682bc99ffb4c7659e7e9f | [
"MIT"
] | 3 | 2019-02-12T04:14:39.000Z | 2020-11-05T08:46:20.000Z | tools/gyp/test/defines/gyptest-defines-env-regyp.py | kans/birgo | d9aca7356933c4bb95f5649353acbc95e3083a57 | [
"Apache-2.0"
] | null | null | null | tools/gyp/test/defines/gyptest-defines-env-regyp.py | kans/birgo | d9aca7356933c4bb95f5649353acbc95e3083a57 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable with C++ define specified by a gyp define, and
the use of the environment during regeneration when the gyp file changes.
"""
import os

import TestGyp

# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])

try:
  # The define is supplied through the environment so that the regeneration
  # step below can prove it is re-read from GYP_DEFINES.
  os.environ['GYP_DEFINES'] = 'value=50'
  test.run_gyp('defines.gyp')
finally:
  # We clear the environ after calling gyp. When the auto-regeneration happens,
  # the same define should be reused anyway. Reset to empty string first in
  # case the platform doesn't support unsetenv.
  os.environ['GYP_DEFINES'] = ''
  del os.environ['GYP_DEFINES']

test.build('defines.gyp')

# Expected output of the first build: values baked in at gyp time.
expect = """\
FOO is defined
VALUE is 1
2*PAREN_VALUE is 12
HASH_VALUE is a#1
"""
test.run_built_executable('defines', stdout=expect)

# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
test.write('defines.gyp', test.read('defines-env.gyp'))
test.build('defines.gyp', test.ALL)

# After regeneration, the value from GYP_DEFINES (50) must win.
expect = """\
VALUE is 50
"""
test.run_built_executable('defines', stdout=expect)

test.pass_test()
| 25.961538 | 80 | 0.737778 |
import os
import TestGyp
test = TestGyp.TestGyp(formats=['make'])
try:
os.environ['GYP_DEFINES'] = 'value=50'
test.run_gyp('defines.gyp')
finally:
os.environ['GYP_DEFINES'] = ''
del os.environ['GYP_DEFINES']
test.build('defines.gyp')
expect = """\
FOO is defined
VALUE is 1
2*PAREN_VALUE is 12
HASH_VALUE is a#1
"""
test.run_built_executable('defines', stdout=expect)
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
test.write('defines.gyp', test.read('defines-env.gyp'))
test.build('defines.gyp', test.ALL)
expect = """\
VALUE is 50
"""
test.run_built_executable('defines', stdout=expect)
test.pass_test()
| true | true |
1c37b842c09e88da94ae6e385ae5c966222a46f9 | 513 | py | Python | src/test.py | kynmh69/cotoha_test | 6c01ca0477399a2a07bc36a694850ad3c3c40228 | [
"MIT"
] | 1 | 2020-03-14T14:02:57.000Z | 2020-03-14T14:02:57.000Z | src/test.py | kynmh69/cotoha_test | 6c01ca0477399a2a07bc36a694850ad3c3c40228 | [
"MIT"
] | null | null | null | src/test.py | kynmh69/cotoha_test | 6c01ca0477399a2a07bc36a694850ad3c3c40228 | [
"MIT"
] | null | null | null | import json
from tkinter import Tk
from gui.gui import Application
from src.cotoha_api.cotoha_api import CotohaApi, CotohaApiResponse
from src.logger.logger import logger_initialize, LoggerUtils
EQUAL_STR = "=" * 20
if __name__ == "__main__":
logger_initialize()
logger = LoggerUtils.get_instance()
logger.info(f"{EQUAL_STR} START {EQUAL_STR}")
app = Application()
app.create_window()
app.create_sentence_form()
app.master.mainloop()
logger.info(f"{EQUAL_STR} END {EQUAL_STR}")
| 27 | 66 | 0.738791 | import json
from tkinter import Tk
from gui.gui import Application
from src.cotoha_api.cotoha_api import CotohaApi, CotohaApiResponse
from src.logger.logger import logger_initialize, LoggerUtils
EQUAL_STR = "=" * 20
if __name__ == "__main__":
logger_initialize()
logger = LoggerUtils.get_instance()
logger.info(f"{EQUAL_STR} START {EQUAL_STR}")
app = Application()
app.create_window()
app.create_sentence_form()
app.master.mainloop()
logger.info(f"{EQUAL_STR} END {EQUAL_STR}")
| true | true |
1c37b8c017e046ed568dd6da235c0bc53fc0dd6a | 677 | py | Python | trace_operator.py | 6895mahfuzgit/Linear_Algebra_for_Machine_Learning | 3f266391491d9ab99e53a3547900c6b1bd657af1 | [
"Apache-2.0"
] | null | null | null | trace_operator.py | 6895mahfuzgit/Linear_Algebra_for_Machine_Learning | 3f266391491d9ab99e53a3547900c6b1bd657af1 | [
"Apache-2.0"
] | null | null | null | trace_operator.py | 6895mahfuzgit/Linear_Algebra_for_Machine_Learning | 3f266391491d9ab99e53a3547900c6b1bd657af1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 17 02:09:53 2021
@author: Mahfuz_Shazol
"""
import numpy as np
import torch as th
A=np.array([[25,2],
[5,4]])
A_trace=np.trace(A)
print(A_trace)
# Tr(A)=Tr(A.T)
result1=np.trace(A)
print(result1)
result2=np.trace(A.T)
print(result2)
print('Tr(A)=Tr(A.T) Ans:',result1==result2)
#Calculate Frobenius norm AF=(Tr(A A.T))**(1/2)
A_p=th.tensor([
[-1,2],
[3,-2],
[5,7],
])
calculated_frobenius_norm=(th.trace(th.matmul(th.as_tensor(A),th.as_tensor(A.T))))**(1/2)
print('calculated_frobenius_norm Ans:',calculated_frobenius_norm)
norm_result=np.linalg.norm(A)
print(norm_result)
| 13.54 | 89 | 0.635155 |
import numpy as np
import torch as th
A=np.array([[25,2],
[5,4]])
A_trace=np.trace(A)
print(A_trace)
result1=np.trace(A)
print(result1)
result2=np.trace(A.T)
print(result2)
print('Tr(A)=Tr(A.T) Ans:',result1==result2)
A_p=th.tensor([
[-1,2],
[3,-2],
[5,7],
])
calculated_frobenius_norm=(th.trace(th.matmul(th.as_tensor(A),th.as_tensor(A.T))))**(1/2)
print('calculated_frobenius_norm Ans:',calculated_frobenius_norm)
norm_result=np.linalg.norm(A)
print(norm_result)
| true | true |
1c37b94b8d6ed6dc771e8357788977085e797788 | 2,143 | py | Python | aliyun-python-sdk-mse/aliyunsdkmse/request/v20190531/DeleteNacosConfigRequest.py | jorsonzen/aliyun-openapi-python-sdk | 0afbfa8e5f9e19455695aa799f7dcc1cd853d827 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-mse/aliyunsdkmse/request/v20190531/DeleteNacosConfigRequest.py | jorsonzen/aliyun-openapi-python-sdk | 0afbfa8e5f9e19455695aa799f7dcc1cd853d827 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-mse/aliyunsdkmse/request/v20190531/DeleteNacosConfigRequest.py | jorsonzen/aliyun-openapi-python-sdk | 0afbfa8e5f9e19455695aa799f7dcc1cd853d827 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class DeleteNacosConfigRequest(RpcRequest):
	"""RPC request for the MSE ``DeleteNacosConfig`` API (version 2019-05-31).

	Deletes a Nacos configuration identified by instance, namespace,
	data id and group; ``Beta`` targets the beta-published copy.
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'mse', '2019-05-31', 'DeleteNacosConfig')
		self.set_method('POST')
		# Endpoint resolution data is attached only when the base class
		# exposes the corresponding attributes.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

	def get_InstanceId(self): # String
		return self.get_query_params().get('InstanceId')

	def set_InstanceId(self, InstanceId): # String
		self.add_query_param('InstanceId', InstanceId)

	def get_DataId(self): # String
		return self.get_query_params().get('DataId')

	def set_DataId(self, DataId): # String
		self.add_query_param('DataId', DataId)

	def get_NamespaceId(self): # String
		return self.get_query_params().get('NamespaceId')

	def set_NamespaceId(self, NamespaceId): # String
		self.add_query_param('NamespaceId', NamespaceId)

	def get_Beta(self): # Boolean
		return self.get_query_params().get('Beta')

	def set_Beta(self, Beta): # Boolean
		self.add_query_param('Beta', Beta)

	def get_Group(self): # String
		return self.get_query_params().get('Group')

	def set_Group(self, Group): # String
		self.add_query_param('Group', Group)
| 36.322034 | 74 | 0.74895 |
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class DeleteNacosConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'mse', '2019-05-31', 'DeleteNacosConfig')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId):
self.add_query_param('InstanceId', InstanceId)
def get_DataId(self):
return self.get_query_params().get('DataId')
def set_DataId(self, DataId):
self.add_query_param('DataId', DataId)
def get_NamespaceId(self):
return self.get_query_params().get('NamespaceId')
def set_NamespaceId(self, NamespaceId):
self.add_query_param('NamespaceId', NamespaceId)
def get_Beta(self):
return self.get_query_params().get('Beta')
def set_Beta(self, Beta):
self.add_query_param('Beta', Beta)
def get_Group(self):
return self.get_query_params().get('Group')
def set_Group(self, Group):
self.add_query_param('Group', Group)
| true | true |
1c37bc1dc6d774ea34be94b34a72e489ac4ada8f | 677 | py | Python | scrapyd/scheduler.py | senohaha/jzSpiderNode | 9ff725a00a25c74c4fc9b6096c7c4d8dd1de6ba4 | [
"BSD-3-Clause"
] | null | null | null | scrapyd/scheduler.py | senohaha/jzSpiderNode | 9ff725a00a25c74c4fc9b6096c7c4d8dd1de6ba4 | [
"BSD-3-Clause"
] | null | null | null | scrapyd/scheduler.py | senohaha/jzSpiderNode | 9ff725a00a25c74c4fc9b6096c7c4d8dd1de6ba4 | [
"BSD-3-Clause"
] | null | null | null | # -*-coding: utf-8 -*-
from zope.interface import implementer
from .interfaces import ISpiderScheduler
from .utils import get_spider_queues
@implementer(ISpiderScheduler)
class SpiderScheduler(object):
    """Dispatch scheduled spider runs onto per-project persistent queues."""

    def __init__(self, config):
        self.config = config
        self.update_projects()

    def schedule(self, project, spider_name, **spider_args):
        """Enqueue *spider_name* (with its arguments) on *project*'s queue."""
        q = self.queues[project]
        # self.queues: {u'dfy': <scrapyd.spiderqueue.SqliteSpiderQueue object at 0x1c3df90>}
        q.add(spider_name, **spider_args)

    def list_projects(self):
        """Return the names of all projects that currently have a queue."""
        return self.queues.keys()

    def update_projects(self):
        """Rebuild the project -> queue mapping from the configuration."""
        self.queues = get_spider_queues(self.config)
| 28.208333 | 93 | 0.700148 |
from zope.interface import implementer
from .interfaces import ISpiderScheduler
from .utils import get_spider_queues
@implementer(ISpiderScheduler)
class SpiderScheduler(object):
    """Dispatch scheduled spider runs onto per-project persistent queues."""

    def __init__(self, config):
        self.config = config
        self.update_projects()

    def schedule(self, project, spider_name, **spider_args):
        """Enqueue *spider_name* (with its arguments) on *project*'s queue."""
        q = self.queues[project]
        q.add(spider_name, **spider_args)

    def list_projects(self):
        """Return the names of all projects that currently have a queue."""
        return self.queues.keys()

    def update_projects(self):
        """Rebuild the project -> queue mapping from the configuration."""
        self.queues = get_spider_queues(self.config)
| true | true |
1c37bc73f791a3975a6afa5c87788d3edc839815 | 692 | py | Python | strategy/minimize.py | shimomura314/non-bit-reversi | 587aaeea0476c5c6339b6c96f7525c66cbc5321f | [
"MIT"
] | null | null | null | strategy/minimize.py | shimomura314/non-bit-reversi | 587aaeea0476c5c6339b6c96f7525c66cbc5321f | [
"MIT"
] | null | null | null | strategy/minimize.py | shimomura314/non-bit-reversi | 587aaeea0476c5c6339b6c96f7525c66cbc5321f | [
"MIT"
] | null | null | null | """Various strategies for othello.
"""
import random
class Minimize:
    """Othello strategy that flips as few opponent disks as possible."""

    def __init__(self):
        return

    def put_disk(self, othello):
        """Return a legal move from ``othello.reversible`` that flips the
        fewest disks; ties are broken uniformly at random."""
        best_moves = []
        best_score = float('inf')
        for move, flipped in othello.reversible.items():
            score = len(flipped)
            if score < best_score:
                best_score = score
                best_moves = [move]
            elif score == best_score:
                best_moves.append(move)
        return random.choice(best_moves)
import random
class Minimize:
    """Othello strategy: play the move that flips the fewest disks."""

    def __init__(self):
        return

    def put_disk(self, othello):
        """Return a move from othello.reversible with the fewest flipped
        disks; ties are broken uniformly at random.

        NOTE(review): random.choice raises IndexError when
        othello.reversible is empty -- confirm callers guarantee a move.
        """
        min_strategy = []
        min_merit = float('inf')
        for candidate in othello.reversible.keys():
            if min_merit > len(othello.reversible[candidate]):
                min_strategy = [candidate]
                min_merit = len(othello.reversible[candidate])
            elif min_merit == len(othello.reversible[candidate]):
                min_strategy.append(candidate)
        return random.choice(min_strategy)
1c37bc84fb69610331ac21e64650183042302f84 | 2,438 | py | Python | Organise-Files-According-To-Their-Extensions/script_dirs.py | A-kriti/Amazing-Python-Scripts | ebf607fe39e6d9e61f30ec3439fc8d6ab1f736b9 | [
"MIT"
] | 930 | 2020-09-05T22:07:28.000Z | 2022-03-30T07:56:18.000Z | Organise-Files-According-To-Their-Extensions/script_dirs.py | maheshdbabar9340/Amazing-Python-Scripts | e2272048cbe49b4bda5072bbdd8479739bb6c18d | [
"MIT"
] | 893 | 2020-09-04T07:57:24.000Z | 2022-02-08T02:12:26.000Z | Organise-Files-According-To-Their-Extensions/script_dirs.py | maheshdbabar9340/Amazing-Python-Scripts | e2272048cbe49b4bda5072bbdd8479739bb6c18d | [
"MIT"
] | 497 | 2020-09-05T08:16:24.000Z | 2022-03-31T00:55:57.000Z | import os
from pathlib import Path
import sys
# Taking input
print_string = """
Type Path of the directory
OR
Press enter for running the script on current directory:
OR
Type quit
"""
print(print_string + "\n\n")
input_path = input("Input:")
print("\n\n")
# Script will terminate if input is 'quit'
if input_path == "quit":
sys.exit(1)
# If nothing is entered then current working directory will be taken as the input path
if input_path == "":
input_path = os.getcwd()
input_path = Path(input_path)
# Changing the working directory to input path
os.chdir(input_path)
# Creates a dictionary "dic" with key,value pairs where key is extension and value is no. of files with that extension
dic = {}
for file in os.listdir(os.getcwd()):
if os.path.isfile(file):
extension = file.split(".")[-1]
dic[extension] = dic.get(extension, 0) + 1
for key in dic:
print(f"There are {dic[key]} files file with extension {key}")
print("\n\n")
# assigning a variable named current Path of current working directory just for simplicity.
# could have used input_path too
current = Path(os.getcwd())
'''
When this script would run the structure of the current directory would change.Hence,
we are assigning list_dir variable the files and dirs in current working directory which the script would modify
'''
list_dir = os.listdir(current)
# keys of dic are extensions of the file
for key in dic:
# try except block for making directory if it doesn't exists already
try:
os.mkdir(key)
except:
print(
f"directory named {key} already exists so it won't be overwrited \n"
)
# goes through the files in list_dir
# we are not using os.listdir() as the directory structure will change during the execution
for file in list_dir:
if file.split(".")[-1] == key and os.path.isfile(file):
# prints absolute path of the file
print(os.path.abspath(file))
# Renames the path of the file or moves the file in to the newly created directory
Path.rename(Path(os.path.abspath(file)),
current / Path("./{}/".format(key) + file))
# This block just prints a note and the current structure of the directory
print(
"\n Script has organised files as per their extensions into different directories! \n"
)
for file in os.listdir(os.getcwd()):
if not (os.path.isfile(file)):
print(file)
| 31.25641 | 118 | 0.684988 | import os
from pathlib import Path
import sys
print_string = """
Type Path of the directory
OR
Press enter for running the script on current directory:
OR
Type quit
"""
print(print_string + "\n\n")
input_path = input("Input:")
print("\n\n")
if input_path == "quit":
sys.exit(1)
if input_path == "":
input_path = os.getcwd()
input_path = Path(input_path)
os.chdir(input_path)
dic = {}
for file in os.listdir(os.getcwd()):
if os.path.isfile(file):
extension = file.split(".")[-1]
dic[extension] = dic.get(extension, 0) + 1
for key in dic:
print(f"There are {dic[key]} files file with extension {key}")
print("\n\n")
current = Path(os.getcwd())
list_dir = os.listdir(current)
for key in dic:
try:
os.mkdir(key)
except:
print(
f"directory named {key} already exists so it won't be overwrited \n"
)
for file in list_dir:
if file.split(".")[-1] == key and os.path.isfile(file):
print(os.path.abspath(file))
Path.rename(Path(os.path.abspath(file)),
current / Path("./{}/".format(key) + file))
print(
"\n Script has organised files as per their extensions into different directories! \n"
)
for file in os.listdir(os.getcwd()):
if not (os.path.isfile(file)):
print(file)
| true | true |
1c37bcdaebfc6149a8029cccfbac3b7468196323 | 6,554 | py | Python | awswrangler/s3/_write_dataset.py | isichei/aws-data-wrangler | 0ce3836000bc5f4b5f7adffdb81392cdcf135b7a | [
"Apache-2.0"
] | 2 | 2021-10-24T01:01:08.000Z | 2022-01-12T13:23:44.000Z | awswrangler/s3/_write_dataset.py | isichei/aws-data-wrangler | 0ce3836000bc5f4b5f7adffdb81392cdcf135b7a | [
"Apache-2.0"
] | 67 | 2021-01-15T15:00:37.000Z | 2022-03-21T09:27:42.000Z | awswrangler/s3/_write_dataset.py | isichei/aws-data-wrangler | 0ce3836000bc5f4b5f7adffdb81392cdcf135b7a | [
"Apache-2.0"
] | 3 | 2020-12-29T17:27:38.000Z | 2021-01-15T13:47:25.000Z | """Amazon S3 Write Dataset (PRIVATE)."""
import logging
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import boto3
import numpy as np
import pandas as pd
from awswrangler import exceptions
from awswrangler.s3._delete import delete_objects
from awswrangler.s3._write_concurrent import _WriteProxy
_logger: logging.Logger = logging.getLogger(__name__)
def _to_partitions(
    func: Callable[..., List[str]],
    concurrent_partitioning: bool,
    df: pd.DataFrame,
    path_root: str,
    use_threads: bool,
    mode: str,
    partition_cols: List[str],
    bucketing_info: Optional[Tuple[List[str], int]],
    filename_prefix: str,
    boto3_session: boto3.Session,
    **func_kwargs: Any,
) -> Tuple[List[str], Dict[str, List[str]]]:
    """Write *df* split into Hive-style partition directories under *path_root*.

    One ``col=value/...`` subdirectory is written per distinct combination of
    *partition_cols* values; the partition columns themselves are dropped
    from the written data.  Returns the written object paths and a mapping
    of partition prefix -> partition values (as strings).
    """
    partitions_values: Dict[str, List[str]] = {}
    # The proxy serializes or parallelizes the per-partition writes.
    proxy: _WriteProxy = _WriteProxy(use_threads=concurrent_partitioning)

    for keys, subgroup in df.groupby(by=partition_cols, observed=True):
        subgroup = subgroup.drop(partition_cols, axis="columns")
        # groupby yields a scalar key when there is a single partition column.
        keys = (keys,) if not isinstance(keys, tuple) else keys
        subdir = "/".join([f"{name}={val}" for name, val in zip(partition_cols, keys)])
        prefix: str = f"{path_root}{subdir}/"
        if mode == "overwrite_partitions":
            # Clear only the partition being rewritten, not all of path_root.
            delete_objects(
                path=prefix,
                use_threads=use_threads,
                boto3_session=boto3_session,
                s3_additional_kwargs=func_kwargs.get("s3_additional_kwargs"),
            )
        if bucketing_info:
            # Delegate to the bucketing writer; it queues on our proxy.
            _to_buckets(
                func=func,
                df=subgroup,
                path_root=prefix,
                bucketing_info=bucketing_info,
                boto3_session=boto3_session,
                use_threads=use_threads,
                proxy=proxy,
                filename_prefix=filename_prefix,
                **func_kwargs,
            )
        else:
            proxy.write(
                func=func,
                df=subgroup,
                path_root=prefix,
                filename_prefix=filename_prefix,
                boto3_session=boto3_session,
                use_threads=use_threads,
                **func_kwargs,
            )
        partitions_values[prefix] = [str(k) for k in keys]
    paths: List[str] = proxy.close()  # blocking
    return paths, partitions_values
def _to_buckets(
    func: Callable[..., List[str]],
    df: pd.DataFrame,
    path_root: str,
    bucketing_info: Tuple[List[str], int],
    filename_prefix: str,
    boto3_session: boto3.Session,
    use_threads: bool,
    proxy: Optional[_WriteProxy] = None,
    **func_kwargs: Any,
) -> List[str]:
    """Write *df* split into buckets under *path_root*.

    ``bucketing_info`` is ``(columns, number_of_buckets)``.  When a *proxy*
    is supplied by the caller (partitioned writes), the writes are queued on
    it and an empty list is returned -- the caller collects the paths.
    """
    _proxy: _WriteProxy = proxy if proxy else _WriteProxy(use_threads=False)
    # Compute each row's bucket from the bucketing columns' values;
    # astype("O") keeps native Python objects for hashing.
    bucket_number_series = df.astype("O").apply(
        lambda row: _get_bucket_number(bucketing_info[1], [row[col_name] for col_name in bucketing_info[0]]),
        axis="columns",
    )
    for bucket_number, subgroup in df.groupby(by=bucket_number_series, observed=True):
        _proxy.write(
            func=func,
            df=subgroup,
            path_root=path_root,
            # Bucket id is zero-padded so file listings sort naturally.
            filename_prefix=f"{filename_prefix}_bucket-{bucket_number:05d}",
            boto3_session=boto3_session,
            use_threads=use_threads,
            **func_kwargs,
        )
    if proxy:
        # Caller owns the proxy and will collect the paths on close().
        return []
    paths: List[str] = _proxy.close()  # blocking
    return paths
def _get_bucket_number(number_of_buckets: int, values: List[Union[str, int, bool]]) -> int:
hash_code = 0
for value in values:
hash_code = 31 * hash_code + _get_value_hash(value)
return hash_code % number_of_buckets
def _get_value_hash(value: Union[str, int, bool]) -> int:
if isinstance(value, (int, np.int_)):
return int(value)
if isinstance(value, (str, np.str_)):
value_hash = 0
for byte in value.encode():
value_hash = value_hash * 31 + byte
return value_hash
if isinstance(value, (bool, np.bool_)):
return int(value)
raise exceptions.InvalidDataFrame(
"Column specified for bucketing contains invalid data type. Only string, int and bool are supported."
)
def _to_dataset(
    func: Callable[..., List[str]],
    concurrent_partitioning: bool,
    df: pd.DataFrame,
    path_root: str,
    filename_prefix: str,
    index: bool,
    use_threads: bool,
    mode: str,
    partition_cols: Optional[List[str]],
    bucketing_info: Optional[Tuple[List[str], int]],
    boto3_session: boto3.Session,
    **func_kwargs: Any,
) -> Tuple[List[str], Dict[str, List[str]]]:
    """Top-level dataset writer: validate ``mode`` and dispatch to the
    partitioned, bucketed or plain writer.

    Returns
    -------
    Tuple[List[str], Dict[str, List[str]]]
        Written object paths and (when partitioned) a mapping of partition
        prefix -> partition values; empty dict otherwise.
    """
    # Normalize to a trailing slash so prefixes can be concatenated safely.
    path_root = path_root if path_root.endswith("/") else f"{path_root}/"
    # Evaluate mode
    if mode not in ["append", "overwrite", "overwrite_partitions"]:
        raise exceptions.InvalidArgumentValue(
            # Fixed grammar of the user-facing message ("a invalid" -> "an invalid").
            f"{mode} is an invalid mode, please use append, overwrite or overwrite_partitions."
        )
    # A full overwrite (or a partition overwrite without partition columns)
    # clears everything under the dataset root first.
    if (mode == "overwrite") or ((mode == "overwrite_partitions") and (not partition_cols)):
        delete_objects(
            path=path_root,
            use_threads=use_threads,
            boto3_session=boto3_session,
            s3_additional_kwargs=func_kwargs.get("s3_additional_kwargs"),
        )
    # Writing
    partitions_values: Dict[str, List[str]] = {}
    paths: List[str]
    if partition_cols:
        # Partitioned (and possibly bucketed within each partition).
        paths, partitions_values = _to_partitions(
            func=func,
            concurrent_partitioning=concurrent_partitioning,
            df=df,
            path_root=path_root,
            use_threads=use_threads,
            mode=mode,
            bucketing_info=bucketing_info,
            filename_prefix=filename_prefix,
            partition_cols=partition_cols,
            boto3_session=boto3_session,
            index=index,
            **func_kwargs,
        )
    elif bucketing_info:
        # Bucketed only (no partition directories).
        paths = _to_buckets(
            func=func,
            df=df,
            path_root=path_root,
            use_threads=use_threads,
            bucketing_info=bucketing_info,
            filename_prefix=filename_prefix,
            boto3_session=boto3_session,
            index=index,
            **func_kwargs,
        )
    else:
        # Plain write of the whole frame.
        paths = func(
            df=df,
            path_root=path_root,
            filename_prefix=filename_prefix,
            use_threads=use_threads,
            boto3_session=boto3_session,
            index=index,
            **func_kwargs,
        )
    _logger.debug("paths: %s", paths)
    _logger.debug("partitions_values: %s", partitions_values)
    return paths, partitions_values
| 32.606965 | 109 | 0.614434 |
import logging
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import boto3
import numpy as np
import pandas as pd
from awswrangler import exceptions
from awswrangler.s3._delete import delete_objects
from awswrangler.s3._write_concurrent import _WriteProxy
_logger: logging.Logger = logging.getLogger(__name__)
def _to_partitions(
    func: Callable[..., List[str]],
    concurrent_partitioning: bool,
    df: pd.DataFrame,
    path_root: str,
    use_threads: bool,
    mode: str,
    partition_cols: List[str],
    bucketing_info: Optional[Tuple[List[str], int]],
    filename_prefix: str,
    boto3_session: boto3.Session,
    **func_kwargs: Any,
) -> Tuple[List[str], Dict[str, List[str]]]:
    """Write one Hive-style directory (col=value/...) per unique combination
    of the partition columns.

    Returns the written object paths and a mapping of partition prefix ->
    stringified partition values.
    """
    partitions_values: Dict[str, List[str]] = {}
    proxy: _WriteProxy = _WriteProxy(use_threads=concurrent_partitioning)
    for keys, subgroup in df.groupby(by=partition_cols, observed=True):
        # Partition values live in the directory name, not in the data files.
        subgroup = subgroup.drop(partition_cols, axis="columns")
        # A single partition column yields a scalar key; normalize to a tuple.
        keys = (keys,) if not isinstance(keys, tuple) else keys
        subdir = "/".join([f"{name}={val}" for name, val in zip(partition_cols, keys)])
        prefix: str = f"{path_root}{subdir}/"
        if mode == "overwrite_partitions":
            # Only the partitions being rewritten are wiped; others survive.
            delete_objects(
                path=prefix,
                use_threads=use_threads,
                boto3_session=boto3_session,
                s3_additional_kwargs=func_kwargs.get("s3_additional_kwargs"),
            )
        if bucketing_info:
            # Delegate to the bucketing writer, reusing this proxy so every
            # write is collected (and closed) in one place.
            _to_buckets(
                func=func,
                df=subgroup,
                path_root=prefix,
                bucketing_info=bucketing_info,
                boto3_session=boto3_session,
                use_threads=use_threads,
                proxy=proxy,
                filename_prefix=filename_prefix,
                **func_kwargs,
            )
        else:
            proxy.write(
                func=func,
                df=subgroup,
                path_root=prefix,
                filename_prefix=filename_prefix,
                boto3_session=boto3_session,
                use_threads=use_threads,
                **func_kwargs,
            )
        partitions_values[prefix] = [str(k) for k in keys]
    paths: List[str] = proxy.close()  # blocking: waits for all pending writes
    return paths, partitions_values
def _to_buckets(
    func: Callable[..., List[str]],
    df: pd.DataFrame,
    path_root: str,
    bucketing_info: Tuple[List[str], int],
    filename_prefix: str,
    boto3_session: boto3.Session,
    use_threads: bool,
    proxy: Optional[_WriteProxy] = None,
    **func_kwargs: Any,
) -> List[str]:
    """Write ``df`` split into hash buckets under ``path_root``.

    If ``proxy`` is provided, the caller owns it and collects the paths when
    closing it, so an empty list is returned here.
    """
    _proxy: _WriteProxy = proxy if proxy else _WriteProxy(use_threads=False)
    # Compute each row's bucket from the configured columns; object dtype
    # makes the values plain Python scalars for hashing.
    bucket_number_series = df.astype("O").apply(
        lambda row: _get_bucket_number(bucketing_info[1], [row[col_name] for col_name in bucketing_info[0]]),
        axis="columns",
    )
    for bucket_number, subgroup in df.groupby(by=bucket_number_series, observed=True):
        _proxy.write(
            func=func,
            df=subgroup,
            path_root=path_root,
            filename_prefix=f"{filename_prefix}_bucket-{bucket_number:05d}",
            boto3_session=boto3_session,
            use_threads=use_threads,
            **func_kwargs,
        )
    if proxy:
        # Caller-owned proxy: paths are gathered by the caller on close.
        return []
    paths: List[str] = _proxy.close()  # blocking
    return paths
def _get_bucket_number(number_of_buckets: int, values: List[Union[str, int, bool]]) -> int:
    """Fold per-value hashes into one code and reduce it modulo the bucket count."""
    combined = 0
    for v in values:
        combined = combined * 31 + _get_value_hash(v)
    return combined % number_of_buckets
def _get_value_hash(value: Union[str, int, bool]) -> int:
if isinstance(value, (int, np.int_)):
return int(value)
if isinstance(value, (str, np.str_)):
value_hash = 0
for byte in value.encode():
value_hash = value_hash * 31 + byte
return value_hash
if isinstance(value, (bool, np.bool_)):
return int(value)
raise exceptions.InvalidDataFrame(
"Column specified for bucketing contains invalid data type. Only string, int and bool are supported."
)
def _to_dataset(
    func: Callable[..., List[str]],
    concurrent_partitioning: bool,
    df: pd.DataFrame,
    path_root: str,
    filename_prefix: str,
    index: bool,
    use_threads: bool,
    mode: str,
    partition_cols: Optional[List[str]],
    bucketing_info: Optional[Tuple[List[str], int]],
    boto3_session: boto3.Session,
    **func_kwargs: Any,
) -> Tuple[List[str], Dict[str, List[str]]]:
    """Validate ``mode`` and dispatch to the partitioned, bucketed or plain
    writer; returns paths and (when partitioned) the partition values.
    """
    # Normalize to a trailing slash so prefixes can be concatenated safely.
    path_root = path_root if path_root.endswith("/") else f"{path_root}/"
    if mode not in ["append", "overwrite", "overwrite_partitions"]:
        raise exceptions.InvalidArgumentValue(
            f"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions."
        )
    # A full overwrite (or partition overwrite without partition columns)
    # clears everything under the dataset root first.
    if (mode == "overwrite") or ((mode == "overwrite_partitions") and (not partition_cols)):
        delete_objects(
            path=path_root,
            use_threads=use_threads,
            boto3_session=boto3_session,
            s3_additional_kwargs=func_kwargs.get("s3_additional_kwargs"),
        )
    partitions_values: Dict[str, List[str]] = {}
    paths: List[str]
    if partition_cols:
        # Partitioned (and possibly bucketed within each partition).
        paths, partitions_values = _to_partitions(
            func=func,
            concurrent_partitioning=concurrent_partitioning,
            df=df,
            path_root=path_root,
            use_threads=use_threads,
            mode=mode,
            bucketing_info=bucketing_info,
            filename_prefix=filename_prefix,
            partition_cols=partition_cols,
            boto3_session=boto3_session,
            index=index,
            **func_kwargs,
        )
    elif bucketing_info:
        # Bucketed only (no partition directories).
        paths = _to_buckets(
            func=func,
            df=df,
            path_root=path_root,
            use_threads=use_threads,
            bucketing_info=bucketing_info,
            filename_prefix=filename_prefix,
            boto3_session=boto3_session,
            index=index,
            **func_kwargs,
        )
    else:
        # Plain write of the whole frame.
        paths = func(
            df=df,
            path_root=path_root,
            filename_prefix=filename_prefix,
            use_threads=use_threads,
            boto3_session=boto3_session,
            index=index,
            **func_kwargs,
        )
    _logger.debug("paths: %s", paths)
    _logger.debug("partitions_values: %s", partitions_values)
    return paths, partitions_values
| true | true |
1c37bd002df98cf406408b1b8384bf64c26f3e96 | 4,474 | py | Python | tests/apps/info/memo/commands_test.py | item4/yui | 8628d0d54b94ada3cbe7d1b0f624063258bad10a | [
"MIT"
] | 36 | 2017-06-12T01:09:46.000Z | 2021-01-31T17:57:41.000Z | tests/apps/info/memo/commands_test.py | item4/yui | 8628d0d54b94ada3cbe7d1b0f624063258bad10a | [
"MIT"
] | 145 | 2017-06-21T13:31:29.000Z | 2021-06-20T01:01:30.000Z | tests/apps/info/memo/commands_test.py | item4/yui | 8628d0d54b94ada3cbe7d1b0f624063258bad10a | [
"MIT"
] | 21 | 2017-07-24T15:53:19.000Z | 2021-12-23T04:18:31.000Z | import pytest
from yui.apps.info.memo.commands import memo_add
from yui.apps.info.memo.commands import memo_delete
from yui.apps.info.memo.commands import memo_show
from yui.apps.info.memo.models import Memo
from yui.orm.utils import get_count
@pytest.mark.asyncio
async def test_memo_flow(bot, fx_sess):
    """Exercise the full memo lifecycle: show (missing) -> add -> show -> delete.

    The repeated "pop the bot's reply and check it" and "count stored memos"
    sequences are factored into local helpers; the assertions and their order
    are unchanged.
    """
    keyword1 = '키리토'
    keyword2 = '밥'
    text1 = '키리가야 카즈토의 게임 아이디'
    text2 = '귀엽다'
    text3 = '먹어야한다'

    bot.add_channel('C1', 'test')
    bot.add_user('U1', 'tester')
    event = bot.create_message('C1', 'U1')

    def assert_said(text):
        # The next queued call must be exactly this message on the test channel.
        said = bot.call_queue.pop(0)
        assert said.method == 'chat.postMessage'
        assert said.data['channel'] == 'C1'
        assert said.data['text'] == text

    def memo_count(keyword):
        # Number of Memo rows currently stored for the keyword.
        return get_count(fx_sess.query(Memo).filter_by(keyword=keyword))

    # Showing unknown keywords reports "no record" and stores nothing.
    await memo_show(bot, event, fx_sess, keyword1)
    assert_said(f'`{keyword1}`란 이름을 가진 기억 레코드가 없어요!')
    assert memo_count(keyword1) == 0

    await memo_show(bot, event, fx_sess, keyword2)
    assert_said(f'`{keyword2}`이란 이름을 가진 기억 레코드가 없어요!')
    assert memo_count(keyword2) == 0

    # Adding creates one record per call; the same keyword can accumulate.
    await memo_add(bot, event, fx_sess, keyword1, text1)
    assert_said(f'`{keyword1}`로 기억 레코드를 생성했어요!')
    assert memo_count(keyword1) == 1

    await memo_add(bot, event, fx_sess, keyword2, text3)
    assert_said(f'`{keyword2}`으로 기억 레코드를 생성했어요!')
    assert memo_count(keyword2) == 1

    await memo_add(bot, event, fx_sess, keyword1, text2)
    assert_said(f'`{keyword1}`로 기억 레코드를 생성했어요!')
    assert memo_count(keyword1) == 2

    # Showing joins all stored texts for the keyword with " | ".
    await memo_show(bot, event, fx_sess, keyword1)
    assert_said(f'`{keyword1}`: {text1} | {text2}')

    await memo_show(bot, event, fx_sess, keyword2)
    assert_said(f'`{keyword2}`: {text3}')

    # Deleting removes every record for that keyword only.
    await memo_delete(bot, event, fx_sess, keyword1)
    assert_said(f'`{keyword1}`에 관한 기억 레코드를 모두 삭제했어요!')
    assert memo_count(keyword1) == 0
    assert memo_count(keyword2) == 1

    await memo_delete(bot, event, fx_sess, keyword2)
    assert_said(f'`{keyword2}`에 관한 기억 레코드를 모두 삭제했어요!')
    assert memo_count(keyword1) == 0
    assert memo_count(keyword2) == 0
@pytest.mark.asyncio
async def test_length_limit(bot, fx_sess):
    """Over-long keywords (>20 chars) and bodies (>500 chars) are rejected."""
    bot.add_channel('C1', 'test')
    bot.add_user('U1', 'tester')
    event = bot.create_message('C1', 'U1')

    # (keyword, body, expected rejection message) — checked in order.
    cases = [
        ('long' * 100, 'test', '기억하려는 키워드가 너무 길어요! 20자 이하의 키워드만 가능해요!'),
        ('test', 'long' * 1000, '기억하려는 내용이 너무 길어요! 500자 이하의 내용만 가능해요!'),
    ]
    for keyword, body, expected in cases:
        await memo_add(bot, event, fx_sess, keyword, body)
        said = bot.call_queue.pop(0)
        assert said.method == 'chat.postMessage'
        assert said.data['channel'] == 'C1'
        assert said.data['text'] == expected
| 26.951807 | 71 | 0.602816 | import pytest
from yui.apps.info.memo.commands import memo_add
from yui.apps.info.memo.commands import memo_delete
from yui.apps.info.memo.commands import memo_show
from yui.apps.info.memo.models import Memo
from yui.orm.utils import get_count
@pytest.mark.asyncio
async def test_memo_flow(bot, fx_sess):
    """Full memo lifecycle: show (missing) -> add -> show -> delete.

    Keywords are chosen so both Korean particle variants of the bot's
    replies (란/이란, 로/으로) are covered by the expected strings below.
    """
    keyword1 = '키리토'
    keyword2 = '밥'
    text1 = '키리가야 카즈토의 게임 아이디'
    text2 = '귀엽다'
    text3 = '먹어야한다'
    bot.add_channel('C1', 'test')
    bot.add_user('U1', 'tester')
    event = bot.create_message('C1', 'U1')
    # Showing unknown keywords reports "no record" and stores nothing.
    await memo_show(bot, event, fx_sess, keyword1)
    said = bot.call_queue.pop(0)
    assert said.method == 'chat.postMessage'
    assert said.data['channel'] == 'C1'
    assert said.data['text'] == f'`{keyword1}`란 이름을 가진 기억 레코드가 없어요!'
    assert (
        get_count(
            fx_sess.query(Memo).filter_by(keyword=keyword1),
        )
        == 0
    )
    await memo_show(bot, event, fx_sess, keyword2)
    said = bot.call_queue.pop(0)
    assert said.method == 'chat.postMessage'
    assert said.data['channel'] == 'C1'
    assert said.data['text'] == f'`{keyword2}`이란 이름을 가진 기억 레코드가 없어요!'
    assert (
        get_count(
            fx_sess.query(Memo).filter_by(keyword=keyword2),
        )
        == 0
    )
    # Adding creates one record per call; a keyword can accumulate several.
    await memo_add(bot, event, fx_sess, keyword1, text1)
    said = bot.call_queue.pop(0)
    assert said.method == 'chat.postMessage'
    assert said.data['channel'] == 'C1'
    assert said.data['text'] == f'`{keyword1}`로 기억 레코드를 생성했어요!'
    assert (
        get_count(
            fx_sess.query(Memo).filter_by(keyword=keyword1),
        )
        == 1
    )
    await memo_add(bot, event, fx_sess, keyword2, text3)
    said = bot.call_queue.pop(0)
    assert said.method == 'chat.postMessage'
    assert said.data['channel'] == 'C1'
    assert said.data['text'] == f'`{keyword2}`으로 기억 레코드를 생성했어요!'
    assert (
        get_count(
            fx_sess.query(Memo).filter_by(keyword=keyword2),
        )
        == 1
    )
    await memo_add(bot, event, fx_sess, keyword1, text2)
    said = bot.call_queue.pop(0)
    assert said.method == 'chat.postMessage'
    assert said.data['channel'] == 'C1'
    assert said.data['text'] == f'`{keyword1}`로 기억 레코드를 생성했어요!'
    assert (
        get_count(
            fx_sess.query(Memo).filter_by(keyword=keyword1),
        )
        == 2
    )
    # Showing joins all stored texts for the keyword with " | ".
    await memo_show(bot, event, fx_sess, keyword1)
    said = bot.call_queue.pop(0)
    assert said.method == 'chat.postMessage'
    assert said.data['channel'] == 'C1'
    assert said.data['text'] == f'`{keyword1}`: {text1} | {text2}'
    await memo_show(bot, event, fx_sess, keyword2)
    said = bot.call_queue.pop(0)
    assert said.method == 'chat.postMessage'
    assert said.data['channel'] == 'C1'
    assert said.data['text'] == f'`{keyword2}`: {text3}'
    # Deleting removes every record for that keyword only.
    await memo_delete(bot, event, fx_sess, keyword1)
    said = bot.call_queue.pop(0)
    assert said.method == 'chat.postMessage'
    assert said.data['channel'] == 'C1'
    assert said.data['text'] == f'`{keyword1}`에 관한 기억 레코드를 모두 삭제했어요!'
    assert (
        get_count(
            fx_sess.query(Memo).filter_by(keyword=keyword1),
        )
        == 0
    )
    assert (
        get_count(
            fx_sess.query(Memo).filter_by(keyword=keyword2),
        )
        == 1
    )
    await memo_delete(bot, event, fx_sess, keyword2)
    said = bot.call_queue.pop(0)
    assert said.method == 'chat.postMessage'
    assert said.data['channel'] == 'C1'
    assert said.data['text'] == f'`{keyword2}`에 관한 기억 레코드를 모두 삭제했어요!'
    assert (
        get_count(
            fx_sess.query(Memo).filter_by(keyword=keyword1),
        )
        == 0
    )
    assert (
        get_count(
            fx_sess.query(Memo).filter_by(keyword=keyword2),
        )
        == 0
    )
@pytest.mark.asyncio
async def test_length_limit(bot, fx_sess):
    """Too-long keywords (>20 chars) and bodies (>500 chars) are rejected."""
    bot.add_channel('C1', 'test')
    bot.add_user('U1', 'tester')
    event = bot.create_message('C1', 'U1')
    # 400-character keyword exceeds the 20-char limit.
    await memo_add(bot, event, fx_sess, 'long' * 100, 'test')
    said = bot.call_queue.pop(0)
    assert said.method == 'chat.postMessage'
    assert said.data['channel'] == 'C1'
    assert said.data['text'] == '기억하려는 키워드가 너무 길어요! 20자 이하의 키워드만 가능해요!'
    # 4000-character body exceeds the 500-char limit.
    await memo_add(bot, event, fx_sess, 'test', 'long' * 1000)
    said = bot.call_queue.pop(0)
    assert said.method == 'chat.postMessage'
    assert said.data['channel'] == 'C1'
    assert said.data['text'] == '기억하려는 내용이 너무 길어요! 500자 이하의 내용만 가능해요!'
| true | true |
1c37bd1b5ff673772f12755c5de44777390b17f0 | 2,664 | py | Python | PythonClient/car/legacy_hello_car.py | whatseven/AirSim | fe7e4e7c782cf1077594c1ee6cc1bbfec3f66bd1 | [
"MIT"
] | 7 | 2020-05-22T18:00:19.000Z | 2021-01-07T08:31:19.000Z | PythonClient/car/legacy_hello_car.py | whatseven/AirSim | fe7e4e7c782cf1077594c1ee6cc1bbfec3f66bd1 | [
"MIT"
] | 4 | 2020-08-21T07:48:06.000Z | 2021-03-14T21:06:41.000Z | PythonClient/car/legacy_hello_car.py | whatseven/AirSim | fe7e4e7c782cf1077594c1ee6cc1bbfec3f66bd1 | [
"MIT"
] | 7 | 2020-05-22T20:08:22.000Z | 2021-01-22T09:39:17.000Z | """
For connecting to the AirSim drone environment and testing API functionality
"""
import os
import tempfile
import pprint

import numpy as np

import setup_path
import airsim

# connect to the AirSim simulator (despite the car/ location, this drives the
# multirotor client, matching the original script)
client = airsim.MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)

state = client.getMultirotorState()
s = pprint.pformat(state)
print("state: %s" % s)

airsim.wait_key('Press any key to takeoff')
# The client API is asynchronous; .join() blocks until the maneuver finishes.
client.takeoffAsync().join()

state = client.getMultirotorState()
print("state: %s" % pprint.pformat(state))

airsim.wait_key('Press any key to move vehicle to (-10, 10, -10) at 5 m/s')
client.moveToPositionAsync(-10, 10, -10, 5).join()
client.hoverAsync().join()

state = client.getMultirotorState()
print("state: %s" % pprint.pformat(state))

airsim.wait_key('Press any key to take images')
# Request several image types at once.  The bare `ImageRequest` /
# `AirSimImageType` / `AirSimClientBase` names of the legacy script do not
# exist in the `airsim` package and raised NameError; the current module-level
# names are used instead.
responses = client.simGetImages([
    airsim.ImageRequest(0, airsim.ImageType.DepthVis),  # depth visualization image
    airsim.ImageRequest(1, airsim.ImageType.DepthPerspective, True),  # depth in perspective projection
    airsim.ImageRequest(1, airsim.ImageType.Scene),  # scene vision image in png format
    airsim.ImageRequest(1, airsim.ImageType.Scene, False, False)])  # scene vision image in uncompressed RGB array
print('Retrieved images: %d' % len(responses))

tmp_dir = os.path.join(tempfile.gettempdir(), "airsim_drone")
print("Saving images to %s" % tmp_dir)
try:
    os.makedirs(tmp_dir)
except OSError:
    if not os.path.isdir(tmp_dir):
        raise

for idx, response in enumerate(responses):
    filename = os.path.join(tmp_dir, str(idx))
    if response.pixels_as_float:
        # Float pixels (depth) are stored as PFM.
        print("Type %d, size %d" % (response.image_type, len(response.image_data_float)))
        airsim.write_pfm(os.path.normpath(filename + '.pfm'), airsim.get_pfm_array(response))
    elif response.compress:  # png format: already-compressed bytes, write verbatim
        print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8)))
        airsim.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8)
    else:  # uncompressed RGB array
        print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8)))
        # np.frombuffer replaces the deprecated np.fromstring for binary data.
        img1d = np.frombuffer(response.image_data_uint8, dtype=np.uint8)
        img_rgb = img1d.reshape(response.height, response.width, 3)  # H x W x 3
        airsim.write_png(os.path.normpath(filename + '.png'), img_rgb)

airsim.wait_key('Press any key to reset to original state')
client.armDisarm(False)
client.reset()

# that's enough fun for now. let's quit cleanly
client.enableApiControl(False)
| 34.597403 | 117 | 0.737988 |
import os
import tempfile
import pprint
import setup_path
import airsim
# Connect to the simulator and take API control.  NOTE(review): despite the
# car/ path, this uses the multirotor client.
client = airsim.MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
state = client.getMultirotorState()
s = pprint.pformat(state)
print("state: %s" % s)
airsim.wait_key('Press any key to takeoff')
# NOTE(review): `takeoff`/`moveToPosition`/`hover` are legacy blocking names;
# current clients expose *Async variants — confirm against the installed
# airsim package before running.
client.takeoff()
state = client.getMultirotorState()
print("state: %s" % pprint.pformat(state))
airsim.wait_key('Press any key to move vehicle to (-10, 10, -10) at 5 m/s')
client.moveToPosition(-10, 10, -10, 5)
client.hover()
state = client.getMultirotorState()
print("state: %s" % pprint.pformat(state))
airsim.wait_key('Press any key to take images')
# NOTE(review): `ImageRequest` is referenced unqualified but never imported
# here — presumably airsim.ImageRequest was intended; verify.
responses = client.simGetImages([
    ImageRequest(0, airsim.AirSimImageType.DepthVis),
    ImageRequest(1, airsim.AirSimImageType.DepthPerspective, True),
    ImageRequest(1, airsim.AirSimImageType.Scene),
    ImageRequest(1, airsim.AirSimImageType.Scene, False, False)])
print('Retrieved images: %d' % len(responses))
# Save each response under a temp directory, one file per request index.
tmp_dir = os.path.join(tempfile.gettempdir(), "airsim_drone")
print ("Saving images to %s" % tmp_dir)
try:
    os.makedirs(tmp_dir)
except OSError:
    if not os.path.isdir(tmp_dir):
        raise
for idx, response in enumerate(responses):
    filename = os.path.join(tmp_dir, str(idx))
    if response.pixels_as_float:
        # Float pixels (depth) are stored as PFM.
        print("Type %d, size %d" % (response.image_type, len(response.image_data_float)))
        AirSimClientBase.write_pfm(os.path.normpath(filename + '.pfm'), AirSimClientBase.getPfmArray(response))
    elif response.compress:
        # Already-compressed PNG bytes are written verbatim.
        print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8)))
        AirSimClientBase.write_file(os.path.normpath(filename + '.png'), response.image_data_uint8)
    else:
        # NOTE(review): `np` and `AirSimClientBase` are never imported in this
        # file — these branches would raise NameError as written; verify.
        print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8)))
        img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8)
        img_rgb = img1d.reshape(response.height, response.width, 3)
        AirSimClientBase.write_png(os.path.normpath(filename + '.png'), img_rgb)
AirSimClientBase.wait_key('Press any key to reset to original state')
client.armDisarm(False)
client.reset()
client.enableApiControl(False)
| true | true |
1c37bdc53354937df90dffa8566038d87a1e44cb | 16,065 | py | Python | geopandas/tests/test_extension_array.py | standakozak/geopandas | 9f5413f8e992472d89d312cc4853d74c76fbbdf1 | [
"BSD-3-Clause"
] | 2,914 | 2015-01-01T14:27:43.000Z | 2022-03-31T22:26:39.000Z | geopandas/tests/test_extension_array.py | standakozak/geopandas | 9f5413f8e992472d89d312cc4853d74c76fbbdf1 | [
"BSD-3-Clause"
] | 2,040 | 2015-01-16T11:34:26.000Z | 2022-03-31T12:13:39.000Z | geopandas/tests/test_extension_array.py | standakozak/geopandas | 9f5413f8e992472d89d312cc4853d74c76fbbdf1 | [
"BSD-3-Clause"
] | 758 | 2015-01-21T20:23:32.000Z | 2022-03-31T17:22:53.000Z | """
This file contains a minimal set of tests for compliance with the extension
array interface test suite (by inheriting the pandas test suite), and should
contain no other tests.
Other tests (eg related to the spatial functionality or integration
with GeoSeries/GeoDataFrame) should be added to test_array.py and others.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
A set of fixtures are defined to provide data for the tests (the fixtures
expected to be available to pytest by the inherited pandas tests).
"""
import operator
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from pandas.tests.extension import base as extension_tests
import shapely.geometry
from geopandas.array import GeometryArray, GeometryDtype, from_shapely
from geopandas._compat import ignore_shapely2_warnings
import pytest
# -----------------------------------------------------------------------------
# Compat with extension tests in older pandas versions
# -----------------------------------------------------------------------------
# Skip markers shared by the test classes below for features GeometryArray
# does not (yet) support.
not_yet_implemented = pytest.mark.skip(reason="Not yet implemented")
no_sorting = pytest.mark.skip(reason="Sorting not supported")
# -----------------------------------------------------------------------------
# Required fixtures
# -----------------------------------------------------------------------------
@pytest.fixture
def dtype():
    """A fixture providing the ExtensionDtype to validate (GeometryDtype)."""
    return GeometryDtype()
def make_data():
    """Build a length-100 GeometryArray of distinct points (0, 0)..(99, 99)."""
    points = np.empty(100, dtype=object)
    with ignore_shapely2_warnings():
        # Fill after creation so numpy does not consume the geometries through
        # the (deprecated) shapely array interface.
        points[:] = [shapely.geometry.Point(i, i) for i in range(100)]
    return from_shapely(points)
@pytest.fixture
def data():
    """Length-100 array for this type.

    * data[0] and data[1] should both be non missing
    * data[0] and data[1] should not be equal
    """
    # make_data() yields 100 distinct points, satisfying both requirements.
    return make_data()
@pytest.fixture
def data_for_twos():
    """Length-100 array in which all the elements are two."""
    # There is no geometry equivalent of the scalar "two".
    raise NotImplementedError
@pytest.fixture
def data_missing():
    """Length-2 array with [NA, Valid]; None is the missing marker here."""
    return from_shapely([None, shapely.geometry.Point(1, 1)])
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
    """Parametrized fixture giving 'data' and 'data_missing'"""
    # params restrict request.param to exactly these two values.
    if request.param == "data":
        return data
    elif request.param == "data_missing":
        return data_missing
@pytest.fixture
def data_repeated(data):
    """
    Generate many datasets.

    Parameters
    ----------
    data : fixture implementing `data`

    Returns
    -------
    Callable[[int], Generator]:
        A callable taking a `count` argument that returns a generator
        yielding the same dataset `count` times.
    """

    def _repeat(count):
        yield from (data for _ in range(count))

    return _repeat
@pytest.fixture
def data_for_sorting():
    """Length-3 array with a known sort order.

    This should be three items [B, C, A] with
    A < B < C
    """
    # Geometries have no total order, so sorting data cannot be provided.
    raise NotImplementedError
@pytest.fixture
def data_missing_for_sorting():
    """Length-3 array with a known sort order.

    This should be three items [B, NA, A] with
    A < B and NA missing.
    """
    # Geometries have no total order, so sorting data cannot be provided.
    raise NotImplementedError
@pytest.fixture
def na_cmp():
    """Binary operator for comparing NA values.

    Returns a function of two arguments that returns True if both
    arguments are (scalar) NA for GeometryArray, i.e. both are None.
    """
    return lambda x, y: x is None and y is None
@pytest.fixture
def na_value():
    """The scalar missing value for this type. Default 'None'"""
    return None
@pytest.fixture
def data_for_grouping():
    """Data for factorization, grouping, and unique tests.

    Expected to be like [B, B, NA, NA, A, A, B, C]
    Where A < B < C and NA is missing
    """
    point_a = shapely.geometry.Point(0, 0)
    point_b = shapely.geometry.Point(1, 1)
    point_c = shapely.geometry.Point(2, 2)
    return from_shapely(
        [point_b, point_b, None, None, point_a, point_a, point_b, point_c]
    )
@pytest.fixture(params=[True, False])
def box_in_series(request):
    """Boolean fixture: whether to box the data in a Series."""
    return request.param
@pytest.fixture(
    params=[
        lambda x: 1,
        lambda x: [1] * len(x),
        lambda x: pd.Series([1] * len(x)),
        lambda x: x,
    ],
    ids=["scalar", "list", "series", "object"],
)
def groupby_apply_op(request):
    """
    Functions to test groupby.apply().

    Each callable maps a group to a scalar, a list, a Series, or the group
    itself (matching the ids above).
    """
    return request.param
@pytest.fixture(params=[True, False])
def as_frame(request):
    """
    Boolean fixture to support Series and Series.to_frame() comparison testing.
    """
    return request.param
@pytest.fixture(params=[True, False])
def as_series(request):
    """
    Boolean fixture to support arr and Series(arr) comparison testing.
    """
    return request.param
@pytest.fixture(params=[True, False])
def use_numpy(request):
    """
    Boolean fixture to support comparison testing of ExtensionDtype array
    and numpy array.
    """
    return request.param
@pytest.fixture(params=["ffill", "bfill"])
def fillna_method(request):
    """
    Parametrized fixture giving method parameters 'ffill' and 'bfill' for
    Series.fillna(method=<method>) testing.
    """
    return request.param
@pytest.fixture(params=[True, False])
def as_array(request):
    """
    Boolean fixture to support ExtensionDtype _from_sequence method testing.
    """
    return request.param
# Fixtures defined in pandas/conftest.py that are also needed: defining them
# here instead of importing for compatibility
@pytest.fixture(
    params=["sum", "max", "min", "mean", "prod", "std", "var", "median", "kurt", "skew"]
)
def all_numeric_reductions(request):
    """
    Fixture for numeric reduction names (consumed by the no-reduce tests).
    """
    return request.param
@pytest.fixture(params=["all", "any"])
def all_boolean_reductions(request):
    """
    Fixture for boolean reduction names
    """
    return request.param
# only == and != are support for GeometryArray
# @pytest.fixture(params=["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"])
@pytest.fixture(params=["__eq__", "__ne__"])
def all_compare_operators(request):
    """
    Fixture for dunder names of the supported compare operations.

    GeometryArray only implements equality comparisons, so unlike the
    pandas fixture of the same name only ``__eq__`` and ``__ne__`` are
    parametrized here (the ordering operators are excluded above).
    """
    return request.param
# -----------------------------------------------------------------------------
# Inherited tests
# -----------------------------------------------------------------------------
class TestDtype(extension_tests.BaseDtypeTests):
    # additional tests
    def test_array_type_with_arg(self, data, dtype):
        # The dtype must advertise GeometryArray as its array type.
        assert dtype.construct_array_type() is GeometryArray
    def test_registry(self, data, dtype):
        # astype("geometry") on an object Series must go through the
        # registered dtype and produce a GeometryArray-backed Series.
        s = pd.Series(np.asarray(data), dtype=object)
        result = s.astype("geometry")
        assert isinstance(result.array, GeometryArray)
        expected = pd.Series(data)
        self.assert_series_equal(result, expected)
class TestInterface(extension_tests.BaseInterfaceTests):
    def test_array_interface(self, data):
        # we are overriding this base test because the creation of `expected`
        # potentially doesn't work for shapely geometries
        # TODO can be removed with Shapely 2.0
        result = np.array(data)
        assert result[0] == data[0]
        result = np.array(data, dtype=object)
        # expected = np.array(list(data), dtype=object)
        # Fill an empty object array instead, to avoid numpy consuming the
        # geometries through shapely's deprecated array interface.
        expected = np.empty(len(data), dtype=object)
        with ignore_shapely2_warnings():
            expected[:] = list(data)
        assert_array_equal(result, expected)
    def test_contains(self, data, data_missing):
        # overridden due to the inconsistency between
        # GeometryDtype.na_value = np.nan
        # and None being used as NA in array
        # ensure data without missing values
        data = data[~data.isna()]
        # first elements are non-missing
        assert data[0] in data
        assert data_missing[0] in data_missing
        assert None in data_missing
        assert None not in data
        assert pd.NaT not in data_missing
class TestConstructors(extension_tests.BaseConstructorsTests):
    """Inherit the pandas constructor test suite unchanged."""
    pass
class TestReshaping(extension_tests.BaseReshapingTests):
    """Inherit the pandas reshaping (concat/merge/unstack) tests unchanged."""
    pass
class TestGetitem(extension_tests.BaseGetitemTests):
    """Inherit the pandas __getitem__/take/iloc tests unchanged."""
    pass
class TestSetitem(extension_tests.BaseSetitemTests):
    """Inherit the pandas __setitem__ tests unchanged."""
    pass
class TestMissing(extension_tests.BaseMissingTests):
    """Missing-value tests; only scalar fill values are exercised here."""
    def test_fillna_series(self, data_missing):
        # Filling with a scalar geometry replaces the single NA slot.
        fill_value = data_missing[1]
        ser = pd.Series(data_missing)
        result = ser.fillna(fill_value)
        expected = pd.Series(data_missing._from_sequence([fill_value, fill_value]))
        self.assert_series_equal(result, expected)
        # filling with array-like not yet supported
        # # Fill with a series
        # result = ser.fillna(expected)
        # self.assert_series_equal(result, expected)
        # # Fill with a series not affecting the missing values
        # result = ser.fillna(ser)
        # self.assert_series_equal(result, ser)
    @pytest.mark.skip("fillna method not supported")
    def test_fillna_limit_pad(self, data_missing):
        pass
    @pytest.mark.skip("fillna method not supported")
    def test_fillna_limit_backfill(self, data_missing):
        pass
    @pytest.mark.skip("fillna method not supported")
    def test_fillna_series_method(self, data_missing, method):
        pass
    @pytest.mark.skip("fillna method not supported")
    def test_fillna_no_op_returns_copy(self, data):
        pass
class TestReduce(extension_tests.BaseNoReduceTests):
    """GeometryArray supports no reductions; inherit the no-reduce suite."""
    @pytest.mark.skip("boolean reduce (any/all) tested in test_pandas_methods")
    def test_reduce_series_boolean(self):
        # Fixed: the override was missing `self`, which would raise TypeError
        # if the skip mark were ever removed.  Overridden only to skip; the
        # any/all behavior is exercised elsewhere.
        pass
# Dunder names consumed by the arithmetic-ops fixture below.
# __sub__/__rsub__ are intentionally excluded (see the fixture docstring:
# subtraction is implemented as "difference" on geometries).
_all_arithmetic_operators = [
    "__add__",
    "__radd__",
    # '__sub__', '__rsub__',
    "__mul__",
    "__rmul__",
    "__floordiv__",
    "__rfloordiv__",
    "__truediv__",
    "__rtruediv__",
    "__pow__",
    "__rpow__",
    "__mod__",
    "__rmod__",
]
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
    """
    Fixture for dunder names for common arithmetic operations
    Adapted to exclude __sub__, as this is implemented as "difference".
    """
    return request.param
# an inherited test from pandas creates a Series from a list of geometries, which
# triggers the warning from Shapely, out of control of GeoPandas, so ignoring here
@pytest.mark.filterwarnings(
    "ignore:The array interface is deprecated and will no longer work in Shapely 2.0"
)
class TestArithmeticOps(extension_tests.BaseArithmeticOpsTests):
    @pytest.mark.skip(reason="not applicable")
    def test_divmod_series_array(self, data, data_for_twos):
        # Skipped: divmod is not applicable to geometries.
        pass
    @pytest.mark.skip(reason="not applicable")
    def test_add_series_with_extension_array(self, data):
        # Skipped: element-wise addition of two geometry arrays is not applicable.
        pass
# an inherited test from pandas creates a Series from a list of geometries, which
# triggers the warning from Shapely, out of control of GeoPandas, so ignoring here
@pytest.mark.filterwarnings(
    "ignore:The array interface is deprecated and will no longer work in Shapely 2.0"
)
class TestComparisonOps(extension_tests.BaseComparisonOpsTests):
    def _compare_other(self, s, data, op_name, other):
        # Apply the operator via the Series and verify it matches an
        # element-wise `combine` with the same operator.
        op = getattr(operator, op_name.strip("_"))
        result = op(s, other)
        expected = s.combine(other, op)
        self.assert_series_equal(result, expected)
    def test_compare_scalar(self, data, all_compare_operators): # noqa
        # Compare every element against a single scalar geometry.
        op_name = all_compare_operators
        s = pd.Series(data)
        self._compare_other(s, data, op_name, data[0])
    def test_compare_array(self, data, all_compare_operators): # noqa
        # Compare element-wise against an equal-length Series.
        op_name = all_compare_operators
        s = pd.Series(data)
        other = pd.Series([data[0]] * len(data))
        self._compare_other(s, data, op_name, other)
class TestMethods(extension_tests.BaseMethodsTests):
    """pandas "methods" suite.

    Geometries have no total order, so every sorting-based inherited test is
    skipped via ``no_sorting``; the argsort/sort_values overrides below are
    kept for reference but are skipped as well.
    """
    @no_sorting
    @pytest.mark.parametrize("dropna", [True, False])
    def test_value_counts(self, all_data, dropna):
        pass
    @no_sorting
    def test_value_counts_with_normalize(self, data):
        pass
    @no_sorting
    def test_argsort(self, data_for_sorting):
        result = pd.Series(data_for_sorting).argsort()
        # [2, 0, 1] is the sorted order expected of the data_for_sorting
        # fixture (B, C, A pattern in the pandas extension-test contract).
        expected = pd.Series(np.array([2, 0, 1], dtype=np.int64))
        self.assert_series_equal(result, expected)
    @no_sorting
    def test_argsort_missing(self, data_missing_for_sorting):
        result = pd.Series(data_missing_for_sorting).argsort()
        # -1 marks the missing element's position in the argsort result.
        expected = pd.Series(np.array([1, -1, 0], dtype=np.int64))
        self.assert_series_equal(result, expected)
    @no_sorting
    @pytest.mark.parametrize("ascending", [True, False])
    def test_sort_values(self, data_for_sorting, ascending):
        ser = pd.Series(data_for_sorting)
        result = ser.sort_values(ascending=ascending)
        expected = ser.iloc[[2, 0, 1]]
        if not ascending:
            expected = expected[::-1]
        self.assert_series_equal(result, expected)
    @no_sorting
    @pytest.mark.parametrize("ascending", [True, False])
    def test_sort_values_missing(self, data_missing_for_sorting, ascending):
        ser = pd.Series(data_missing_for_sorting)
        result = ser.sort_values(ascending=ascending)
        if ascending:
            expected = ser.iloc[[2, 0, 1]]
        else:
            expected = ser.iloc[[0, 2, 1]]
        self.assert_series_equal(result, expected)
    @no_sorting
    @pytest.mark.parametrize("ascending", [True, False])
    def test_sort_values_frame(self, data_for_sorting, ascending):
        df = pd.DataFrame({"A": [1, 2, 1], "B": data_for_sorting})
        result = df.sort_values(["A", "B"])
        expected = pd.DataFrame(
            {"A": [1, 1, 2], "B": data_for_sorting.take([2, 0, 1])}, index=[2, 0, 1]
        )
        self.assert_frame_equal(result, expected)
    @no_sorting
    def test_searchsorted(self, data_for_sorting, as_series):
        pass
    @not_yet_implemented
    def test_combine_le(self):
        pass
    @pytest.mark.skip(reason="addition not supported")
    def test_combine_add(self):
        pass
    @not_yet_implemented
    def test_fillna_length_mismatch(self, data_missing):
        msg = "Length of 'value' does not match."
        with pytest.raises(ValueError, match=msg):
            data_missing.fillna(data_missing.take([1]))
    @no_sorting
    def test_nargsort(self):
        pass
    @no_sorting
    def test_argsort_missing_array(self):
        pass
    @no_sorting
    def test_argmin_argmax(self):
        pass
    @no_sorting
    def test_argmin_argmax_empty_array(self):
        pass
    @no_sorting
    def test_argmin_argmax_all_na(self):
        pass
    @no_sorting
    def test_argreduce_series(self):
        pass
    @no_sorting
    def test_argmax_argmin_no_skipna_notimplemented(self):
        pass
class TestCasting(extension_tests.BaseCastingTests):
pass
class TestGroupby(extension_tests.BaseGroupbyTests):
    """Groupby suite; the sorting-dependent inherited tests are skipped."""
    @no_sorting
    @pytest.mark.parametrize("as_index", [True, False])
    def test_groupby_extension_agg(self, as_index, data_for_grouping):
        pass
    @no_sorting
    def test_groupby_extension_transform(self, data_for_grouping):
        pass
    @no_sorting
    @pytest.mark.parametrize(
        "op",
        [
            lambda x: 1,
            lambda x: [1] * len(x),
            lambda x: pd.Series([1] * len(x)),
            lambda x: x,
        ],
        ids=["scalar", "list", "series", "object"],
    )
    def test_groupby_extension_apply(self, data_for_grouping, op):
        pass
class TestPrinting(extension_tests.BasePrintingTests):
pass
@not_yet_implemented
class TestParsing(extension_tests.BaseParsingTests):
    # Whole suite skipped: round-tripping geometries through text parsing
    # (e.g. read_csv) is not implemented for GeometryArray.
    pass
| 27.414676 | 88 | 0.651292 | import operator
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from pandas.tests.extension import base as extension_tests
import shapely.geometry
from geopandas.array import GeometryArray, GeometryDtype, from_shapely
from geopandas._compat import ignore_shapely2_warnings
import pytest
not_yet_implemented = pytest.mark.skip(reason="Not yet implemented")
no_sorting = pytest.mark.skip(reason="Sorting not supported")
@pytest.fixture
def dtype():
return GeometryDtype()
def make_data():
a = np.empty(100, dtype=object)
with ignore_shapely2_warnings():
a[:] = [shapely.geometry.Point(i, i) for i in range(100)]
ga = from_shapely(a)
return ga
@pytest.fixture
def data():
return make_data()
@pytest.fixture
def data_for_twos():
raise NotImplementedError
@pytest.fixture
def data_missing():
return from_shapely([None, shapely.geometry.Point(1, 1)])
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
@pytest.fixture
def data_repeated(data):
def gen(count):
for _ in range(count):
yield data
return gen
@pytest.fixture
def data_for_sorting():
raise NotImplementedError
@pytest.fixture
def data_missing_for_sorting():
raise NotImplementedError
@pytest.fixture
def na_cmp():
return lambda x, y: x is None and y is None
@pytest.fixture
def na_value():
return None
@pytest.fixture
def data_for_grouping():
return from_shapely(
[
shapely.geometry.Point(1, 1),
shapely.geometry.Point(1, 1),
None,
None,
shapely.geometry.Point(0, 0),
shapely.geometry.Point(0, 0),
shapely.geometry.Point(1, 1),
shapely.geometry.Point(2, 2),
]
)
@pytest.fixture(params=[True, False])
def box_in_series(request):
return request.param
@pytest.fixture(
params=[
lambda x: 1,
lambda x: [1] * len(x),
lambda x: pd.Series([1] * len(x)),
lambda x: x,
],
ids=["scalar", "list", "series", "object"],
)
def groupby_apply_op(request):
return request.param
@pytest.fixture(params=[True, False])
def as_frame(request):
return request.param
@pytest.fixture(params=[True, False])
def as_series(request):
return request.param
@pytest.fixture(params=[True, False])
def use_numpy(request):
return request.param
@pytest.fixture(params=["ffill", "bfill"])
def fillna_method(request):
return request.param
@pytest.fixture(params=[True, False])
def as_array(request):
return request.param
@pytest.fixture(
params=["sum", "max", "min", "mean", "prod", "std", "var", "median", "kurt", "skew"]
)
def all_numeric_reductions(request):
return request.param
@pytest.fixture(params=["all", "any"])
def all_boolean_reductions(request):
return request.param
@pytest.fixture(params=["__eq__", "__ne__"])
def all_compare_operators(request):
return request.param
class TestDtype(extension_tests.BaseDtypeTests):
def test_array_type_with_arg(self, data, dtype):
assert dtype.construct_array_type() is GeometryArray
def test_registry(self, data, dtype):
s = pd.Series(np.asarray(data), dtype=object)
result = s.astype("geometry")
assert isinstance(result.array, GeometryArray)
expected = pd.Series(data)
self.assert_series_equal(result, expected)
class TestInterface(extension_tests.BaseInterfaceTests):
def test_array_interface(self, data):
# TODO can be removed with Shapely 2.0
result = np.array(data)
assert result[0] == data[0]
result = np.array(data, dtype=object)
# expected = np.array(list(data), dtype=object)
expected = np.empty(len(data), dtype=object)
with ignore_shapely2_warnings():
expected[:] = list(data)
assert_array_equal(result, expected)
def test_contains(self, data, data_missing):
# overridden due to the inconsistency between
# GeometryDtype.na_value = np.nan
# and None being used as NA in array
# ensure data without missing values
data = data[~data.isna()]
# first elements are non-missing
assert data[0] in data
assert data_missing[0] in data_missing
assert None in data_missing
assert None not in data
assert pd.NaT not in data_missing
class TestConstructors(extension_tests.BaseConstructorsTests):
pass
class TestReshaping(extension_tests.BaseReshapingTests):
pass
class TestGetitem(extension_tests.BaseGetitemTests):
pass
class TestSetitem(extension_tests.BaseSetitemTests):
pass
class TestMissing(extension_tests.BaseMissingTests):
def test_fillna_series(self, data_missing):
fill_value = data_missing[1]
ser = pd.Series(data_missing)
result = ser.fillna(fill_value)
expected = pd.Series(data_missing._from_sequence([fill_value, fill_value]))
self.assert_series_equal(result, expected)
# filling with array-like not yet supported
# # Fill with a series
# result = ser.fillna(expected)
# self.assert_series_equal(result, expected)
# # Fill with a series not affecting the missing values
# result = ser.fillna(ser)
# self.assert_series_equal(result, ser)
@pytest.mark.skip("fillna method not supported")
def test_fillna_limit_pad(self, data_missing):
pass
@pytest.mark.skip("fillna method not supported")
def test_fillna_limit_backfill(self, data_missing):
pass
@pytest.mark.skip("fillna method not supported")
def test_fillna_series_method(self, data_missing, method):
pass
@pytest.mark.skip("fillna method not supported")
def test_fillna_no_op_returns_copy(self, data):
pass
class TestReduce(extension_tests.BaseNoReduceTests):
@pytest.mark.skip("boolean reduce (any/all) tested in test_pandas_methods")
def test_reduce_series_boolean():
pass
_all_arithmetic_operators = [
"__add__",
"__radd__",
# '__sub__', '__rsub__',
"__mul__",
"__rmul__",
"__floordiv__",
"__rfloordiv__",
"__truediv__",
"__rtruediv__",
"__pow__",
"__rpow__",
"__mod__",
"__rmod__",
]
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
return request.param
# an inherited test from pandas creates a Series from a list of geometries, which
# triggers the warning from Shapely, out of control of GeoPandas, so ignoring here
@pytest.mark.filterwarnings(
"ignore:The array interface is deprecated and will no longer work in Shapely 2.0"
)
class TestArithmeticOps(extension_tests.BaseArithmeticOpsTests):
@pytest.mark.skip(reason="not applicable")
def test_divmod_series_array(self, data, data_for_twos):
pass
@pytest.mark.skip(reason="not applicable")
def test_add_series_with_extension_array(self, data):
pass
# an inherited test from pandas creates a Series from a list of geometries, which
# triggers the warning from Shapely, out of control of GeoPandas, so ignoring here
@pytest.mark.filterwarnings(
"ignore:The array interface is deprecated and will no longer work in Shapely 2.0"
)
class TestComparisonOps(extension_tests.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
op = getattr(operator, op_name.strip("_"))
result = op(s, other)
expected = s.combine(other, op)
self.assert_series_equal(result, expected)
def test_compare_scalar(self, data, all_compare_operators): # noqa
op_name = all_compare_operators
s = pd.Series(data)
self._compare_other(s, data, op_name, data[0])
def test_compare_array(self, data, all_compare_operators): # noqa
op_name = all_compare_operators
s = pd.Series(data)
other = pd.Series([data[0]] * len(data))
self._compare_other(s, data, op_name, other)
class TestMethods(extension_tests.BaseMethodsTests):
@no_sorting
@pytest.mark.parametrize("dropna", [True, False])
def test_value_counts(self, all_data, dropna):
pass
@no_sorting
def test_value_counts_with_normalize(self, data):
pass
@no_sorting
def test_argsort(self, data_for_sorting):
result = pd.Series(data_for_sorting).argsort()
expected = pd.Series(np.array([2, 0, 1], dtype=np.int64))
self.assert_series_equal(result, expected)
@no_sorting
def test_argsort_missing(self, data_missing_for_sorting):
result = pd.Series(data_missing_for_sorting).argsort()
expected = pd.Series(np.array([1, -1, 0], dtype=np.int64))
self.assert_series_equal(result, expected)
@no_sorting
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values(self, data_for_sorting, ascending):
ser = pd.Series(data_for_sorting)
result = ser.sort_values(ascending=ascending)
expected = ser.iloc[[2, 0, 1]]
if not ascending:
expected = expected[::-1]
self.assert_series_equal(result, expected)
@no_sorting
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values_missing(self, data_missing_for_sorting, ascending):
ser = pd.Series(data_missing_for_sorting)
result = ser.sort_values(ascending=ascending)
if ascending:
expected = ser.iloc[[2, 0, 1]]
else:
expected = ser.iloc[[0, 2, 1]]
self.assert_series_equal(result, expected)
@no_sorting
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values_frame(self, data_for_sorting, ascending):
df = pd.DataFrame({"A": [1, 2, 1], "B": data_for_sorting})
result = df.sort_values(["A", "B"])
expected = pd.DataFrame(
{"A": [1, 1, 2], "B": data_for_sorting.take([2, 0, 1])}, index=[2, 0, 1]
)
self.assert_frame_equal(result, expected)
@no_sorting
def test_searchsorted(self, data_for_sorting, as_series):
pass
@not_yet_implemented
def test_combine_le(self):
pass
@pytest.mark.skip(reason="addition not supported")
def test_combine_add(self):
pass
@not_yet_implemented
def test_fillna_length_mismatch(self, data_missing):
msg = "Length of 'value' does not match."
with pytest.raises(ValueError, match=msg):
data_missing.fillna(data_missing.take([1]))
@no_sorting
def test_nargsort(self):
pass
@no_sorting
def test_argsort_missing_array(self):
pass
@no_sorting
def test_argmin_argmax(self):
pass
@no_sorting
def test_argmin_argmax_empty_array(self):
pass
@no_sorting
def test_argmin_argmax_all_na(self):
pass
@no_sorting
def test_argreduce_series(self):
pass
@no_sorting
def test_argmax_argmin_no_skipna_notimplemented(self):
pass
class TestCasting(extension_tests.BaseCastingTests):
pass
class TestGroupby(extension_tests.BaseGroupbyTests):
@no_sorting
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
pass
@no_sorting
def test_groupby_extension_transform(self, data_for_grouping):
pass
@no_sorting
@pytest.mark.parametrize(
"op",
[
lambda x: 1,
lambda x: [1] * len(x),
lambda x: pd.Series([1] * len(x)),
lambda x: x,
],
ids=["scalar", "list", "series", "object"],
)
def test_groupby_extension_apply(self, data_for_grouping, op):
pass
class TestPrinting(extension_tests.BasePrintingTests):
pass
@not_yet_implemented
class TestParsing(extension_tests.BaseParsingTests):
pass
| true | true |
1c37be98984ec7224180548ff6d1dbe3a33de59e | 1,940 | py | Python | receipts/receipts/admin.py | rolisz/receipt_budget | 74f73e7f8bb8b0b4fa89bfebf4c3c2c930511308 | [
"BSD-3-Clause"
] | 15 | 2016-03-02T18:16:46.000Z | 2022-03-05T10:55:58.000Z | receipts/receipts/admin.py | rolisz/receipt_budget | 74f73e7f8bb8b0b4fa89bfebf4c3c2c930511308 | [
"BSD-3-Clause"
] | 1 | 2017-04-10T23:46:43.000Z | 2017-04-10T23:46:43.000Z | receipts/receipts/admin.py | rolisz/receipt_budget | 74f73e7f8bb8b0b4fa89bfebf4c3c2c930511308 | [
"BSD-3-Clause"
] | 11 | 2016-03-02T18:16:12.000Z | 2020-07-19T11:57:27.000Z | from django.contrib import admin
from receipts.models import Expense, ExpenseItem, Shop
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
class ExpenseItemInline(admin.StackedInline):
    """Inline editor for an expense's items on the Expense change page."""
    model = ExpenseItem
    extra = 2  # show two empty item forms by default
    template = 'admin/receipts/expense/item_stacked.html'  # customised stacked layout
class ShopInline(admin.StackedInline):
    # Defined but not referenced by any admin below — presumably kept for
    # future use; confirm before removing.
    model = Shop
class ExpenseAdmin(admin.ModelAdmin):
    """Admin for Expense: inline items plus an "edit" link to the related shop."""
    class Media:
        # Bootstrap assets used by the customised admin templates.
        css = {
            'all': ("receipts/css/bootstrap.css", "receipts/css/bootstrap-theme.css")
        }
        js = ("receipts/js/jquery.js", "receipts/js/bootstrap.min.js",)
    def change_view(self, request, object_id, form_url='', extra_context=None):
        # Force the delete link to be rendered on the change page.
        extra_context = extra_context or {}
        extra_context['show_delete_link'] = True
        return super(ExpenseAdmin, self).change_view(request, object_id,
                                        form_url, extra_context=extra_context)
    def link(self, obj):
        """Return safe HTML linking to the change page of the expense's shop."""
        # Fixed: removed leftover debug print() calls that wrote obj.shop and
        # the URL to stdout on every admin page render.
        url = reverse('admin:receipts_shop_change', args=(obj.shop.id,))
        return mark_safe("<a href='%s'>edit</a>" % url)
    link.allow_tags = True  # needed on Django < 1.9; ignored (harmless) later
    link.short_description = ""
    inlines = [ExpenseItemInline]
    fields = ['date', ('shop', 'link')]
    readonly_fields = ['image', 'link']
class ShopAdmin(admin.ModelAdmin):
    """Admin for Shop; same Bootstrap styling and forced delete link as ExpenseAdmin."""
    class Media:
        css = {
            'all': ("receipts/css/bootstrap.css", "receipts/css/bootstrap-theme.css")
        }
        js = ("receipts/js/jquery.js", "receipts/js/bootstrap.min.js",)
    def change_view(self, request, object_id, form_url='', extra_context=None):
        # Force the delete link to be rendered on the change page.
        extra_context = extra_context or {}
        extra_context['show_delete_link'] = True
        return super(ShopAdmin, self).change_view(request, object_id,
                                        form_url, extra_context=extra_context)
# Register the receipt models with the default admin site.
admin.site.register(Shop, ShopAdmin)
admin.site.register(Expense, ExpenseAdmin)
admin.site.register(ExpenseItem)  # default ModelAdmin is sufficient for items
| 31.803279 | 85 | 0.673711 | from django.contrib import admin
from receipts.models import Expense, ExpenseItem, Shop
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
class ExpenseItemInline(admin.StackedInline):
model = ExpenseItem
extra = 2
template = 'admin/receipts/expense/item_stacked.html'
class ShopInline(admin.StackedInline):
model = Shop
class ExpenseAdmin(admin.ModelAdmin):
class Media:
css = {
'all': ("receipts/css/bootstrap.css", "receipts/css/bootstrap-theme.css")
}
js = ("receipts/js/jquery.js", "receipts/js/bootstrap.min.js",)
def change_view(self, request, object_id, form_url='', extra_context=None):
extra_context = extra_context or {}
extra_context['show_delete_link'] = True
return super(ExpenseAdmin, self).change_view(request, object_id,
form_url, extra_context=extra_context)
def link(self, obj):
print(obj.shop)
url = reverse('admin:receipts_shop_change',args=(obj.shop.id,))
print(url)
return mark_safe("<a href='%s'>edit</a>" % url)
link.allow_tags = True
link.short_description = ""
inlines = [ExpenseItemInline]
fields = ['date', ('shop', 'link')]
readonly_fields = ['image', 'link']
class ShopAdmin(admin.ModelAdmin):
class Media:
css = {
'all': ("receipts/css/bootstrap.css", "receipts/css/bootstrap-theme.css")
}
js = ("receipts/js/jquery.js", "receipts/js/bootstrap.min.js",)
def change_view(self, request, object_id, form_url='', extra_context=None):
extra_context = extra_context or {}
extra_context['show_delete_link'] = True
return super(ShopAdmin, self).change_view(request, object_id,
form_url, extra_context=extra_context)
admin.site.register(Shop, ShopAdmin)
admin.site.register(Expense, ExpenseAdmin)
admin.site.register(ExpenseItem)
| true | true |
1c37bf63892ff3c3dc3eea7b99f3a2fe1c154eb9 | 15,380 | py | Python | tifresi/phase/modGabPhaseGrad.py | andimarafioti/tifresi | 676db371d5c472a5f3199506bf3863367a2ecde4 | [
"MIT"
] | 12 | 2020-02-08T09:47:17.000Z | 2021-07-31T09:22:41.000Z | tifresi/phase/modGabPhaseGrad.py | nperraud/stft4pghi | 676db371d5c472a5f3199506bf3863367a2ecde4 | [
"MIT"
] | 1 | 2020-07-20T22:32:49.000Z | 2020-07-21T15:20:11.000Z | tifresi/phase/modGabPhaseGrad.py | nperraud/stft4pghi | 676db371d5c472a5f3199506bf3863367a2ecde4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# ######### COPYRIGHT #########
# Credits
# #######
#
# Copyright(c) 2015-2018
# ----------------------
#
# * `LabEx Archimède <http://labex-archimede.univ-amu.fr/>`_
# * `Laboratoire d'Informatique Fondamentale <http://www.lif.univ-mrs.fr/>`_
# (now `Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>`_)
# * `Institut de Mathématiques de Marseille <http://www.i2m.univ-amu.fr/>`_
# * `Université d'Aix-Marseille <http://www.univ-amu.fr/>`_
#
# This software is a port from LTFAT 2.1.0 :
# Copyright (C) 2005-2018 Peter L. Soendergaard <peter@sonderport.dk>.
#
# Contributors
# ------------
#
# * Denis Arrivault <contact.dev_AT_lis-lab.fr>
# * Florent Jaillet <contact.dev_AT_lis-lab.fr>
#
# Description
# -----------
#
# ltfatpy is a partial Python port of the
# `Large Time/Frequency Analysis Toolbox <http://ltfat.sourceforge.net/>`_,
# a MATLAB®/Octave toolbox for working with time-frequency analysis and
# synthesis.
#
# Version
# -------
#
# * ltfatpy version = 1.0.16
# * LTFAT version = 2.1.0
#
# Licence
# -------
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ######### COPYRIGHT #########
"""Module of phase gradient computation
Ported from ltfat_2.1.0/gabor/gabphasegrad.m
.. moduleauthor:: Florent Jaillet
"""
from __future__ import print_function, division
import numpy as np
from ltfatpy.comp.comp_sigreshape_pre import comp_sigreshape_pre
from ltfatpy.gabor.dgtlength import dgtlength
from ltfatpy.gabor.gabwin import gabwin
from ltfatpy.tools.postpad import postpad
from ltfatpy.fourier.fftindex import fftindex
from ltfatpy.comp.comp_sepdgt import comp_sepdgt
from ltfatpy.fourier.pderiv import pderiv
def modgabphasegrad(method, *args, **kwargs):
    """Modified Phase gradient of the discrete Gabor transform
    We modified this to work with dgtreals on the phase and abs case
    Phase case we did a lot of changes,
    abs case we added M as a mandatory parameter
    - Usage:
        | ``(tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M, L=None)``
        | ``(tgrad, fgrad) = gabphasegrad('phase', cphase, a)``
        | ``(tgrad, fgrad) = gabphasegrad('abs', s, g, a, M, difforder=2)``
    - Input parameters:
    :param str method: Method used to compute the phase gradient, see the
        possible values below
    :param numpy.ndarray f: (defined if ``method='dgt'``) Input signal
    :param numpy.ndarray cphase: (defined if ``method='phase'``) Phase of a
        :func:`~ltfatpy.gabor.dgt.dgt` of the signal
    :param numpy.ndarray s: (defined if ``method='abs'``) Spectrogram of the
        signal
    :param numpy.ndarray g: (defined if ``method='dgt'`` or ``method='phase'``)
        Window function
    :param int a: (defined if ``method='dgt'`` or ``method='phase'`` or
        ``method='abs'``) Length of time shift
    :param int M: (defined if ``method='dgt'``) Number of channels
    :param int L: (defined if ``method='dgt'``, optional) Length of transform
        to do
    :param int difforder: (defined if ``method='abs'``, optional) Order of the
        centered finite difference scheme used to perform the needed numerical
        differentiation
    - Output parameters:
    :returns: ``(tgrad, fgrad, c)`` if ``method='dgt'``, or ``(tgrad, fgrad)``
        if ``method='phase'`` or ``method='abs'``
    :rtype: tuple
    :var numpy.ndarray tgrad: Instantaneous frequency
    :var numpy.ndarray fgrad: Local group delay
    :var numpy.ndarray c: Gabor coefficients
    ``gabphasegrad`` computes the time-frequency gradient of the phase of the
    :func:`~ltfatpy.gabor.dgt.dgt` of a signal. The derivative in time
    **tgrad** is the instantaneous frequency while the frequency derivative
    **fgrad** is the local group delay.
    **tgrad** and **fgrad** measure the deviation from the current time and
    frequency, so a value of zero means that the instantaneous frequency is
    equal to the center frequency of the considered channel.
    **tgrad** is scaled such that distances are measured in samples. Similarly,
    **fgrad** is scaled such that the Nyquist frequency (the highest possible
    frequency) corresponds to a value of ``L/2``.
    The computation of **tgrad** and **fgrad** is inaccurate when the absolute
    value of the Gabor coefficients is low. This is due to the fact the the
    phase of complex numbers close to the machine precision is almost
    random. Therefore, **tgrad** and **fgrad** may attain very large random
    values when ``abs(c)`` is close to zero.
    The computation can be done using three different methods:
        =========== ===========================================================
        ``'dgt'``   Directly from the signal.
        ``'phase'`` From the phase of a :func:`~ltfatpy.gabor.dgt.dgt` of the
                    signal. This is the classic method used in the phase
                    vocoder.
        ``'abs'``   From the absolute value of the
                    :func:`~ltfatpy.gabor.dgt.dgt`. Currently this method works
                    only for Gaussian windows.
        =========== ===========================================================
    ``(tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M)`` computes the
    time-frequency gradient using a :func:`~ltfatpy.gabor.dgt.dgt` of the
    signal **f**. The :func:`~ltfatpy.gabor.dgt.dgt` is computed using the
    window **g** on the lattice specified by the time shift **a** and the
    number of channels **M**. The algorithm used to perform this calculation
    computes several DGTs, and therefore this routine takes the exact same
    input parameters as :func:`~ltfatpy.gabor.dgt.dgt`.
    The window **g** may be specified as in :func:`~ltfatpy.gabor.dgt.dgt`. If
    the window used is ``'gauss'``, the computation will be done by a faster
    algorithm.
    ``(tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M)`` additionally
    returns the Gabor coefficients ``c``, as they are always computed as a
    byproduct of the algorithm.
    ``(tgrad, fgrad) = gabphasegrad('phase', cphase, a)`` computes the phase
    gradient from the phase **cphase** of a :func:`~ltfatpy.gabor.dgt.dgt` of
    the signal. The original :func:`~ltfatpy.gabor.dgt.dgt` from which the
    phase is obtained must have been computed using a time-shift of **a**.
    ``(tgrad, fgrad) = gabphasegrad('abs', s, g, a)`` computes the phase
    gradient from the spectrogram **s**. The spectrogram must have been
    computed using the window **g** and time-shift **a**.
    ``(tgrad, fgrad) = gabphasegrad('abs', s, g, a, difforder=ord)`` uses a
    centered finite difference scheme of order ``ord`` to perform the needed
    numerical differentiation. Default is to use a 4th order scheme.
    Currently the 'abs' method only works if the window **g** is a Gaussian
    window specified as a string or cell array.
    .. seealso:: :func:`resgram`, :func:`gabreassign`,
                 :func:`~ltfatpy.gabor.dgt.dgt`
    - References:
        :cite:`aufl95,cmdaaufl97,fl65`
    """
    # NOTE: This function doesn't support the parameter lt (lattice type)
    # supported by the corresponding octave function and the lattice used is
    # seperable (square lattice lt = (0, 1)).
    # NOTE: As in the octave version of this function, if needed, the
    # undocumented optional keyword minlvl is available when using method=dgt.
    # So it can be passed using a call of the following form:
    # (tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M, minlvl=val)
    if not isinstance(method, str):
        raise TypeError('First argument must be a str containing the method '
                        'name, "dgt", "phase" or "abs".')
    method = method.lower()
    if method == 'dgt':
        # This branch is intentionally disabled: it was inherited from the
        # complex-dgt version and has not been validated for dgtreal input.
        raise Exception("We dont know if this works")
        # --------------------------- DGT method ------------------------
        (f, g, a, M) = args
        if 'L' in kwargs:
            L = kwargs['L']
        else:
            L = None
        if 'minlvl' in kwargs:
            minlvl = kwargs['minlvl']
        else:
            minlvl = np.finfo(np.float64).tiny
        # # ----- step 1 : Verify f and determine its length -------
        # Change f to correct shape.
        f, Ls, W, wasrow, remembershape = comp_sigreshape_pre(f, 0)
        # # ------ step 2: Verify a, M and L
        if not L:
            # ----- step 2b : Verify a, M and get L from the signal length f---
            L = dgtlength(Ls, a, M)
        else:
            # ----- step 2a : Verify a, M and get L
            Luser = dgtlength(L, a, M)
            if Luser != L:
                raise ValueError('Incorrect transform length L = {0:d} '
                                 'specified. Next valid length is L = {1:d}. '
                                 'See the help of dgtlength for the '
                                 'requirements.'.format(L, Luser))
        # # ----- step 3 : Determine the window
        g, info = gabwin(g, a, M, L)
        if L < info['gl']:
            raise ValueError('Window is too long.')
        # # ----- step 4: final cleanup ---------------
        f = postpad(f, L)
        # # ------ algorithm starts --------------------
        # Compute the time weighted version of the window.
        hg = fftindex(L) * g
        # The computation done this way is insensitive to whether the dgt is
        # phaselocked or not.
        c = comp_sepdgt(f, g, a, M, 0)
        c_h = comp_sepdgt(f, hg, a, M, 0)
        c_s = np.abs(c) ** 2
        # Remove small values because we need to divide by c_s
        c_s = np.maximum(c_s, minlvl * np.max(c_s))
        # Compute the group delay
        fgrad = np.real(c_h * c.conjugate() / c_s)
        if info['gauss']:
            # The method used below only works for the Gaussian window, because
            # the time derivative and the time multiplicative of the Gaussian
            # are identical.
            tgrad = np.imag(c_h * c.conjugate() / c_s) / info['tfr']
        else:
            # The code below works for any window, and not just the Gaussian
            dg = pderiv(g, difforder=float('inf')) / (2 * np.pi)
            c_d = comp_sepdgt(f, dg, a, M, 0)
            # NOTE: There is a bug here in the original octave file as it
            # contains a reshape that uses an undefined variable N.
            # You can get the error with LTFAT 2.1.0 in octave by running for
            # example:
            # gabphasegrad('dgt', rand(16,1), rand(16,1), 4, 16)
            #
            # So we just comment out the corresponding line here, as it appears
            # to be unneeded:
            # c_d.shape = (M, N, W)
            # Compute the instantaneous frequency
            tgrad = -np.imag(c_d * c.conjugate() / c_s)
        return (tgrad, fgrad, c)
    elif method == 'phase':
        # --------------------------- phase method ------------------------
        # Modified for dgtreal coefficients: cphase has only M2 channels
        # (0..Nyquist) while M is the total channel count of the transform.
        (cphase, a, M) = args
        if not np.isrealobj(cphase):
            raise TypeError("Input phase must be real valued. Use the 'angle'"
                            " function to compute the argument of complex "
                            "numbers.")
        # --- linear method ---
        if cphase.ndim == 3:
            M2, N, W = cphase.shape  # M2 is the number of channels from 0 to Nyquist
        else:
            M2, N = cphase.shape  # M2 is the number of channels from 0 to Nyquist
        L = N * a
        b = L / M
        # NOTE: The following code found in the original octave version of the function
        # hasn't been translated here to Python as it is not used:
        # if 0
        #
        # # This is the classic phase vocoder algorithm by Flanagan.
        #
        # tgrad = cphase-circshift(cphase,[0,-1]);
        # tgrad = tgrad- 2*pi*round(tgrad/(2*pi));
        # tgrad = -tgrad/(2*pi)*L;
        #
        # # Phase-lock the angles.
        # TimeInd = (0:(N-1))*a;
        # FreqInd = (0:(M-1))/M;
        #
        # phl = FreqInd'*TimeInd;
        # cphase = cphase+2*pi.*phl;
        #
        # fgrad = cphase-circshift(cphase,[1,0]);
        # fgrad = fgrad- 2*pi*round(fgrad/(2*pi));
        # fgrad = -fgrad/(2*pi)*L;
        #
        # end;
        # This is the classic phase vocoder algorithm by Flanagan modified to
        # yield a second order centered difference approximation.
        # Forward approximation
        tgrad_1 = cphase - np.roll(cphase, -1, axis=1)
        # numpy round function doesn't use the same convention than octave for
        # half-integers but the standard Python round function uses the same
        # convention than octave, so we use the Python standard round in the
        # computation below
        octave_round = np.vectorize(round)
        tgrad_1 = tgrad_1 - 2 * np.pi * octave_round(tgrad_1 / (2 * np.pi))
        # Backward approximation
        tgrad_2 = np.roll(cphase, 1, axis=1) - cphase
        tgrad_2 = tgrad_2 - 2 * np.pi * octave_round(tgrad_2 / (2 * np.pi))
        # Average forward and backward differences -> centered approximation.
        tgrad = (tgrad_1 + tgrad_2) / 2
        tgrad = -tgrad / (2 * np.pi * a) * L
        # Phase-lock the angles.
        TimeInd = np.arange(N) * a
        FreqInd = np.arange(M2) / M
        phl = np.dot(FreqInd.reshape((FreqInd.shape[0], 1)),
                     TimeInd.reshape((1, TimeInd.shape[0])))
        # NOTE: in the following lines, the shape of phl is changed so that
        # broadcasting works in the following addition with cphase when cphase
        # has more than two dimensions
        new_shape = np.ones((len(cphase.shape),), dtype=int)
        new_shape[0] = phl.shape[0]
        new_shape[1] = phl.shape[1]
        phl = phl.reshape(tuple(new_shape))
        cphase = cphase + 2 * np.pi * phl
        # Extend the phase one channel beyond DC and Nyquist so the centered
        # frequency difference is defined at the borders; the sign flip
        # presumably reflects the symmetry of a real signal's transform phase
        # at those edges — confirm against the dgtreal phase convention.
        cphase_to_aprox = np.concatenate([-cphase[1:2], cphase, -cphase[-2:-1]])
        # Forward approximation
        fgrad_1 = cphase_to_aprox - np.roll(cphase_to_aprox, -1, axis=0)
        fgrad_1 = fgrad_1 - 2 * np.pi * octave_round(fgrad_1 / (2 * np.pi))
        fgrad_1 = fgrad_1[1:-1]
        # Backward approximation
        fgrad_2 = np.roll(cphase_to_aprox, 1, axis=0) - cphase_to_aprox
        fgrad_2 = fgrad_2 - 2 * np.pi * octave_round(fgrad_2 / (2 * np.pi))
        fgrad_2 = fgrad_2[1:-1]
        # Average forward and backward differences (extension rows dropped).
        fgrad = (fgrad_1 + fgrad_2) / 2
        fgrad = fgrad / (2 * np.pi * b) * L
        return (tgrad, fgrad)
    elif method == 'abs':
        # --------------------------- abs method ------------------------
        # Modified signature: M is mandatory here (the spectrogram of a
        # dgtreal only carries M2 = s.shape[0] channels).
        (s, g, a, M) = args
        if 'difforder' in kwargs:
            difforder = kwargs['difforder']
        else:
            difforder = 2
        if not np.all(s >= 0.):
            raise ValueError('First input argument must be positive or zero.')
        if s.ndim == 3:
            M2, N, W = s.shape
        else:
            M2, N = s.shape
        L = N * a
        g, info = gabwin(g, a, M, L)
        if not info['gauss']:
            raise ValueError('The window must be a Gaussian window (specified '
                             'as a string or as a dictionary).')
        b = L / M
        # We must avoid taking the log of zero.
        # Therefore we add the smallest possible
        # number
        logs = np.log(s + np.finfo(s.dtype).tiny)
        # XXX REMOVE Add a small constant to limit the dynamic range. This
        # should lessen the problem of errors in the differentiation for points
        # close to (but not exactly) zeros points.
        maxmax = np.max(logs)
        tt = -11.
        logs[logs < (maxmax + tt)] = tt
        fgrad = pderiv(logs, 1, difforder) / (2 * np.pi) * info['tfr']
        # The M/M2 factor presumably rescales the channel-axis derivative for
        # the reduced dgtreal channel count (M2 ~ M//2+1) — TODO confirm.
        tgrad = pderiv(logs, 0, difforder) / (2 * np.pi * info['tfr']) * (M/M2)
        # Fix the first and last rows .. the
        # borders are symmetric so the centered difference is 0
        tgrad[0, :] = 0
        tgrad[-1, :] = 0
        return (tgrad, fgrad)
    else:
        raise ValueError("First argument must be the method name, 'dgt', "
                         "'phase' or 'abs'.")
| 34.954545 | 81 | 0.633745 |
t Jaillet <contact.dev_AT_lis-lab.fr>
#
# Description
# -----------
#
# ltfatpy is a partial Python port of the
# `Large Time/Frequency Analysis Toolbox <http://ltfat.sourceforge.net/>`_,
# a MATLAB®/Octave toolbox for working with time-frequency analysis and
# synthesis.
#
# Version
# -------
#
# * ltfatpy version = 1.0.16
# * LTFAT version = 2.1.0
#
# Licence
# -------
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ######### COPYRIGHT #########
from __future__ import print_function, division
import numpy as np
from ltfatpy.comp.comp_sigreshape_pre import comp_sigreshape_pre
from ltfatpy.gabor.dgtlength import dgtlength
from ltfatpy.gabor.gabwin import gabwin
from ltfatpy.tools.postpad import postpad
from ltfatpy.fourier.fftindex import fftindex
from ltfatpy.comp.comp_sepdgt import comp_sepdgt
from ltfatpy.fourier.pderiv import pderiv
def modgabphasegrad(method, *args, **kwargs):
# NOTE: This function doesn't support the parameter lt (lattice type)
if not isinstance(method, str):
raise TypeError('First argument must be a str containing the method '
'name, "dgt", "phase" or "abs".')
method = method.lower()
if method == 'dgt':
raise Exception("We dont know if this works")
(f, g, a, M) = args
if 'L' in kwargs:
L = kwargs['L']
else:
L = None
if 'minlvl' in kwargs:
minlvl = kwargs['minlvl']
else:
minlvl = np.finfo(np.float64).tiny
re(f, 0)
Ls, a, M)
else:
Luser = dgtlength(L, a, M)
if Luser != L:
raise ValueError('Incorrect transform length L = {0:d} '
'specified. Next valid length is L = {1:d}. '
'See the help of dgtlength for the '
'requirements.'.format(L, Luser))
L < info['gl']:
raise ValueError('Window is too long.')
)
c_h = comp_sepdgt(f, hg, a, M, 0)
c_s = np.abs(c) ** 2
c_s = np.maximum(c_s, minlvl * np.max(c_s))
fgrad = np.real(c_h * c.conjugate() / c_s)
if info['gauss']:
tgrad = np.imag(c_h * c.conjugate() / c_s) / info['tfr']
else:
dg = pderiv(g, difforder=float('inf')) / (2 * np.pi)
c_d = comp_sepdgt(f, dg, a, M, 0)
tgrad = -np.imag(c_d * c.conjugate() / c_s)
return (tgrad, fgrad, c)
elif method == 'phase':
(cphase, a, M) = args
if not np.isrealobj(cphase):
raise TypeError("Input phase must be real valued. Use the 'angle'"
" function to compute the argument of complex "
"numbers.")
if cphase.ndim == 3:
M2, N, W = cphase.shape
else:
M2, N = cphase.shape
L = N * a
b = L / M
# if 0
#
# # This is the classic phase vocoder algorithm by Flanagan.
#
# tgrad = cphase-circshift(cphase,[0,-1]);
# tgrad = tgrad- 2*pi*round(tgrad/(2*pi));
# tgrad = -tgrad/(2*pi)*L;
#
# # Phase-lock the angles.
# TimeInd = (0:(N-1))*a;
# FreqInd = (0:(M-1))/M;
#
# phl = FreqInd'*TimeInd;
tgrad_1 = cphase - np.roll(cphase, -1, axis=1)
# half-integers but the standard Python round function uses the same
# convention than octave, so we use the Python standard round in the
# computation below
octave_round = np.vectorize(round)
tgrad_1 = tgrad_1 - 2 * np.pi * octave_round(tgrad_1 / (2 * np.pi))
# Backward approximation
tgrad_2 = np.roll(cphase, 1, axis=1) - cphase
tgrad_2 = tgrad_2 - 2 * np.pi * octave_round(tgrad_2 / (2 * np.pi))
# Average
tgrad = (tgrad_1 + tgrad_2) / 2
tgrad = -tgrad / (2 * np.pi * a) * L
# Phase-lock the angles.
TimeInd = np.arange(N) * a
FreqInd = np.arange(M2) / M
phl = np.dot(FreqInd.reshape((FreqInd.shape[0], 1)),
TimeInd.reshape((1, TimeInd.shape[0])))
# NOTE: in the following lines, the shape of phl is changed so that
# broadcasting works in the following addition with cphase when cphase
# has more than two dimensions
new_shape = np.ones((len(cphase.shape),), dtype=int)
new_shape[0] = phl.shape[0]
new_shape[1] = phl.shape[1]
phl = phl.reshape(tuple(new_shape))
cphase = cphase + 2 * np.pi * phl
cphase_to_aprox = np.concatenate([-cphase[1:2], cphase, -cphase[-2:-1]])
# Forward approximation
fgrad_1 = cphase_to_aprox - np.roll(cphase_to_aprox, -1, axis=0)
fgrad_1 = fgrad_1 - 2 * np.pi * octave_round(fgrad_1 / (2 * np.pi))
fgrad_1 = fgrad_1[1:-1]
# Backward approximation
fgrad_2 = np.roll(cphase_to_aprox, 1, axis=0) - cphase_to_aprox
fgrad_2 = fgrad_2 - 2 * np.pi * octave_round(fgrad_2 / (2 * np.pi))
fgrad_2 = fgrad_2[1:-1]
# Average
fgrad = (fgrad_1 + fgrad_2) / 2
fgrad = fgrad / (2 * np.pi * b) * L
return (tgrad, fgrad)
elif method == 'abs':
# --------------------------- abs method ------------------------
(s, g, a, M) = args
if 'difforder' in kwargs:
difforder = kwargs['difforder']
else:
difforder = 2
if not np.all(s >= 0.):
raise ValueError('First input argument must be positive or zero.')
if s.ndim == 3:
M2, N, W = s.shape
else:
M2, N = s.shape
L = N * a
g, info = gabwin(g, a, M, L)
if not info['gauss']:
raise ValueError('The window must be a Gaussian window (specified '
'as a string or as a dictionary).')
b = L / M
# We must avoid taking the log of zero.
# Therefore we add the smallest possible
# number
logs = np.log(s + np.finfo(s.dtype).tiny)
# XXX REMOVE Add a small constant to limit the dynamic range. This
# should lessen the problem of errors in the differentiation for points
# close to (but not exactly) zeros points.
maxmax = np.max(logs)
tt = -11.
logs[logs < (maxmax + tt)] = tt
fgrad = pderiv(logs, 1, difforder) / (2 * np.pi) * info['tfr']
tgrad = pderiv(logs, 0, difforder) / (2 * np.pi * info['tfr']) * (M/M2)
# Fix the first and last rows .. the
# borders are symmetric so the centered difference is 0
tgrad[0, :] = 0
tgrad[-1, :] = 0
return (tgrad, fgrad)
else:
raise ValueError("First argument must be the method name, 'dgt', "
"'phase' or 'abs'.")
| true | true |
1c37bfd8ec65b02baae06a02cae28fc2283c52ec | 971 | py | Python | main/settings/stage.py | wuuuduu/django-notifications2 | 544502ec02bf34b4e0ff613500fd29766aecd229 | [
"BSD-3-Clause"
] | 1 | 2020-09-08T20:13:58.000Z | 2020-09-08T20:13:58.000Z | main/settings/stage.py | wuuuduu/django-notifications2 | 544502ec02bf34b4e0ff613500fd29766aecd229 | [
"BSD-3-Clause"
] | null | null | null | main/settings/stage.py | wuuuduu/django-notifications2 | 544502ec02bf34b4e0ff613500fd29766aecd229 | [
"BSD-3-Clause"
] | null | null | null | from .base import *
from .logging import ConfigureLogger
LOGGING_LEVEL = 'INFO'
ConfigureLogger(log_level=LOGGING_LEVEL, logging_dir=LOGGING_DIR, django_modules=PROJECT_APPS)
REST_FRAMEWORK['DEFAULT_RENDERER_CLASSES'] = [
'rest_framework.renderers.JSONRenderer',
]
ALLOWED_HOSTS = [
'api.stage.example.com'
]
RAVEN_CONFIG['environment'] = 'stage'
CORS_ORIGIN_WHITELIST = [
'https://stage.example.com',
'https://www.stage.example.com',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.getenv('DATABASES_NAME'),
'USER': os.getenv('DATABASES_USER'),
'PASSWORD': os.getenv('DATABASES_PASSWORD'),
'HOST': os.getenv('DATABASES_HOST'),
'PORT': os.getenv('DATABASES_PORT'),
'OPTIONS': {'charset': 'utf8mb4'},
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
| 23.682927 | 94 | 0.649846 | from .base import *
from .logging import ConfigureLogger
LOGGING_LEVEL = 'INFO'
ConfigureLogger(log_level=LOGGING_LEVEL, logging_dir=LOGGING_DIR, django_modules=PROJECT_APPS)
REST_FRAMEWORK['DEFAULT_RENDERER_CLASSES'] = [
'rest_framework.renderers.JSONRenderer',
]
ALLOWED_HOSTS = [
'api.stage.example.com'
]
RAVEN_CONFIG['environment'] = 'stage'
CORS_ORIGIN_WHITELIST = [
'https://stage.example.com',
'https://www.stage.example.com',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.getenv('DATABASES_NAME'),
'USER': os.getenv('DATABASES_USER'),
'PASSWORD': os.getenv('DATABASES_PASSWORD'),
'HOST': os.getenv('DATABASES_HOST'),
'PORT': os.getenv('DATABASES_PORT'),
'OPTIONS': {'charset': 'utf8mb4'},
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
| true | true |
1c37c0a9cb26e30c5fb60c8dbfd4c2ea7166eb2a | 15,493 | py | Python | daseki/common/__init__.py | cuthbertLab/daseki | 48a16ab1351bd1128c06092065234ea1016a87ef | [
"BSD-3-Clause"
] | null | null | null | daseki/common/__init__.py | cuthbertLab/daseki | 48a16ab1351bd1128c06092065234ea1016a87ef | [
"BSD-3-Clause"
] | null | null | null | daseki/common/__init__.py | cuthbertLab/daseki | 48a16ab1351bd1128c06092065234ea1016a87ef | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Name: common.py
# Purpose: Commonly used tools across Daseki
#
# Authors: Michael Scott Cuthbert
#
# Copyright: Copyright © 2014-16 Michael Scott Cuthbert / cuthbertLab
# License: BSD, see license.txt
# ----------------------------------------------------------------------------
'''
Common is a collection of utility functions, objects, constants and dictionaries used
throughout daseki.
functions in common/ should not import anything from daseki except daseki.exceptionsDS
(except in tests and doctests).
For historical reasons all the (non-private) functions etc. of the common/
folder are available by importing common.
'''
# pylint: disable=wildcard-import
from typing import Any
from daseki.common.parallel import *
import enum
import inspect
import re
import os
import sys
import time
import tempfile
import weakref
from daseki.exceptionsDS import DasekiException
maxRetrosheetYear = 2015
class TeamNum(enum.IntEnum):
VISITOR = 0
HOME = 1
# tools for setup.py
def sourceFilePath():
'''
Get the Daseki directory that contains source files. This is not the same as the
outermost package development directory.
'''
dn = os.path.dirname
fpThis = inspect.getfile(sourceFilePath)
fpDS = dn(dn(fpThis))
# use retro as a test case
if 'retro' not in os.listdir(fpDS):
raise DasekiException('cannot find expected daseki directory: %s' % fpDS)
return fpDS
def dataFilePath():
return os.path.join(sourceFilePath(), 'dataFiles')
def dataRetrosheet():
return os.path.join(dataFilePath(), 'retrosheet')
def dataRetrosheetEvent():
return os.path.join(dataRetrosheet(), 'event')
def dataRetrosheetByType(gameType='regular'):
if gameType not in ('asg', 'post', 'regular'):
raise DasekiException('gameType must be asg, post, or regular, not {0}'.format(gameType))
return os.path.join(dataRetrosheetEvent(), gameType)
def gameLogFilePath():
return os.path.join(dataRetrosheet(), 'gamelog')
# ---------------------
def getDefaultRootTempDir():
'''
returns whatever tempfile.gettempdir() returns plus 'daseki'.
Creates the subdirectory if it doesn't exist:
>>> from daseki import common
>>> import tempfile
>>> t = tempfile.gettempdir()
>>> #_DOCS_SHOW t
'/var/folders/x5/rymq2tx16lqbpytwb1n_cc4c0000gn/T'
>>> import os
>>> common.getDefaultRootTempDir() == os.path.join(t, 'daseki')
True
'''
# this returns the root temp dir; this does not create a new dir
dstDir = os.path.join(tempfile.gettempdir(), 'daseki')
# if this path already exists, we have nothing more to do
if os.path.exists(dstDir):
return dstDir
else:
# make this directory as a temp directory
try:
os.mkdir(dstDir)
except OSError: # cannot make the directory
dstDir = tempfile.gettempdir()
return dstDir
# ---------------------
GAMEID_MATCH = re.compile(r'([A-Za-z][A-Za-z][A-Za-z])(\d\d\d\d)(\d\d)(\d\d)(\d?)')
class GameId(object):
'''
A GameId is a 12-character string that embeds information about
when and where a game was played. It is designed to uniquely identify
any game every played.
We can initialize a GameId object from a string:
>>> from daseki import common
>>> gid = common.GameId('SDN201304090')
>>> str(gid)
'SDN201304090'
>>> gid
<daseki.common.GameId SDN201304090>
>>> gid.year
2013
>>> gid.day
9
>>> gid.gameNum # always a string because of weird split double header A, B codes
'0'
>>> gid.homeTeam
'SDN'
Or we can construct the id from all the information:
>>> gid2 = common.GameId()
>>> gid2.homeTeam = 'ARI'
>>> gid2.year = 2000
>>> gid2.month = 9
>>> gid2.day = 22
>>> print(gid2)
ARI200009220
Last digit is optional:
>>> gid = common.GameId('SDN20130409')
>>> str(gid)
'SDN201304090'
'''
def __init__(self, gameId=None):
self.gameId = gameId
self.year = 0
self.month = 0
self.day = 0
self.gameNum = '0'
self.homeTeam = 'XXX'
if gameId is not None:
self.parse()
def __repr__(self):
return '<{0}.{1} {2}>'.format(self.__module__, self.__class__.__name__, str(self))
def __str__(self):
return '{s.homeTeam}{s.year:4d}{s.month:02d}{s.day:02d}{s.gameNum}'.format(s=self)
def parse(self):
gameId = self.gameId
matched = GAMEID_MATCH.match(gameId)
if not matched:
raise DasekiException('invalid gameId: %s' % gameId)
self.homeTeam = matched.group(1).upper()
self.year = int(matched.group(2))
self.month = int(matched.group(3))
self.day = int(matched.group(4))
self.gameNum = matched.group(5)
if self.gameNum == '':
self.gameNum = '0'
# ---------------------
ordinals = ['Zeroth', 'First', 'Second', 'Third', 'Fourth', 'Fifth',
'Sixth', 'Seventh', 'Eighth', 'Ninth', 'Tenth', 'Eleventh',
'Twelfth', 'Thirteenth', 'Fourteenth', 'Fifteenth',
'Sixteenth', 'Seventeenth', 'Eighteenth', 'Nineteenth',
'Twentieth', 'Twenty-first', 'Twenty-second']
def ordinalAbbreviation(value, plural=False):
'''Return the ordinal abbreviations for integers
>>> from daseki import common
>>> common.ordinalAbbreviation(3)
'rd'
>>> common.ordinalAbbreviation(255)
'th'
>>> common.ordinalAbbreviation(255, plural=True)
'ths'
:rtype: str
'''
valueHundreths = value % 100
post = ''
if valueHundreths in [11, 12, 13]:
post = 'th'
else:
valueMod = value % 10
if valueMod == 1:
post = 'st'
elif valueMod in [0, 4, 5, 6, 7, 8, 9]:
post = 'th'
elif valueMod == 2:
post = 'nd'
elif valueMod == 3:
post = 'rd'
if post != 'st' and plural:
post += 's'
return post
# -------------------------------------------------------------------------------
class Timer(object):
'''
An object for timing. Call it to get the current time since starting.
>>> from daseki import common
>>> t = common.Timer()
>>> now = t()
>>> now_now = t()
>>> now_now > now
True
Call `stop` to stop it. Calling `start` again will reset the number
>>> t.stop()
>>> stopTime = t()
>>> stopNow = t()
>>> stopTime == stopNow
True
All this had better take less than one second!
>>> stopTime < 1
True
'''
def __init__(self):
# start on init
self._tStart = time.time()
self._tDif = 0
self._tStop = None
def start(self):
'''
Explicit start method; will clear previous values.
Start always happens on initialization.
'''
self._tStart = time.time()
self._tStop = None # show that a new run has started so __call__ works
self._tDif = 0
def stop(self):
self._tStop = time.time()
self._tDif = self._tStop - self._tStart
def clear(self):
self._tStop = None
self._tDif = 0
self._tStart = None
def __call__(self):
'''Reports current time or, if stopped, stopped time.
'''
# if stopped, gets _tDif; if not stopped, gets current time
if self._tStop is None: # if not stopped yet
t = time.time() - self._tStart
else:
t = self._tDif
return t
def __str__(self):
if self._tStop is None: # if not stopped yet
t = time.time() - self._tStart
else:
t = self._tDif
return str(round(t, 3))
# ---------
def sortModules(moduleList):
'''
Sort a lost of imported module names such that most recently modified is
first. In ties, last access time is used then module name
Will return a different order each time depending on the last mod time
:rtype: list(str)
'''
sort = []
modNameToMod = {}
for mod in moduleList:
modNameToMod[mod.__name__] = mod
fp = mod.__file__ # returns the pyc file
stat = os.stat(fp)
lastmod = time.localtime(stat[8])
asctime = time.asctime(lastmod)
sort.append((lastmod, asctime, mod.__name__))
sort.sort()
sort.reverse()
# just return module list
return [modNameToMod[modName] for lastmod, asctime, modName in sort]
# ------------------------
class SlottedObjectMixin(object):
r'''
Provides template for classes implementing slots allowing it to be pickled
properly.
Only use SlottedObjects for objects that we expect to make so many of
that memory storage and speed become an issue. For instance an object representing
a single play or plate appearence.
>>> import pickle
>>> from daseki import common
>>> class BatAngle(common.SlottedObjectMixin):
... __slots__ = ('horizontal', 'vertical')
>>> s = BatAngle()
>>> s.horizontal = 35
>>> s.vertical = 20
>>> #_DOCS_SHOW out = pickle.dumps(s)
>>> #_DOCS_SHOW t = pickle.loads(out)
>>> t = s #_DOCS_HIDE -- cannot define classes for pickling in doctests
>>> t.horizontal, t.vertical
(35, 20)
'''
# CLASS VARIABLES #
__slots__ = ('__weakref__')
# SPECIAL METHODS #
def __getstate__(self):
if getattr(self, '__dict__', None) is not None:
state = getattr(self, '__dict__').copy()
else:
state = {}
slots = set()
for cls in self.__class__.mro():
slots.update(getattr(cls, '__slots__', ()))
for slot in slots:
sValue = getattr(self, slot, None)
if isinstance(sValue, weakref.ref):
sValue = sValue()
print('Warning: uncaught weakref found in %r - %s, will not be rewrapped' %
(self, slot))
state[slot] = sValue
if getattr(self, '__dict__', None) is not None:
print('We got a dict TOO!', getattr(self, '__class__'))
return state
def __setstate__(self, state):
# print('Restoring state {0}'.format(self.__class__))
for slot, value in state.items():
setattr(self, slot, value)
class ParentMixin(SlottedObjectMixin):
__slots__ = ('_parent',)
def __init__(self, parent=None):
self._parent = None
if parent is not None:
self.parent = parent
def __getstate__(self):
pValue = getattr(self, '_parent', None)
setattr(self, '_parent', None)
state = super().__getstate__()
state['_parent'] = pValue
return state
def __setstate__(self, state):
super().__setstate__(state)
pValue = getattr(self, '_parent', None)
try:
pValue = weakref.ref(pValue)
except TypeError:
pass # hard reference now...
setattr(self, '_parent', pValue)
def parentByClass(self, className):
'''
iterate through parents until one of the proper class is found.
'''
p = self.parent
if p is None:
return None
if p.__class__.__name__ == className:
return p
elif hasattr(p, 'parentByClass'):
return p.parentByClass(className)
else:
return None
def _getParent(self):
_p = self._parent
if _p is None:
return _p
elif isinstance(_p, weakref.ref):
return _p()
else:
return _p
def _setParent(self, referent):
if referent is None:
return
try:
self._parent = weakref.ref(referent)
# if referent is None, will raise a TypeError
# if referent is a weakref, will also raise a TypeError
# will also raise a type error for string, ints, etc.
# slight performance boost rather than checking if None
except TypeError:
self._parent = referent
parent = property(_getParent, _setParent)
# ------------------------------------------------------------------------------
def wrapWeakref(referent):
'''
utility function that wraps objects as weakrefs but does not wrap
already wrapped objects; also prevents wrapping the unwrapable 'None' type, etc.
>>> import weakref
>>> from daseki import common
>>> class Mock(object):
... pass
>>> a1 = Mock()
>>> ref1 = common.wrapWeakref(a1)
>>> ref1
<weakref at 0x101f29ae8; to 'Mock' at 0x101e45358>
>>> ref2 = common.wrapWeakref(ref1)
>>> ref2
<weakref at 0x101f299af; to 'Mock' at 0x101e45358>
>>> ref3 = common.wrapWeakref(5)
>>> ref3
5
'''
# if type(referent) is weakref.ref:
# if isinstance(referent, weakref.ref):
# return referent
try:
return weakref.ref(referent)
# if referent is None, will raise a TypeError
# if referent is a weakref, will also raise a TypeError
# will also raise a type error for string, ints, etc.
# slight performance boost rather than checking if None
except TypeError:
return referent
def unwrapWeakref(referent):
'''
Utility function that gets an object that might be an object itself
or a weak reference to an object. It returns obj() if it's a weakref or another callable.
and obj if it's not.
>>> from daseki import common
>>> class Mock(object):
... strong: Any
... weak: Any
>>> a1 = Mock()
>>> a2 = Mock()
>>> a2.strong = a1
>>> a2.weak = common.wrapWeakref(a1)
>>> common.unwrapWeakref(a2.strong) is a1
True
>>> common.unwrapWeakref(a2.weak) is a1
True
>>> common.unwrapWeakref(a2.strong) is common.unwrapWeakref(a2.weak)
True
'''
try:
return referent()
except TypeError:
return referent
def warn(*msg):
'''
To print a warning to the user, send a list of strings to this method.
Similar to printDebug but even if debug is off.
'''
msg = formatStr(msg)
sys.stderr.write(msg)
def formatStr(msg, *arguments, **keywords):
'''Format one or more data elements into string suitable for printing
straight to stderr or other outputs
>>> from daseki import common
>>> a = common.formatStr('test', '1', 2, 3)
>>> print(a)
test 1 2 3
<BLANKLINE>
'''
if 'format' in keywords:
formatType = keywords['format']
else:
formatType = None
msg = [msg] + list(arguments)
for i in range(len(msg)):
x = msg[i]
if isinstance(x, bytes):
msg[i] = x.decode('utf-8')
if not isinstance(x, str):
try:
msg[i] = repr(x)
except TypeError:
try:
msg[i] = x.decode('utf-8')
except AttributeError:
msg[i] = '<__repr__ failed for ' + x.__class__.__name__ + '>'
except AttributeError: # or something
msg[i] = '<__repr__ failed for ' + x.__class__.__name__ + '>'
if formatType == 'block':
return '\n*** '.join(msg)+'\n'
else: # catch all others
return ' '.join(msg)+'\n'
if __name__ == '__main__':
import daseki
daseki.mainTest()
| 28.117967 | 97 | 0.57981 |
from typing import Any
from daseki.common.parallel import *
import enum
import inspect
import re
import os
import sys
import time
import tempfile
import weakref
from daseki.exceptionsDS import DasekiException
maxRetrosheetYear = 2015
class TeamNum(enum.IntEnum):
VISITOR = 0
HOME = 1
def sourceFilePath():
dn = os.path.dirname
fpThis = inspect.getfile(sourceFilePath)
fpDS = dn(dn(fpThis))
if 'retro' not in os.listdir(fpDS):
raise DasekiException('cannot find expected daseki directory: %s' % fpDS)
return fpDS
def dataFilePath():
return os.path.join(sourceFilePath(), 'dataFiles')
def dataRetrosheet():
return os.path.join(dataFilePath(), 'retrosheet')
def dataRetrosheetEvent():
return os.path.join(dataRetrosheet(), 'event')
def dataRetrosheetByType(gameType='regular'):
if gameType not in ('asg', 'post', 'regular'):
raise DasekiException('gameType must be asg, post, or regular, not {0}'.format(gameType))
return os.path.join(dataRetrosheetEvent(), gameType)
def gameLogFilePath():
return os.path.join(dataRetrosheet(), 'gamelog')
def getDefaultRootTempDir():
dstDir = os.path.join(tempfile.gettempdir(), 'daseki')
if os.path.exists(dstDir):
return dstDir
else:
try:
os.mkdir(dstDir)
except OSError:
dstDir = tempfile.gettempdir()
return dstDir
GAMEID_MATCH = re.compile(r'([A-Za-z][A-Za-z][A-Za-z])(\d\d\d\d)(\d\d)(\d\d)(\d?)')
class GameId(object):
def __init__(self, gameId=None):
self.gameId = gameId
self.year = 0
self.month = 0
self.day = 0
self.gameNum = '0'
self.homeTeam = 'XXX'
if gameId is not None:
self.parse()
def __repr__(self):
return '<{0}.{1} {2}>'.format(self.__module__, self.__class__.__name__, str(self))
def __str__(self):
return '{s.homeTeam}{s.year:4d}{s.month:02d}{s.day:02d}{s.gameNum}'.format(s=self)
def parse(self):
gameId = self.gameId
matched = GAMEID_MATCH.match(gameId)
if not matched:
raise DasekiException('invalid gameId: %s' % gameId)
self.homeTeam = matched.group(1).upper()
self.year = int(matched.group(2))
self.month = int(matched.group(3))
self.day = int(matched.group(4))
self.gameNum = matched.group(5)
if self.gameNum == '':
self.gameNum = '0'
ordinals = ['Zeroth', 'First', 'Second', 'Third', 'Fourth', 'Fifth',
'Sixth', 'Seventh', 'Eighth', 'Ninth', 'Tenth', 'Eleventh',
'Twelfth', 'Thirteenth', 'Fourteenth', 'Fifteenth',
'Sixteenth', 'Seventeenth', 'Eighteenth', 'Nineteenth',
'Twentieth', 'Twenty-first', 'Twenty-second']
def ordinalAbbreviation(value, plural=False):
valueHundreths = value % 100
post = ''
if valueHundreths in [11, 12, 13]:
post = 'th'
else:
valueMod = value % 10
if valueMod == 1:
post = 'st'
elif valueMod in [0, 4, 5, 6, 7, 8, 9]:
post = 'th'
elif valueMod == 2:
post = 'nd'
elif valueMod == 3:
post = 'rd'
if post != 'st' and plural:
post += 's'
return post
class Timer(object):
def __init__(self):
self._tStart = time.time()
self._tDif = 0
self._tStop = None
def start(self):
self._tStart = time.time()
self._tStop = None
self._tDif = 0
def stop(self):
self._tStop = time.time()
self._tDif = self._tStop - self._tStart
def clear(self):
self._tStop = None
self._tDif = 0
self._tStart = None
def __call__(self):
if self._tStop is None:
t = time.time() - self._tStart
else:
t = self._tDif
return t
def __str__(self):
if self._tStop is None:
t = time.time() - self._tStart
else:
t = self._tDif
return str(round(t, 3))
def sortModules(moduleList):
sort = []
modNameToMod = {}
for mod in moduleList:
modNameToMod[mod.__name__] = mod
fp = mod.__file__
stat = os.stat(fp)
lastmod = time.localtime(stat[8])
asctime = time.asctime(lastmod)
sort.append((lastmod, asctime, mod.__name__))
sort.sort()
sort.reverse()
return [modNameToMod[modName] for lastmod, asctime, modName in sort]
class SlottedObjectMixin(object):
__slots__ = ('__weakref__')
def __getstate__(self):
if getattr(self, '__dict__', None) is not None:
state = getattr(self, '__dict__').copy()
else:
state = {}
slots = set()
for cls in self.__class__.mro():
slots.update(getattr(cls, '__slots__', ()))
for slot in slots:
sValue = getattr(self, slot, None)
if isinstance(sValue, weakref.ref):
sValue = sValue()
print('Warning: uncaught weakref found in %r - %s, will not be rewrapped' %
(self, slot))
state[slot] = sValue
if getattr(self, '__dict__', None) is not None:
print('We got a dict TOO!', getattr(self, '__class__'))
return state
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
class ParentMixin(SlottedObjectMixin):
__slots__ = ('_parent',)
def __init__(self, parent=None):
self._parent = None
if parent is not None:
self.parent = parent
def __getstate__(self):
pValue = getattr(self, '_parent', None)
setattr(self, '_parent', None)
state = super().__getstate__()
state['_parent'] = pValue
return state
def __setstate__(self, state):
super().__setstate__(state)
pValue = getattr(self, '_parent', None)
try:
pValue = weakref.ref(pValue)
except TypeError:
pass
setattr(self, '_parent', pValue)
def parentByClass(self, className):
p = self.parent
if p is None:
return None
if p.__class__.__name__ == className:
return p
elif hasattr(p, 'parentByClass'):
return p.parentByClass(className)
else:
return None
def _getParent(self):
_p = self._parent
if _p is None:
return _p
elif isinstance(_p, weakref.ref):
return _p()
else:
return _p
def _setParent(self, referent):
if referent is None:
return
try:
self._parent = weakref.ref(referent)
except TypeError:
self._parent = referent
parent = property(_getParent, _setParent)
def wrapWeakref(referent):
try:
return weakref.ref(referent)
except TypeError:
return referent
def unwrapWeakref(referent):
try:
return referent()
except TypeError:
return referent
def warn(*msg):
msg = formatStr(msg)
sys.stderr.write(msg)
def formatStr(msg, *arguments, **keywords):
if 'format' in keywords:
formatType = keywords['format']
else:
formatType = None
msg = [msg] + list(arguments)
for i in range(len(msg)):
x = msg[i]
if isinstance(x, bytes):
msg[i] = x.decode('utf-8')
if not isinstance(x, str):
try:
msg[i] = repr(x)
except TypeError:
try:
msg[i] = x.decode('utf-8')
except AttributeError:
msg[i] = '<__repr__ failed for ' + x.__class__.__name__ + '>'
except AttributeError:
msg[i] = '<__repr__ failed for ' + x.__class__.__name__ + '>'
if formatType == 'block':
return '\n*** '.join(msg)+'\n'
else:
return ' '.join(msg)+'\n'
if __name__ == '__main__':
import daseki
daseki.mainTest()
| true | true |
1c37c1f7bdcfbe9db8a303114887b9cbea05e3b6 | 4,260 | py | Python | bot/utils.py | Alpha-Omega-United/discord-bot | d395c1e139de8b59773fb0a222d08f68105a811c | [
"MIT"
] | 1 | 2021-09-21T07:50:39.000Z | 2021-09-21T07:50:39.000Z | bot/utils.py | Alpha-Omega-United/discord-bot | d395c1e139de8b59773fb0a222d08f68105a811c | [
"MIT"
] | 1 | 2021-07-30T20:31:49.000Z | 2021-08-17T16:50:43.000Z | bot/utils.py | Alpha-Omega-United/discord-bot | d395c1e139de8b59773fb0a222d08f68105a811c | [
"MIT"
] | 1 | 2021-08-10T16:41:39.000Z | 2021-08-10T16:41:39.000Z | """Helper functions."""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
import hikari
from bot import constants
if TYPE_CHECKING:
from typing import Awaitable
import tanjun
def is_admin(member: hikari.Member) -> bool:
"""
Check if a member is an admin.
Args:
member (hikari.Member): Member to check
Returns:
bool: If the member is an admin or not
"""
roles = member.get_roles()
return any(role.id == constants.ADMIN_ROLE_ID for role in roles)
async def wait_for_interaction(
ctx: tanjun.SlashContext,
message: hikari.Message,
timeout: int | float = 60 * 5,
) -> hikari.ComponentInteraction:
"""
Wait for an interaction to happen on the message.
Args:
ctx (tanjun.SlashContext): The context the message was sent in.
message (hikari.Message): The message to wait for interactions on.
timeout (int | float):
How long to wait before stop waiting.
Defaults to 60*5 (5 minutes).
Returns:
hikari.ComponentInteraction:
the event, garantied to contain a ComponentInteraction
Raises:
TypeError: ctx.events is None
"""
def predicate(event: hikari.InteractionCreateEvent) -> bool:
inte = event.interaction
return (
isinstance(inte, hikari.ComponentInteraction)
and inte.message.id == message.id
)
if ctx.events is None:
raise TypeError("ctx.events is None")
event = await ctx.events.wait_for(
hikari.InteractionCreateEvent, timeout=timeout, predicate=predicate
)
return event.interaction # type: ignore
@dataclass(frozen=True)
class ButtonInfo:
"""Info about a discord button."""
label: str
style: hikari.InteractiveButtonTypesT
emoji: hikari.Snowflakeish | hikari.Emoji | str | hikari.UndefinedType = (
hikari.UNDEFINED
)
async def confirmation_embed(
ctx: tanjun.SlashContext,
*,
callback: Awaitable[None],
embed: hikari.Embed,
confirm_button: ButtonInfo,
deny_button: ButtonInfo = ButtonInfo("Cancel", hikari.ButtonStyle.DANGER),
) -> None:
"""
Create a confirmation embed and call a callback if the user confirms.
Args:
ctx (tanjun.SlashContext): The context to create the popup in
callback (Awaitable[None]): The callback to call if the user click confirm
embed (hikari.Embed): The embed to present the user with.
confirm_button (ButtonInfo): The button the confirms the selection.
deny_button (ButtonInfo):
The button to cancel the action.
Defaults to ButtonInfo("Cancel", hikari.ButtonStyle.DANGER).
"""
confirm_button_id = "confirm"
deny_button_id = "deny"
buttons = (
ctx.rest.build_action_row()
.add_button(confirm_button.style, confirm_button_id)
.set_label(confirm_button.label)
.set_emoji(confirm_button.emoji)
.add_to_container()
.add_button(deny_button.style, deny_button_id)
.set_label(deny_button.label)
.set_emoji(deny_button.emoji)
.add_to_container()
)
message = await ctx.respond(
embed=embed, component=buttons, ensure_result=True
)
interaction = await wait_for_interaction(ctx, message)
if embed.title is None:
embed.title = ""
if interaction.custom_id == confirm_button_id:
await callback
embed.color = constants.Colors.GREEN
embed.title += ": DONE"
else:
embed.color = constants.Colors.RED
embed.title += ": Canceld"
# disable buttons
buttons = (
ctx.rest.build_action_row()
.add_button(confirm_button.style, confirm_button_id)
.set_label(confirm_button.label)
.set_emoji(confirm_button.emoji)
.set_is_disabled(True)
.add_to_container()
.add_button(deny_button.style, deny_button_id)
.set_label(deny_button.label)
.set_emoji(deny_button.emoji)
.set_is_disabled(True)
.add_to_container()
)
await interaction.create_initial_response(
hikari.ResponseType.MESSAGE_UPDATE, embed=embed, component=buttons
)
| 28.026316 | 82 | 0.661972 |
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
import hikari
from bot import constants
if TYPE_CHECKING:
from typing import Awaitable
import tanjun
def is_admin(member: hikari.Member) -> bool:
roles = member.get_roles()
return any(role.id == constants.ADMIN_ROLE_ID for role in roles)
async def wait_for_interaction(
ctx: tanjun.SlashContext,
message: hikari.Message,
timeout: int | float = 60 * 5,
) -> hikari.ComponentInteraction:
def predicate(event: hikari.InteractionCreateEvent) -> bool:
inte = event.interaction
return (
isinstance(inte, hikari.ComponentInteraction)
and inte.message.id == message.id
)
if ctx.events is None:
raise TypeError("ctx.events is None")
event = await ctx.events.wait_for(
hikari.InteractionCreateEvent, timeout=timeout, predicate=predicate
)
return event.interaction
@dataclass(frozen=True)
class ButtonInfo:
    """Immutable description of one message button (label, style, optional emoji)."""

    label: str  # text shown on the button
    style: hikari.InteractiveButtonTypesT  # hikari interactive button style
    emoji: hikari.Snowflakeish | hikari.Emoji | str | hikari.UndefinedType = (
        hikari.UNDEFINED
    )  # optional emoji; hikari.UNDEFINED means "no emoji"
def _build_confirm_row(
    ctx: tanjun.SlashContext,
    confirm_button: ButtonInfo,
    confirm_button_id: str,
    deny_button: ButtonInfo,
    deny_button_id: str,
    *,
    disabled: bool = False,
):
    """Build the confirm/deny action row; *disabled* greys out both buttons."""
    return (
        ctx.rest.build_action_row()
        .add_button(confirm_button.style, confirm_button_id)
        .set_label(confirm_button.label)
        .set_emoji(confirm_button.emoji)
        .set_is_disabled(disabled)
        .add_to_container()
        .add_button(deny_button.style, deny_button_id)
        .set_label(deny_button.label)
        .set_emoji(deny_button.emoji)
        .set_is_disabled(disabled)
        .add_to_container()
    )


async def confirmation_embed(
    ctx: tanjun.SlashContext,
    *,
    callback: Awaitable[None],
    embed: hikari.Embed,
    confirm_button: ButtonInfo,
    deny_button: ButtonInfo = ButtonInfo("Cancel", hikari.ButtonStyle.DANGER),
) -> None:
    """Show *embed* with confirm/deny buttons and await *callback* on confirm.

    The embed's colour/title is updated to reflect the outcome, and the
    buttons are re-sent disabled so the prompt cannot be clicked twice.
    """
    confirm_button_id = "confirm"
    deny_button_id = "deny"
    # The row-building was duplicated verbatim below; it now lives in
    # _build_confirm_row with a `disabled` switch.
    buttons = _build_confirm_row(
        ctx, confirm_button, confirm_button_id, deny_button, deny_button_id
    )
    message = await ctx.respond(
        embed=embed, component=buttons, ensure_result=True
    )
    interaction = await wait_for_interaction(ctx, message)
    if embed.title is None:
        embed.title = ""
    if interaction.custom_id == confirm_button_id:
        await callback
        embed.color = constants.Colors.GREEN
        embed.title += ": DONE"
    else:
        embed.color = constants.Colors.RED
        # Fixed user-facing typo: was ": Canceld".
        embed.title += ": Cancelled"
    # Disable both buttons so the prompt can no longer be interacted with.
    buttons = _build_confirm_row(
        ctx, confirm_button, confirm_button_id, deny_button, deny_button_id,
        disabled=True,
    )
    await interaction.create_initial_response(
        hikari.ResponseType.MESSAGE_UPDATE, embed=embed, component=buttons
    )
| true | true |
1c37c32f6b96cbaa4f9672e1a1276cb7d4d1aad4 | 5,702 | py | Python | modules/event.py | TheApertureProject/Yume-Bot | 9b1219958f1c43489c0fbc33825ae7656eeea02e | [
"MIT"
] | 1 | 2020-06-04T17:26:13.000Z | 2020-06-04T17:26:13.000Z | modules/event.py | TheApertureProject/Yume-Bot | 9b1219958f1c43489c0fbc33825ae7656eeea02e | [
"MIT"
] | null | null | null | modules/event.py | TheApertureProject/Yume-Bot | 9b1219958f1c43489c0fbc33825ae7656eeea02e | [
"MIT"
] | null | null | null | # Copyright (c) 2019.
# MIT License
#
# Copyright (c) 2019 YumeNetwork
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
import discord
from discord.ext import commands
from modules.sql.guilddb import GuildDB
from modules.utils import lists
class Event(commands.Cog):
    """Cog reacting to member join/leave events.

    Sends a greeting/farewell embed to the guild's configured greet channel
    and rebuilds the member-statistics voice channels when enabled.
    """

    conf = {}

    def __init__(self, bot):
        self.bot = bot
        self.config = bot.config

    async def _rebuild_stats_channels(self, member, guild):
        """Recreate the statistics voice channels with up-to-date counts.

        Deletes every channel inside the configured stats category, then
        creates fresh "Users", "Bots" and "Members" voice channels that
        members cannot connect to.  Any failure (missing category, missing
        permissions) aborts silently, matching the original best-effort
        behaviour that was copy-pasted in both listeners.
        """
        try:
            category = discord.utils.get(
                member.guild.categories, id=int(guild.stats_category))
        except discord.HTTPException:
            return
        if not isinstance(category, discord.CategoryChannel):
            return
        for channel in category.channels:
            try:
                await channel.delete()
            except (discord.Forbidden, discord.HTTPException):
                return
        overwrite = {
            member.guild.default_role: discord.PermissionOverwrite(connect=False),
        }
        await member.guild.create_voice_channel(
            f'Users : {len(member.guild.members)}',
            overwrites=overwrite, category=category)
        bots = [user for user in member.guild.members if user.bot]
        await member.guild.create_voice_channel(
            f'Bots : {len(bots)}', overwrites=overwrite, category=category)
        await member.guild.create_voice_channel(
            f'Members : {len(member.guild.members) - len(bots)}',
            overwrites=overwrite, category=category)

    @commands.Cog.listener()
    async def on_member_join(self, member: discord.Member):
        """Greet a newly joined member and refresh the statistics channels.

        :param member: The member who joined the guild
        """
        guild = GuildDB.get_one(member.guild.id)
        if guild.greet:
            channel = self.bot.get_channel(int(guild.greet_chan))
            greet = random.choice(lists.greet)
            em = discord.Embed(timestamp=member.joined_at)
            em.set_author(name="Welcome", icon_url=member.avatar_url)
            em.set_footer(text=f'{member.name}')
            em.description = f"{greet}"
            await channel.send(embed=em)
        if guild.stats_channels:
            await self._rebuild_stats_channels(member, guild)

    @commands.Cog.listener()
    async def on_member_remove(self, member):
        """Send a farewell message and refresh the statistics channels.

        :param member: The member who has left
        """
        guild = GuildDB.get_one(member.guild.id)
        if guild.greet:
            try:
                channel = member.guild.get_channel(int(guild.greet_chan))
            except discord.HTTPException:
                pass
            else:
                greet = random.choice(lists.leave)
                em = discord.Embed(timestamp=member.joined_at)
                em.set_author(name="Bye", icon_url=member.avatar_url)
                em.set_footer(text=f'{member.name}')
                em.description = f"{greet}"
                await channel.send(embed=em)
        if guild.stats_channels:
            await self._rebuild_stats_channels(member, guild)
def setup(bot):
    """Extension entry point: register the Event cog on *bot*."""
    bot.add_cog(Event(bot))
| 38.789116 | 119 | 0.571554 |
import random
import discord
from discord.ext import commands
from modules.sql.guilddb import GuildDB
from modules.utils import lists
class Event(commands.Cog):
conf = {}
def __init__(self, bot):
self.bot = bot
self.config = bot.config
@commands.Cog.listener()
async def on_member_join(self, member: discord.Member):
guild = GuildDB.get_one(member.guild.id)
if guild.greet:
channel = self.bot.get_channel(int(guild.greet_chan))
greet = random.choice(lists.greet)
em = discord.Embed(timestamp=member.joined_at)
em.set_author(name="Welcome", icon_url=member.avatar_url)
em.set_footer(text=f'{member.name}')
em.description = f"{greet}"
await channel.send(embed=em)
if guild.stats_channels:
try:
category = discord.utils.get(
member.guild.categories, id=int(guild.stats_category))
except discord.HTTPException:
return
else:
if not isinstance(category, discord.CategoryChannel):
return
for channel in category.channels:
try:
await channel.delete()
except discord.Forbidden:
return
except discord.HTTPException:
return
overwrite = {
member.guild.default_role: discord.PermissionOverwrite(connect=False),
}
await member.guild.create_voice_channel(f'Users : {len(member.guild.members)}', overwrites=overwrite,
category=category)
bots = []
for user in member.guild.members:
if user.bot is True:
bots.append(user)
await member.guild.create_voice_channel(f'Bots : {len(bots)}', overwrites=overwrite, category=category)
await member.guild.create_voice_channel(f'Members : {len(member.guild.members) - len(bots)}',
overwrites=overwrite, category=category)
@commands.Cog.listener()
async def on_member_remove(self, member):
guild = GuildDB.get_one(member.guild.id)
if guild.greet:
try:
channel = member.guild.get_channel(int(guild.greet_chan))
except discord.HTTPException:
pass
else:
greet = random.choice(lists.leave)
em = discord.Embed(timestamp=member.joined_at)
em.set_author(name="Bye", icon_url=member.avatar_url)
em.set_footer(text=f'{member.name}')
em.description = f"{greet}"
await channel.send(embed=em)
if guild.stats_channels:
try:
category = discord.utils.get(
member.guild.categories, id=int(guild.stats_category))
except discord.HTTPException:
return
else:
if not isinstance(category, discord.CategoryChannel):
return
for channel in category.channels:
try:
await channel.delete()
except discord.Forbidden:
return
except discord.HTTPException:
return
overwrite = {
member.guild.default_role: discord.PermissionOverwrite(connect=False),
}
await member.guild.create_voice_channel(f'Users : {len(member.guild.members)}', overwrites=overwrite,
category=category)
bots = []
for user in member.guild.members:
if user.bot is True:
bots.append(user)
await member.guild.create_voice_channel(f'Bots : {len(bots)}', overwrites=overwrite, category=category)
await member.guild.create_voice_channel(f'Members : {len(member.guild.members) - len(bots)}',
overwrites=overwrite, category=category)
def setup(bot):
bot.add_cog(Event(bot))
| true | true |
1c37c515ccfe0f90bc286e7c0bc33482d12a3037 | 7,574 | py | Python | src/oci/data_safe/models/tls_config.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/data_safe/models/tls_config.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/data_safe/models/tls_config.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class TlsConfig(object):
    """The details required to establish a TLS enabled connection."""

    #: Allowed values for the ``status`` property.
    STATUS_ENABLED = "ENABLED"
    STATUS_DISABLED = "DISABLED"

    #: Allowed value for the ``certificate_store_type`` property.
    CERTIFICATE_STORE_TYPE_JKS = "JKS"

    def __init__(self, **kwargs):
        """Initialize a new TlsConfig from keyword arguments.

        Supported keyword arguments (mirroring the properties, all ``str``):
        ``status``, ``certificate_store_type``, ``store_password``,
        ``trust_store_content``, ``key_store_content``.
        """
        # (python attribute name, wire/JSON attribute name) pairs.
        field_pairs = (
            ('status', 'status'),
            ('certificate_store_type', 'certificateStoreType'),
            ('store_password', 'storePassword'),
            ('trust_store_content', 'trustStoreContent'),
            ('key_store_content', 'keyStoreContent'),
        )
        self.swagger_types = {name: 'str' for name, _ in field_pairs}
        self.attribute_map = dict(field_pairs)
        for name, _ in field_pairs:
            setattr(self, '_' + name, None)

    @property
    def status(self):
        """**[Required]** Whether the database connection is TLS enabled.

        One of "ENABLED", "DISABLED", or 'UNKNOWN_ENUM_VALUE' for any
        unrecognized value returned by the service.

        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        if value_allowed_none_or_none_sentinel(status, ["ENABLED", "DISABLED"]):
            self._status = status
        else:
            self._status = 'UNKNOWN_ENUM_VALUE'

    @property
    def certificate_store_type(self):
        """The format of the certificate store.

        Either "JKS" or 'UNKNOWN_ENUM_VALUE' for any unrecognized value
        returned by the service.

        :rtype: str
        """
        return self._certificate_store_type

    @certificate_store_type.setter
    def certificate_store_type(self, certificate_store_type):
        if value_allowed_none_or_none_sentinel(certificate_store_type, ["JKS"]):
            self._certificate_store_type = certificate_store_type
        else:
            self._certificate_store_type = 'UNKNOWN_ENUM_VALUE'

    @property
    def store_password(self):
        """The password to read the trust store and key store files, if they
        are password protected.

        :rtype: str
        """
        return self._store_password

    @store_password.setter
    def store_password(self, store_password):
        self._store_password = store_password

    @property
    def trust_store_content(self):
        """Base64 encoded string of trust store file content.

        :rtype: str
        """
        return self._trust_store_content

    @trust_store_content.setter
    def trust_store_content(self, trust_store_content):
        self._trust_store_content = trust_store_content

    @property
    def key_store_content(self):
        """Base64 encoded string of key store file content.

        :rtype: str
        """
        return self._key_store_content

    @key_store_content.setter
    def key_store_content(self, key_store_content):
        self._key_store_content = key_store_content

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 33.964126 | 245 | 0.66319 |
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class TlsConfig(object):
STATUS_ENABLED = "ENABLED"
STATUS_DISABLED = "DISABLED"
CERTIFICATE_STORE_TYPE_JKS = "JKS"
def __init__(self, **kwargs):
self.swagger_types = {
'status': 'str',
'certificate_store_type': 'str',
'store_password': 'str',
'trust_store_content': 'str',
'key_store_content': 'str'
}
self.attribute_map = {
'status': 'status',
'certificate_store_type': 'certificateStoreType',
'store_password': 'storePassword',
'trust_store_content': 'trustStoreContent',
'key_store_content': 'keyStoreContent'
}
self._status = None
self._certificate_store_type = None
self._store_password = None
self._trust_store_content = None
self._key_store_content = None
@property
def status(self):
return self._status
@status.setter
def status(self, status):
allowed_values = ["ENABLED", "DISABLED"]
if not value_allowed_none_or_none_sentinel(status, allowed_values):
status = 'UNKNOWN_ENUM_VALUE'
self._status = status
@property
def certificate_store_type(self):
return self._certificate_store_type
@certificate_store_type.setter
def certificate_store_type(self, certificate_store_type):
allowed_values = ["JKS"]
if not value_allowed_none_or_none_sentinel(certificate_store_type, allowed_values):
certificate_store_type = 'UNKNOWN_ENUM_VALUE'
self._certificate_store_type = certificate_store_type
@property
def store_password(self):
return self._store_password
@store_password.setter
def store_password(self, store_password):
self._store_password = store_password
@property
def trust_store_content(self):
return self._trust_store_content
@trust_store_content.setter
def trust_store_content(self, trust_store_content):
self._trust_store_content = trust_store_content
@property
def key_store_content(self):
return self._key_store_content
@key_store_content.setter
def key_store_content(self, key_store_content):
self._key_store_content = key_store_content
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c37c566dd1447eac08426f66222b65b38b12130 | 1,121 | py | Python | examples/standalone_plan.py | dpcomp-org/ektelo | 7629fbf106f9b9568c66a0b97f6005280022c3d8 | [
"Apache-2.0"
] | null | null | null | examples/standalone_plan.py | dpcomp-org/ektelo | 7629fbf106f9b9568c66a0b97f6005280022c3d8 | [
"Apache-2.0"
] | 1 | 2019-04-09T20:51:32.000Z | 2019-04-09T20:51:32.000Z | examples/standalone_plan.py | dpcomp-org/ektelo | 7629fbf106f9b9568c66a0b97f6005280022c3d8 | [
"Apache-2.0"
] | null | null | null | """ Example of the invocation of a standalone plan
"""
from ektelo import data
from ektelo import workload
from ektelo.plans import standalone
from ektelo.private import transformation
import os
import numpy as np
import yaml
# Paths come from the environment so the example stays portable.
CSV_PATH = os.environ['EKTELO_DATA']
CONFIG_PATH = os.path.join(os.environ['EKTELO_HOME'], 'resources', 'config')

# Load relation.
filename = os.path.join(CSV_PATH, 'cps.csv')
config_file = os.path.join(CONFIG_PATH, 'cps.yml')
# yaml.safe_load + context manager: the original `yaml.load(open(...))`
# leaked the file handle and used the unsafe, loader-less yaml.load that
# is deprecated (an error in PyYAML >= 6 without an explicit Loader).
with open(config_file, 'r') as f:
    config = yaml.safe_load(f)['cps_config']
R = data.Relation(config).load_csv(filename, ',')

# Choose reduced domain for relation.
domain = (10, 1, 7, 1, 1)

# Vectorize relation.
x = transformation.Vectorize('CPS', reduced_domain=domain).transform(R)

# Arbitrary constants for MWEM.
seed = 0
ratio = 0.5
rounds = 3
data_scale = 1e5
use_history = True
epsilon = 0.1

# Create query workload.
W = workload.RandomRange(None, (np.prod(domain),), 25)

# Calculate noisy estimate of x.
x_hat = standalone.Mwem(ratio, rounds, data_scale, domain, use_history).Run(W, x, epsilon, seed)

# Report noisy query responses.
print(W.get_matrix() * x_hat)
| 26.690476 | 96 | 0.742194 | from ektelo import data
from ektelo import workload
from ektelo.plans import standalone
from ektelo.private import transformation
import os
import numpy as np
import yaml
CSV_PATH = os.environ['EKTELO_DATA']
CONFIG_PATH = os.path.join(os.environ['EKTELO_HOME'], 'resources', 'config')
filename = os.path.join(CSV_PATH, 'cps.csv')
config_file = os.path.join(CONFIG_PATH, 'cps.yml')
config = yaml.load(open(config_file, 'r').read())['cps_config']
R = data.Relation(config).load_csv(filename, ',')
domain = (10, 1, 7, 1, 1)
x = transformation.Vectorize('CPS', reduced_domain=domain).transform(R)
seed = 0
ratio = 0.5
rounds = 3
data_scale = 1e5
use_history = True
epsilon = 0.1
W = workload.RandomRange(None, (np.prod(domain),), 25)
x_hat = standalone.Mwem(ratio, rounds, data_scale, domain, use_history).Run(W, x, epsilon, seed)
print(W.get_matrix() * x_hat)
| true | true |
1c37c5dec7c9b26fac370eb8d7e111002c09524a | 171 | py | Python | .history/calculator_factories_20210629130649.py | Aleff13/calculadora-tkinter | 01e169d3c1d128976eb3a41ea1f53f11d6157e44 | [
"MIT"
] | null | null | null | .history/calculator_factories_20210629130649.py | Aleff13/calculadora-tkinter | 01e169d3c1d128976eb3a41ea1f53f11d6157e44 | [
"MIT"
] | null | null | null | .history/calculator_factories_20210629130649.py | Aleff13/calculadora-tkinter | 01e169d3c1d128976eb3a41ea1f53f11d6157e44 | [
"MIT"
] | null | null | null | import tkinter as tk
def make_root() -> tk.Tk:
    """Create and return the calculator's main window."""
    window = tk.Tk()
    window.title("Calculator")
    window.config(background="white", padx=10, pady=10)
    return window
def make_root() -> tk.Tk:
root = tk.Tk()
root.title("Calculator")
root.config(padx=10, pady=10, background="white")
return root | true | true |
1c37c69715879ce386b01293ebb811b4037e288a | 6,823 | py | Python | third_party/boto/manage/task.py | stdft112/depot_tools | 52c7211807930272424213ff6127c209de790eca | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | third_party/boto/manage/task.py | stdft112/depot_tools | 52c7211807930272424213ff6127c209de790eca | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | third_party/boto/manage/task.py | stdft112/depot_tools | 52c7211807930272424213ff6127c209de790eca | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.sdb.db.property import StringProperty, DateTimeProperty, IntegerProperty
from boto.sdb.db.model import Model
import datetime, subprocess, StringIO, time
def check_hour(val):
    """Validate a Task ``hour`` value.

    Accepts the wildcard '*' or anything int()-convertible in [0, 23].
    Raises ValueError (same exception type as before, now with a message)
    for out-of-range hours; non-numeric values still raise ValueError
    from the int() conversion itself.
    """
    if val == '*':
        return
    if not 0 <= int(val) <= 23:
        raise ValueError('hour must be "*" or in the range 0-23, got %r' % (val,))
class Task(Model):
    """
    A scheduled, repeating task that can be executed by any participating server.

    The scheduling is similar to cron jobs.  Each task has an hour attribute;
    the allowable values for hour are [0-23|*].  To keep the operation
    reasonably efficient and not cause excessive polling, the minimum
    granularity of a Task is hourly.  Some examples:

         hour='*' - the task would be executed each hour
         hour='3' - the task would be executed at 3AM GMT each day.

    Coordination between servers happens through an SQS message whose body is
    the task id; the message's visibility timeout is used as the scheduling
    delay.
    """
    # Persistent attributes stored in SimpleDB via boto's Model layer.
    name = StringProperty()
    hour = StringProperty(required=True, validator=check_hour, default='*')
    command = StringProperty(required=True)
    last_executed = DateTimeProperty()
    last_status = IntegerProperty()
    last_output = StringProperty()
    message_id = StringProperty()

    @classmethod
    def start_all(cls, queue_name):
        # Enqueue an SQS message for every stored Task.
        for task in cls.all():
            task.start(queue_name)

    def __init__(self, id=None, **kw):
        Model.__init__(self, id, **kw)
        # Derived flags: hourly tasks use '*', daily tasks a fixed hour.
        self.hourly = self.hour == '*'
        self.daily = self.hour != '*'
        # Snapshot "now" once so all scheduling math in this instance agrees.
        self.now = datetime.datetime.utcnow()

    def check(self):
        """
        Determine how long until the next scheduled time for a Task.
        Returns the number of seconds until the next scheduled time or zero
        if the task needs to be run immediately.
        If it's an hourly task and it's never been run, run it now.
        If it's a daily task and it's never been run and the hour is right, run it now.

        NOTE(review): for daily tasks the returned delay is
        abs(hour - now.hour) hours rather than the time to the *next*
        occurrence, i.e. ((hour - now.hour) % 24) hours -- confirm this
        "just wake up early and re-check" behaviour is intended.
        """
        boto.log.info('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed))
        if self.hourly and not self.last_executed:
            return 0
        if self.daily and not self.last_executed:
            if int(self.hour) == self.now.hour:
                return 0
            else:
                return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60
        delta = self.now - self.last_executed
        if self.hourly:
            if delta.seconds >= 60*60:
                return 0
            else:
                return 60*60 - delta.seconds
        else:
            if int(self.hour) == self.now.hour:
                if delta.days >= 1:
                    return 0
                else:
                    return 82800 # 23 hours, just to be safe
            else:
                return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60

    def _run(self, msg, vtimeout):
        """Execute self.command in a shell, extending the SQS message's
        visibility timeout while the command runs, then record the result."""
        boto.log.info('Task[%s] - running:%s' % (self.name, self.command))
        log_fp = StringIO.StringIO()
        process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        nsecs = 5
        current_timeout = vtimeout
        # Poll every 5 seconds; each time the elapsed time reaches the
        # current visibility timeout, extend it so no other server picks
        # the message up while the command is still running.
        while process.poll() == None:
            boto.log.info('nsecs=%s, timeout=%s' % (nsecs, current_timeout))
            if nsecs >= current_timeout:
                current_timeout += vtimeout
                boto.log.info('Task[%s] - setting timeout to %d seconds' % (self.name, current_timeout))
                if msg:
                    msg.change_visibility(current_timeout)
            time.sleep(5)
            nsecs += 5
        # Collect both stdout and stderr into the log buffer.
        t = process.communicate()
        log_fp.write(t[0])
        log_fp.write(t[1])
        boto.log.info('Task[%s] - output: %s' % (self.name, log_fp.getvalue()))
        self.last_executed = self.now
        self.last_status = process.returncode
        # StringProperty is capped; keep at most the first 1023 characters.
        self.last_output = log_fp.getvalue()[0:1023]

    def run(self, msg, vtimeout=60):
        """Run the task now if it is due; otherwise push the message's
        visibility out to the computed delay so it reappears when due."""
        delay = self.check()
        boto.log.info('Task[%s] - delay=%s seconds' % (self.name, delay))
        if delay == 0:
            self._run(msg, vtimeout)
            # Re-enqueue a fresh message for the next occurrence, record its
            # id (so stale duplicates can be detected), then delete the one
            # we just consumed.
            queue = msg.queue
            new_msg = queue.new_message(self.id)
            new_msg = queue.write(new_msg)
            self.message_id = new_msg.id
            self.put()
            boto.log.info('Task[%s] - new message id=%s' % (self.name, new_msg.id))
            msg.delete()
            boto.log.info('Task[%s] - deleted message %s' % (self.name, msg.id))
        else:
            boto.log.info('new_vtimeout: %d' % delay)
            msg.change_visibility(delay)

    def start(self, queue_name):
        """Kick off scheduling by writing this task's id to the SQS queue
        and persisting the resulting message id."""
        boto.log.info('Task[%s] - starting with queue: %s' % (self.name, queue_name))
        queue = boto.lookup('sqs', queue_name)
        msg = queue.new_message(self.id)
        msg = queue.write(msg)
        self.message_id = msg.id
        self.put()
        boto.log.info('Task[%s] - start successful' % self.name)
class TaskPoller(object):
    """Endless consumer that pulls Task ids from an SQS queue and runs them."""

    def __init__(self, queue_name):
        self.sqs = boto.connect_sqs()
        self.queue = self.sqs.lookup(queue_name)

    def poll(self, wait=60, vtimeout=60):
        """Loop forever: read a message, run its Task, or sleep *wait* seconds."""
        while True:
            message = self.queue.read(vtimeout)
            if not message:
                # Queue was empty -- back off before polling again.
                time.sleep(wait)
                continue
            task = Task.get_by_id(message.get_body())
            if not task:
                continue
            if task.message_id and message.id != task.message_id:
                # A stale duplicate from an earlier scheduling round.
                boto.log.info('Task[%s] - found extraneous message, ignoring' % task.name)
            else:
                boto.log.info('Task[%s] - read message %s' % (task.name, message.id))
                task.run(message, vtimeout)
| 38.767045 | 104 | 0.597245 |
import boto
from boto.sdb.db.property import StringProperty, DateTimeProperty, IntegerProperty
from boto.sdb.db.model import Model
import datetime, subprocess, StringIO, time
def check_hour(val):
if val == '*':
return
if int(val) < 0 or int(val) > 23:
raise ValueError
class Task(Model):
name = StringProperty()
hour = StringProperty(required=True, validator=check_hour, default='*')
command = StringProperty(required=True)
last_executed = DateTimeProperty()
last_status = IntegerProperty()
last_output = StringProperty()
message_id = StringProperty()
@classmethod
def start_all(cls, queue_name):
for task in cls.all():
task.start(queue_name)
def __init__(self, id=None, **kw):
Model.__init__(self, id, **kw)
self.hourly = self.hour == '*'
self.daily = self.hour != '*'
self.now = datetime.datetime.utcnow()
def check(self):
boto.log.info('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed))
if self.hourly and not self.last_executed:
return 0
if self.daily and not self.last_executed:
if int(self.hour) == self.now.hour:
return 0
else:
return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60
delta = self.now - self.last_executed
if self.hourly:
if delta.seconds >= 60*60:
return 0
else:
return 60*60 - delta.seconds
else:
if int(self.hour) == self.now.hour:
if delta.days >= 1:
return 0
else:
return 82800
else:
return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60
def _run(self, msg, vtimeout):
boto.log.info('Task[%s] - running:%s' % (self.name, self.command))
log_fp = StringIO.StringIO()
process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
nsecs = 5
current_timeout = vtimeout
while process.poll() == None:
boto.log.info('nsecs=%s, timeout=%s' % (nsecs, current_timeout))
if nsecs >= current_timeout:
current_timeout += vtimeout
boto.log.info('Task[%s] - setting timeout to %d seconds' % (self.name, current_timeout))
if msg:
msg.change_visibility(current_timeout)
time.sleep(5)
nsecs += 5
t = process.communicate()
log_fp.write(t[0])
log_fp.write(t[1])
boto.log.info('Task[%s] - output: %s' % (self.name, log_fp.getvalue()))
self.last_executed = self.now
self.last_status = process.returncode
self.last_output = log_fp.getvalue()[0:1023]
def run(self, msg, vtimeout=60):
delay = self.check()
boto.log.info('Task[%s] - delay=%s seconds' % (self.name, delay))
if delay == 0:
self._run(msg, vtimeout)
queue = msg.queue
new_msg = queue.new_message(self.id)
new_msg = queue.write(new_msg)
self.message_id = new_msg.id
self.put()
boto.log.info('Task[%s] - new message id=%s' % (self.name, new_msg.id))
msg.delete()
boto.log.info('Task[%s] - deleted message %s' % (self.name, msg.id))
else:
boto.log.info('new_vtimeout: %d' % delay)
msg.change_visibility(delay)
def start(self, queue_name):
boto.log.info('Task[%s] - starting with queue: %s' % (self.name, queue_name))
queue = boto.lookup('sqs', queue_name)
msg = queue.new_message(self.id)
msg = queue.write(msg)
self.message_id = msg.id
self.put()
boto.log.info('Task[%s] - start successful' % self.name)
class TaskPoller(object):
def __init__(self, queue_name):
self.sqs = boto.connect_sqs()
self.queue = self.sqs.lookup(queue_name)
def poll(self, wait=60, vtimeout=60):
while True:
m = self.queue.read(vtimeout)
if m:
task = Task.get_by_id(m.get_body())
if task:
if not task.message_id or m.id == task.message_id:
boto.log.info('Task[%s] - read message %s' % (task.name, m.id))
task.run(m, vtimeout)
else:
boto.log.info('Task[%s] - found extraneous message, ignoring' % task.name)
else:
time.sleep(wait)
| true | true |
1c37c6dbd9f5765f750aee5d81ab7b88235339b9 | 402 | py | Python | litemark/__init__.py | pyrustic/litemark | 49669865b7aa23f964eec0117b7ba1936658a0d2 | [
"MIT"
] | 4 | 2021-10-14T16:20:36.000Z | 2022-01-18T08:44:12.000Z | litemark/__init__.py | pyrustic/litemark | 49669865b7aa23f964eec0117b7ba1936658a0d2 | [
"MIT"
] | null | null | null | litemark/__init__.py | pyrustic/litemark | 49669865b7aa23f964eec0117b7ba1936658a0d2 | [
"MIT"
] | null | null | null | from litemark.core import scanner
from litemark.core.scanner import Element
from litemark.core.viewer import Viewer, get_light_style
from litemark.core.style import Style
from litemark.core.util import center_window
__all__ = ["scan", "Element", "Viewer", "get_light_style", "Style"]
def scan(text):
    """Tokenize *text* lazily.

    The result is a generator of tokens; wrap it with ``list(scan(text))``
    when a concrete list is required.
    """
    token_stream = scanner.scan(text)
    return token_stream
| 28.714286 | 67 | 0.756219 | from litemark.core import scanner
from litemark.core.scanner import Element
from litemark.core.viewer import Viewer, get_light_style
from litemark.core.style import Style
from litemark.core.util import center_window
__all__ = ["scan", "Element", "Viewer", "get_light_style", "Style"]
def scan(text):
return scanner.scan(text)
| true | true |
1c37c9049f3188be7356e8c6011ee01171864c66 | 3,234 | py | Python | tests/components/mqtt/test_server.py | logic/home-assistant | d3fed52254053a24e901cde8528c0e407d429311 | [
"Apache-2.0"
] | 7 | 2018-08-03T10:15:36.000Z | 2019-03-25T13:31:55.000Z | tests/components/mqtt/test_server.py | sara0871/thepracticaldev | 28de2d6f75656349de94dd897156d33fbadaa43a | [
"Apache-2.0"
] | 3 | 2021-09-08T03:06:43.000Z | 2022-03-12T00:56:04.000Z | tests/components/mqtt/test_server.py | sara0871/thepracticaldev | 28de2d6f75656349de94dd897156d33fbadaa43a | [
"Apache-2.0"
] | 3 | 2018-12-04T11:54:27.000Z | 2019-08-31T14:41:32.000Z | """The tests for the MQTT component embedded server."""
from unittest.mock import Mock, MagicMock, patch
import sys
import pytest
from homeassistant.setup import setup_component
import homeassistant.components.mqtt as mqtt
from tests.common import get_test_home_assistant, mock_coro
# Until https://github.com/beerfactory/hbmqtt/pull/139 is released
# Until https://github.com/beerfactory/hbmqtt/pull/139 is released
@pytest.mark.skipif(sys.version_info[:2] >= (3, 7),
                    reason='Package incompatible with Python 3.7')
class TestMQTT:
    """Test the MQTT component embedded server setup."""

    def setup_method(self, method):
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        setup_component(self.hass, 'http', {
            'api_password': 'super_secret'
        })

    def teardown_method(self, method):
        """Stop everything that was started."""
        self.hass.stop()

    @patch('passlib.apps.custom_app_context', Mock(return_value=''))
    @patch('tempfile.NamedTemporaryFile', Mock(return_value=MagicMock()))
    @patch('hbmqtt.broker.Broker', Mock(return_value=MagicMock()))
    @patch('hbmqtt.broker.Broker.start', Mock(return_value=mock_coro()))
    @patch('homeassistant.components.mqtt.MQTT')
    def test_creating_config_with_http_pass(self, mock_mqtt):
        """Test if the MQTT server gets started with the HTTP password."""
        mock_mqtt().async_connect.return_value = mock_coro(True)
        self.hass.bus.listen_once = MagicMock()
        password = 'super_secret'
        self.hass.config.api = MagicMock(api_password=password)
        assert setup_component(self.hass, mqtt.DOMAIN, {})
        assert mock_mqtt.called
        # Positional args 5 and 6 of the MQTT() call are expected to be the
        # username and password -- confirm against the MQTT constructor
        # signature if this ever breaks.
        assert mock_mqtt.mock_calls[1][1][5] == 'homeassistant'
        assert mock_mqtt.mock_calls[1][1][6] == password

    @patch('passlib.apps.custom_app_context', Mock(return_value=''))
    @patch('tempfile.NamedTemporaryFile', Mock(return_value=MagicMock()))
    @patch('hbmqtt.broker.Broker', Mock(return_value=MagicMock()))
    @patch('hbmqtt.broker.Broker.start', Mock(return_value=mock_coro()))
    @patch('homeassistant.components.mqtt.MQTT')
    def test_creating_config_with_http_no_pass(self, mock_mqtt):
        """Test if the MQTT server starts without an API password."""
        mock_mqtt().async_connect.return_value = mock_coro(True)
        self.hass.bus.listen_once = MagicMock()
        self.hass.config.api = MagicMock(api_password=None)
        assert setup_component(self.hass, mqtt.DOMAIN, {})
        assert mock_mqtt.called
        # With no API password, no broker credentials should be passed.
        assert mock_mqtt.mock_calls[1][1][5] is None
        assert mock_mqtt.mock_calls[1][1][6] is None

    @patch('tempfile.NamedTemporaryFile', Mock(return_value=MagicMock()))
    @patch('hbmqtt.broker.Broker.start', return_value=mock_coro())
    def test_broker_config_fails(self, mock_run):
        """Test if the MQTT component fails if server fails."""
        from hbmqtt.broker import BrokerException

        mock_run.side_effect = BrokerException

        self.hass.config.api = MagicMock(api_password=None)

        assert not setup_component(self.hass, mqtt.DOMAIN, {
            mqtt.DOMAIN: {mqtt.CONF_EMBEDDED: {}}
        })
| 41.461538 | 77 | 0.693568 | from unittest.mock import Mock, MagicMock, patch
import sys
import pytest
from homeassistant.setup import setup_component
import homeassistant.components.mqtt as mqtt
from tests.common import get_test_home_assistant, mock_coro
@pytest.mark.skipif(sys.version_info[:2] >= (3, 7),
reason='Package incompatible with Python 3.7')
class TestMQTT:
def setup_method(self, method):
self.hass = get_test_home_assistant()
setup_component(self.hass, 'http', {
'api_password': 'super_secret'
})
def teardown_method(self, method):
self.hass.stop()
@patch('passlib.apps.custom_app_context', Mock(return_value=''))
@patch('tempfile.NamedTemporaryFile', Mock(return_value=MagicMock()))
@patch('hbmqtt.broker.Broker', Mock(return_value=MagicMock()))
@patch('hbmqtt.broker.Broker.start', Mock(return_value=mock_coro()))
@patch('homeassistant.components.mqtt.MQTT')
def test_creating_config_with_http_pass(self, mock_mqtt):
mock_mqtt().async_connect.return_value = mock_coro(True)
self.hass.bus.listen_once = MagicMock()
password = 'super_secret'
self.hass.config.api = MagicMock(api_password=password)
assert setup_component(self.hass, mqtt.DOMAIN, {})
assert mock_mqtt.called
from pprint import pprint
pprint(mock_mqtt.mock_calls)
assert mock_mqtt.mock_calls[1][1][5] == 'homeassistant'
assert mock_mqtt.mock_calls[1][1][6] == password
@patch('passlib.apps.custom_app_context', Mock(return_value=''))
@patch('tempfile.NamedTemporaryFile', Mock(return_value=MagicMock()))
@patch('hbmqtt.broker.Broker', Mock(return_value=MagicMock()))
@patch('hbmqtt.broker.Broker.start', Mock(return_value=mock_coro()))
@patch('homeassistant.components.mqtt.MQTT')
def test_creating_config_with_http_no_pass(self, mock_mqtt):
mock_mqtt().async_connect.return_value = mock_coro(True)
self.hass.bus.listen_once = MagicMock()
self.hass.config.api = MagicMock(api_password=None)
assert setup_component(self.hass, mqtt.DOMAIN, {})
assert mock_mqtt.called
assert mock_mqtt.mock_calls[1][1][5] is None
assert mock_mqtt.mock_calls[1][1][6] is None
@patch('tempfile.NamedTemporaryFile', Mock(return_value=MagicMock()))
@patch('hbmqtt.broker.Broker.start', return_value=mock_coro())
def test_broker_config_fails(self, mock_run):
from hbmqtt.broker import BrokerException
mock_run.side_effect = BrokerException
self.hass.config.api = MagicMock(api_password=None)
assert not setup_component(self.hass, mqtt.DOMAIN, {
mqtt.DOMAIN: {mqtt.CONF_EMBEDDED: {}}
})
| true | true |
1c37c95e39cda9c81e5ca97fe1bf80cd91fe316a | 20,863 | py | Python | conans/model/conf.py | Mu-L/conan | 7c24ec4bbd6e8c16cdcd879403aae742689bc36a | [
"MIT"
] | 1 | 2019-11-04T17:23:09.000Z | 2019-11-04T17:23:09.000Z | conans/model/conf.py | Mu-L/conan | 7c24ec4bbd6e8c16cdcd879403aae742689bc36a | [
"MIT"
] | 1 | 2020-11-05T16:16:49.000Z | 2020-11-05T16:16:49.000Z | conans/model/conf.py | Mattlk13/conan | 005fc53485557b0a570bb71670f2ca9c66082165 | [
"MIT"
] | null | null | null | import fnmatch
from collections import OrderedDict
import six
from conans.errors import ConanException
# Catalog of the built-in [conf] entries: maps each fully-qualified conf name
# ("<section>:<name>") to its one-line help text.
# NOTE(review): the "tools.apple.xcodebuild:verbosity" help text is missing a
# closing apostrophe after 'quiet (left untouched here: it is runtime data).
BUILT_IN_CONFS = {
    "core:required_conan_version": "Raise if current version does not match the defined range.",
    "core.package_id:msvc_visual_incompatible": "Allows opting-out the fallback from the new msvc compiler to the Visual Studio compiler existing binaries",
    "core:default_profile": "Defines the default host profile ('default' by default)",
    "core:default_build_profile": "Defines the default build profile (None by default)",
    "tools.android:ndk_path": "Argument for the CMAKE_ANDROID_NDK",
    "tools.build:skip_test": "Do not execute CMake.test() and Meson.test() when enabled",
    "tools.build:jobs": "Default compile jobs number -jX Ninja, Make, /MP VS (default: max CPUs)",
    "tools.build:sysroot": "Pass the --sysroot=<tools.build:sysroot> flag if available. (None by default)",
    "tools.cmake.cmaketoolchain:generator": "User defined CMake generator to use instead of default",
    "tools.cmake.cmaketoolchain:find_package_prefer_config": "Argument for the CMAKE_FIND_PACKAGE_PREFER_CONFIG",
    "tools.cmake.cmaketoolchain:toolchain_file": "Use other existing file rather than conan_toolchain.cmake one",
    "tools.cmake.cmaketoolchain:user_toolchain": "Inject existing user toolchains at the beginning of conan_toolchain.cmake",
    "tools.cmake.cmaketoolchain:system_name": "Define CMAKE_SYSTEM_NAME in CMakeToolchain",
    "tools.cmake.cmaketoolchain:system_version": "Define CMAKE_SYSTEM_VERSION in CMakeToolchain",
    "tools.cmake.cmaketoolchain:system_processor": "Define CMAKE_SYSTEM_PROCESSOR in CMakeToolchain",
    "tools.env.virtualenv:auto_use": "Automatically activate virtualenv file generation",
    "tools.cmake.cmake_layout:build_folder_vars": "Settings and Options that will produce a different build folder and different CMake presets names",
    "tools.files.download:retry": "Number of retries in case of failure when downloading",
    "tools.files.download:retry_wait": "Seconds to wait between download attempts",
    "tools.gnu:make_program": "Indicate path to make program",
    "tools.gnu:define_libcxx11_abi": "Force definition of GLIBCXX_USE_CXX11_ABI=1 for libstdc++11",
    "tools.google.bazel:configs": "Define Bazel config file",
    "tools.google.bazel:bazelrc_path": "Defines Bazel rc-path",
    "tools.microsoft.msbuild:verbosity": "Verbosity level for MSBuild: 'Quiet', 'Minimal', 'Normal', 'Detailed', 'Diagnostic'",
    "tools.microsoft.msbuild:vs_version": "Defines the IDE version when using the new msvc compiler",
    "tools.microsoft.msbuild:max_cpu_count": "Argument for the /m when running msvc to build parallel projects",
    "tools.microsoft.msbuild:installation_path": "VS install path, to avoid auto-detect via vswhere, like C:/Program Files (x86)/Microsoft Visual Studio/2019/Community",
    "tools.microsoft.msbuilddeps:exclude_code_analysis": "Suppress MSBuild code analysis for patterns",
    "tools.microsoft.msbuildtoolchain:compile_options": "Dictionary with MSBuild compiler options",
    "tools.intel:installation_path": "Defines the Intel oneAPI installation root path",
    "tools.intel:setvars_args": "Custom arguments to be passed onto the setvars.sh|bat script from Intel oneAPI",
    "tools.system.package_manager:tool": "Default package manager tool: 'apt-get', 'yum', 'dnf', 'brew', 'pacman', 'choco', 'zypper', 'pkg' or 'pkgutil'",
    "tools.system.package_manager:mode": "Mode for package_manager tools: 'check' or 'install'",
    "tools.system.package_manager:sudo": "Use 'sudo' when invoking the package manager tools in Linux (False by default)",
    "tools.system.package_manager:sudo_askpass": "Use the '-A' argument if using sudo in Linux to invoke the system package manager (False by default)",
    "tools.apple.xcodebuild:verbosity": "Verbosity level for xcodebuild: 'verbose' or 'quiet",
    "tools.apple:enable_bitcode": "(boolean) Enable/Disable Bitcode Apple Clang flags",
    "tools.apple:enable_arc": "(boolean) Enable/Disable ARC Apple Clang flags",
    "tools.apple:enable_visibility": "(boolean) Enable/Disable Visibility Apple Clang flags",
    # Flags configuration
    "tools.build:cxxflags": "List of extra CXX flags used by different toolchains like CMakeToolchain, AutotoolsToolchain and MesonToolchain",
    "tools.build:cflags": "List of extra C flags used by different toolchains like CMakeToolchain, AutotoolsToolchain and MesonToolchain",
    "tools.build:defines": "List of extra definition flags used by different toolchains like CMakeToolchain and AutotoolsToolchain",
    "tools.build:sharedlinkflags": "List of extra flags used by CMakeToolchain for CMAKE_SHARED_LINKER_FLAGS_INIT variable",
    "tools.build:exelinkflags": "List of extra flags used by CMakeToolchain for CMAKE_EXE_LINKER_FLAGS_INIT variable",
}
def _is_profile_module(module_name):
# These are the modules that are propagated to profiles and user recipes
_user_modules = "tools.", "user."
return any(module_name.startswith(user_module) for user_module in _user_modules)
# FIXME: Refactor all the next classes because they are mostly the same as
# conan.tools.env.environment ones
class _ConfVarPlaceHolder:
    # Sentinel stored inside list-type conf values: it marks the position
    # where a lower-priority value is spliced in during composition
    # (see _ConfValue.compose_conf_value) and is stripped from dumped output.
    pass
class _ConfValue(object):
def __init__(self, name, value):
self._name = name
self._value = value
self._value_type = type(value)
def __repr__(self):
return repr(self._value)
@property
def value(self):
if self._value_type is list and _ConfVarPlaceHolder in self._value:
v = self._value[:]
v.remove(_ConfVarPlaceHolder)
return v
return self._value
def copy(self):
return _ConfValue(self._name, self._value)
def dumps(self):
if self._value is None:
return "{}=!".format(self._name) # unset
elif self._value_type is list and _ConfVarPlaceHolder in self._value:
v = self._value[:]
v.remove(_ConfVarPlaceHolder)
return "{}={}".format(self._name, v)
else:
return "{}={}".format(self._name, self._value)
def update(self, value):
if self._value_type is dict:
self._value.update(value)
def remove(self, value):
if self._value_type is list:
self._value.remove(value)
elif self._value_type is dict:
self._value.pop(value, None)
def append(self, value):
if self._value_type is not list:
raise ConanException("Only list-like values can append other values.")
if isinstance(value, list):
self._value.extend(value)
else:
self._value.append(value)
def prepend(self, value):
if self._value_type is not list:
raise ConanException("Only list-like values can prepend other values.")
if isinstance(value, list):
self._value = value + self._value
else:
self._value.insert(0, value)
def compose_conf_value(self, other):
"""
self has precedence, the "other" will add/append if possible and not conflicting, but
self mandates what to do. If self has define(), without placeholder, that will remain.
:type other: _ConfValue
"""
v_type = self._value_type
o_type = other._value_type
if v_type is list and o_type is list:
try:
index = self._value.index(_ConfVarPlaceHolder)
except ValueError: # It doesn't have placeholder
pass
else:
new_value = self._value[:] # do a copy
new_value[index:index + 1] = other._value # replace the placeholder
self._value = new_value
elif self._value is None or other._value is None \
or (isinstance(self._value, six.string_types) and isinstance(self._value, six.string_types)): # TODO: Python2, remove in 2.0
# It means any of those values were an "unset" so doing nothing because we don't
# really know the original value type
pass
elif o_type != v_type:
raise ConanException("It's not possible to compose {} values "
"and {} ones.".format(v_type.__name__, o_type.__name__))
# TODO: In case of any other object types?
class Conf:
    """Ordered mapping of conf names to _ConfValue entries.

    Supports define/unset/append/prepend/update operations and composition
    of two Conf objects where ``self`` has precedence.
    """

    # Putting some default expressions to check that any value could be false
    boolean_false_expressions = ("0", '"0"', "false", '"false"', "off")

    def __init__(self):
        # It being ordered allows for Windows case-insensitive composition
        self._values = OrderedDict()  # {var_name: [] of values, including separators}

    def __bool__(self):
        return bool(self._values)

    # TODO: Python2, remove in 2.0
    __nonzero__ = __bool__

    def __repr__(self):
        return "Conf: " + repr(self._values)

    def __eq__(self, other):
        """
        :type other: Conf
        """
        return other._values == self._values

    # TODO: Python2, remove in 2.0
    def __ne__(self, other):
        return not self.__eq__(other)

    def __getitem__(self, name):
        """
        DEPRECATED: it's going to disappear in Conan 2.0. Use self.get() instead.
        """
        # FIXME: Keeping backward compatibility
        return self.get(name)

    def __setitem__(self, name, value):
        """
        DEPRECATED: it's going to disappear in Conan 2.0.
        """
        # FIXME: Keeping backward compatibility
        self.define(name, value)  # it's like a new definition

    def __delitem__(self, name):
        """
        DEPRECATED: it's going to disappear in Conan 2.0.
        """
        # FIXME: Keeping backward compatibility
        del self._values[name]

    def items(self):
        """Yield (name, value) pairs, values with placeholders stripped."""
        # FIXME: Keeping backward compatibility
        for k, v in self._values.items():
            yield k, v.value

    @property
    def sha(self):
        """The dumped text, used as a stable identity of this Conf."""
        # FIXME: Keeping backward compatibility
        return self.dumps()

    @staticmethod
    def _get_boolean_value(value):
        """Interpret *value* as a boolean: only the expressions in
        ``boolean_false_expressions`` (case-insensitive) count as False."""
        if type(value) is bool:
            return value
        elif str(value).lower() in Conf.boolean_false_expressions:
            return False
        else:
            return True

    def get(self, conf_name, default=None, check_type=None):
        """
        Get all the values belonging to the passed conf name.

        :param conf_name: conf name
        :param default: default value in case of conf does not have the conf_name key
        :param check_type: check the conf type(value) is the same as the given by this param.
               There are two default smart conversions for bool and str types.
        :raises ConanException: if the stored value does not match check_type.
        """
        conf_value = self._values.get(conf_name)
        if conf_value:
            v = conf_value.value
            # Some smart conversions
            if check_type is bool and not isinstance(v, bool):
                # Perhaps, user has introduced a "false", "0" or even "off"
                return self._get_boolean_value(v)
            elif check_type is str and not isinstance(v, str):
                return str(v)
            elif v is None:  # value was unset
                return default
            elif check_type is not None and not isinstance(v, check_type):
                raise ConanException("[conf] {name} must be a {type}-like object. "
                                     "The value '{value}' introduced is a {vtype} "
                                     "object".format(name=conf_name, type=check_type.__name__,
                                                     value=v, vtype=type(v).__name__))
            return v
        else:
            return default

    def pop(self, conf_name, default=None):
        """
        Remove any key-value given the conf name
        """
        value = self.get(conf_name, default=default)
        self._values.pop(conf_name, None)
        return value

    @staticmethod
    def _validate_lower_case(name):
        # Conf names are lowercase by convention; enforced on every write.
        if name != name.lower():
            raise ConanException("Conf '{}' must be lowercase".format(name))

    def copy(self):
        """Return a new Conf sharing the same _ConfValue entries (shallow)."""
        c = Conf()
        c._values = self._values.copy()
        return c

    def dumps(self):
        """ returns a string with a profile-like original definition, not the full environment
        values
        """
        # Reversed so the most recently declared entry is dumped last.
        return "\n".join([v.dumps() for v in reversed(self._values.values())])

    def define(self, name, value):
        """Set *name* to *value*, replacing any previous entry."""
        self._validate_lower_case(name)
        self._values[name] = _ConfValue(name, value)

    def unset(self, name):
        """
        clears the variable, equivalent to a unset or set XXX=
        """
        self._values[name] = _ConfValue(name, None)

    def update(self, name, value):
        """Merge the dict *value* into the (dict-typed) entry *name*."""
        self._validate_lower_case(name)
        # Seed with an empty dict so _ConfValue.update applies.
        conf_value = _ConfValue(name, {})
        self._values.setdefault(name, conf_value).update(value)

    def append(self, name, value):
        """Append *value* to the list entry *name*, creating it if needed."""
        self._validate_lower_case(name)
        # The placeholder marks where lower-priority values compose in later.
        conf_value = _ConfValue(name, [_ConfVarPlaceHolder])
        self._values.setdefault(name, conf_value).append(value)

    def prepend(self, name, value):
        """Prepend *value* to the list entry *name*, creating it if needed."""
        self._validate_lower_case(name)
        conf_value = _ConfValue(name, [_ConfVarPlaceHolder])
        self._values.setdefault(name, conf_value).prepend(value)

    def remove(self, name, value):
        """Remove *value* from the container entry *name*.

        :raises ConanException: if the entry does not exist.
        """
        conf_value = self._values.get(name)
        if conf_value:
            conf_value.remove(value)
        else:
            raise ConanException("Conf {} does not exist.".format(name))

    def compose_conf(self, other):
        """
        :param other: other has less priority than current one
        :type other: Conf
        """
        for k, v in other._values.items():
            existing = self._values.get(k)
            if existing is None:
                self._values[k] = v.copy()
            else:
                existing.compose_conf_value(v)
        return self

    def filter_user_modules(self):
        """Return a new Conf containing only "tools."/"user." entries."""
        result = Conf()
        for k, v in self._values.items():
            if _is_profile_module(k):
                result._values[k] = v
        return result
class ConfDefinition:
    """Conf entries grouped by package-reference pattern.

    Keys of ``_pattern_confs`` are fnmatch patterns (e.g. "zlib/*") or None
    for the global scope; values are Conf objects.
    """

    # Profile-text operators, ordered so multi-char ones match before "=".
    actions = (("+=", "append"), ("=+", "prepend"),
               ("=!", "unset"), ("=", "define"))

    def __init__(self):
        self._pattern_confs = OrderedDict()  # {pattern or None: Conf}

    def __repr__(self):
        return "ConfDefinition: " + repr(self._pattern_confs)

    def __bool__(self):
        return bool(self._pattern_confs)

    __nonzero__ = __bool__  # TODO: Python2, remove in 2.0

    def __getitem__(self, module_name):
        """
        DEPRECATED: it's going to disappear in Conan 2.0. Use self.get() instead.
        if a module name is requested for this, it goes to the None-Global config by default
        """
        pattern, name = self._split_pattern_name(module_name)
        return self._pattern_confs.get(pattern, Conf()).get(name)

    def __delitem__(self, module_name):
        """
        DEPRECATED: it's going to disappear in Conan 2.0. Use self.pop() instead.
        if a module name is requested for this, it goes to the None-Global config by default
        """
        pattern, name = self._split_pattern_name(module_name)
        del self._pattern_confs.get(pattern, Conf())[name]

    def get(self, conf_name, default=None, check_type=None):
        """
        Get the value of the conf name requested and convert it to the [type]-like passed.
        """
        pattern, name = self._split_pattern_name(conf_name)
        return self._pattern_confs.get(pattern, Conf()).get(name, default=default,
                                                            check_type=check_type)

    def pop(self, conf_name, default=None):
        """
        Remove the conf name passed.
        """
        pattern, name = self._split_pattern_name(conf_name)
        return self._pattern_confs.get(pattern, Conf()).pop(name, default=default)

    @staticmethod
    def _split_pattern_name(pattern_name):
        """Split "pattern:section:name" into (pattern, "section:name").

        A single ":" means there is no package pattern (pattern is None).
        """
        if pattern_name.count(":") >= 2:
            pattern, name = pattern_name.split(":", 1)
        else:
            pattern, name = None, pattern_name
        return pattern, name

    def get_conanfile_conf(self, ref):
        """ computes package-specific Conf
            it is only called when conanfile.buildenv is called
            the last one found in the profile file has top priority
        """
        result = Conf()
        for pattern, conf in self._pattern_confs.items():
            if pattern is None or fnmatch.fnmatch(str(ref), pattern):
                # Latest declared has priority, copy() necessary to not destroy data
                result = conf.copy().compose_conf(result)
        return result

    def update_conf_definition(self, other):
        """
        :type other: ConfDefinition
        :param other: The argument profile has priority/precedence over the current one.
        """
        for pattern, conf in other._pattern_confs.items():
            self._update_conf_definition(pattern, conf)

    def _update_conf_definition(self, pattern, conf):
        # New conf wins; existing values compose in with lower priority.
        existing = self._pattern_confs.get(pattern)
        if existing:
            self._pattern_confs[pattern] = conf.compose_conf(existing)
        else:
            self._pattern_confs[pattern] = conf

    def rebase_conf_definition(self, other):
        """
        for taking the new global.conf and composing with the profile [conf]
        :type other: ConfDefinition
        """
        for pattern, conf in other._pattern_confs.items():
            new_conf = conf.filter_user_modules()  # Creates a copy, filtered
            existing = self._pattern_confs.get(pattern)
            if existing:
                existing.compose_conf(new_conf)
            else:
                self._pattern_confs[pattern] = new_conf

    def update(self, key, value, profile=False, method="define"):
        """
        Define/append/prepend/unset any Conf line
        >> update("tools.microsoft.msbuild:verbosity", "Detailed")

        :raises ConanException: for non-profile confs declared in a profile
            or carrying a package pattern.
        """
        pattern, name = self._split_pattern_name(key)

        if not _is_profile_module(name):
            if profile:
                raise ConanException("[conf] '{}' not allowed in profiles".format(key))
            if pattern is not None:
                raise ConanException("Conf '{}' cannot have a package pattern".format(key))

        # strip whitespaces before/after =
        # values are not strip() unless they are a path, to preserve potential whitespaces
        name = name.strip()

        # When loading from profile file, latest line has priority
        conf = Conf()
        if method == "unset":
            conf.unset(name)
        else:
            getattr(conf, method)(name, value)

        # Update
        self._update_conf_definition(pattern, conf)

    def as_list(self):
        """Return [(qualified_name, value), ...] sorted by name per pattern."""
        result = []
        for pattern, conf in self._pattern_confs.items():
            for name, value in sorted(conf.items()):
                if pattern:
                    result.append(("{}:{}".format(pattern, name), value))
                else:
                    result.append((name, value))
        return result

    def dumps(self):
        """Serialize all patterns back to profile text."""
        result = []
        for pattern, conf in self._pattern_confs.items():
            if pattern is None:
                result.append(conf.dumps())
            else:
                result.append("\n".join("{}:{}".format(pattern, line) if line else ""
                                        for line in conf.dumps().splitlines()))
        if result:
            result.append("")
        return "\n".join(result)

    @staticmethod
    def _get_evaluated_value(__v):
        """
        Function to avoid eval() catching local variables
        """
        # NOTE: eval() executes arbitrary expressions; conf/profile text is
        # assumed to be trusted, user-authored input.
        try:
            # Isolated eval
            parsed_value = eval(__v)
            if isinstance(parsed_value, str):  # xxx:xxx = "my string"
                # Let's respect the quotes introduced by any user
                parsed_value = '"{}"'.format(parsed_value)
        except Exception:
            # eval() failed: treat the raw text as a bare, unquoted string.
            # (Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.)
            parsed_value = __v.strip()
        return parsed_value

    def loads(self, text, profile=False):
        """Parse profile-style text, replacing the current contents.

        :raises ConanException: on a line matching no known operator.
        """
        # OrderedDict for consistency with __init__ (ordered composition).
        self._pattern_confs = OrderedDict()

        for line in text.splitlines():
            line = line.strip()
            if not line or line.startswith("#"):
                continue

            for op, method in ConfDefinition.actions:
                tokens = line.split(op, 1)
                if len(tokens) != 2:
                    continue
                pattern_name, value = tokens
                parsed_value = ConfDefinition._get_evaluated_value(value)
                self.update(pattern_name, parsed_value, profile=profile, method=method)
                break
            else:
                raise ConanException("Bad conf definition: {}".format(line))
| 41.231225 | 169 | 0.629056 | import fnmatch
from collections import OrderedDict
import six
from conans.errors import ConanException
BUILT_IN_CONFS = {
"core:required_conan_version": "Raise if current version does not match the defined range.",
"core.package_id:msvc_visual_incompatible": "Allows opting-out the fallback from the new msvc compiler to the Visual Studio compiler existing binaries",
"core:default_profile": "Defines the default host profile ('default' by default)",
"core:default_build_profile": "Defines the default build profile (None by default)",
"tools.android:ndk_path": "Argument for the CMAKE_ANDROID_NDK",
"tools.build:skip_test": "Do not execute CMake.test() and Meson.test() when enabled",
"tools.build:jobs": "Default compile jobs number -jX Ninja, Make, /MP VS (default: max CPUs)",
"tools.build:sysroot": "Pass the --sysroot=<tools.build:sysroot> flag if available. (None by default)",
"tools.cmake.cmaketoolchain:generator": "User defined CMake generator to use instead of default",
"tools.cmake.cmaketoolchain:find_package_prefer_config": "Argument for the CMAKE_FIND_PACKAGE_PREFER_CONFIG",
"tools.cmake.cmaketoolchain:toolchain_file": "Use other existing file rather than conan_toolchain.cmake one",
"tools.cmake.cmaketoolchain:user_toolchain": "Inject existing user toolchains at the beginning of conan_toolchain.cmake",
"tools.cmake.cmaketoolchain:system_name": "Define CMAKE_SYSTEM_NAME in CMakeToolchain",
"tools.cmake.cmaketoolchain:system_version": "Define CMAKE_SYSTEM_VERSION in CMakeToolchain",
"tools.cmake.cmaketoolchain:system_processor": "Define CMAKE_SYSTEM_PROCESSOR in CMakeToolchain",
"tools.env.virtualenv:auto_use": "Automatically activate virtualenv file generation",
"tools.cmake.cmake_layout:build_folder_vars": "Settings and Options that will produce a different build folder and different CMake presets names",
"tools.files.download:retry": "Number of retries in case of failure when downloading",
"tools.files.download:retry_wait": "Seconds to wait between download attempts",
"tools.gnu:make_program": "Indicate path to make program",
"tools.gnu:define_libcxx11_abi": "Force definition of GLIBCXX_USE_CXX11_ABI=1 for libstdc++11",
"tools.google.bazel:configs": "Define Bazel config file",
"tools.google.bazel:bazelrc_path": "Defines Bazel rc-path",
"tools.microsoft.msbuild:verbosity": "Verbosity level for MSBuild: 'Quiet', 'Minimal', 'Normal', 'Detailed', 'Diagnostic'",
"tools.microsoft.msbuild:vs_version": "Defines the IDE version when using the new msvc compiler",
"tools.microsoft.msbuild:max_cpu_count": "Argument for the /m when running msvc to build parallel projects",
"tools.microsoft.msbuild:installation_path": "VS install path, to avoid auto-detect via vswhere, like C:/Program Files (x86)/Microsoft Visual Studio/2019/Community",
"tools.microsoft.msbuilddeps:exclude_code_analysis": "Suppress MSBuild code analysis for patterns",
"tools.microsoft.msbuildtoolchain:compile_options": "Dictionary with MSBuild compiler options",
"tools.intel:installation_path": "Defines the Intel oneAPI installation root path",
"tools.intel:setvars_args": "Custom arguments to be passed onto the setvars.sh|bat script from Intel oneAPI",
"tools.system.package_manager:tool": "Default package manager tool: 'apt-get', 'yum', 'dnf', 'brew', 'pacman', 'choco', 'zypper', 'pkg' or 'pkgutil'",
"tools.system.package_manager:mode": "Mode for package_manager tools: 'check' or 'install'",
"tools.system.package_manager:sudo": "Use 'sudo' when invoking the package manager tools in Linux (False by default)",
"tools.system.package_manager:sudo_askpass": "Use the '-A' argument if using sudo in Linux to invoke the system package manager (False by default)",
"tools.apple.xcodebuild:verbosity": "Verbosity level for xcodebuild: 'verbose' or 'quiet",
"tools.apple:enable_bitcode": "(boolean) Enable/Disable Bitcode Apple Clang flags",
"tools.apple:enable_arc": "(boolean) Enable/Disable ARC Apple Clang flags",
"tools.apple:enable_visibility": "(boolean) Enable/Disable Visibility Apple Clang flags",
# Flags configuration
"tools.build:cxxflags": "List of extra CXX flags used by different toolchains like CMakeToolchain, AutotoolsToolchain and MesonToolchain",
"tools.build:cflags": "List of extra C flags used by different toolchains like CMakeToolchain, AutotoolsToolchain and MesonToolchain",
"tools.build:defines": "List of extra definition flags used by different toolchains like CMakeToolchain and AutotoolsToolchain",
"tools.build:sharedlinkflags": "List of extra flags used by CMakeToolchain for CMAKE_SHARED_LINKER_FLAGS_INIT variable",
"tools.build:exelinkflags": "List of extra flags used by CMakeToolchain for CMAKE_EXE_LINKER_FLAGS_INIT variable",
}
def _is_profile_module(module_name):
# These are the modules that are propagated to profiles and user recipes
_user_modules = "tools.", "user."
return any(module_name.startswith(user_module) for user_module in _user_modules)
# FIXME: Refactor all the next classes because they are mostly the same as
# conan.tools.env.environment ones
class _ConfVarPlaceHolder:
pass
class _ConfValue(object):
def __init__(self, name, value):
self._name = name
self._value = value
self._value_type = type(value)
def __repr__(self):
return repr(self._value)
@property
def value(self):
if self._value_type is list and _ConfVarPlaceHolder in self._value:
v = self._value[:]
v.remove(_ConfVarPlaceHolder)
return v
return self._value
def copy(self):
return _ConfValue(self._name, self._value)
def dumps(self):
if self._value is None:
return "{}=!".format(self._name) # unset
elif self._value_type is list and _ConfVarPlaceHolder in self._value:
v = self._value[:]
v.remove(_ConfVarPlaceHolder)
return "{}={}".format(self._name, v)
else:
return "{}={}".format(self._name, self._value)
def update(self, value):
if self._value_type is dict:
self._value.update(value)
def remove(self, value):
if self._value_type is list:
self._value.remove(value)
elif self._value_type is dict:
self._value.pop(value, None)
def append(self, value):
if self._value_type is not list:
raise ConanException("Only list-like values can append other values.")
if isinstance(value, list):
self._value.extend(value)
else:
self._value.append(value)
def prepend(self, value):
if self._value_type is not list:
raise ConanException("Only list-like values can prepend other values.")
if isinstance(value, list):
self._value = value + self._value
else:
self._value.insert(0, value)
def compose_conf_value(self, other):
v_type = self._value_type
o_type = other._value_type
if v_type is list and o_type is list:
try:
index = self._value.index(_ConfVarPlaceHolder)
except ValueError: # It doesn't have placeholder
pass
else:
new_value = self._value[:]
new_value[index:index + 1] = other._value
self._value = new_value
elif self._value is None or other._value is None \
or (isinstance(self._value, six.string_types) and isinstance(self._value, six.string_types)):
# really know the original value type
pass
elif o_type != v_type:
raise ConanException("It's not possible to compose {} values "
"and {} ones.".format(v_type.__name__, o_type.__name__))
class Conf:
    """An ordered mapping of conf names to _ConfValue entries.

    Holds one scope of configuration values (e.g. all the confs attached to
    a single package pattern inside a ConfDefinition).
    """
    # Text spellings (compared case-insensitively) treated as boolean False.
    boolean_false_expressions = ("0", '"0"', "false", '"false"', "off")
    def __init__(self):
        self._values = OrderedDict()  # conf_name -> _ConfValue
    def __bool__(self):
        return bool(self._values)
    __nonzero__ = __bool__  # Python 2 truthiness alias
    def __repr__(self):
        return "Conf: " + repr(self._values)
    def __eq__(self, other):
        return other._values == self._values
    def __ne__(self, other):
        return not self.__eq__(other)
    def __getitem__(self, name):
        return self.get(name)
    def __setitem__(self, name, value):
        self.define(name, value)
    def __delitem__(self, name):
        # FIXME: Keeping backward compatibility
        del self._values[name]
    def items(self):
        """Yield (name, raw Python value) pairs."""
        # FIXME: Keeping backward compatibility
        for k, v in self._values.items():
            yield k, v.value
    @property
    def sha(self):
        """Text serialization used as identity/hash input."""
        # FIXME: Keeping backward compatibility
        return self.dumps()
    @staticmethod
    def _get_boolean_value(value):
        # Interpret a loosely-typed value as bool: anything not a known
        # "false" spelling counts as True.
        if type(value) is bool:
            return value
        elif str(value).lower() in Conf.boolean_false_expressions:
            return False
        else:
            return True
    def get(self, conf_name, default=None, check_type=None):
        """Return the value of ``conf_name``, or ``default`` if undefined/unset.

        ``check_type`` validates the stored value's type; bool and str
        requests additionally coerce loosely-typed values instead of failing.

        Raises:
            ConanException: when the value does not match ``check_type``.
        """
        conf_value = self._values.get(conf_name)
        if conf_value:
            v = conf_value.value
            # Some smart conversions
            if check_type is bool and not isinstance(v, bool):
                # Perhaps, user has introduced a "false", "0" or even "off"
                return self._get_boolean_value(v)
            elif check_type is str and not isinstance(v, str):
                return str(v)
            elif v is None:  # value was unset
                return default
            elif check_type is not None and not isinstance(v, check_type):
                raise ConanException("[conf] {name} must be a {type}-like object. "
                                     "The value '{value}' introduced is a {vtype} "
                                     "object".format(name=conf_name, type=check_type.__name__,
                                                     value=v, vtype=type(v).__name__))
            return v
        else:
            return default
    def pop(self, conf_name, default=None):
        """Remove ``conf_name`` and return its value (or ``default``)."""
        value = self.get(conf_name, default=default)
        self._values.pop(conf_name, None)
        return value
    @staticmethod
    def _validate_lower_case(name):
        # Conf names are case-sensitive identifiers; only lowercase allowed.
        if name != name.lower():
            raise ConanException("Conf '{}' must be lowercase".format(name))
    def copy(self):
        """Shallow copy: the _ConfValue entries are shared with the original."""
        c = Conf()
        c._values = self._values.copy()
        return c
    def dumps(self):
        """Serialize to text, one conf per line, in reverse insertion order."""
        return "\n".join([v.dumps() for v in reversed(self._values.values())])
    def define(self, name, value):
        """Set ``name`` to ``value``, replacing anything previously stored."""
        self._validate_lower_case(name)
        self._values[name] = _ConfValue(name, value)
    def unset(self, name):
        """Mark ``name`` as explicitly unset (stored value is None)."""
        self._values[name] = _ConfValue(name, None)
    def update(self, name, value):
        """Dict-merge ``value`` into ``name``, creating an empty dict if absent."""
        self._validate_lower_case(name)
        conf_value = _ConfValue(name, {})
        self._values.setdefault(name, conf_value).update(value)
    def append(self, name, value):
        """Append to a list conf; the placeholder keeps room for lower-priority values."""
        self._validate_lower_case(name)
        conf_value = _ConfValue(name, [_ConfVarPlaceHolder])
        self._values.setdefault(name, conf_value).append(value)
    def prepend(self, name, value):
        """Prepend to a list conf; the placeholder keeps room for lower-priority values."""
        self._validate_lower_case(name)
        conf_value = _ConfValue(name, [_ConfVarPlaceHolder])
        self._values.setdefault(name, conf_value).prepend(value)
    def remove(self, name, value):
        """Remove ``value`` from the conf ``name``.

        Raises:
            ConanException: if ``name`` is not defined.
        """
        conf_value = self._values.get(name)
        if conf_value:
            conf_value.remove(value)
        else:
            raise ConanException("Conf {} does not exist.".format(name))
    def compose_conf(self, other):
        """Compose ``other`` (lower priority) underneath self, in place; return self."""
        for k, v in other._values.items():
            existing = self._values.get(k)
            if existing is None:
                self._values[k] = v.copy()
            else:
                existing.compose_conf_value(v)
        return self
    def filter_user_modules(self):
        """Return a new Conf keeping only confs accepted by _is_profile_module."""
        result = Conf()
        for k, v in self._values.items():
            if _is_profile_module(k):
                result._values[k] = v
        return result
class ConfDefinition:
    """A mapping of package-reference patterns to Conf objects.

    This is what a profile ``[conf]`` text block parses into: each entry may
    be scoped to an fnmatch pattern ("pkg/*:tools.x=y") or be global
    (pattern None).
    """
    # Recognized operators, checked in this order; "=" must come last since
    # every other operator also contains it as a substring.
    actions = (("+=", "append"), ("=+", "prepend"),
               ("=!", "unset"), ("=", "define"))
    def __init__(self):
        # pattern (str, or None meaning "applies to every package") -> Conf
        self._pattern_confs = OrderedDict()
    def __repr__(self):
        return "ConfDefinition: " + repr(self._pattern_confs)
    def __bool__(self):
        return bool(self._pattern_confs)
    __nonzero__ = __bool__  # Python 2 truthiness alias
    def __getitem__(self, module_name):
        """Return the value for ``module_name`` (optionally "pattern:name")."""
        pattern, name = self._split_pattern_name(module_name)
        return self._pattern_confs.get(pattern, Conf()).get(name)
    def __delitem__(self, module_name):
        """Delete ``module_name``; raises KeyError if it does not exist."""
        pattern, name = self._split_pattern_name(module_name)
        del self._pattern_confs.get(pattern, Conf())[name]
    def get(self, conf_name, default=None, check_type=None):
        """Return the value of ``conf_name``, type-checked/coerced by Conf.get."""
        pattern, name = self._split_pattern_name(conf_name)
        return self._pattern_confs.get(pattern, Conf()).get(name, default=default,
                                                            check_type=check_type)
    def pop(self, conf_name, default=None):
        """Remove and return the value of ``conf_name``, or ``default``."""
        pattern, name = self._split_pattern_name(conf_name)
        return self._pattern_confs.get(pattern, Conf()).pop(name, default=default)
    @staticmethod
    def _split_pattern_name(pattern_name):
        # "pkg/1.0:tools.ns:name" -> ("pkg/1.0", "tools.ns:name");
        # a single-colon name like "tools.ns:name" has no package pattern.
        if pattern_name.count(":") >= 2:
            pattern, name = pattern_name.split(":", 1)
        else:
            pattern, name = None, pattern_name
        return pattern, name
    def get_conanfile_conf(self, ref):
        """Compose the effective Conf for ``ref`` from all matching patterns."""
        result = Conf()
        for pattern, conf in self._pattern_confs.items():
            if pattern is None or fnmatch.fnmatch(str(ref), pattern):
                # Latest declared has priority, copy() necessary to not destroy data
                result = conf.copy().compose_conf(result)
        return result
    def update_conf_definition(self, other):
        """Merge ``other`` into self; ``other``'s values win on conflicts."""
        for pattern, conf in other._pattern_confs.items():
            self._update_conf_definition(pattern, conf)
    def _update_conf_definition(self, pattern, conf):
        # Compose the incoming conf on top of any existing one for the pattern.
        existing = self._pattern_confs.get(pattern)
        if existing:
            self._pattern_confs[pattern] = conf.compose_conf(existing)
        else:
            self._pattern_confs[pattern] = conf
    def rebase_conf_definition(self, other):
        """Layer ``other``'s profile-module confs underneath self's values.

        Only confs accepted by _is_profile_module survive the rebase.
        """
        for pattern, conf in other._pattern_confs.items():
            new_conf = conf.filter_user_modules()  # Creates a copy, filtered
            existing = self._pattern_confs.get(pattern)
            if existing:
                existing.compose_conf(new_conf)
            else:
                self._pattern_confs[pattern] = new_conf
    def update(self, key, value, profile=False, method="define"):
        """Apply one conf action (define/append/prepend/unset) to ``key``.

        Raises:
            ConanException: for non-profile modules used in profiles, or
                combined with a package pattern.
        """
        pattern, name = self._split_pattern_name(key)
        if not _is_profile_module(name):
            if profile:
                raise ConanException("[conf] '{}' not allowed in profiles".format(key))
            if pattern is not None:
                raise ConanException("Conf '{}' cannot have a package pattern".format(key))
        # strip whitespaces before/after =
        # values are not strip() unless they are a path, to preserve potential whitespaces
        name = name.strip()
        # When loading from profile file, latest line has priority
        conf = Conf()
        if method == "unset":
            conf.unset(name)
        else:
            getattr(conf, method)(name, value)
        # Update
        self._update_conf_definition(pattern, conf)
    def as_list(self):
        """Return all confs as a flat list of ("pattern:name" or name, value)."""
        result = []
        for pattern, conf in self._pattern_confs.items():
            for name, value in sorted(conf.items()):
                if pattern:
                    result.append(("{}:{}".format(pattern, name), value))
                else:
                    result.append((name, value))
        return result
    def dumps(self):
        """Serialize to the profile [conf] text format (pattern-prefixed lines)."""
        result = []
        for pattern, conf in self._pattern_confs.items():
            if pattern is None:
                result.append(conf.dumps())
            else:
                result.append("\n".join("{}:{}".format(pattern, line) if line else ""
                                        for line in conf.dumps().splitlines()))
        if result:
            result.append("")  # ensure a trailing newline in the joined text
        return "\n".join(result)
    @staticmethod
    def _get_evaluated_value(__v):
        """Parse a raw text value into a Python object, falling back to the raw string.

        NOTE(review): the text is passed to eval(); make sure only trusted
        profile input ever reaches this function.
        """
        try:
            # Isolated eval
            parsed_value = eval(__v)
            if isinstance(parsed_value, str):  # xxx:xxx = "my string"
                # Let's respect the quotes introduced by any user
                parsed_value = '"{}"'.format(parsed_value)
        except:  # bare except: any parse failure falls back to the stripped raw text
            parsed_value = __v.strip()
        return parsed_value
    def loads(self, text, profile=False):
        """Parse a [conf] text block, replacing any previous contents."""
        self._pattern_confs = {}
        for line in text.splitlines():
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            for op, method in ConfDefinition.actions:
                tokens = line.split(op, 1)
                if len(tokens) != 2:
                    continue
                pattern_name, value = tokens
                parsed_value = ConfDefinition._get_evaluated_value(value)
                self.update(pattern_name, parsed_value, profile=profile, method=method)
                break
            else:
                # for/else: no operator matched this line at all.
                raise ConanException("Bad conf definition: {}".format(line))
| true | true |
1c37c9f2571d4f730113497bd724d86eecbeae20 | 4,454 | py | Python | lecture_05/homework5/tasks/oop_1.py | RomanSafe/epam_python_training | 3aac68062e1764af844cb3e96f9481791acffc9d | [
"MIT"
] | null | null | null | lecture_05/homework5/tasks/oop_1.py | RomanSafe/epam_python_training | 3aac68062e1764af844cb3e96f9481791acffc9d | [
"MIT"
] | 2 | 2020-12-30T19:39:36.000Z | 2020-12-30T21:49:33.000Z | lecture_05/homework5/tasks/oop_1.py | RomanSafe/epam_python_training | 3aac68062e1764af844cb3e96f9481791acffc9d | [
"MIT"
] | null | null | null | """
Необходимо создать 3 класса и взаимосвязь между ними (Student, Teacher,
Homework)
Наследование в этой задаче использовать не нужно.
Для работы с временем использовать модуль datetime
1. Homework принимает на вход 2 атрибута: текст задания и количество дней
на это задание
Атрибуты:
text - текст задания
deadline - хранит объект datetime.timedelta с количеством
дней на выполнение
created - c точной датой и временем создания
Методы:
is_active - проверяет не истело ли время на выполнение задания,
возвращает boolean
2. Student
Атрибуты:
last_name
first_name
Методы:
do_homework - принимает объект Homework и возвращает его же,
если задание уже просрочено, то печатет 'You are late' и возвращает None
3. Teacher
Атрибуты:
last_name
first_name
Методы:
create_homework - текст задания и количество дней на это задание,
возвращает экземпляр Homework
Обратите внимание, что для работы этого метода не требуется сам объект.
PEP8 соблюдать строго.
Всем перечисленным выше атрибутам и методам классов сохранить названия.
К названием остальных переменных, классов и тд. подходить ответственно -
давать логичные подходящие имена.
"""
import datetime
from typing import NewType, Union
# for static typing
Timedelta = NewType("Timedelta", datetime.timedelta)
class Homework:
    """A homework assignment with a creation timestamp and a deadline.

    Attributes:
        text: the assignment description.
        deadline: datetime.timedelta allotted for completion.
        created: datetime.datetime at which the assignment was issued.
    """

    def __init__(self, text: str, deadline: int) -> None:
        """Store the text, stamp the creation time, convert deadline days to a timedelta."""
        self.text = text
        self.created = datetime.datetime.now()
        self.deadline = datetime.timedelta(days=deadline)

    def is_active(self) -> bool:
        """Return True while some time remains before the deadline."""
        elapsed = datetime.datetime.now() - self.created
        return elapsed < self.deadline
class Student:
    """A student who can attempt homework assignments.

    Attributes:
        first_name: the student's given name.
        last_name: the student's family name.
    """

    def __init__(self, first_name: str, last_name: str) -> None:
        """Store the student's name."""
        self.first_name = first_name
        self.last_name = last_name

    def do_homework(self, homework: Homework) -> Union[Homework, None]:
        """Attempt the given homework.

        Prints "You are late" and returns None when the deadline has already
        passed; otherwise returns the homework unchanged.
        """
        if not homework.is_active():
            print("You are late")
            return None
        return homework
class Teacher:
    """A teacher who can hand out homework assignments.

    Attributes:
        first_name: the teacher's given name.
        last_name: the teacher's family name.
    """

    def __init__(self, first_name: str, last_name: str) -> None:
        """Store the teacher's name."""
        self.first_name = first_name
        self.last_name = last_name

    @staticmethod
    def create_homework(text: str, deadline: int) -> Homework:
        """Build and return a Homework for ``text`` due in ``deadline`` days.

        Static, so it is usable without a Teacher instance.
        """
        return Homework(text, deadline)
if __name__ == "__main__":
    # Demo of the three classes working together.
    teacher = Teacher("Daniil", "Shadrin")
    student = Student("Roman", "Petrov")
    teacher.last_name  # 'Shadrin'
    student.first_name  # 'Roman'
    expired_homework = teacher.create_homework("Learn functions", 0)
    expired_homework.created  # Example: 2019-05-26 16:44:30.688762
    expired_homework.deadline  # 0:00:00
    expired_homework.text  # 'Learn functions'
    # create function from method and use it
    create_homework_too = teacher.create_homework
    oop_homework = create_homework_too("create 2 simple classes", 5)
    oop_homework.deadline  # 5 days, 0:00:00
    student.do_homework(oop_homework)
    student.do_homework(expired_homework)  # You are late
| 28.922078 | 86 | 0.680287 | import datetime
from typing import NewType, Union
Timedelta = NewType("Timedelta", datetime.timedelta)
class Homework:
def __init__(self, text: str, deadline: int) -> None:
self.text = text
self.deadline = datetime.timedelta(days=deadline)
self.created = datetime.datetime.now()
def is_active(self) -> bool:
return datetime.datetime.now() - self.created < self.deadline
class Student:
def __init__(self, first_name: str, last_name: str) -> None:
self.first_name = first_name
self.last_name = last_name
def do_homework(self, homework: Homework) -> Union[Homework, None]:
if homework.is_active():
return homework
print("You are late")
return None
class Teacher:
def __init__(self, first_name: str, last_name: str) -> None:
self.first_name = first_name
self.last_name = last_name
@staticmethod
def create_homework(text: str, deadline: int) -> Homework:
return Homework(text, deadline)
if __name__ == "__main__":
teacher = Teacher("Daniil", "Shadrin")
student = Student("Roman", "Petrov")
teacher.last_name
student.first_name
expired_homework = teacher.create_homework("Learn functions", 0)
expired_homework.created
expired_homework.deadline
expired_homework.text
create_homework_too = teacher.create_homework
oop_homework = create_homework_too("create 2 simple classes", 5)
oop_homework.deadline
student.do_homework(oop_homework)
student.do_homework(expired_homework)
| true | true |
1c37cb5565ca95fa508b29ea62af0b67f57a39d8 | 550 | py | Python | test_fizzbuzz.py | milton63/fizzbuzz | 411bd9fc720c081da1ef5eab5d273abab31b8fc5 | [
"MIT"
] | null | null | null | test_fizzbuzz.py | milton63/fizzbuzz | 411bd9fc720c081da1ef5eab5d273abab31b8fc5 | [
"MIT"
] | 1 | 2020-05-27T15:38:47.000Z | 2020-05-27T15:38:47.000Z | test_fizzbuzz.py | milton63/fizzbuzz | 411bd9fc720c081da1ef5eab5d273abab31b8fc5 | [
"MIT"
] | 1 | 2020-05-27T13:51:15.000Z | 2020-05-27T13:51:15.000Z | from fizzbuzz import fizzbuzz
def test_number():
    """A value divisible by neither 3 nor 5 is expected back unchanged."""
    assert fizzbuzz(1) == 1
def test_div_3():
    """Multiples of 3 (but not 5) are expected to map to 'Fizz'."""
    assert fizzbuzz(3) == 'Fizz'
def test_div_5():
    """Multiples of 5 (but not 3) are expected to map to 'Buzz'."""
    assert fizzbuzz(5) == 'Buzz'
def test_div_3and5():
    """Multiples of both 3 and 5 are expected to map to 'Fizz Buzz'."""
    assert fizzbuzz(15) == 'Fizz Buzz'
def test_range():
    """End-to-end check of the full 1..15 cycle covering every rule once."""
    result = [fizzbuzz(x) for x in range(1, 16)]
    assert result == [
        1,
        2,
        'Fizz',
        4,
        'Buzz',
        'Fizz',
        7,
        8,
        'Fizz',
        'Buzz',
        11,
        'Fizz',
        13,
        14,
        'Fizz Buzz']
| 13.75 | 48 | 0.469091 | from fizzbuzz import fizzbuzz
def test_number():
assert fizzbuzz(1) == 1
def test_div_3():
assert fizzbuzz(3) == 'Fizz'
def test_div_5():
assert fizzbuzz(5) == 'Buzz'
def test_div_3and5():
assert fizzbuzz(15) == 'Fizz Buzz'
def test_range():
result = [fizzbuzz(x) for x in range(1, 16)]
assert result == [
1,
2,
'Fizz',
4,
'Buzz',
'Fizz',
7,
8,
'Fizz',
'Buzz',
11,
'Fizz',
13,
14,
'Fizz Buzz']
| true | true |
1c37cc07ef8124bd64a11be641a95a2aad421fcc | 2,000 | py | Python | src/ops/inventory/caching.py | Asjidkalam/ops-cli | 951b6e53452aef60cd7a67b95cb3bf227d81c02d | [
"Apache-2.0"
] | 182 | 2019-02-02T22:57:41.000Z | 2022-03-19T11:40:15.000Z | src/ops/inventory/caching.py | Asjidkalam/ops-cli | 951b6e53452aef60cd7a67b95cb3bf227d81c02d | [
"Apache-2.0"
] | 66 | 2019-02-04T14:43:53.000Z | 2021-10-05T14:19:56.000Z | src/ops/inventory/caching.py | Asjidkalam/ops-cli | 951b6e53452aef60cd7a67b95cb3bf227d81c02d | [
"Apache-2.0"
] | 48 | 2019-02-05T14:22:10.000Z | 2021-09-29T13:41:11.000Z | # Copyright 2019 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import hashlib
import json
import os
import time
from six import PY3
def cache_callback_result(directory, func, max_age, cache_key_args):
    """Return func()'s result, cached on disk.

    The cache file lives under ``directory`` and is keyed by the
    JSON-serializable ``cache_key_args``.  A fresh result is computed (and
    stored) whenever the cached file is absent or older than ``max_age``
    seconds; otherwise the cached JSON is returned.
    """
    base_dir = os.path.expanduser(directory)
    cache_file = get_cache_path(base_dir, cache_key_args)
    if not is_valid(cache_file, max_age):
        return write(cache_file, func())
    return read(cache_file)
def get_cache_path(dir, args):
    """Build the cache file path: ``dir``/<md5 hex digest of JSON-encoded args>."""
    serialized = json.dumps(args)
    if PY3:
        # md5 needs bytes on Python 3; json.dumps returns text.
        serialized = serialized.encode('utf-8')
    digest = hashlib.md5()
    digest.update(serialized)
    return os.path.join(dir, digest.hexdigest())
def is_valid(filename, max_age):
    """Return True when ``filename`` exists and was modified within ``max_age`` seconds."""
    path = os.path.expanduser(filename)
    if not os.path.isfile(path):
        return False
    # Still fresh while mtime + max_age lies in the future.
    return os.path.getmtime(path) + max_age > time.time()
def write(filename, data):
    """Serialize ``data`` to ``filename`` as pretty-printed JSON and return ``data``.

    Keys are sorted so the file contents are deterministic.
    """
    json_data = json.dumps(data, sort_keys=True, indent=2)
    # with-statement guarantees the handle is closed even if the write
    # raises (the previous explicit close() was skipped on error).
    with open(os.path.expanduser(filename), 'w') as cache:
        cache.write(json_data)
    return data
def read(filename):
    """Read the inventory stored as JSON in ``filename`` and return it as a Python object."""
    # with-statement closes the handle; the previous version leaked the
    # open file object.
    with open(os.path.expanduser(filename), 'r') as cache:
        return json.loads(cache.read())
| 28.985507 | 88 | 0.7035 |
import hashlib
import json
import os
import time
from six import PY3
def cache_callback_result(directory, func, max_age, cache_key_args):
directory = os.path.expanduser(directory)
path = get_cache_path(directory, cache_key_args)
if is_valid(path, max_age):
return read(path)
return write(path, func())
def get_cache_path(dir, args):
m = hashlib.md5()
json_dump = json.dumps(args)
if PY3:
json_dump = json_dump.encode('utf-8')
m.update(json_dump)
return os.path.join(dir, m.hexdigest())
def is_valid(filename, max_age):
filename = os.path.expanduser(filename)
if os.path.isfile(filename):
mod_time = os.path.getmtime(filename)
current_time = time.time()
if (mod_time + max_age) > current_time:
return True
return False
def write(filename, data):
json_data = json.dumps(data, sort_keys=True, indent=2)
cache = open(os.path.expanduser(filename), 'w')
cache.write(json_data)
cache.close()
return data
def read(filename):
cache = open(os.path.expanduser(filename), 'r')
json_inventory = cache.read()
return json.loads(json_inventory)
| true | true |
1c37cc8897a039705453174914ac87e0aa6dd676 | 6,216 | py | Python | lib/googlecloudsdk/api_lib/transfer/operations_util.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/googlecloudsdk/api_lib/transfer/operations_util.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/api_lib/transfer/operations_util.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for common operations API interactions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import encoding
from googlecloudsdk.api_lib.transfer import jobs_util
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.command_lib.transfer import name_util
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.console import progress_tracker
from googlecloudsdk.core.util import retry
from googlecloudsdk.core.util import scaled_integer
_LAST_RETRIAL = -1
_UNKNOWN_VALUE = 'UNKNOWN'
def _get_operation_to_poll(job_name, operation_name):
  """Returns operation name or last operation of job name."""
  # Exactly one of the two identifiers must be supplied.
  if (not job_name and not operation_name) or (job_name and operation_name):
    raise ValueError(
        'job_name or operation_name must be provided but not both.')
  if job_name:
    # Block until the job has spawned an operation, then poll that one.
    latest_operation_name = jobs_util.block_until_operation_created(job_name)
    log.status.Print('Latest Operation: {}'.format(latest_operation_name))
    return latest_operation_name
  return operation_name
def _is_operation_in_progress(result, retryer_state):
  """Takes Operation Apitools object and returns if it is not marked done."""
  # retryer_state is part of the retry.Retryer callback signature but
  # carries no information needed here.
  del retryer_state  # Unused.
  return not result.done
def api_get(name):
  """Returns operation details from API as Apitools object.

  Args:
    name (str): Operation name, with or without the API's resource prefix.
  """
  client = apis.GetClientInstance('storagetransfer', 'v1')
  messages = apis.GetMessagesModule('storagetransfer', 'v1')
  # The prefix helper makes the name fully qualified as the API requires.
  formatted_operation_name = name_util.add_operation_prefix(name)
  return client.transferOperations.Get(
      messages.StoragetransferTransferOperationsGetRequest(
          name=formatted_operation_name))
def block_until_done(job_name=None, operation_name=None):
  """Does not return until API responds that operation is done.

  Args:
    job_name (str|None): If provided, poll job's last operation.
    operation_name (str|None): Poll this operation name.

  Raises:
    ValueError: One of job_name or operation_name must be provided.
  """
  polling_operation_name = _get_operation_to_poll(job_name, operation_name)
  with progress_tracker.ProgressTracker(
      message='Waiting for operation to complete'):
    # Poll at the user-configurable interval until the operation reports done.
    retry.Retryer().RetryOnResult(
        api_get,
        args=[polling_operation_name],
        should_retry_if=_is_operation_in_progress,
        sleep_ms=(
            properties.VALUES.transfer.no_async_polling_interval_ms.GetInt()),
    )
def _print_progress(operation, retryer_state):
  """Formats one in-place status line ("\r") for the given operation.

  Note: despite the polling context, this does not call the API itself; it
  only renders the Operation object it is handed.
  """
  metadata = encoding.MessageToDict(operation.metadata)
  if 'counters' in metadata:
    # Counter values arrive as strings in the JSON-ish metadata dict.
    skipped_bytes = int(metadata['counters'].get('bytesFromSourceSkippedBySync',
                                                 0))
    skipped_string = scaled_integer.FormatBinaryNumber(
        skipped_bytes, decimal_places=1)
    copied_bytes = int(metadata['counters'].get('bytesCopiedToSink', 0))
    total_bytes = int(metadata['counters'].get('bytesFoundFromSource', 0))
    if total_bytes:
      progress_percent = int(round(copied_bytes / total_bytes, 2) * 100)
    else:
      # Avoid division by zero before the source listing has found anything.
      progress_percent = 0
    progress_string = '{}% ({} of {})'.format(
        progress_percent,
        scaled_integer.FormatBinaryNumber(copied_bytes, decimal_places=1),
        scaled_integer.FormatBinaryNumber(total_bytes, decimal_places=1))
  else:
    progress_string = 'Progress: {}'.format(_UNKNOWN_VALUE)
    skipped_string = _UNKNOWN_VALUE
  if 'errorBreakdowns' in metadata:
    error_count = sum(
        [int(error['errorCount']) for error in metadata['errorBreakdowns']])
  else:
    error_count = 0
  spin_marks = console_attr.ProgressTrackerSymbolsAscii().spin_marks
  if retryer_state.retrial == _LAST_RETRIAL:
    # Final call after completion: drop the spinner character.
    spin_mark = ''
  else:
    spin_mark = spin_marks[retryer_state.retrial % len(spin_marks)]
  # Trailing \r rewrites the same terminal line on every poll.
  log.status.write(('{} | {} | Skipped: {} | Errors: {} {}\r').format(
      metadata['status'], progress_string, skipped_string, error_count,
      spin_mark))
def _poll_progress(name):
  """Prints progress of operation and blocks until transfer is complete.

  Args:
    name (str|None): Poll this operation name.

  Returns:
    Apitools Operation object containing the completed operation's metadata.
  """
  # Poll once per second, printing a status line on every retry.
  complete_operation = retry.Retryer(
      jitter_ms=0, status_update_func=_print_progress).RetryOnResult(
          api_get,
          args=[name],
          should_retry_if=_is_operation_in_progress,
          sleep_ms=1000)
  # One final print with _LAST_RETRIAL so the line ends without a spinner.
  _print_progress(
      complete_operation,
      retry.RetryerState(
          retrial=_LAST_RETRIAL, time_passed_ms=None, time_to_wait_ms=None))
  return complete_operation
def display_monitoring_view(name):
  """Prints and updates operation statistics, blocking until copy complete."""
  initial_operation = api_get(name)
  initial_metadata = encoding.MessageToDict(initial_operation.metadata)
  # Header: operation and parent job, shown without API resource prefixes.
  log.status.Print('Operation name: ' +
                   name_util.remove_operation_prefix(initial_operation.name))
  log.status.Print(
      'Parent job: ' +
      name_util.remove_job_prefix(initial_metadata['transferJobName']))
  if 'startTime' in initial_metadata:
    log.status.Print('Start time: ' + initial_metadata['startTime'])
  # Blocks here, refreshing a single progress line until the operation is done.
  final_operation = _poll_progress(initial_operation.name)
  final_metadata = encoding.MessageToDict(final_operation.metadata)
  if 'endTime' in final_metadata:
    log.status.Print('\nEnd time: ' + final_metadata['endTime'])
| 36.350877 | 80 | 0.741956 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import encoding
from googlecloudsdk.api_lib.transfer import jobs_util
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.command_lib.transfer import name_util
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.console import progress_tracker
from googlecloudsdk.core.util import retry
from googlecloudsdk.core.util import scaled_integer
_LAST_RETRIAL = -1
_UNKNOWN_VALUE = 'UNKNOWN'
def _get_operation_to_poll(job_name, operation_name):
if (not job_name and not operation_name) or (job_name and operation_name):
raise ValueError(
'job_name or operation_name must be provided but not both.')
if job_name:
latest_operation_name = jobs_util.block_until_operation_created(job_name)
log.status.Print('Latest Operation: {}'.format(latest_operation_name))
return latest_operation_name
return operation_name
def _is_operation_in_progress(result, retryer_state):
del retryer_state
return not result.done
def api_get(name):
client = apis.GetClientInstance('storagetransfer', 'v1')
messages = apis.GetMessagesModule('storagetransfer', 'v1')
formatted_operation_name = name_util.add_operation_prefix(name)
return client.transferOperations.Get(
messages.StoragetransferTransferOperationsGetRequest(
name=formatted_operation_name))
def block_until_done(job_name=None, operation_name=None):
polling_operation_name = _get_operation_to_poll(job_name, operation_name)
with progress_tracker.ProgressTracker(
message='Waiting for operation to complete'):
retry.Retryer().RetryOnResult(
api_get,
args=[polling_operation_name],
should_retry_if=_is_operation_in_progress,
sleep_ms=(
properties.VALUES.transfer.no_async_polling_interval_ms.GetInt()),
)
def _print_progress(operation, retryer_state):
metadata = encoding.MessageToDict(operation.metadata)
if 'counters' in metadata:
skipped_bytes = int(metadata['counters'].get('bytesFromSourceSkippedBySync',
0))
skipped_string = scaled_integer.FormatBinaryNumber(
skipped_bytes, decimal_places=1)
copied_bytes = int(metadata['counters'].get('bytesCopiedToSink', 0))
total_bytes = int(metadata['counters'].get('bytesFoundFromSource', 0))
if total_bytes:
progress_percent = int(round(copied_bytes / total_bytes, 2) * 100)
else:
progress_percent = 0
progress_string = '{}% ({} of {})'.format(
progress_percent,
scaled_integer.FormatBinaryNumber(copied_bytes, decimal_places=1),
scaled_integer.FormatBinaryNumber(total_bytes, decimal_places=1))
else:
progress_string = 'Progress: {}'.format(_UNKNOWN_VALUE)
skipped_string = _UNKNOWN_VALUE
if 'errorBreakdowns' in metadata:
error_count = sum(
[int(error['errorCount']) for error in metadata['errorBreakdowns']])
else:
error_count = 0
spin_marks = console_attr.ProgressTrackerSymbolsAscii().spin_marks
if retryer_state.retrial == _LAST_RETRIAL:
spin_mark = ''
else:
spin_mark = spin_marks[retryer_state.retrial % len(spin_marks)]
log.status.write(('{} | {} | Skipped: {} | Errors: {} {}\r').format(
metadata['status'], progress_string, skipped_string, error_count,
spin_mark))
def _poll_progress(name):
complete_operation = retry.Retryer(
jitter_ms=0, status_update_func=_print_progress).RetryOnResult(
api_get,
args=[name],
should_retry_if=_is_operation_in_progress,
sleep_ms=1000)
_print_progress(
complete_operation,
retry.RetryerState(
retrial=_LAST_RETRIAL, time_passed_ms=None, time_to_wait_ms=None))
return complete_operation
def display_monitoring_view(name):
initial_operation = api_get(name)
initial_metadata = encoding.MessageToDict(initial_operation.metadata)
log.status.Print('Operation name: ' +
name_util.remove_operation_prefix(initial_operation.name))
log.status.Print(
'Parent job: ' +
name_util.remove_job_prefix(initial_metadata['transferJobName']))
if 'startTime' in initial_metadata:
log.status.Print('Start time: ' + initial_metadata['startTime'])
final_operation = _poll_progress(initial_operation.name)
final_metadata = encoding.MessageToDict(final_operation.metadata)
if 'endTime' in final_metadata:
log.status.Print('\nEnd time: ' + final_metadata['endTime'])
| true | true |
1c37cc960048a6de7328e5484c890c05c68dd0cc | 1,325 | py | Python | Chapter05/virtualenvs/myproject_env/project/django-myproject/myproject/urls.py | PacktPublishing/Django-2-Web-Development-Cookbook-Third-Edition | f129613e2b1d00f5c76649025ae4d568f6286f2c | [
"MIT"
] | 75 | 2018-12-03T02:35:29.000Z | 2021-11-08T13:13:34.000Z | Chapter05/virtualenvs/myproject_env/project/django-myproject/myproject/urls.py | PacktPublishing/Django-2-Web-Development-Cookbook-Third-Edition | f129613e2b1d00f5c76649025ae4d568f6286f2c | [
"MIT"
] | 3 | 2019-08-11T13:35:01.000Z | 2020-09-29T06:52:36.000Z | Chapter04/virtualenvs/myproject_env/project/django-myproject/myproject/urls.py | PacktPublishing/Django-2-Web-Development-Cookbook-Third-Edition | f129613e2b1d00f5c76649025ae4d568f6286f2c | [
"MIT"
] | 45 | 2018-11-03T14:03:22.000Z | 2021-08-25T07:39:33.000Z | """myproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls.i18n import i18n_patterns
from django.urls import include, path
# Non-localized routes: admin plus each app's own URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('bulletins/', include('bulletin_board.urls')),
    path('cv/', include('cv.urls')),
    path('email/', include('email_messages.urls')),
    path('like/', include('likes.urls')),
    path('locations/', include('locations.urls')),
    path('movies/', include('movies.urls')),
    path('quotes/', include('quotes.urls')),
]
# Language-prefixed routes (e.g. /en/search/) via i18n_patterns.
urlpatterns += i18n_patterns(
    path('search/', include('haystack.urls')),
    # NOTE(review): ``render_js`` is neither imported nor defined in this
    # module's visible code, so this raises NameError at import time.
    # Confirm the intended import (a JS-settings view helper) and add it.
    path("js-settings/", render_js,
         {"template_name": "settings.js"},
         name="js_settings"),
)
| 35.810811 | 77 | 0.676981 | from django.contrib import admin
from django.conf.urls.i18n import i18n_patterns
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('bulletins/', include('bulletin_board.urls')),
path('cv/', include('cv.urls')),
path('email/', include('email_messages.urls')),
path('like/', include('likes.urls')),
path('locations/', include('locations.urls')),
path('movies/', include('movies.urls')),
path('quotes/', include('quotes.urls')),
]
urlpatterns += i18n_patterns(
path('search/', include('haystack.urls')),
path("js-settings/", render_js,
{"template_name": "settings.js"},
name="js_settings"),
)
| true | true |
1c37cce45e3f42c0c9228dc0917458e271696929 | 2,932 | py | Python | src/sentry/lang/native/utils.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/lang/native/utils.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/lang/native/utils.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import re
import six
import logging
from sentry.stacktraces.processing import find_stacktraces_in_data
from sentry.utils.safe import get_path
logger = logging.getLogger(__name__)
# Regex to parse OS versions from a minidump OS string.
# NOTE(review): VERSION_RE appears unused in this chunk — presumably
# consumed elsewhere in the module; confirm before removing.
VERSION_RE = re.compile(r"(\d+\.\d+\.\d+)\s+(.*)")
# Regex to guess whether we're dealing with Windows or Unix paths.
# Matches drive-letter prefixes ("c:\") and UNC prefixes ("\\").
WINDOWS_PATH_RE = re.compile(r"^([a-z]:\\|\\\\)", re.IGNORECASE)
# Event platforms that could contain native stacktraces
NATIVE_PLATFORMS = ("cocoa", "native")
# Debug image types that can be handled by the symbolicator
NATIVE_IMAGE_TYPES = (
    "apple",  # Deprecated in favor of "macho"
    "symbolic",  # Generic if type is not known
    "elf",  # Linux
    "macho",  # macOS, iOS
    "pe",  # Windows
)
def is_native_platform(platform):
    """Return True when the platform string itself implies native stack traces."""
    return platform in NATIVE_PLATFORMS
def is_native_image(image):
    """Check whether *image* is a native debug image the symbolicator can use.

    A usable image has a known native type, an address and a size, and at
    least one identifier (``debug_id``, ``id`` or ``uuid``).
    """
    if not image:
        return False
    if image.get("type") not in NATIVE_IMAGE_TYPES:
        return False
    if image.get("image_addr") is None or image.get("image_size") is None:
        return False
    identifier = image.get("debug_id") or image.get("id") or image.get("uuid")
    return identifier is not None
def native_images_from_data(data):
    """Return all native debug images from the event's ``debug_meta``."""
    return get_path(data, "debug_meta", "images", default=(), filter=is_native_image)
def is_native_event(data):
    """Return True if the event platform or any stacktrace frame is native."""
    if is_native_platform(data.get("platform")):
        return True
    # Even on non-native platforms individual stacktraces may carry
    # native frames (e.g. a crash in a native extension).
    for stacktrace in find_stacktraces_in_data(data):
        if any(is_native_platform(x) for x in stacktrace.platforms):
            return True
    return False
def is_minidump_event(data):
    """Return True if the first exception came from a minidump/unreal report."""
    exceptions = get_path(data, "exception", "values", filter=True)
    return get_path(exceptions, 0, "mechanism", "type") in ("minidump", "unreal")
def image_name(pkg):
    """Return the file-name component of a native image path.

    Both Windows-style (``C:\\...`` / UNC) and POSIX paths are handled;
    a falsy *pkg* is returned unchanged.
    """
    if not pkg:
        return pkg
    sep = "\\" if WINDOWS_PATH_RE.match(pkg) else "/"
    # rpartition yields ('', '', pkg) when the separator is absent, so the
    # last element is always the basename.
    return pkg.rpartition(sep)[2]
def get_sdk_from_event(event):
    """Return sdk_info for *event*, deriving it from the OS context if absent."""
    sdk_info = get_path(event, "debug_meta", "sdk_info")
    if sdk_info:
        return sdk_info
    os = get_path(event, "contexts", "os")
    if os and os.get("type") == "os":
        return get_sdk_from_os(os)
def get_sdk_from_os(data):
    """Build an sdk_info dict from an OS context.

    Returns None when name/version are missing or the version does not
    parse into integers.
    """
    if data.get("name") is None or data.get("version") is None:
        return
    try:
        # Strip a "-beta"-like suffix, then pad with ".0.0.0" so that short
        # versions such as "10" or "10.1" still yield three components.
        version = six.text_type(data["version"]).split("-", 1)[0] + ".0" * 3
        system_version = tuple(int(x) for x in version.split(".")[:3])
    except ValueError:
        return
    return {
        "sdk_name": data["name"],
        "version_major": system_version[0],
        "version_minor": system_version[1],
        "version_patchlevel": system_version[2],
        "build": data.get("build"),
    }
def signal_from_data(data):
    """Return the signal number of the first exception's mechanism, or None."""
    exceptions = get_path(data, "exception", "values", filter=True)
    signal = get_path(exceptions, 0, "mechanism", "meta", "signal", "number")
    if signal is not None:
        return int(signal)
    return None
| 27.148148 | 87 | 0.658595 | from __future__ import absolute_import
import re
import six
import logging
from sentry.stacktraces.processing import find_stacktraces_in_data
from sentry.utils.safe import get_path
logger = logging.getLogger(__name__)
VERSION_RE = re.compile(r"(\d+\.\d+\.\d+)\s+(.*)")
WINDOWS_PATH_RE = re.compile(r"^([a-z]:\\|\\\\)", re.IGNORECASE)
# Event platforms that could contain native stacktraces
NATIVE_PLATFORMS = ("cocoa", "native")
# Debug image types that can be handled by the symbolicator
NATIVE_IMAGE_TYPES = (
"apple", # Deprecated in favor of "macho"
"symbolic", # Generic if type is not known
"elf", # Linux
"macho", # macOS, iOS
"pe", # Windows
)
def is_native_platform(platform):
return platform in NATIVE_PLATFORMS
def is_native_image(image):
return (
bool(image)
and image.get("type") in NATIVE_IMAGE_TYPES
and image.get("image_addr") is not None
and image.get("image_size") is not None
and (image.get("debug_id") or image.get("id") or image.get("uuid")) is not None
)
def native_images_from_data(data):
return get_path(data, "debug_meta", "images", default=(), filter=is_native_image)
def is_native_event(data):
if is_native_platform(data.get("platform")):
return True
for stacktrace in find_stacktraces_in_data(data):
if any(is_native_platform(x) for x in stacktrace.platforms):
return True
return False
def is_minidump_event(data):
exceptions = get_path(data, "exception", "values", filter=True)
return get_path(exceptions, 0, "mechanism", "type") in ("minidump", "unreal")
def image_name(pkg):
if not pkg:
return pkg
split = "\\" if WINDOWS_PATH_RE.match(pkg) else "/"
return pkg.rsplit(split, 1)[-1]
def get_sdk_from_event(event):
sdk_info = get_path(event, "debug_meta", "sdk_info")
if sdk_info:
return sdk_info
os = get_path(event, "contexts", "os")
if os and os.get("type") == "os":
return get_sdk_from_os(os)
def get_sdk_from_os(data):
if data.get("name") is None or data.get("version") is None:
return
try:
version = six.text_type(data["version"]).split("-", 1)[0] + ".0" * 3
system_version = tuple(int(x) for x in version.split(".")[:3])
except ValueError:
return
return {
"sdk_name": data["name"],
"version_major": system_version[0],
"version_minor": system_version[1],
"version_patchlevel": system_version[2],
"build": data.get("build"),
}
def signal_from_data(data):
exceptions = get_path(data, "exception", "values", filter=True)
signal = get_path(exceptions, 0, "mechanism", "meta", "signal", "number")
if signal is not None:
return int(signal)
return None
| true | true |
1c37cd0577c3baf926e58ab3b3f22495cf34232b | 31 | py | Python | arranger/closest/__init__.py | dezimynona/icml2021submission | 009eb6c6b617536bda7a247cbf5d6b7c0c131f19 | [
"MIT"
] | 1 | 2021-07-11T17:20:02.000Z | 2021-07-11T17:20:02.000Z | arranger/closest/__init__.py | dezimynona/icml2021submission | 009eb6c6b617536bda7a247cbf5d6b7c0c131f19 | [
"MIT"
] | null | null | null | arranger/closest/__init__.py | dezimynona/icml2021submission | 009eb6c6b617536bda7a247cbf5d6b7c0c131f19 | [
"MIT"
] | 1 | 2021-02-03T19:22:27.000Z | 2021-02-03T19:22:27.000Z | """Closest-pitch algorithm."""
| 15.5 | 30 | 0.677419 | true | true | |
1c37cdca199ced6425daca6cc92bee848283a476 | 698 | py | Python | sdk/python-sdk/setup.py | tw-bc-group/verity-sdk | e932209ab849f04a389bdda0718cd6227187e5cf | [
"Apache-2.0"
] | null | null | null | sdk/python-sdk/setup.py | tw-bc-group/verity-sdk | e932209ab849f04a389bdda0718cd6227187e5cf | [
"Apache-2.0"
] | 2 | 2021-09-02T19:02:06.000Z | 2021-09-02T19:02:24.000Z | sdk/python-sdk/setup.py | tw-bc-group/verity-sdk | e932209ab849f04a389bdda0718cd6227187e5cf | [
"Apache-2.0"
] | 1 | 2021-01-13T10:43:14.000Z | 2021-01-13T10:43:14.000Z | import setuptools
from verity_sdk.version import VERSION
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name="verity-sdk",
version=VERSION, # see verity_sdk/version.py
author="Evernym, Inc.",
author_email="dev@evernym.com",
description='The official Python SDK for Evernym\'s Verity',
license="Apache-2.0",
url="https://github.com/evernym/verity-sdk",
install_requires=[
'python3-indy~=1.15.0',
'requests~=2.22',
'base58~=2.0.0'
],
python_requires='~=3.6',
long_description=long_description,
long_description_content_type='text/markdown',
packages=setuptools.find_packages(),
)
| 25.851852 | 64 | 0.667622 | import setuptools
from verity_sdk.version import VERSION
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name="verity-sdk",
version=VERSION,
author="Evernym, Inc.",
author_email="dev@evernym.com",
description='The official Python SDK for Evernym\'s Verity',
license="Apache-2.0",
url="https://github.com/evernym/verity-sdk",
install_requires=[
'python3-indy~=1.15.0',
'requests~=2.22',
'base58~=2.0.0'
],
python_requires='~=3.6',
long_description=long_description,
long_description_content_type='text/markdown',
packages=setuptools.find_packages(),
)
| true | true |
1c37ce6927cbfac725faaa0c541bb5f99388ebb2 | 1,428 | py | Python | utils/average_plate.py | khainn/ALPR_System | 73d8adc6bc7cb91507d1bf047ded20be844923ef | [
"Apache-2.0"
] | 71 | 2019-01-04T03:42:04.000Z | 2022-03-28T16:38:58.000Z | utils/average_plate.py | khainn/ALPR_System | 73d8adc6bc7cb91507d1bf047ded20be844923ef | [
"Apache-2.0"
] | 14 | 2019-11-05T18:20:05.000Z | 2022-02-10T00:30:54.000Z | utils/average_plate.py | khainn/ALPR_System | 73d8adc6bc7cb91507d1bf047ded20be844923ef | [
"Apache-2.0"
] | 38 | 2019-03-26T09:05:20.000Z | 2022-03-25T12:46:36.000Z | """ Gets the recognized plate in several frames and calculates the most possible plate value """
import math
from collections import Counter
def getDistance(pointA, pointB):
    """
    Return the Euclidean distance between two points in the image.

    :param pointA: (x, y) coordinate pair
    :param pointB: (x, y) coordinate pair
    :return: the distance as a float
    """
    # math.hypot is clearer and numerically more robust than the manual
    # sqrt(dx**2 + dy**2) formulation.
    return math.hypot(pointA[0] - pointB[0], pointA[1] - pointB[1])
def tracking(previous_coordinate, current_coordinate):
    """Return how far the tracked plate moved between two frames."""
    return getDistance(previous_coordinate, current_coordinate)
def get_average_plate_value(plates, plates_length):
    """
    Return the most probable plate string from several per-frame readings.

    The dominant character count is determined from *plates_length*, only
    readings with that many characters are kept, and a per-position majority
    vote then selects each character of the result.

    :param plates: plate strings recognized on successive frames
    :param plates_length: number of characters detected for each reading
    :return: the consensus plate string ('' if no reading has the dominant
        length)
    """
    # The most frequent plate length wins; readings of any other length are
    # treated as mis-detections and discarded.
    number_char_on_plate = Counter(plates_length).most_common(1)[0][0]
    candidates = [plate for plate in plates if len(plate) == number_char_on_plate]
    # zip(*candidates) yields, for each character position, the tuple of
    # characters observed there across all candidate readings; take the
    # most common one per position.  (Fixes the original's shadowed loop
    # variable and manual index arithmetic.)
    return "".join(
        Counter(chars).most_common(1)[0][0] for chars in zip(*candidates)
    )
import math
from collections import Counter
def getDistance(pointA, pointB):
return math.sqrt(math.pow((pointA[0] - pointB[0]), 2) + math.pow((pointA[1] - pointB[1]), 2))
def tracking(previous_coordinate, current_coordinate):
distance = getDistance(previous_coordinate, current_coordinate)
return distance
def get_average_plate_value(plates, plates_length):
plates_to_be_considered = []
number_char_on_plate = Counter(plates_length).most_common(1)[0][0]
for plate in plates:
if (len(plate) == number_char_on_plate):
plates_to_be_considered.append(plate)
temp = ''
for plate in plates_to_be_considered:
temp = temp + plate
counter = 0
final_plate = ''
for i in range(number_char_on_plate):
my_list = []
for i in range(len(plates_to_be_considered)):
my_list.append(temp[i*number_char_on_plate + counter])
final_plate = final_plate + str(Counter(my_list).most_common(1)[0][0])
counter += 1
return final_plate | true | true |
1c37cf8f0e69896397ea2c6e841faf4fa2ce3678 | 7,883 | py | Python | main.py | namansnghl/goblin-hunter-pygame | c740549ff70e4233d3c17558a5a6e62f0025a3cf | [
"MIT"
] | null | null | null | main.py | namansnghl/goblin-hunter-pygame | c740549ff70e4233d3c17558a5a6e62f0025a3cf | [
"MIT"
] | null | null | null | main.py | namansnghl/goblin-hunter-pygame | c740549ff70e4233d3c17558a5a6e62f0025a3cf | [
"MIT"
] | null | null | null | import pygame
pygame.init()
screen_width = 987
screen_height = 598
# setting window size.
win = pygame.display.set_mode((screen_width, screen_height))
# game window title
pygame.display.set_caption("Goblin Hunter")
# loading character movements
bg = pygame.image.load('img/bg.jpg')
idle = pygame.image.load('img/standing.png')
class player:
    '''
    Creates the main character of game

    Object params:
        x ---> initial x coordinate of player
        y ---> initial y coordinate of player
        width ---> width of player
        height ---> height of player
        jump_height ---> jumping height of player. Default 10
        vel ---> Velocity of player. Default 8

    Methods:
        draw() ---> draws the character movement at location
    '''
    # Sprite sheets: 9 walking frames per direction, loaded once at class
    # definition time (requires pygame.init() and the img/ folder).
    walkRight = list(map(pygame.image.load,
                         '{folder}R1.png {folder}R2.png {folder}R3.png {folder}R4.png {folder}R5.png {folder}R6.png {folder}R7.png {folder}R8.png {folder}R9.png'.format(folder='img/').split()))
    walkLeft = list(map(pygame.image.load,
                        '{folder}L1.png {folder}L2.png {folder}L3.png {folder}L4.png {folder}L5.png {folder}L6.png {folder}L7.png {folder}L8.png {folder}L9.png'.format(folder='img/').split()))
    def __init__(self, x, y, width, height, jump_height=10, vel=8):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.vel = vel
        self.left = False
        self.right = False
        self.walkCount = 0          # animation frame counter (3 ticks per sprite)
        self.jumpcount = jump_height  # counts down from +jump_height to -jump_height
        self.jump = False
        self.standing = True
        # (x, y, w, h) collision rectangle, offset into the sprite.
        self.hitbox = (self.x+17, self.y+11, 29, 52)
    def draw(self, win):
        # 9 sprites per movement, 3 game ticks per sprite => wrap at 27.
        if (self.walkCount+1) >= 27:
            self.walkCount = 0
        if not(self.standing):
            # Walking: advance through the directional sprite sheet.
            if self.left:
                win.blit(self.walkLeft[self.walkCount//3], (int(self.x), int(self.y)))
                self.walkCount += 1
            elif self.right:
                win.blit(self.walkRight[self.walkCount//3], (int(self.x), int(self.y)))
                self.walkCount += 1
        else:
            # Standing: show the first frame facing the last direction moved.
            if self.right:
                win.blit(self.walkRight[0], (int(self.x), int(self.y)))
            else:
                win.blit(self.walkLeft[0], (int(self.x), int(self.y)))
        # Refresh hitbox to the current position and draw it (debug overlay).
        # y may be fractional mid-jump, hence the int conversion here.
        self.hitbox = (self.x+17, self.y+11, 29, 52)
        pygame.draw.rect(win, (255, 0, 150), tuple(map(int, self.hitbox)), 2)
class fire_bullet:
    '''
    Creates the bullets fired

    Object params:
        x ---> initial x coordinate
        y ---> initial y coordinate
        radius ---> bullet size
        color ---> bullet color
        facing ---> direction player is facing while shooting. -1 = left, +1 = right
        vel ---> Velocity of bullet. Default 10

    Methods:
        draw() ---> makes bullet animation
    '''
    def __init__(self, x, y, radius, color, facing, vel=10):
        self.x = x
        self.y = y
        self.facing = facing
        self.color = color
        self.radius = radius
        # Signed speed: negative travels left, positive travels right.
        self.vel = facing * vel

    def draw(self, win):
        # A bullet is simply a filled circle on the display surface.
        pygame.draw.circle(win, self.color, (self.x, self.y), self.radius)
class enemy:
    '''
    Creates the main character of game

    Object params:
        x ---> initial x coordinate of enemy
        y ---> initial y coordinate of enemy
        width ---> width of enemy
        height ---> height of enemy
        end ---> right end position of enemy. Default 10
        vel ---> Velocity of enemy. Default 4

    Methods:
        draw() ---> draws the character movement at location
        move() ---> enemy movement direction logic
    '''
    # Sprite sheets: 11 walking frames per direction, loaded at class
    # definition time (requires pygame.init() and the img/ folder).
    walkRight = list(map(pygame.image.load,
                         '{folder}R1E.png {folder}R2E.png {folder}R3E.png {folder}R4E.png {folder}R5E.png {folder}R6E.png {folder}R7E.png {folder}R8E.png {folder}R9E.png {folder}R10E.png {folder}R11E.png'.format(folder='img/').split()))
    walkLeft = list(map(pygame.image.load,
                        '{folder}L1E.png {folder}L2E.png {folder}L3E.png {folder}L4E.png {folder}L5E.png {folder}L6E.png {folder}L7E.png {folder}L8E.png {folder}L9E.png {folder}L10E.png {folder}L11E.png'.format(folder='img/').split()))
    def __init__(self, x, y, width, height, end, vel=4):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.end = end
        self.vel = vel
        self.walkCount = 0
        # Patrol boundaries: walks back and forth between x and end.
        self.path = [self.x, self.end]
        self.hitbox = (self.x+17, self.y+5, 30, 53)
    def draw(self, win):
        self.move()
        # 11 sprites per direction, 3 ticks per sprite => wrap at 33.
        if (self.walkCount+1) >= 33:
            self.walkCount = 0
        # Sign of vel doubles as the facing direction.
        if self.vel > 0:
            win.blit(self.walkRight[self.walkCount//3], (int(self.x), int(self.y)))
            self.walkCount += 1
        else:
            win.blit(self.walkLeft[self.walkCount//3], (int(self.x), int(self.y)))
            self.walkCount += 1
        self.hitbox = (self.x+17, self.y+5, 30, 53)
        pygame.draw.rect(win, (255, 0, 150), self.hitbox, 2)
    def move(self):
        # Membership in range() only works because x and vel stay integers.
        if self.x+self.vel in range(self.path[0], self.path[1]):
            self.x += self.vel
        else:
            # Reached a patrol boundary: reverse and restart the animation.
            self.vel = self.vel * -1
            self.walkCount = 0
    def hit(self):
        # Placeholder hit reaction; currently only logs to stdout.
        print('hit')
def redrawGameWindow():
    """Redraw background, hero, goblin and all live bullets, then flip.

    Uses the module-level globals win, bg, hero, goblin and bullets.
    """
    win.blit(bg, (0, 0))
    hero.draw(win)
    goblin.draw(win)
    for bullet in bullets:
        bullet.draw(win)
    pygame.display.update()
# --- Game setup and main loop ---
clock = pygame.time.Clock()
jump_height = 9
hero = player(20, 416, 64, 64, jump_height, vel=6)
goblin = enemy(0, 420, 64, 64, screen_width-50, vel=3)
run = True
shoot = 0   # cooldown counter; >0 means a shot was fired recently
bullets = []
# game begins
while run:
    clock.tick(27)  # 27 fps: 9 sprites per movement, 1 sprite = 3 frames
    for event in pygame.event.get():  # window close => quit
        if event.type == pygame.QUIT:
            run = False
    keys = pygame.key.get_pressed()
    # Shot cooldown: block new shots for 5 frames after firing.
    if shoot > 0:
        shoot += 1
    if shoot > 5:
        shoot = 0
    for bullet in bullets:
        # Axis-aligned overlap test between bullet circle and goblin hitbox.
        # NOTE(review): popping from `bullets` while iterating it can skip
        # the following bullet; consider iterating over a copy.
        if bullet.y-bullet.radius < goblin.hitbox[1]+goblin.hitbox[3] and bullet.y+bullet.radius > goblin.hitbox[1]:
            if bullet.x-bullet.radius < goblin.hitbox[0]+goblin.hitbox[2] and bullet.x+bullet.radius > goblin.hitbox[0]:
                goblin.hit()
                bullets.pop(bullets.index(bullet))
        if bullet.x in range(0, screen_width):
            bullet.x += bullet.vel
        else:  # off-screen bullets are removed
            bullets.pop(bullets.index(bullet))
    # Fire a new bullet: max 6 on screen, only when the cooldown elapsed.
    if keys[pygame.K_SPACE] and len(bullets) < 6 and shoot == 0:
        if hero.left:
            facing = -1
        else:
            facing = 1
        bullets.append(fire_bullet(
            round(hero.x+hero.width//2),
            round(hero.y+hero.height//2),
            5, (252, 177, 3), facing, vel=7))
        shoot = 1
    # Horizontal movement, clamped to the window.
    if keys[pygame.K_LEFT] and hero.x > 0:
        hero.x -= hero.vel
        hero.left, hero.right = True, False
        hero.standing = False
    elif keys[pygame.K_RIGHT] and hero.x < (screen_width-hero.width-hero.vel):
        hero.x += hero.vel
        hero.left, hero.right = False, True
        hero.standing = False
    else:
        hero.walkCount = 0
        hero.standing = True
    if not(hero.jump):  # start a jump on the UP key
        if keys[pygame.K_UP]:
            hero.jump = True
            hero.left, hero.right = False, False
            hero.walkCount = 0
    else:
        # Parabolic jump: jumpcount runs +h..-h, displacement ~ count^2/2.
        if hero.jumpcount >= -(jump_height):
            neg = 1
            if hero.jumpcount < 0:
                neg = -1
            hero.y -= (hero.jumpcount**2)*0.5*neg
            hero.jumpcount -= 1
        else:
            hero.jump = False
            hero.jumpcount = jump_height
    redrawGameWindow()
pygame.quit()
| 32.709544 | 236 | 0.578714 | import pygame
pygame.init()
screen_width = 987
screen_height = 598
win = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption("Goblin Hunter")
bg = pygame.image.load('img/bg.jpg')
idle = pygame.image.load('img/standing.png')
class player:
walkRight = list(map(pygame.image.load,
'{folder}R1.png {folder}R2.png {folder}R3.png {folder}R4.png {folder}R5.png {folder}R6.png {folder}R7.png {folder}R8.png {folder}R9.png'.format(folder='img/').split()))
walkLeft = list(map(pygame.image.load,
'{folder}L1.png {folder}L2.png {folder}L3.png {folder}L4.png {folder}L5.png {folder}L6.png {folder}L7.png {folder}L8.png {folder}L9.png'.format(folder='img/').split()))
def __init__(self, x, y, width, height, jump_height=10, vel=8):
self.x = x
self.y = y
self.width = width
self.height = height
self.vel = vel
self.left = False
self.right = False
self.walkCount = 0
self.jumpcount = jump_height
self.jump = False
self.standing = True
self.hitbox = (self.x+17, self.y+11, 29, 52)
def draw(self, win):
if (self.walkCount+1) >= 27:
self.walkCount = 0
if not(self.standing):
if self.left:
win.blit(self.walkLeft[self.walkCount//3], (int(self.x), int(self.y)))
self.walkCount += 1
elif self.right:
win.blit(self.walkRight[self.walkCount//3], (int(self.x), int(self.y)))
self.walkCount += 1
else:
if self.right:
win.blit(self.walkRight[0], (int(self.x), int(self.y)))
else:
win.blit(self.walkLeft[0], (int(self.x), int(self.y)))
self.hitbox = (self.x+17, self.y+11, 29, 52)
pygame.draw.rect(win, (255, 0, 150), tuple(map(int, self.hitbox)), 2)
class fire_bullet:
def __init__(self, x, y, radius, color, facing, vel=10):
self.x, self.y, self.facing = x, y, facing
self.color = color
self.radius = radius
self.vel = vel*facing
def draw(self, win):
pygame.draw.circle(win, self.color, (self.x, self.y), self.radius)
class enemy:
walkRight = list(map(pygame.image.load,
'{folder}R1E.png {folder}R2E.png {folder}R3E.png {folder}R4E.png {folder}R5E.png {folder}R6E.png {folder}R7E.png {folder}R8E.png {folder}R9E.png {folder}R10E.png {folder}R11E.png'.format(folder='img/').split()))
walkLeft = list(map(pygame.image.load,
'{folder}L1E.png {folder}L2E.png {folder}L3E.png {folder}L4E.png {folder}L5E.png {folder}L6E.png {folder}L7E.png {folder}L8E.png {folder}L9E.png {folder}L10E.png {folder}L11E.png'.format(folder='img/').split()))
def __init__(self, x, y, width, height, end, vel=4):
self.x = x
self.y = y
self.width = width
self.height = height
self.end = end
self.vel = vel
self.walkCount = 0
self.path = [self.x, self.end]
self.hitbox = (self.x+17, self.y+5, 30, 53)
def draw(self, win):
self.move()
if (self.walkCount+1) >= 33:
self.walkCount = 0
if self.vel > 0:
win.blit(self.walkRight[self.walkCount//3], (int(self.x), int(self.y)))
self.walkCount += 1
else:
win.blit(self.walkLeft[self.walkCount//3], (int(self.x), int(self.y)))
self.walkCount += 1
self.hitbox = (self.x+17, self.y+5, 30, 53)
pygame.draw.rect(win, (255, 0, 150), self.hitbox, 2)
def move(self):
if self.x+self.vel in range(self.path[0], self.path[1]):
self.x += self.vel
else:
self.vel = self.vel * -1
self.walkCount = 0
def hit(self):
print('hit')
def redrawGameWindow():
win.blit(bg, (0, 0))
hero.draw(win)
goblin.draw(win)
for bullet in bullets:
bullet.draw(win)
pygame.display.update()
clock = pygame.time.Clock()
jump_height = 9
hero = player(20, 416, 64, 64, jump_height, vel=6)
goblin = enemy(0, 420, 64, 64, screen_width-50, vel=3)
run = True
shoot = 0
bullets = []
while run:
clock.tick(27)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
keys = pygame.key.get_pressed()
if shoot > 0:
shoot += 1
if shoot > 5:
shoot = 0
for bullet in bullets:
if bullet.y-bullet.radius < goblin.hitbox[1]+goblin.hitbox[3] and bullet.y+bullet.radius > goblin.hitbox[1]:
if bullet.x-bullet.radius < goblin.hitbox[0]+goblin.hitbox[2] and bullet.x+bullet.radius > goblin.hitbox[0]:
goblin.hit()
bullets.pop(bullets.index(bullet))
if bullet.x in range(0, screen_width):
bullet.x += bullet.vel
else:
bullets.pop(bullets.index(bullet))
if keys[pygame.K_SPACE] and len(bullets) < 6 and shoot == 0:
if hero.left:
facing = -1
else:
facing = 1
bullets.append(fire_bullet(
round(hero.x+hero.width//2),
round(hero.y+hero.height//2),
5, (252, 177, 3), facing, vel=7))
shoot = 1
if keys[pygame.K_LEFT] and hero.x > 0:
hero.x -= hero.vel
hero.left, hero.right = True, False
hero.standing = False
elif keys[pygame.K_RIGHT] and hero.x < (screen_width-hero.width-hero.vel):
hero.x += hero.vel
hero.left, hero.right = False, True
hero.standing = False
else:
hero.walkCount = 0
hero.standing = True
if not(hero.jump):
if keys[pygame.K_UP]:
hero.jump = True
hero.left, hero.right = False, False
hero.walkCount = 0
else:
if hero.jumpcount >= -(jump_height):
neg = 1
if hero.jumpcount < 0:
neg = -1
hero.y -= (hero.jumpcount**2)*0.5*neg
hero.jumpcount -= 1
else:
hero.jump = False
hero.jumpcount = jump_height
redrawGameWindow()
pygame.quit()
| true | true |
1c37cff4afe4b2afdd67d7d83fdde90f400a23b5 | 35,293 | py | Python | python/lvmcam/araviscam/BlackflyCam.py | sdss/lvmcam | c5f421a546a0072a0dbb3d7b2ebc74316f339f64 | [
"BSD-3-Clause"
] | 3 | 2021-11-17T02:40:02.000Z | 2022-03-22T08:30:45.000Z | python/lvmcam/araviscam/BlackflyCam.py | sdss/lvmcam | c5f421a546a0072a0dbb3d7b2ebc74316f339f64 | [
"BSD-3-Clause"
] | 8 | 2021-11-25T10:18:31.000Z | 2021-12-17T13:04:52.000Z | python/lvmcam/araviscam/BlackflyCam.py | sdss/lvmcam | c5f421a546a0072a0dbb3d7b2ebc74316f339f64 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
"""
Python3 class to work with Aravis/GenICam cameras, subclass of sdss-basecam.
.. module:: araviscam
.. moduleauthor:: Richard J. Mathar <mathar@mpia.de>
"""
import sys
import math
import asyncio
import numpy
import astropy
from basecam.mixins import ImageAreaMixIn
from basecam import (
CameraSystem,
BaseCamera,
CameraEvent,
CameraConnectionError,
models,
ExposureError,
)
from lvmcam.actor import modules
# Since the aravis wrapper for GenICam cameras (such as the Blackfly)
# is using glib2 GObjects to represent cameras and streams, the
# PyGObject module allows to call the C functions of aravis in python.
# https://pygobject.readthedocs.io/en/latest/
from lvmcam.araviscam.aravis import Aravis
import basecam.models.card as card
from lvmcam.actor.commands import expose
# https://pypi.org/project/sdss-basecam/
# https://githum.com/sdss/basecam/
# from sdsstools import read_yaml_file
__all__ = ["BlackflyCameraSystem", "BlackflyCamera", "BlackflyImageAreaMixIn"]
class BlackflyCameraSystem(CameraSystem):
    """A collection of GenICam cameras, possibly online

    :param camera_class : `.BaseCamera` subclass
        The subclass of `.BaseCamera` to use with this camera system.
    :param camera_config :
        A dictionary with the configuration parameters for the multiple
        cameras that can be present in the system, or the path to a YAML file.
        Refer to the documentation for details on the accepted format.
    :type camera_config : dict or path
    :param include : List of camera UIDs that can be connected.
    :type include : list
    :param exclude : list
        List of camera UIDs that will be ignored.
    :param logger : ~logging.Logger
        The logger instance to use. If `None`, a new logger will be created.
    :param log_header : A string to be prefixed to each message logged.
    :type log_header : str
    :param log_file : The path to which to log.
    :type log_file : str
    :param verbose : Whether to log to stdout.
    :type verbose : bool
    :param ip_list: A list of IP-Adresses to be checked/pinged.
    :type ip_list: List of strings.
    """

    __version__ = "0.0.301"

    # IP addresses (dotted quads or host names) added explicitly through the
    # constructor.  These may live outside the local subnet and therefore not
    # be found by the broadcast-based auto-detection.
    ips_nonlocal = []

    def __init__(
        self,
        camera_class=None,
        camera_config=None,
        include=None,
        exclude=None,
        logger=None,
        log_header=None,
        log_file=None,
        verbose=False,
        ip_list=None,
    ):
        super().__init__(
            camera_class=camera_class,
            camera_config=camera_config,
            include=include,
            exclude=exclude,
            logger=logger,
            log_header=log_header,
            log_file=log_file,
            verbose=verbose,
        )

        # IPs handed to the ctor are only recorded here; they are probed
        # lazily in list_available_cameras().
        if ip_list is not None:
            self.ips_nonlocal.extend(ip_list)

    def list_available_cameras(self):
        """Gather serial numbers of online Aravis/Genicam devices.

        :return: a list of serial numbers (as strings). This list may be
            empty if no cameras are online/switched on.
            For cameras explicitly addressed by IP, the serial
            numbers have the format sn@ip, with an @ between number and address.
        :rtype: list

        .. todo:: optionally implement a specific filter for Blackfly's if Basler
                  cameras should not be listed.
        """
        # Start with (pessimistic) initially empty set of online devices.
        serialNums = []
        addrs = []

        # Broadcast discovery on the local network.  Warning: this also
        # gathers GenICam cameras that are not Blackfly's (e.g. Basler).
        Aravis.update_device_list()
        Ndev = Aravis.get_n_devices()
        for i in range(Ndev):
            cam = Aravis.Camera.new(Aravis.get_device_id(i))
            uid = cam.get_string("DeviceSerialNumber")
            serialNums.append(uid)
            addrs.append("")

        # Probe the explicitly configured (possibly non-local) addresses.
        for ip in self.ips_nonlocal:
            try:
                cam = Aravis.Camera.new(ip)
                uid = cam.get_string("DeviceSerialNumber")
                # Skip duplicates already found by the broadcast scan.
                if uid not in serialNums:
                    serialNums.append(uid)
                    addrs.append("@" + ip)
            except Exception:
                # Apparently no such camera at this address; ignore it.
                pass

        # Zip the two lists into the format 'serialnumber{@ip}'.
        return [sn + addr for sn, addr in zip(serialNums, addrs)]
from basecam.models.builtin import basic_fz_fits_model
class BlackflyCamera(BaseCamera):
"""A FLIR (formerly Point Grey Research) Blackfly camera.
Given the pixel scale on the benches of LVMi and the assumption
of 9 um pixel sizes of the LVMi cameras, we assume that the
cameras have roughly 1 arsec per pixel, so they are used without binning.
In addition we let the camera flip the standard image orientation of the data
values assuming that values are stored into a FITS interface (where
the first values in the sequential data are the bottom row).
So this is not done in this python code but by the camera.
"""
# fits_model=basic_fz_fits_model
    def __init__(
        self,
        uid,
        camera_system,
        name=None,
        force=False,
        image_namer=None,
        camera_params={},
    ):
        """Instantiate a Blackfly camera inside *camera_system*.

        All arguments are forwarded unchanged to `.BaseCamera`.
        """
        # NOTE(review): the mutable default for camera_params mirrors the
        # basecam signature but is shared across calls; consider None + {}.
        super().__init__(
            uid=uid,
            camera_system=camera_system,
            name=name,
            force=force,
            image_namer=image_namer,
            camera_params=camera_params,
        )
        # Extra per-camera header entries; presumably filled during exposure
        # handling elsewhere in this class — TODO confirm.
        self.header = []
@modules.atimeit
async def _connect_internal(self, **kwargs):
"""Connect to a camera and upload basic binning and ROI parameters.
:param kwargs: recognizes the key uid with integer value, the serial number
If the key uid is absent, tries to attach to the first camera.
This is a subdictionary of 'cameras' in practise.
"""
# print(self.name)
# search for an optional uid key in the arguments
try:
uid = kwargs["uid"]
except:
uid = None
# reverse lookup of the uid in the list of known cameras
cs = BlackflyCameraSystem(BlackflyCamera)
slist = cs.list_available_cameras()
if uid is None:
# uid was not specified: grab the first device that is found
# print("no uid provided, attaching to first camera")
idx = 0
else:
# print("searching " + uid + " in " + str(slist) )
idx = -1
for id in slist:
# remove the optional ip address of the id
slistuid = id.split("@")[0]
if slistuid == uid:
idx = slist.index(id)
# not found
if idx < 0:
raise CameraConnectionError("SN " + uid + " not connected")
cam = None
try:
if "@" in slist[idx]:
# if the camera was not on local network use the address part
cam = Aravis.Camera.new(slist[idx].split("@")[1])
else:
# otherwise the index is the same as the search order...
cam = Aravis.Camera.new(Aravis.get_device_id(idx))
except:
raise CameraConnectionError(" not connected")
# search for an optional gain key in the arguments
# todo: one could interpret gain=0 here as to call set_gain_auto(ARV_AUTO_ON)
try:
gain = kwargs["gain"]
if gain > 0.0:
# todo: it might make sense to squeeze this into the minimum
# and maximum range of the camera's gain if outside that range.
self.device.set_gain_auto(0)
cam.set_gain(gain)
except Exception as ex:
# print("failed to set gain " + str(ex))
pass
# see arvenums.h for the list of pixel formats. This is MONO_16 here, always
cam.set_pixel_format(0x01100007)
# search for an optional x and y binning factor
try:
var = kwargs["binning"]
cam.set_binning(var[0], var[1])
except Exception as ex:
# print("failed to set binning " + str(ex))
# horizontal and vertical binning set to 1
cam.set_binning(1, 1)
# scan the general list of genicam featured values
# of the four native types
for typp, arvLst in kwargs.items():
if arvLst is not None:
if typp == "bool":
for genkey, genval in arvLst.items():
try:
cam.set_boolean(genkey, int(genval))
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
elif typp == "int":
for genkey, genval in arvLst.items():
try:
cam.set_integer(genkey, genval)
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
elif typp == "float":
for genkey, genval in arvLst.items():
try:
cam.set_float(genkey, genval)
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
elif typp == "string":
for genkey, genval in arvLst.items():
try:
cam.set_string(genkey, genval)
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
dev = cam.get_device()
# Take full frames by default (maximizing probability of LVM guide camera
# to find guide stars in the field)
roiBounds = [-1, -1]
try:
roiBounds[0] = dev.get_integer_feature_value("WidthMax")
roiBounds[1] = dev.get_integer_feature_value("HeightMax")
# print(" ROI " + str(roiBounds[0]) + " x " + str(roiBounds[1]) )
cam.set_region(0, 0, roiBounds[0], roiBounds[1])
except Exception as ex:
# print("failed to set ROI " + str(ex))
pass
self.device = cam
self.regionBounds = roiBounds
@modules.atimeit
    async def _disconnect_internal(self):
        """Close connection to camera.

        Only drops the Aravis camera reference; no explicit shutdown call
        is made here.
        """
        self.device = None
# @modules.atimeit
async def _expose_grabFrame(self, exposure):
"""Read a single unbinned full frame.
The class splits the parent class' exposure into this function and
the part which generates the FITS file, because applications in guiders
are usually only interested in the frame's data, and would not
take the detour of generating a FITS file and reading it back from
disk.
:param exposure: On entry, exposure.exptim is the intended exposure time in [sec]
On exit, exposure.data is the numpy array of the 16bit data
arranged in FITS order (i.e., the data of the bottom row appear first...)
:return: The dictionary with the window location and size (x=,y=,width=,height=)
"""
# To avoid being left over by other programs with no change
# to set the exposure time, we switch the auto=0=off first
self.device.set_exposure_time_auto(0)
# Aravis assumes exptime in micro second integers
exptime_ms = int(0.5 + exposure.exptime * 1e6)
self.device.set_exposure_time(exptime_ms)
# timeout (factor 2: assuming there may be two frames in auto mode taken
# internally)
# And 5 seconds margin for any sort of transmission overhead over PoE
tout_ms = int(1.0e6 * (2.0 * exposure.exptime + 5))
self.notify(CameraEvent.EXPOSURE_INTEGRATING)
# the buffer allocated/created within the acquisition()
buf = await self.loop.run_in_executor(None, self.device.acquisition, tout_ms)
if buf is None:
raise ExposureError(
"Exposing for "
+ str(exposure.exptime)
+ " sec failed. Timout "
+ str(tout_ms / 1.0e6)
)
# Decipher which methods this aravis buffer has...
# print(dir(buf))
# reg becomes a x=, y=, width= height= dictionary
# these are in standard X11 coordinates where upper left =(0,0)
reg = buf.get_image_region()
# print('region',reg)
data = buf.get_data()
exposure.data = numpy.ndarray(
buffer=data, dtype=numpy.uint16, shape=(1, reg.height, reg.width)
)
# print("exposure data shape", exposure.data.shape)
return reg
@modules.atimeit
async def _expose_internal(self, exposure):
"""Read a single unbinned full frame and store in a FITS file.
:param exposure: On entry exposure.exptim is the intended exposure time in [sec]
On exit, exposure.data contains the 16bit data of a single frame
:return: There is no return value
"""
# fill exposure.data with the frame's 16bit data
# reg becomes a x=, y=, width= height= dictionary
# these are in standard X11 coordinates where upper left =(0,0)
reg = await self._expose_grabFrame(exposure)
# print('region',reg)
binxy = {}
try:
# becomes a dictionary with dx=... dy=... for the 2 horiz/vert binn fact
binxy = self.device.get_binning()
except Exception as ex:
binxy = None
# append FITS header cards
# For the x/y coordinates transform from X11 to FITS coordinates
# Todo: reports the camera y-flipped reg.y if ReversY=true above??
addHeaders = [
("BinX", binxy.dx, "[ct] Horizontal Bin Factor 1, 2 or 4"),
("BinY", binxy.dy, "[ct] Vertical Bin Factor 1, 2 or 4"),
("Width", reg.width, "[ct] Pixel Columns"),
("Height", reg.height, "[ct] Pixel Rows"),
("RegX", 1 + reg.x, "[ct] Pixel Region Horiz start"),
# The lower left FITS corner is the upper left X11 corner...
(
"RegY",
self.regionBounds[1] - (reg.y + reg.height - 1),
"[ct] Pixel Region Vert start",
),
]
dev = self.device.get_device()
# print(dir(dev))
# print(dir(self))
# print(self.camera_system.get_camera(self.name))
# print(self.camera_system._config[self.name])
try:
gain = dev.get_float_feature_value("Gain")
addHeaders.append(("Gain", gain, "Gain"))
except Exception as ex:
# print("failed to read gain" + str(ex))
pass
imgrev = [False, False]
try:
imgrev[0] = self.device.get_boolean("ReverseX")
addHeaders.append(("ReverseX", imgrev[0] != 0, " Flipped left-right"))
imgrev[1] = self.device.get_boolean("ReverseY")
addHeaders.append(("ReverseY", imgrev[1] != 0, " Flipped up-down"))
# print("reversed" + str(imgrev[0]) + str(imgrev[1]) )
except Exception as ex:
# print("failed to read ReversXY" + str(ex))
pass
# This is an enumeration in the GenICam. See features list of
# `arv-tool-0.8 --address=192.168.70.50 features`
binMod = [-1, -1]
try:
binMod[0] = dev.get_integer_feature_value("BinningHorizontalMode")
if binMod[0] == 0:
addHeaders.append(
("BinModeX", "Averag", "Horiz Bin Mode Sum or Averag")
)
else:
addHeaders.append(("BinModeX", "Sum", "Horiz Bin Mode Sum or Averag"))
binMod[1] = dev.get_integer_feature_value("BinningVerticalMode")
if binMod[1] == 0:
addHeaders.append(("BinModeY", "Averag", "Vert Bin Mode Sum or Averag"))
else:
addHeaders.append(("BinModeY", "Sum", "Vert Bin Mode Sum or Averag"))
except Exception as ex:
# print("failed to read binmode" + str(ex))
pass
tmp = False
try:
tmp = self.device.get_boolean("BlackLevelClampingEnable")
addHeaders.append(
("CAMBLCLM", tmp != 0, "Black Level Clamping en/disabled")
)
# print("BlackLevelClampingEnable" + str(imgrev[0]) + str(imgrev[1]) )
except Exception as ex:
# print("failed to read BlackLevelClampingEnable" + str(ex))
pass
try:
camtyp = self.device.get_model_name()
addHeaders.append(("CAMTYP", camtyp, "Camera model"))
except:
pass
# call _expose_wcs() to gather WCS header keywords
addHeaders.extend(self._expose_wcs(exposure, reg))
# for headr in addHeaders:
# exposure.fits_model[0].header_model.append(models.Card(headr))
self.header = addHeaders
# print(repr(exposure.to_hdu()[0].header))
# unref() is currently usupported in this GObject library.
# Hope that this does not lead to any memory leak....
# buf.unref()
return
# @modules.timeit
def _expose_wcs(self, exposure, reg):
"""Gather information for the WCS FITS keywords
:param exposure: On entry exposure.exptim is the intended exposure time in [sec]
On exit, exposure.data contains the 16bit data of a single frame
:param reg The binning and region information
"""
# the section/dictionary of the yaml file for this camera
yamlconfig = self.camera_system._config[self.name]
wcsHeaders = []
# The distance from the long edge of the FLIR camera to the center
# of the focus (fiber) is 7.144+4.0 mm according to SDSS-V_0110 figure 6
# and 11.14471 according to figure 3-1 of LVMi-0081
# For the *w or *e cameras the pixel row 1 (in FITS) is that far
# away in the y-coordinate and in the middle of the x-coordinate.
# For the *c cameras at the fiber bundle we assume them to be in the beam center.
wcsHeaders.append(("CRPIX1", reg.width / 2, "[px] RA center along axis 1"))
if self.name[-1] == "c":
wcsHeaders.append(
("CRPIX2", reg.height / 2, "[px] DEC center along axis 2")
)
else:
# convert 11.14471 mm to microns and to to pixels
crefy = 11.14471 * 1000.0 / yamlconfig["pixsize"]
wcsHeaders.append(("CRPIX2", -crefy, "[px] DEC center along axis 2"))
return wcsHeaders
class BlackflyImageAreaMixIn(ImageAreaMixIn):
"""Allows to select image region and binning factors"""
async def _get_image_area_internal(self):
pass
async def _set_image_area_internal(self, area=None):
pass
async def _get_binning_internal(self):
pass
async def _set_binning_internal(self, hbin, vbin):
pass
# async def singleFrame(
# exptim,
# name,
# verb=False,
# ip_add=None,
# config="cameras.yaml",
# targ=None,
# kmirr=0.0,
# flen=None,
# ):
# """Expose once and write the image to a FITS file.
# :param exptim: The exposure time in seconds. Non-negative.
# :type exptim: float
# :param verb: Verbosity on or off
# :type verb: boolean
# :param ip_add: list of explicit IP's (like 192.168.70.51 or lvmt.irws2.mpia.de)
# :type ip_add: list of strings
# :param config: Name of the YAML file with the cameras configuration
# :type config: string of the file name
# :param targ: alpha/delta ra/dec of the sidereal target
# :type targ: astropy.coordinates.SkyCoord
# :param kmirr: Kmirr angle in degrees (0 if up, positive with right hand rule along North on bench)
# :type kmirr: float
# :param flen: focal length of telescope/siderostat in mm
# If not provided it will be taken from the configuration file
# :type flen: float
# """
# cs = BlackflyCameraSystem(
# BlackflyCamera, camera_config=config, verbose=verb, ip_list=ip_add
# )
# cam = await cs.add_camera(name=name)
# # print("cameras", cs.cameras)
# # print("config" ,config)
# exp = await cam.expose(exptim, "LAB TEST")
# if targ is not None and kmirr is not None:
# # if there is already a (partial) header information, keep it,
# # otherwise create one ab ovo.
# if exp.wcs is None:
# wcshdr = astropy.io.fits.Header()
# else:
# wcshdr = exp.wcs.to_header()
# key = astropy.io.fits.Card("CUNIT1", "deg", "WCS units along axis 1")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CUNIT2", "deg", "WCS units along axis 2")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CTYPE1", "RA---TAN", "WCS type axis 1")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CTYPE2", "DEC--TAN", "WCS type axis 2")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CRVAL1", targ.ra.deg, "[deg] RA at reference pixel")
# wcshdr.append(key)
# key = astropy.io.fits.Card(
# "CRVAL2", targ.dec.deg, "[deg] DEC at reference pixel"
# )
# wcshdr.append(key)
# # field angle: degrees, then radians
# # direction of NCP on the detectors (where we have already flipped pixels
# # on all detectors so fieldrot=kmirr=0 implies North is up and East is left)
# # With right-handed-rule: zero if N=up (y-axis), 90 deg if N=right (x-axis)
# # so the direction is the vector ( sin(f), cos(f)) before the K-mirror.
# # Action of K-mirror is ( cos(2*m), sin(2*m); sin(2*m), -cos(2*m))
# # and action of prism is (-1 0 ; 0 1), i.e. to flip the horizontal coordinate.
# # todo: get starting value from a siderostat field rotation tracking model
# fieldrot = 0.0
# if name[-1] == "c":
# # without prism, assuming center camera placed horizontally
# if name[:4] == "spec":
# # without K-mirror
# pass
# else:
# # with K-mirror
# # in the configuration the y-axis of the image has been flipped,
# # the combined action of (1, 0; 0, -1) and the K-mirror is (cos(2m), sin(2m); -sin(2m), cos(2m))
# # and applied to the input vector this is (sin(2m+f), cos(2m+f))
# fieldrot += 2.0 * kmirr
# else:
# # with prism
# if name[:4] == "spec":
# # without K-mirror
# # Applied to input beam this gives (-sin(f), cos(f)) but prism effect
# # had been undone by vertical flip in the FLIR image.
# pass
# else:
# # with K-mirror
# # Combined action of K-mirror and prism is (-cos(2*m), -sin(2*m);sin(2*m), -cos(2*m)).
# # Applied to input beam this gives (-sin(2*m+f), -cos(2*m+f)) = (sin(2*m+f+pi), cos(2*m+f+pi)).
# fieldrot += 2.0 * kmirr + 180.0
# if name[-1] == "w":
# # Camera is vertically,
# # so up in the lab is right in the image
# fieldrot += 90
# else:
# # Camera is vertically,
# # so up in the lab is left in the image
# fieldrot -= 90
# fieldrot = math.radians(fieldrot)
# # the section/dictionary of the yaml file for this camera
# yamlconfig = cs._config[name]
# if flen is None:
# flen = yamlconfig["flen"]
# # pixel scale per arcseconds is focal length *pi/180 /3600
# # = flen * mm *pi/180 /3600
# # = flen * um *pi/180 /3.6, so in microns per arcsec...
# pixscal = math.radians(flen) / 3.6
# # degrees per pixel is arcseconds per pixel/3600 = (mu/pix)/(mu/arcsec)/3600
# degperpix = yamlconfig["pixsize"] / pixscal / 3600.0
# # for the right handed coordinates
# # (pixx,pixy) = (cos f', -sin f'; sin f', cos f')*(DEC,RA) where f' =90deg -fieldrot
# # (pixx,pixy) = (sin f, -cos f; cos f , sin f)*(DEC,RA)
# # (sin f, cos f; -cos f, sin f)*(pixx,pixy) = (DEC,RA)
# # (-cos f, sin f; sin f, cos f)*(pixx,pixy) = (RA,DEC)
# # Note that the det of the WCS matrix is negativ (because RA/DEC is left-handed...)
# cosperpix = degperpix * math.cos(fieldrot)
# sinperpix = degperpix * math.sin(fieldrot)
# key = astropy.io.fits.Card("CD1_1", -cosperpix, "[deg/px] WCS matrix diagonal")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CD2_2", cosperpix, "[deg/px] WCS matrix diagonal")
# wcshdr.append(key)
# key = astropy.io.fits.Card(
# "CD1_2", sinperpix, "[deg/px] WCS matrix outer diagonal"
# )
# wcshdr.append(key)
# key = astropy.io.fits.Card(
# "CD2_1", sinperpix, "[deg/px] WCS matrix outer diagonal"
# )
# wcshdr.append(key)
# exp.wcs = astropy.wcs.WCS(wcshdr)
# # print(exp.wcs.to_header_string())
# for headr in wcshdr.cards:
# exp.fits_model[0].header_model.append(models.Card(headr))
# await exp.write()
# if verb:
# print("wrote ", exp.filename)
# # A debugging aid, demonstrator and simple test run
# # This allows to call this file as an executable from the command line.
# # The last command line argument must be the name of the camera
# # as used in the configuration file.
# # Example
# # BlackflyCam.py [-e seconds] [-v] [-c ../etc/cameras.yaml] [-r 2h10m10s] [-d -20d10m3s]
# # [-K kmirrdegrees] [-s "LCO"|"MPIA"|"APO"|"KHU"] [-f focallengthmm] {spec.age|spec.agw|...}
# if __name__ == "__main__":
# import argparse
# parser = argparse.ArgumentParser()
# parser.add_argument(
# "-e",
# "--exptime",
# type=float,
# default=5.0,
# help="Expose for for exptime seconds",
# )
# parser.add_argument(
# "-v", "--verbose", action="store_true", help="print some notes to stdout"
# )
# # With the -i switch we can add an explicit IP-Adress for a
# # camera if we want to read a camera that is not reachable
# # by the broadcast scanner.
# parser.add_argument("-i", "--ip", help="IP address of camera")
# # Name of an optional YAML file
# parser.add_argument(
# "-c", "--cfg", default="cameras.yaml", help="YAML file of lvmt cameras"
# )
# # right ascension in degrees
# parser.add_argument("-r", "--ra", help="RA J2000 in degrees or in xxhxxmxxs format")
# # declination in degrees
# parser.add_argument(
# "-d", "--dec", help="DEC J2000 in degrees or in +-xxdxxmxxs format"
# )
# # K-mirror angle in degrees
# # Note this is only relevant for 3 of the 4 tables/telescopes
# parser.add_argument("-K", "--Kmirr", type=float, help="K-mirror angle in degrees")
# # focal length of telescope in mm
# # Default is the LCO triple lens configuration of 1.8 meters
# parser.add_argument(
# "-f", "--flen", type=float, default=1839.8, help="focal length in mm"
# )
# # shortcut for site coordinates: observatory
# # parser.add_argument("-s", '--site', default="LCO", help="LCO or MPIA or APO or KHU")
# # the last argument is mandatory: must be the name of exactly one camera
# # as used in the configuration file
# parser.add_argument("camname", default="sci.agw")
# args = parser.parse_args()
# ip_cmdLine = []
# if args.ip is not None:
# ip_cmdLine.append(args.ip)
# # check ranges and combine ra/dec into a single SkyCoord
# if args.ra is not None and args.dec is not None:
# if args.ra.find("h") < 0:
# # apparently simple floating point representation
# targ = astropy.coordinates.SkyCoord(
# ra=float(args.ra), dec=float(args.dec), unit="deg"
# )
# else:
# targ = astropy.coordinates.SkyCoord(args.ra + " " + args.dec)
# else:
# targ = None
# # print(targ)
# # The following 2 lines test that listing the connected cameras works...
# # bsys = BlackflyCameraSystem(camera_class=BlackflyCamera)
# # bsys.list_available_cameras()
# asyncio.run(
# singleFrame(
# args.exptime,
# args.camname,
# verb=args.verbose,
# ip_add=ip_cmdLine,
# config=args.cfg,
# targ=targ,
# kmirr=args.Kmirr,
# flen=args.flen,
# )
# )
class WcsHdrCards(card.MacroCard):
def macro(self, exposure, context={}):
wcshdr = get_wcshdr(modules.variables.cs_list[0], modules.variables.camname, modules.variables.targ, modules.variables.kmirr, modules.variables.flen)
return wcshdr
# @modules.timeit
def get_wcshdr(
cs,
name,
targ,
kmirr,
flen,
):
if targ is not None and kmirr is not None:
# wcshdr = astropy.io.fits.Header()
wcshdr = []
key = astropy.io.fits.Card("CUNIT1", "deg", "WCS units along axis 1")
wcshdr.append(key)
key = astropy.io.fits.Card("CUNIT2", "deg", "WCS units along axis 2")
wcshdr.append(key)
key = astropy.io.fits.Card("CTYPE1", "RA---TAN", "WCS type axis 1")
wcshdr.append(key)
key = astropy.io.fits.Card("CTYPE2", "DEC--TAN", "WCS type axis 2")
wcshdr.append(key)
key = astropy.io.fits.Card("CRVAL1", targ.ra.deg, "[deg] RA at reference pixel")
wcshdr.append(key)
key = astropy.io.fits.Card(
"CRVAL2", targ.dec.deg, "[deg] DEC at reference pixel"
)
wcshdr.append(key)
# field angle: degrees, then radians
# direction of NCP on the detectors (where we have already flipped pixels
# on all detectors so fieldrot=kmirr=0 implies North is up and East is left)
# With right-handed-rule: zero if N=up (y-axis), 90 deg if N=right (x-axis)
# so the direction is the vector ( sin(f), cos(f)) before the K-mirror.
# Action of K-mirror is ( cos(2*m), sin(2*m); sin(2*m), -cos(2*m))
# and action of prism is (-1 0 ; 0 1), i.e. to flip the horizontal coordinate.
# todo: get starting value from a siderostat field rotation tracking model
fieldrot = 0.0
if name[-1] == "c":
# without prism, assuming center camera placed horizontally
if name[:4] == "spec":
# without K-mirror
pass
else:
# with K-mirror
# in the configuration the y-axis of the image has been flipped,
# the combined action of (1, 0; 0, -1) and the K-mirror is (cos(2m), sin(2m); -sin(2m), cos(2m))
# and applied to the input vector this is (sin(2m+f), cos(2m+f))
fieldrot += 2.0 * kmirr
else:
# with prism
if name[:4] == "spec":
# without K-mirror
# Applied to input beam this gives (-sin(f), cos(f)) but prism effect
# had been undone by vertical flip in the FLIR image.
pass
else:
# with K-mirror
# Combined action of K-mirror and prism is (-cos(2*m), -sin(2*m);sin(2*m), -cos(2*m)).
# Applied to input beam this gives (-sin(2*m+f), -cos(2*m+f)) = (sin(2*m+f+pi), cos(2*m+f+pi)).
fieldrot += 2.0 * kmirr + 180.0
if name[-1] == "w":
# Camera is vertically,
# so up in the lab is right in the image
fieldrot += 90
else:
# Camera is vertically,
# so up in the lab is left in the image
fieldrot -= 90
fieldrot = math.radians(fieldrot)
# the section/dictionary of the yaml file for this camera
yamlconfig = cs._config[name]
if flen is None:
flen = yamlconfig["flen"]
# pixel scale per arcseconds is focal length *pi/180 /3600
# = flen * mm *pi/180 /3600
# = flen * um *pi/180 /3.6, so in microns per arcsec...
pixscal = math.radians(flen) / 3.6
# degrees per pixel is arcseconds per pixel/3600 = (mu/pix)/(mu/arcsec)/3600
degperpix = yamlconfig["pixsize"] / pixscal / 3600.0
# for the right handed coordinates
# (pixx,pixy) = (cos f', -sin f'; sin f', cos f')*(DEC,RA) where f' =90deg -fieldrot
# (pixx,pixy) = (sin f, -cos f; cos f , sin f)*(DEC,RA)
# (sin f, cos f; -cos f, sin f)*(pixx,pixy) = (DEC,RA)
# (-cos f, sin f; sin f, cos f)*(pixx,pixy) = (RA,DEC)
# Note that the det of the WCS matrix is negativ (because RA/DEC is left-handed...)
cosperpix = degperpix * math.cos(fieldrot)
sinperpix = degperpix * math.sin(fieldrot)
key = astropy.io.fits.Card("CD1_1", -cosperpix, "[deg/px] WCS matrix diagonal")
wcshdr.append(key)
key = astropy.io.fits.Card("CD2_2", cosperpix, "[deg/px] WCS matrix diagonal")
wcshdr.append(key)
key = astropy.io.fits.Card(
"CD1_2", sinperpix, "[deg/px] WCS matrix outer diagonal"
)
wcshdr.append(key)
key = astropy.io.fits.Card(
"CD2_1", sinperpix, "[deg/px] WCS matrix outer diagonal"
)
wcshdr.append(key)
return wcshdr
else:
return None
| 39.170921 | 157 | 0.572096 |
import sys
import math
import asyncio
import numpy
import astropy
from basecam.mixins import ImageAreaMixIn
from basecam import (
CameraSystem,
BaseCamera,
CameraEvent,
CameraConnectionError,
models,
ExposureError,
)
from lvmcam.actor import modules
from lvmcam.araviscam.aravis import Aravis
import basecam.models.card as card
from lvmcam.actor.commands import expose
__all__ = ["BlackflyCameraSystem", "BlackflyCamera", "BlackflyImageAreaMixIn"]
class BlackflyCameraSystem(CameraSystem):
__version__ = "0.0.301"
ips_nonlocal = []
def __init__(
self,
camera_class=None,
camera_config=None,
include=None,
exclude=None,
logger=None,
log_header=None,
log_file=None,
verbose=False,
ip_list=None,
):
super().__init__(
camera_class=camera_class,
camera_config=camera_config,
include=include,
exclude=exclude,
logger=logger,
log_header=log_header,
log_file=log_file,
verbose=verbose,
)
if ip_list is not None:
self.ips_nonlocal.extend(ip_list)
def list_available_cameras(self):
serialNums = []
addrs = []
Aravis.update_device_list()
Ndev = Aravis.get_n_devices()
for i in range(Ndev):
cam = Aravis.Camera.new(Aravis.get_device_id(i))
uid = cam.get_string("DeviceSerialNumber")
serialNums.append(uid)
addrs.append("")
for ip in self.ips_nonlocal:
try:
cam = Aravis.Camera.new(ip)
uid = cam.get_string("DeviceSerialNumber")
if uid not in serialNums:
serialNums.append(uid)
addrs.append("@" + ip)
except:
pass
ids = []
for cam in range(len(serialNums)):
ids.append(serialNums[cam] + addrs[cam])
return ids
from basecam.models.builtin import basic_fz_fits_model
class BlackflyCamera(BaseCamera):
def __init__(
self,
uid,
camera_system,
name=None,
force=False,
image_namer=None,
camera_params={},
):
super().__init__(
uid=uid,
camera_system=camera_system,
name=name,
force=force,
image_namer=image_namer,
camera_params=camera_params,
)
self.header = []
@modules.atimeit
async def _connect_internal(self, **kwargs):
try:
uid = kwargs["uid"]
except:
uid = None
cs = BlackflyCameraSystem(BlackflyCamera)
slist = cs.list_available_cameras()
if uid is None:
idx = 0
else:
idx = -1
for id in slist:
slistuid = id.split("@")[0]
if slistuid == uid:
idx = slist.index(id)
if idx < 0:
raise CameraConnectionError("SN " + uid + " not connected")
cam = None
try:
if "@" in slist[idx]:
cam = Aravis.Camera.new(slist[idx].split("@")[1])
else:
cam = Aravis.Camera.new(Aravis.get_device_id(idx))
except:
raise CameraConnectionError(" not connected")
try:
gain = kwargs["gain"]
if gain > 0.0:
self.device.set_gain_auto(0)
cam.set_gain(gain)
except Exception as ex:
# print("failed to set gain " + str(ex))
pass
# see arvenums.h for the list of pixel formats. This is MONO_16 here, always
cam.set_pixel_format(0x01100007)
# search for an optional x and y binning factor
try:
var = kwargs["binning"]
cam.set_binning(var[0], var[1])
except Exception as ex:
# print("failed to set binning " + str(ex))
# horizontal and vertical binning set to 1
cam.set_binning(1, 1)
# scan the general list of genicam featured values
# of the four native types
for typp, arvLst in kwargs.items():
if arvLst is not None:
if typp == "bool":
for genkey, genval in arvLst.items():
try:
cam.set_boolean(genkey, int(genval))
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
elif typp == "int":
for genkey, genval in arvLst.items():
try:
cam.set_integer(genkey, genval)
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
elif typp == "float":
for genkey, genval in arvLst.items():
try:
cam.set_float(genkey, genval)
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
elif typp == "string":
for genkey, genval in arvLst.items():
try:
cam.set_string(genkey, genval)
except:
# probably a typo in the yaml file... todo: log this
# print("failed for " + str(genkey)+str(genval))
pass
dev = cam.get_device()
# Take full frames by default (maximizing probability of LVM guide camera
# to find guide stars in the field)
roiBounds = [-1, -1]
try:
roiBounds[0] = dev.get_integer_feature_value("WidthMax")
roiBounds[1] = dev.get_integer_feature_value("HeightMax")
# print(" ROI " + str(roiBounds[0]) + " x " + str(roiBounds[1]) )
cam.set_region(0, 0, roiBounds[0], roiBounds[1])
except Exception as ex:
# print("failed to set ROI " + str(ex))
pass
self.device = cam
self.regionBounds = roiBounds
@modules.atimeit
async def _disconnect_internal(self):
self.device = None
# @modules.atimeit
async def _expose_grabFrame(self, exposure):
# To avoid being left over by other programs with no change
# to set the exposure time, we switch the auto=0=off first
self.device.set_exposure_time_auto(0)
# Aravis assumes exptime in micro second integers
exptime_ms = int(0.5 + exposure.exptime * 1e6)
self.device.set_exposure_time(exptime_ms)
# timeout (factor 2: assuming there may be two frames in auto mode taken
# internally)
# And 5 seconds margin for any sort of transmission overhead over PoE
tout_ms = int(1.0e6 * (2.0 * exposure.exptime + 5))
self.notify(CameraEvent.EXPOSURE_INTEGRATING)
# the buffer allocated/created within the acquisition()
buf = await self.loop.run_in_executor(None, self.device.acquisition, tout_ms)
if buf is None:
raise ExposureError(
"Exposing for "
+ str(exposure.exptime)
+ " sec failed. Timout "
+ str(tout_ms / 1.0e6)
)
# Decipher which methods this aravis buffer has...
# print(dir(buf))
# reg becomes a x=, y=, width= height= dictionary
# these are in standard X11 coordinates where upper left =(0,0)
reg = buf.get_image_region()
# print('region',reg)
data = buf.get_data()
exposure.data = numpy.ndarray(
buffer=data, dtype=numpy.uint16, shape=(1, reg.height, reg.width)
)
# print("exposure data shape", exposure.data.shape)
return reg
@modules.atimeit
async def _expose_internal(self, exposure):
# fill exposure.data with the frame's 16bit data
reg = await self._expose_grabFrame(exposure)
binxy = {}
try:
binxy = self.device.get_binning()
except Exception as ex:
binxy = None
addHeaders = [
("BinX", binxy.dx, "[ct] Horizontal Bin Factor 1, 2 or 4"),
("BinY", binxy.dy, "[ct] Vertical Bin Factor 1, 2 or 4"),
("Width", reg.width, "[ct] Pixel Columns"),
("Height", reg.height, "[ct] Pixel Rows"),
("RegX", 1 + reg.x, "[ct] Pixel Region Horiz start"),
(
"RegY",
self.regionBounds[1] - (reg.y + reg.height - 1),
"[ct] Pixel Region Vert start",
),
]
dev = self.device.get_device()
try:
gain = dev.get_float_feature_value("Gain")
addHeaders.append(("Gain", gain, "Gain"))
except Exception as ex:
pass
imgrev = [False, False]
try:
imgrev[0] = self.device.get_boolean("ReverseX")
addHeaders.append(("ReverseX", imgrev[0] != 0, " Flipped left-right"))
imgrev[1] = self.device.get_boolean("ReverseY")
addHeaders.append(("ReverseY", imgrev[1] != 0, " Flipped up-down"))
except Exception as ex:
pass
binMod = [-1, -1]
try:
binMod[0] = dev.get_integer_feature_value("BinningHorizontalMode")
if binMod[0] == 0:
addHeaders.append(
("BinModeX", "Averag", "Horiz Bin Mode Sum or Averag")
)
else:
addHeaders.append(("BinModeX", "Sum", "Horiz Bin Mode Sum or Averag"))
binMod[1] = dev.get_integer_feature_value("BinningVerticalMode")
if binMod[1] == 0:
addHeaders.append(("BinModeY", "Averag", "Vert Bin Mode Sum or Averag"))
else:
addHeaders.append(("BinModeY", "Sum", "Vert Bin Mode Sum or Averag"))
except Exception as ex:
pass
tmp = False
try:
tmp = self.device.get_boolean("BlackLevelClampingEnable")
addHeaders.append(
("CAMBLCLM", tmp != 0, "Black Level Clamping en/disabled")
)
except Exception as ex:
pass
try:
camtyp = self.device.get_model_name()
addHeaders.append(("CAMTYP", camtyp, "Camera model"))
except:
pass
addHeaders.extend(self._expose_wcs(exposure, reg))
self.header = addHeaders
return
def _expose_wcs(self, exposure, reg):
yamlconfig = self.camera_system._config[self.name]
wcsHeaders = []
wcsHeaders.append(("CRPIX1", reg.width / 2, "[px] RA center along axis 1"))
if self.name[-1] == "c":
wcsHeaders.append(
("CRPIX2", reg.height / 2, "[px] DEC center along axis 2")
)
else:
crefy = 11.14471 * 1000.0 / yamlconfig["pixsize"]
wcsHeaders.append(("CRPIX2", -crefy, "[px] DEC center along axis 2"))
return wcsHeaders
class BlackflyImageAreaMixIn(ImageAreaMixIn):
async def _get_image_area_internal(self):
pass
async def _set_image_area_internal(self, area=None):
pass
async def _get_binning_internal(self):
pass
async def _set_binning_internal(self, hbin, vbin):
pass
# :param exptim: The exposure time in seconds. Non-negative.
# :type exptim: float
# :param verb: Verbosity on or off
# :type verb: boolean
# :param ip_add: list of explicit IP's (like 192.168.70.51 or lvmt.irws2.mpia.de)
# :type ip_add: list of strings
# :param config: Name of the YAML file with the cameras configuration
# :type config: string of the file name
# :param targ: alpha/delta ra/dec of the sidereal target
# :type targ: astropy.coordinates.SkyCoord
# :param kmirr: Kmirr angle in degrees (0 if up, positive with right hand rule along North on bench)
# :type kmirr: float
# :param flen: focal length of telescope/siderostat in mm
# If not provided it will be taken from the configuration file
# :type flen: float
# """
# cs = BlackflyCameraSystem(
# BlackflyCamera, camera_config=config, verbose=verb, ip_list=ip_add
# )
# cam = await cs.add_camera(name=name)
# # print("cameras", cs.cameras)
# # print("config" ,config)
# exp = await cam.expose(exptim, "LAB TEST")
# if targ is not None and kmirr is not None:
# # if there is already a (partial) header information, keep it,
# # otherwise create one ab ovo.
# if exp.wcs is None:
# wcshdr = astropy.io.fits.Header()
# else:
# wcshdr = exp.wcs.to_header()
# key = astropy.io.fits.Card("CUNIT1", "deg", "WCS units along axis 1")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CUNIT2", "deg", "WCS units along axis 2")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CTYPE1", "RA---TAN", "WCS type axis 1")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CTYPE2", "DEC--TAN", "WCS type axis 2")
# wcshdr.append(key)
# key = astropy.io.fits.Card("CRVAL1", targ.ra.deg, "[deg] RA at reference pixel")
# wcshdr.append(key)
# key = astropy.io.fits.Card(
# "CRVAL2", targ.dec.deg, "[deg] DEC at reference pixel"
# )
# wcshdr.append(key)
# # field angle: degrees, then radians
# # direction of NCP on the detectors (where we have already flipped pixels
# # on all detectors so fieldrot=kmirr=0 implies North is up and East is left)
# # With right-handed-rule: zero if N=up (y-axis), 90 deg if N=right (x-axis)
# # so the direction is the vector ( sin(f), cos(f)) before the K-mirror.
# # Action of K-mirror is ( cos(2*m), sin(2*m); sin(2*m), -cos(2*m))
# # and action of prism is (-1 0 ; 0 1), i.e. to flip the horizontal coordinate.
# # todo: get starting value from a siderostat field rotation tracking model
# fieldrot = 0.0
# if name[-1] == "c":
# # without prism, assuming center camera placed horizontally
# if name[:4] == "spec":
# # without K-mirror
# pass
# else:
# # with K-mirror
# # in the configuration the y-axis of the image has been flipped,
# # the combined action of (1, 0; 0, -1) and the K-mirror is (cos(2m), sin(2m); -sin(2m), cos(2m))
# # and applied to the input vector this is (sin(2m+f), cos(2m+f))
# fieldrot += 2.0 * kmirr
# else:
# # with prism
# if name[:4] == "spec":
# # without K-mirror
# # Applied to input beam this gives (-sin(f), cos(f)) but prism effect
# # had been undone by vertical flip in the FLIR image.
# pass
# else:
# # with K-mirror
# # Combined action of K-mirror and prism is (-cos(2*m), -sin(2*m);sin(2*m), -cos(2*m)).
# # Applied to input beam this gives (-sin(2*m+f), -cos(2*m+f)) = (sin(2*m+f+pi), cos(2*m+f+pi)).
# fieldrot += 2.0 * kmirr + 180.0
# if name[-1] == "w":
# # Camera is vertically,
# # so up in the lab is right in the image
# fieldrot += 90
# else:
# # Camera is vertically,
# # so up in the lab is left in the image
# fieldrot -= 90
# fieldrot = math.radians(fieldrot)
# # the section/dictionary of the yaml file for this camera
# yamlconfig = cs._config[name]
# if flen is None:
# flen = yamlconfig["flen"]
# # pixel scale per arcseconds is focal length *pi/180 /3600
# # = flen * mm *pi/180 /3600
# # = flen * um *pi/180 /3.6, so in microns per arcsec...
# pixscal = math.radians(flen) / 3.6
# # degrees per pixel is arcseconds per pixel/3600 = (mu/pix)/(mu/arcsec)/3600
# degperpix = yamlconfig["pixsize"] / pixscal / 3600.0
# # for the right handed coordinates
# # (pixx,pixy) = (cos f', -sin f'; sin f', cos f')*(DEC,RA) where f' =90deg -fieldrot
pass
else:
fieldrot += 2.0 * kmirr + 180.0
if name[-1] == "w":
fieldrot += 90
else:
fieldrot -= 90
fieldrot = math.radians(fieldrot)
yamlconfig = cs._config[name]
if flen is None:
flen = yamlconfig["flen"]
pixscal = math.radians(flen) / 3.6
degperpix = yamlconfig["pixsize"] / pixscal / 3600.0
# (pixx,pixy) = (sin f, -cos f; cos f , sin f)*(DEC,RA)
# (sin f, cos f; -cos f, sin f)*(pixx,pixy) = (DEC,RA)
# (-cos f, sin f; sin f, cos f)*(pixx,pixy) = (RA,DEC)
# Note that the det of the WCS matrix is negativ (because RA/DEC is left-handed...)
cosperpix = degperpix * math.cos(fieldrot)
sinperpix = degperpix * math.sin(fieldrot)
key = astropy.io.fits.Card("CD1_1", -cosperpix, "[deg/px] WCS matrix diagonal")
wcshdr.append(key)
key = astropy.io.fits.Card("CD2_2", cosperpix, "[deg/px] WCS matrix diagonal")
wcshdr.append(key)
key = astropy.io.fits.Card(
"CD1_2", sinperpix, "[deg/px] WCS matrix outer diagonal"
)
wcshdr.append(key)
key = astropy.io.fits.Card(
"CD2_1", sinperpix, "[deg/px] WCS matrix outer diagonal"
)
wcshdr.append(key)
return wcshdr
else:
return None
| true | true |
1c37d00a7158416e048e17ac299d114678c5fcb7 | 548 | py | Python | is-prime/solution.py | astone648/CodeWars | 13f0bf9108433909abd5cf7270515cc63a06ebd1 | [
"MIT"
] | null | null | null | is-prime/solution.py | astone648/CodeWars | 13f0bf9108433909abd5cf7270515cc63a06ebd1 | [
"MIT"
] | null | null | null | is-prime/solution.py | astone648/CodeWars | 13f0bf9108433909abd5cf7270515cc63a06ebd1 | [
"MIT"
] | null | null | null | from random import *
import math
def is_prime(num):
    """Return True if *num* is a prime number, False otherwise.

    Numbers below 2 (including 0, 1 and negatives) are not prime; 2 is the
    only even prime.  Odd candidates are trial-divided by odd divisors up
    to sqrt(num).
    """
    if num < 2:
        # 0, 1 and all negatives are not prime (matches the original's
        # num < 1 / num == 1 branches).
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        # Even numbers greater than 2 are composite.
        return False
    # math.isqrt is exact for large ints, unlike int(math.sqrt(num) // 1),
    # and stepping by 2 skips even divisors that can never divide an odd num.
    for n in range(3, math.isqrt(num) + 1, 2):
        if num % n == 0:
            return False
    return True
def testPrime(num):
    """Print a one-line verdict saying whether *num* is prime."""
    verdict = ' is prime.' if is_prime(num) else ' is not prime.'
    print(str(num) + verdict)
# Smoke test: classify a batch of random integers drawn from [0, 100).
randArrayLength = 25
for _ in range(randArrayLength):
    testPrime(randrange(0, 100))
| 21.076923 | 51 | 0.54562 | from random import *
import math
def is_prime(num):
    """Return True when *num* is prime, using trial division up to sqrt(num)."""
    if num < 1 or num == 1:
        return False
    if num > 2 and num % 2 == 0:
        return False
    # Same bound as the original: int(math.sqrt(num) // 1) + 1.
    limit = int(math.sqrt(num) // 1) + 1
    divisor = 3
    while divisor < limit:
        if num % divisor == 0:
            return False
        divisor += 1
    return True
def testPrime(num):
    """Print a one-line verdict saying whether *num* is prime."""
    if is_prime(num):
        print(str(num) + ' is prime.')
    else:
        print(str(num) + ' is not prime.')
# Ad-hoc check: classify 25 random integers drawn from [0, 100).
randArrayLength = 25
for n in range(randArrayLength):
    testPrime(randrange(0, 100))
| true | true |
1c37d0215a8ae853b15095889bc7d195c9b05519 | 10,236 | py | Python | homeassistant/components/emulated_hue/__init__.py | dzmitov/core | 7697ef7f5ec357ae5ab76237dc52af55fc044c36 | [
"Apache-2.0"
] | 1 | 2021-01-14T11:42:12.000Z | 2021-01-14T11:42:12.000Z | homeassistant/components/emulated_hue/__init__.py | dzmitov/core | 7697ef7f5ec357ae5ab76237dc52af55fc044c36 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/emulated_hue/__init__.py | dzmitov/core | 7697ef7f5ec357ae5ab76237dc52af55fc044c36 | [
"Apache-2.0"
] | 1 | 2020-09-23T16:41:16.000Z | 2020-09-23T16:41:16.000Z | """Support for local control of entities by emulating a Philips Hue bridge."""
import logging
from aiohttp import web
import voluptuous as vol
from homeassistant import util
from homeassistant.components.http import real_ip
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
from .hue_api import (
HueAllGroupsStateView,
HueAllLightsStateView,
HueFullStateView,
HueGroupView,
HueOneLightChangeView,
HueOneLightStateView,
HueUnauthorizedUser,
HueUsernameView,
)
from .upnp import DescriptionXmlView, UPNPResponderThread
DOMAIN = "emulated_hue"

_LOGGER = logging.getLogger(__name__)

# File (under the HA config dir) persisting the bridge-number -> entity_id map.
NUMBERS_FILE = "emulated_hue_ids.json"

# configuration.yaml option keys for the emulated_hue section.
CONF_ADVERTISE_IP = "advertise_ip"
CONF_ADVERTISE_PORT = "advertise_port"
CONF_ENTITIES = "entities"
CONF_ENTITY_HIDDEN = "hidden"
CONF_ENTITY_NAME = "name"
CONF_EXPOSE_BY_DEFAULT = "expose_by_default"
CONF_EXPOSED_DOMAINS = "exposed_domains"
CONF_HOST_IP = "host_ip"
CONF_LISTEN_PORT = "listen_port"
CONF_OFF_MAPS_TO_ON_DOMAINS = "off_maps_to_on_domains"
CONF_TYPE = "type"
CONF_UPNP_BIND_MULTICAST = "upnp_bind_multicast"

# Supported assistant types for the "type" option.
TYPE_ALEXA = "alexa"
TYPE_GOOGLE = "google_home"

# Defaults applied when an option is absent from the config.
DEFAULT_LISTEN_PORT = 8300
DEFAULT_UPNP_BIND_MULTICAST = True
DEFAULT_OFF_MAPS_TO_ON_DOMAINS = ["script", "scene"]
DEFAULT_EXPOSE_BY_DEFAULT = True
DEFAULT_EXPOSED_DOMAINS = [
    "switch",
    "light",
    "group",
    "input_boolean",
    "media_player",
    "fan",
]
DEFAULT_TYPE = TYPE_GOOGLE

# Schema for one per-entity override entry (optional name / hidden flag).
CONFIG_ENTITY_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_ENTITY_NAME): cv.string,
        vol.Optional(CONF_ENTITY_HIDDEN): cv.boolean,
    }
)

# Schema for the whole emulated_hue configuration section.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_HOST_IP): cv.string,
                vol.Optional(CONF_LISTEN_PORT, default=DEFAULT_LISTEN_PORT): cv.port,
                vol.Optional(CONF_ADVERTISE_IP): cv.string,
                vol.Optional(CONF_ADVERTISE_PORT): cv.port,
                vol.Optional(CONF_UPNP_BIND_MULTICAST): cv.boolean,
                vol.Optional(CONF_OFF_MAPS_TO_ON_DOMAINS): cv.ensure_list,
                vol.Optional(CONF_EXPOSE_BY_DEFAULT): cv.boolean,
                vol.Optional(CONF_EXPOSED_DOMAINS): cv.ensure_list,
                vol.Optional(CONF_TYPE, default=DEFAULT_TYPE): vol.Any(
                    TYPE_ALEXA, TYPE_GOOGLE
                ),
                vol.Optional(CONF_ENTITIES): vol.Schema(
                    {cv.entity_id: CONFIG_ENTITY_SCHEMA}
                ),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# State attribute that overrides the name reported to the assistant.
ATTR_EMULATED_HUE_NAME = "emulated_hue_name"
async def async_setup(hass, yaml_config):
    """Activate the emulated_hue component.

    Builds an aiohttp application exposing the Hue-bridge HTTP endpoints
    (description XML, pairing, light/group state) and a UPnP responder
    thread so assistant devices can discover the fake bridge.  The HTTP
    server itself is only started on EVENT_HOMEASSISTANT_START.
    """
    config = Config(hass, yaml_config.get(DOMAIN, {}))
    app = web.Application()
    app["hass"] = hass
    real_ip.setup_real_ip(app, False, [])
    # We misunderstood the startup signal. You're not allowed to change
    # anything during startup. Temp workaround.
    # pylint: disable=protected-access
    app._on_startup.freeze()
    await app.startup()
    # Filled in by start_emulated_hue_bridge below (nonlocal).
    runner = None
    site = None
    # Register the Hue-bridge HTTP views on the app's router.
    DescriptionXmlView(config).register(app, app.router)
    HueUsernameView().register(app, app.router)
    HueUnauthorizedUser().register(app, app.router)
    HueAllLightsStateView(config).register(app, app.router)
    HueOneLightStateView(config).register(app, app.router)
    HueOneLightChangeView(config).register(app, app.router)
    HueAllGroupsStateView(config).register(app, app.router)
    HueGroupView(config).register(app, app.router)
    HueFullStateView(config).register(app, app.router)
    upnp_listener = UPNPResponderThread(
        config.host_ip_addr,
        config.listen_port,
        config.upnp_bind_multicast,
        config.advertise_ip,
        config.advertise_port,
    )

    async def stop_emulated_hue_bridge(event):
        """Stop the emulated hue bridge."""
        upnp_listener.stop()
        if site:
            await site.stop()
        if runner:
            await runner.cleanup()

    async def start_emulated_hue_bridge(event):
        """Start the emulated hue bridge."""
        upnp_listener.start()
        nonlocal site
        nonlocal runner
        runner = web.AppRunner(app)
        await runner.setup()
        site = web.TCPSite(runner, config.host_ip_addr, config.listen_port)
        try:
            await site.start()
        except OSError as error:
            _LOGGER.error(
                "Failed to create HTTP server at port %d: %s", config.listen_port, error
            )
        else:
            # Only arm the stop handler once the server actually started.
            hass.bus.async_listen_once(
                EVENT_HOMEASSISTANT_STOP, stop_emulated_hue_bridge
            )

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_emulated_hue_bridge)
    return True
class Config:
    """Hold configuration variables for the emulated hue bridge."""

    def __init__(self, hass, conf):
        """Initialize the instance from the emulated_hue config dict."""
        self.hass = hass
        self.type = conf.get(CONF_TYPE)
        # Lazily loaded number<->entity_id mapping (see entity_id_to_number).
        self.numbers = None
        self.cached_states = {}

        if self.type == TYPE_ALEXA:
            _LOGGER.warning(
                "Emulated Hue running in legacy mode because type has been "
                "specified. More info at https://goo.gl/M6tgz8"
            )

        # Get the IP address that will be passed to the Echo during discovery
        self.host_ip_addr = conf.get(CONF_HOST_IP)
        if self.host_ip_addr is None:
            self.host_ip_addr = util.get_local_ip()
            _LOGGER.info(
                "Listen IP address not specified, auto-detected address is %s",
                self.host_ip_addr,
            )

        # Get the port that the Hue bridge will listen on
        self.listen_port = conf.get(CONF_LISTEN_PORT)
        if not isinstance(self.listen_port, int):
            self.listen_port = DEFAULT_LISTEN_PORT
            _LOGGER.info(
                "Listen port not specified, defaulting to %s", self.listen_port
            )

        # Get whether or not UPNP binds to multicast address (239.255.255.250)
        # or to the unicast address (host_ip_addr)
        self.upnp_bind_multicast = conf.get(
            CONF_UPNP_BIND_MULTICAST, DEFAULT_UPNP_BIND_MULTICAST
        )

        # Get domains that cause both "on" and "off" commands to map to "on"
        # This is primarily useful for things like scenes or scripts, which
        # don't really have a concept of being off
        self.off_maps_to_on_domains = conf.get(CONF_OFF_MAPS_TO_ON_DOMAINS)
        if not isinstance(self.off_maps_to_on_domains, list):
            self.off_maps_to_on_domains = DEFAULT_OFF_MAPS_TO_ON_DOMAINS

        # Get whether or not entities should be exposed by default, or if only
        # explicitly marked ones will be exposed
        self.expose_by_default = conf.get(
            CONF_EXPOSE_BY_DEFAULT, DEFAULT_EXPOSE_BY_DEFAULT
        )

        # Get domains that are exposed by default when expose_by_default is
        # True
        self.exposed_domains = set(
            conf.get(CONF_EXPOSED_DOMAINS, DEFAULT_EXPOSED_DOMAINS)
        )

        # Calculated effective advertised IP and port for network isolation
        self.advertise_ip = conf.get(CONF_ADVERTISE_IP) or self.host_ip_addr
        self.advertise_port = conf.get(CONF_ADVERTISE_PORT) or self.listen_port

        self.entities = conf.get(CONF_ENTITIES, {})

        # Pre-extract per-entity "hidden" overrides so is_entity_exposed()
        # can answer with a single dict lookup.
        self._entities_with_hidden_attr_in_config = {}
        for entity_id in self.entities:
            hidden_value = self.entities[entity_id].get(CONF_ENTITY_HIDDEN, None)
            if hidden_value is not None:
                self._entities_with_hidden_attr_in_config[entity_id] = hidden_value

    def entity_id_to_number(self, entity_id):
        """Get a unique number for the entity id."""
        if self.type == TYPE_ALEXA:
            # Alexa mode addresses entities by their id directly.
            return entity_id

        if self.numbers is None:
            # Lazily load the persisted number -> entity_id mapping.
            self.numbers = _load_json(self.hass.config.path(NUMBERS_FILE))

        # Google Home
        for number, ent_id in self.numbers.items():
            if entity_id == ent_id:
                return number

        # Unknown entity: allocate the next free number and persist the map.
        number = "1"
        if self.numbers:
            number = str(max(int(k) for k in self.numbers) + 1)
        self.numbers[number] = entity_id
        save_json(self.hass.config.path(NUMBERS_FILE), self.numbers)
        return number

    def number_to_entity_id(self, number):
        """Convert unique number to entity id."""
        if self.type == TYPE_ALEXA:
            return number

        if self.numbers is None:
            self.numbers = _load_json(self.hass.config.path(NUMBERS_FILE))

        # Google Home
        assert isinstance(number, str)
        return self.numbers.get(number)

    def get_entity_name(self, entity):
        """Get the name of an entity.

        Precedence: explicit name from YAML config, then the
        emulated_hue_name state attribute, then the entity's own name.
        """
        if (
            entity.entity_id in self.entities
            and CONF_ENTITY_NAME in self.entities[entity.entity_id]
        ):
            return self.entities[entity.entity_id][CONF_ENTITY_NAME]

        return entity.attributes.get(ATTR_EMULATED_HUE_NAME, entity.name)

    def is_entity_exposed(self, entity):
        """Determine if an entity should be exposed on the emulated bridge.

        Async friendly.
        """
        if entity.attributes.get("view") is not None:
            # Ignore entities that are views
            return False

        # Explicit per-entity hidden override wins over all defaults.
        if entity.entity_id in self._entities_with_hidden_attr_in_config:
            return not self._entities_with_hidden_attr_in_config[entity.entity_id]

        if not self.expose_by_default:
            return False
        # Expose an entity if the entity's domain is exposed by default and
        # the configuration doesn't explicitly exclude it from being
        # exposed, or if the entity is explicitly exposed
        if entity.domain in self.exposed_domains:
            return True

        return False
def _load_json(filename):
    """Return JSON loaded from *filename*, or {} if load_json raises HomeAssistantError."""
    try:
        data = load_json(filename)
    except HomeAssistantError:
        return {}
    return data
| 33.126214 | 88 | 0.661293 | import logging
from aiohttp import web
import voluptuous as vol
from homeassistant import util
from homeassistant.components.http import real_ip
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
from .hue_api import (
HueAllGroupsStateView,
HueAllLightsStateView,
HueFullStateView,
HueGroupView,
HueOneLightChangeView,
HueOneLightStateView,
HueUnauthorizedUser,
HueUsernameView,
)
from .upnp import DescriptionXmlView, UPNPResponderThread
DOMAIN = "emulated_hue"

_LOGGER = logging.getLogger(__name__)

# File (under the HA config dir) persisting the bridge-number -> entity_id map.
NUMBERS_FILE = "emulated_hue_ids.json"

# configuration.yaml option keys for the emulated_hue section.
CONF_ADVERTISE_IP = "advertise_ip"
CONF_ADVERTISE_PORT = "advertise_port"
CONF_ENTITIES = "entities"
CONF_ENTITY_HIDDEN = "hidden"
CONF_ENTITY_NAME = "name"
CONF_EXPOSE_BY_DEFAULT = "expose_by_default"
CONF_EXPOSED_DOMAINS = "exposed_domains"
CONF_HOST_IP = "host_ip"
CONF_LISTEN_PORT = "listen_port"
CONF_OFF_MAPS_TO_ON_DOMAINS = "off_maps_to_on_domains"
CONF_TYPE = "type"
CONF_UPNP_BIND_MULTICAST = "upnp_bind_multicast"

# Supported assistant types for the "type" option.
TYPE_ALEXA = "alexa"
TYPE_GOOGLE = "google_home"

# Defaults applied when an option is absent from the config.
DEFAULT_LISTEN_PORT = 8300
DEFAULT_UPNP_BIND_MULTICAST = True
DEFAULT_OFF_MAPS_TO_ON_DOMAINS = ["script", "scene"]
DEFAULT_EXPOSE_BY_DEFAULT = True
DEFAULT_EXPOSED_DOMAINS = [
    "switch",
    "light",
    "group",
    "input_boolean",
    "media_player",
    "fan",
]
DEFAULT_TYPE = TYPE_GOOGLE

# Schema for one per-entity override entry (optional name / hidden flag).
CONFIG_ENTITY_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_ENTITY_NAME): cv.string,
        vol.Optional(CONF_ENTITY_HIDDEN): cv.boolean,
    }
)

# Schema for the whole emulated_hue configuration section.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_HOST_IP): cv.string,
                vol.Optional(CONF_LISTEN_PORT, default=DEFAULT_LISTEN_PORT): cv.port,
                vol.Optional(CONF_ADVERTISE_IP): cv.string,
                vol.Optional(CONF_ADVERTISE_PORT): cv.port,
                vol.Optional(CONF_UPNP_BIND_MULTICAST): cv.boolean,
                vol.Optional(CONF_OFF_MAPS_TO_ON_DOMAINS): cv.ensure_list,
                vol.Optional(CONF_EXPOSE_BY_DEFAULT): cv.boolean,
                vol.Optional(CONF_EXPOSED_DOMAINS): cv.ensure_list,
                vol.Optional(CONF_TYPE, default=DEFAULT_TYPE): vol.Any(
                    TYPE_ALEXA, TYPE_GOOGLE
                ),
                vol.Optional(CONF_ENTITIES): vol.Schema(
                    {cv.entity_id: CONFIG_ENTITY_SCHEMA}
                ),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# State attribute that overrides the name reported to the assistant.
ATTR_EMULATED_HUE_NAME = "emulated_hue_name"
async def async_setup(hass, yaml_config):
    """Activate the emulated_hue component.

    Builds an aiohttp application with the Hue-bridge HTTP views and a
    UPnP responder thread; the HTTP server starts on
    EVENT_HOMEASSISTANT_START and stops on EVENT_HOMEASSISTANT_STOP.
    """
    config = Config(hass, yaml_config.get(DOMAIN, {}))
    app = web.Application()
    app["hass"] = hass
    real_ip.setup_real_ip(app, False, [])
    # You're not allowed to change the app during startup; freeze and run
    # startup now as a workaround.
    # anything during startup. Temp workaround.
    # pylint: disable=protected-access
    app._on_startup.freeze()
    await app.startup()
    # Filled in by start_emulated_hue_bridge below (nonlocal).
    runner = None
    site = None
    # Register the Hue-bridge HTTP views on the app's router.
    DescriptionXmlView(config).register(app, app.router)
    HueUsernameView().register(app, app.router)
    HueUnauthorizedUser().register(app, app.router)
    HueAllLightsStateView(config).register(app, app.router)
    HueOneLightStateView(config).register(app, app.router)
    HueOneLightChangeView(config).register(app, app.router)
    HueAllGroupsStateView(config).register(app, app.router)
    HueGroupView(config).register(app, app.router)
    HueFullStateView(config).register(app, app.router)
    upnp_listener = UPNPResponderThread(
        config.host_ip_addr,
        config.listen_port,
        config.upnp_bind_multicast,
        config.advertise_ip,
        config.advertise_port,
    )

    async def stop_emulated_hue_bridge(event):
        """Stop the UPnP responder and tear down the HTTP server."""
        upnp_listener.stop()
        if site:
            await site.stop()
        if runner:
            await runner.cleanup()

    async def start_emulated_hue_bridge(event):
        """Start the UPnP responder and bring up the HTTP server."""
        upnp_listener.start()
        nonlocal site
        nonlocal runner
        runner = web.AppRunner(app)
        await runner.setup()
        site = web.TCPSite(runner, config.host_ip_addr, config.listen_port)
        try:
            await site.start()
        except OSError as error:
            _LOGGER.error(
                "Failed to create HTTP server at port %d: %s", config.listen_port, error
            )
        else:
            # Only arm the stop handler once the server actually started.
            hass.bus.async_listen_once(
                EVENT_HOMEASSISTANT_STOP, stop_emulated_hue_bridge
            )

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_emulated_hue_bridge)
    return True
class Config:
    """Hold configuration variables for the emulated hue bridge."""

    def __init__(self, hass, conf):
        """Initialize the instance from the emulated_hue config dict."""
        self.hass = hass
        self.type = conf.get(CONF_TYPE)
        # Lazily loaded number<->entity_id mapping (see entity_id_to_number).
        self.numbers = None
        self.cached_states = {}
        if self.type == TYPE_ALEXA:
            _LOGGER.warning(
                "Emulated Hue running in legacy mode because type has been "
                "specified. More info at https://goo.gl/M6tgz8"
            )
        # Get the IP address that will be passed to the Echo during discovery
        self.host_ip_addr = conf.get(CONF_HOST_IP)
        if self.host_ip_addr is None:
            self.host_ip_addr = util.get_local_ip()
            _LOGGER.info(
                "Listen IP address not specified, auto-detected address is %s",
                self.host_ip_addr,
            )
        # Get the port that the Hue bridge will listen on
        self.listen_port = conf.get(CONF_LISTEN_PORT)
        if not isinstance(self.listen_port, int):
            self.listen_port = DEFAULT_LISTEN_PORT
            _LOGGER.info(
                "Listen port not specified, defaulting to %s", self.listen_port
            )
        # Get whether or not UPNP binds to multicast address (239.255.255.250)
        # or to the unicast address (host_ip_addr)
        self.upnp_bind_multicast = conf.get(
            CONF_UPNP_BIND_MULTICAST, DEFAULT_UPNP_BIND_MULTICAST
        )
        # Get domains that cause both "on" and "off" commands to map to "on"
        # This is primarily useful for things like scenes or scripts, which
        # don't really have a concept of being off
        self.off_maps_to_on_domains = conf.get(CONF_OFF_MAPS_TO_ON_DOMAINS)
        if not isinstance(self.off_maps_to_on_domains, list):
            self.off_maps_to_on_domains = DEFAULT_OFF_MAPS_TO_ON_DOMAINS
        # Whether entities are exposed by default or only when marked.
        self.expose_by_default = conf.get(
            CONF_EXPOSE_BY_DEFAULT, DEFAULT_EXPOSE_BY_DEFAULT
        )
        # Domains exposed by default when expose_by_default is True.
        self.exposed_domains = set(
            conf.get(CONF_EXPOSED_DOMAINS, DEFAULT_EXPOSED_DOMAINS)
        )
        # Effective advertised IP and port (fall back to listen values).
        self.advertise_ip = conf.get(CONF_ADVERTISE_IP) or self.host_ip_addr
        self.advertise_port = conf.get(CONF_ADVERTISE_PORT) or self.listen_port
        self.entities = conf.get(CONF_ENTITIES, {})
        # Pre-extract per-entity "hidden" overrides for fast lookup in
        # is_entity_exposed().
        self._entities_with_hidden_attr_in_config = {}
        for entity_id in self.entities:
            hidden_value = self.entities[entity_id].get(CONF_ENTITY_HIDDEN, None)
            if hidden_value is not None:
                self._entities_with_hidden_attr_in_config[entity_id] = hidden_value

    def entity_id_to_number(self, entity_id):
        """Get a unique number for the entity id (Google Home mode)."""
        if self.type == TYPE_ALEXA:
            # Alexa mode addresses entities by their id directly.
            return entity_id
        if self.numbers is None:
            # Lazily load the persisted number -> entity_id mapping.
            self.numbers = _load_json(self.hass.config.path(NUMBERS_FILE))
        for number, ent_id in self.numbers.items():
            if entity_id == ent_id:
                return number
        # Unknown entity: allocate the next free number and persist the map.
        number = "1"
        if self.numbers:
            number = str(max(int(k) for k in self.numbers) + 1)
        self.numbers[number] = entity_id
        save_json(self.hass.config.path(NUMBERS_FILE), self.numbers)
        return number

    def number_to_entity_id(self, number):
        """Convert unique number to entity id (Google Home mode)."""
        if self.type == TYPE_ALEXA:
            return number
        if self.numbers is None:
            self.numbers = _load_json(self.hass.config.path(NUMBERS_FILE))
        assert isinstance(number, str)
        return self.numbers.get(number)

    def get_entity_name(self, entity):
        """Get the name of an entity: YAML override, then the
        emulated_hue_name attribute, then the entity's own name."""
        if (
            entity.entity_id in self.entities
            and CONF_ENTITY_NAME in self.entities[entity.entity_id]
        ):
            return self.entities[entity.entity_id][CONF_ENTITY_NAME]
        return entity.attributes.get(ATTR_EMULATED_HUE_NAME, entity.name)

    def is_entity_exposed(self, entity):
        """Determine if an entity should be exposed on the emulated bridge."""
        if entity.attributes.get("view") is not None:
            # Ignore entities that are views.
            return False
        # Explicit per-entity hidden override wins over all defaults.
        if entity.entity_id in self._entities_with_hidden_attr_in_config:
            return not self._entities_with_hidden_attr_in_config[entity.entity_id]
        if not self.expose_by_default:
            return False
        # the configuration doesn't explicitly exclude it from being
        if entity.domain in self.exposed_domains:
            return True
        return False
def _load_json(filename):
    """Load JSON from *filename*; return {} if load_json raises HomeAssistantError."""
    try:
        return load_json(filename)
    except HomeAssistantError:
        pass
    return {}
| true | true |
1c37d11febd64d8252bc97e1eb9d9befd448a37b | 531 | py | Python | finpro/urls.py | aditya1702/FinPro | bd0b6a8abc0ad613b39f0e814e5d5dea746ded50 | [
"MIT"
] | null | null | null | finpro/urls.py | aditya1702/FinPro | bd0b6a8abc0ad613b39f0e814e5d5dea746ded50 | [
"MIT"
] | null | null | null | finpro/urls.py | aditya1702/FinPro | bd0b6a8abc0ad613b39f0e814e5d5dea746ded50 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from FinPro import views
from django.contrib.auth import views as auth_views
from django.urls import path, include
from django.contrib import admin
# URL routes: Django admin plus the FinPro page views.
# NOTE(review): the url() regexes are unanchored at the end, so e.g.
# "^dashboard" also matches "/dashboard-foo" — presumably intentional; confirm.
urlpatterns = [
    path('admin/', admin.site.urls),
    url(r'^dashboard', views.DashboardPageView.as_view(), name = 'dashboard'),
    url(r'^maps', views.GlobalPageView.as_view(), name = 'maps'),
    url(r'^company-page', views.CompanyPageView.as_view(), name = 'company-page'),
    url(r'^login', views.LoginPageView.as_view(), name = 'login')
]
| 37.928571 | 82 | 0.717514 | from django.conf.urls import url
from FinPro import views
from django.contrib.auth import views as auth_views
from django.urls import path, include
from django.contrib import admin
# URL routes: Django admin plus the FinPro page views.
urlpatterns = [
    path('admin/', admin.site.urls),
    url(r'^dashboard', views.DashboardPageView.as_view(), name = 'dashboard'),
    url(r'^maps', views.GlobalPageView.as_view(), name = 'maps'),
    url(r'^company-page', views.CompanyPageView.as_view(), name = 'company-page'),
    url(r'^login', views.LoginPageView.as_view(), name = 'login')
]
| true | true |
1c37d15a08615b8399b234c97c570adaf8660123 | 2,834 | py | Python | eoxserver/services/ows/wcs/v11/describecoverage.py | constantinius/eoxserver_combined | 68f261133fed65a4e8a6ddba82b0d2845171e4bf | [
"OML"
] | null | null | null | eoxserver/services/ows/wcs/v11/describecoverage.py | constantinius/eoxserver_combined | 68f261133fed65a4e8a6ddba82b0d2845171e4bf | [
"OML"
] | null | null | null | eoxserver/services/ows/wcs/v11/describecoverage.py | constantinius/eoxserver_combined | 68f261133fed65a4e8a6ddba82b0d2845171e4bf | [
"OML"
] | null | null | null | #-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from eoxserver.core import Component, implements
from eoxserver.core.decoders import xml, kvp, typelist
from eoxserver.services.ows.interfaces import (
ServiceHandlerInterface, GetServiceHandlerInterface,
PostServiceHandlerInterface
)
from eoxserver.services.ows.wcs.basehandlers import (
WCSDescribeCoverageHandlerBase
)
from eoxserver.services.ows.wcs.v11.parameters import (
WCS11CoverageDescrptionRenderParams
)
from eoxserver.services.ows.wcs.v11.util import nsmap
class WCS11DescribeCoverageHandler(WCSDescribeCoverageHandlerBase, Component):
    """WCS 1.1.x DescribeCoverage operation handler (GET/KVP and POST/XML)."""

    implements(ServiceHandlerInterface)
    implements(GetServiceHandlerInterface)
    implements(PostServiceHandlerInterface)

    # WCS protocol versions this handler answers for.
    versions = ("1.1.0", "1.1.1", "1.1.2",)

    def get_decoder(self, request):
        """Return the request decoder matching the HTTP method.

        GET requests are decoded from the query string (KVP), POST
        requests from the XML body.  Other methods fall through to None.
        """
        if request.method == "GET":
            return WCS11DescribeCoverageKVPDecoder(request.GET)
        elif request.method == "POST":
            return WCS11DescribeCoverageXMLDecoder(request.body)

    def get_params(self, coverages, decoder):
        """Wrap the resolved coverages in WCS 1.1 render parameters."""
        return WCS11CoverageDescrptionRenderParams(coverages)
class WCS11DescribeCoverageKVPDecoder(kvp.Decoder):
    """Decodes KVP (GET) requests; "identifier" is a comma-separated list."""

    coverage_ids = kvp.Parameter("identifier", type=typelist(separator=","), num=1)
class WCS11DescribeCoverageXMLDecoder(xml.Decoder):
    """Decodes XML (POST) requests; one or more wcs:Identifier elements."""

    coverage_ids = xml.Parameter("wcs:Identifier/text()", num="+")
    namespaces = nsmap
| 40.485714 | 83 | 0.702893 |
from eoxserver.core import Component, implements
from eoxserver.core.decoders import xml, kvp, typelist
from eoxserver.services.ows.interfaces import (
ServiceHandlerInterface, GetServiceHandlerInterface,
PostServiceHandlerInterface
)
from eoxserver.services.ows.wcs.basehandlers import (
WCSDescribeCoverageHandlerBase
)
from eoxserver.services.ows.wcs.v11.parameters import (
WCS11CoverageDescrptionRenderParams
)
from eoxserver.services.ows.wcs.v11.util import nsmap
class WCS11DescribeCoverageHandler(WCSDescribeCoverageHandlerBase, Component):
    """WCS 1.1.x DescribeCoverage operation handler (GET/KVP and POST/XML)."""

    implements(ServiceHandlerInterface)
    implements(GetServiceHandlerInterface)
    implements(PostServiceHandlerInterface)

    # WCS protocol versions this handler answers for.
    versions = ("1.1.0", "1.1.1", "1.1.2",)

    def get_decoder(self, request):
        """Return a KVP (GET) or XML (POST) request decoder."""
        if request.method == "GET":
            return WCS11DescribeCoverageKVPDecoder(request.GET)
        elif request.method == "POST":
            return WCS11DescribeCoverageXMLDecoder(request.body)

    def get_params(self, coverages, decoder):
        """Wrap the resolved coverages in WCS 1.1 render parameters."""
        return WCS11CoverageDescrptionRenderParams(coverages)
class WCS11DescribeCoverageKVPDecoder(kvp.Decoder):
    """Decodes KVP (GET) requests; "identifier" is a comma-separated list."""

    coverage_ids = kvp.Parameter("identifier", type=typelist(separator=","), num=1)
class WCS11DescribeCoverageXMLDecoder(xml.Decoder):
    """Decodes XML (POST) requests; one or more wcs:Identifier elements."""

    coverage_ids = xml.Parameter("wcs:Identifier/text()", num="+")
    namespaces = nsmap
| true | true |
1c37d16e5211f0fdc491488938e9fe8fd5027f52 | 453 | py | Python | public/migrations/0001_initial.py | Andrew-Chen-Wang/django-infinite-scroll | 2e8871daf7fe37cbbd15a078fb99d8a22e12012f | [
"MIT"
] | null | null | null | public/migrations/0001_initial.py | Andrew-Chen-Wang/django-infinite-scroll | 2e8871daf7fe37cbbd15a078fb99d8a22e12012f | [
"MIT"
] | 3 | 2021-03-30T14:15:27.000Z | 2021-06-10T19:56:36.000Z | public/migrations/0001_initial.py | Andrew-Chen-Wang/django-infinite-scroll | 2e8871daf7fe37cbbd15a078fb99d8a22e12012f | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-07-29 04:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``English`` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='English',
            fields=[
                # BigAutoField primary key instead of Django's default AutoField.
                ('id', models.BigAutoField(primary_key=True, serialize=False)),
                ('name', models.TextField(max_length=9001)),
            ],
        ),
    ]
| 20.590909 | 79 | 0.560706 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``English`` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='English',
            fields=[
                # BigAutoField primary key instead of Django's default AutoField.
                ('id', models.BigAutoField(primary_key=True, serialize=False)),
                ('name', models.TextField(max_length=9001)),
            ],
        ),
    ]
| true | true |
1c37d323b295d8307a87e53028346c16fffb06ba | 678 | py | Python | src/python/tensorflow_cloud/version.py | ucdmkt/cloud | 5920c6cbe2f0f56600760d6857f90a170caf3359 | [
"Apache-2.0"
] | null | null | null | src/python/tensorflow_cloud/version.py | ucdmkt/cloud | 5920c6cbe2f0f56600760d6857f90a170caf3359 | [
"Apache-2.0"
] | null | null | null | src/python/tensorflow_cloud/version.py | ucdmkt/cloud | 5920c6cbe2f0f56600760d6857f90a170caf3359 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the version string of TensorFlow Cloud."""
# Package version string (".dev" marks a PEP 440 development release).
__version__ = "0.1.7.dev"
| 39.882353 | 74 | 0.756637 |
# Package version string (".dev" marks a PEP 440 development release).
__version__ = "0.1.7.dev"
| true | true |
1c37d3c5e34b3c2b0571209fb3be9fff642cd9c6 | 21,393 | py | Python | pipeline/configs/grb-citeseer/config.py | THUDM/grb | 2f438ccc9e62ffb33a26ca98a95e504985443055 | [
"MIT"
] | 51 | 2021-06-09T06:33:51.000Z | 2022-03-14T07:55:06.000Z | pipeline/configs/grb-citeseer/config.py | THUDM/grb | 2f438ccc9e62ffb33a26ca98a95e504985443055 | [
"MIT"
] | 3 | 2021-08-12T13:12:47.000Z | 2021-12-08T02:16:02.000Z | pipeline/configs/grb-citeseer/config.py | THUDM/grb | 2f438ccc9e62ffb33a26ca98a95e504985443055 | [
"MIT"
] | 11 | 2021-06-10T08:30:05.000Z | 2022-03-28T02:10:11.000Z | """Configuration for reproducing leaderboard of grb-citeseer dataset."""
import torch
import torch.nn.functional as F
from grb.evaluator import metric
# All evaluated models.  Suffixes: "_ln" = layer normalization variant,
# "_at" = adversarially trained variant.
model_list = ["gcn",
              "gcn_ln",
              "gcn_at",
              "graphsage",
              "graphsage_ln",
              "graphsage_at",
              "sgcn",
              "sgcn_ln",
              "sgcn_at",
              "robustgcn",
              "robustgcn_at",
              "tagcn",
              "tagcn_ln",
              "tagcn_at",
              "appnp",
              "appnp_ln",
              "appnp_at",
              "gin",
              "gin_ln",
              "gin_at",
              "gat",
              "gat_ln",
              "gat_at",
              "gcnguard",
              "gatguard",
              "gcnsvd"]

# Undefended base architectures only.
model_list_basic = ["gcn",
                    "graphsage",
                    "sgcn",
                    "tagcn",
                    "appnp",
                    "gin",
                    "gat"]

# Attack names accepted by build_attack(..., mode="modification").
modification_attack_list = ["dice",
                            "rand",
                            "flip",
                            "fga",
                            "nea",
                            "pgd",
                            "prbcd",
                            "stack"]

# Attack names accepted by build_attack(..., mode="injection").
injection_attack_list = ["rand",
                         "fgsm",
                         "pgd",
                         "speit",
                         "tdgia"]

# Surrogate model names (presumably used to craft transfer attacks —
# TODO confirm against the pipeline code).
model_sur_list = ["gcn"]
def _train_params(lr):
    """Training schedule shared by every model; only the learning rate varies."""
    return {
        "lr": lr,
        "n_epoch": 5000,
        "early_stop": True,
        "early_stop_patience": 500,
        "train_mode": "inductive",
    }


def build_model(model_name, num_features, num_classes):
    """Instantiate a model and its training hyper-parameters by name.

    Hyper-parameters are determined by auto training, refer to
    grb.utils.trainer.AutoTrainer.  Imports stay inside each branch so that
    only the selected model's dependencies are loaded.

    :param model_name: one of the names in ``model_list``; "_ln"/"_at"
        suffixes select the layer-norm / adversarially-trained variants.
    :param num_features: input feature dimension.
    :param num_classes: number of output classes.
    :return: ``(model, train_params)`` tuple.
    :raises ValueError: if ``model_name`` is not recognized.
    """
    if model_name in ["gcn", "gcn_ln", "gcn_at", "gcn_ln_at"]:
        from grb.model.torch import GCN
        model = GCN(in_features=num_features,
                    out_features=num_classes,
                    hidden_features=128,
                    n_layers=3,
                    layer_norm="ln" in model_name,
                    dropout=0.7)
        return model, _train_params(lr=0.001)
    if model_name in ["graphsage", "graphsage_ln", "graphsage_at", "graphsage_ln_at"]:
        from grb.model.torch import GraphSAGE
        model = GraphSAGE(in_features=num_features,
                          out_features=num_classes,
                          hidden_features=256,
                          n_layers=5,
                          layer_norm="ln" in model_name,
                          dropout=0.5)
        return model, _train_params(lr=0.0001)
    if model_name in ["sgcn", "sgcn_ln", "sgcn_at", "sgcn_ln_at"]:
        from grb.model.torch import SGCN
        model = SGCN(in_features=num_features,
                     out_features=num_classes,
                     hidden_features=256,
                     n_layers=4,
                     k=4,
                     layer_norm="ln" in model_name,
                     dropout=0.5)
        return model, _train_params(lr=0.01)
    if model_name in ["tagcn", "tagcn_ln", "tagcn_at", "tagcn_ln_at"]:
        from grb.model.torch import TAGCN
        model = TAGCN(in_features=num_features,
                      out_features=num_classes,
                      hidden_features=256,
                      n_layers=3,
                      k=2,
                      layer_norm="ln" in model_name,
                      dropout=0.5)
        return model, _train_params(lr=0.005)
    if model_name in ["appnp", "appnp_ln", "appnp_at", "appnp_ln_at"]:
        from grb.model.torch import APPNP
        model = APPNP(in_features=num_features,
                      out_features=num_classes,
                      hidden_features=128,
                      n_layers=3,
                      k=3,
                      layer_norm="ln" in model_name,
                      dropout=0.5)
        return model, _train_params(lr=0.001)
    if model_name in ["gin", "gin_ln", "gin_at", "gin_ln_at"]:
        from grb.model.torch import GIN
        model = GIN(in_features=num_features,
                    out_features=num_classes,
                    hidden_features=256,
                    n_layers=2,
                    layer_norm="ln" in model_name,
                    dropout=0.6)
        return model, _train_params(lr=0.0001)
    if model_name in ["gat", "gat_ln", "gat_at", "gat_ln_at"]:
        # GAT uses the DGL backend, unlike the pure-torch models above.
        from grb.model.dgl import GAT
        model = GAT(in_features=num_features,
                    out_features=num_classes,
                    hidden_features=64,
                    n_layers=3,
                    n_heads=6,
                    layer_norm="ln" in model_name,
                    dropout=0.6)
        return model, _train_params(lr=0.005)
    if model_name in ["robustgcn", "robustgcn_at"]:
        from grb.defense import RobustGCN
        model = RobustGCN(in_features=num_features,
                          out_features=num_classes,
                          hidden_features=128,
                          n_layers=3,
                          dropout=0.5)
        return model, _train_params(lr=0.001)
    if model_name in ["gcnsvd", "gcnsvd_ln"]:
        from grb.defense.gcnsvd import GCNSVD
        model = GCNSVD(in_features=num_features,
                       out_features=num_classes,
                       hidden_features=128,
                       n_layers=3,
                       dropout=0.5)
        return model, _train_params(lr=0.001)
    if model_name in ["gcnguard"]:
        from grb.defense import GCNGuard
        model = GCNGuard(in_features=num_features,
                         out_features=num_classes,
                         hidden_features=128,
                         n_layers=3,
                         dropout=0.5)
        return model, _train_params(lr=0.001)
    if model_name in ["gatguard"]:
        from grb.defense import GATGuard
        model = GATGuard(in_features=num_features,
                         out_features=num_classes,
                         hidden_features=64,
                         n_heads=6,
                         n_layers=3,
                         dropout=0.5)
        return model, _train_params(lr=0.001)
    # Previously an unknown name fell through and implicitly returned None,
    # surfacing later as an opaque unpacking error; fail fast instead.
    raise ValueError(f"Unknown model name: {model_name!r}")
def build_optimizer(model, lr):
    """Return an Adam optimizer over *model*'s parameters with learning rate *lr*."""
    return torch.optim.Adam(model.parameters(), lr=lr)
def build_loss():
    """Return the training loss function (negative log-likelihood)."""
    return F.nll_loss
def build_metric():
    """Return the evaluation metric (grb accuracy, ``metric.eval_acc``)."""
    return metric.eval_acc
def build_attack(attack_name, device="cpu", args=None, mode="modification"):
    """Instantiate an adversarial attack from the GRB attack zoo.

    Args:
        attack_name: Attack identifier, e.g. "dice", "pgd", "tdgia".
        device: Torch device string the attack runs on.
        args: Namespace with the attack's hyper-parameters (``n_edge_mod``,
            ``attack_lr``, ``feat_lim_min``/``feat_lim_max``, ...). Which
            fields are read depends on the chosen attack; it must not be
            ``None`` for any real attack.
        mode: "modification" (perturb existing edges/features) or
            "injection" (inject new nodes).

    Returns:
        A configured attack object.

    Raises:
        NotImplementedError: If ``mode`` or ``attack_name`` is not supported.
            (Previously an unknown ``attack_name`` silently returned ``None``,
            which only failed later at the call site.)
    """
    if mode == "modification":
        if attack_name == "dice":
            from grb.attack.modification import DICE
            return DICE(n_edge_mod=args.n_edge_mod,
                        ratio_delete=0.6,
                        device=device)
        if attack_name == "fga":
            from grb.attack.modification import FGA
            return FGA(n_edge_mod=args.n_edge_mod,
                       device=device)
        if attack_name == "flip":
            from grb.attack.modification import FLIP
            return FLIP(n_edge_mod=args.n_edge_mod,
                        flip_type=args.flip_type,
                        mode="descend",
                        device=device)
        if attack_name == "rand":
            from grb.attack.modification import RAND
            return RAND(n_edge_mod=args.n_edge_mod,
                        device=device)
        if attack_name == "nea":
            from grb.attack.modification import NEA
            return NEA(n_edge_mod=args.n_edge_mod,
                       device=device)
        if attack_name == "stack":
            from grb.attack.modification import STACK
            return STACK(n_edge_mod=args.n_edge_mod,
                         device=device)
        if attack_name == "pgd":
            from grb.attack.modification import PGD
            return PGD(epsilon=args.epsilon,
                       n_epoch=args.attack_epoch,
                       n_node_mod=args.n_node_mod,
                       n_edge_mod=args.n_edge_mod,
                       feat_lim_min=args.feat_lim_min,
                       feat_lim_max=args.feat_lim_max,
                       early_stop=args.early_stop,
                       device=device)
        if attack_name == "prbcd":
            from grb.attack.modification import PRBCD
            return PRBCD(epsilon=args.epsilon,
                         n_epoch=args.attack_epoch,
                         n_node_mod=args.n_node_mod,
                         n_edge_mod=args.n_edge_mod,
                         feat_lim_min=args.feat_lim_min,
                         feat_lim_max=args.feat_lim_max,
                         early_stop=args.early_stop,
                         device=device)
        raise NotImplementedError(
            "Unknown modification attack: {}".format(attack_name))
    elif mode == "injection":
        if attack_name == "rand":
            from grb.attack.injection import RAND
            return RAND(n_inject_max=args.n_inject_max,
                        n_edge_max=args.n_edge_max,
                        feat_lim_min=args.feat_lim_min,
                        feat_lim_max=args.feat_lim_max,
                        device=device)
        if attack_name == "fgsm":
            from grb.attack.injection import FGSM
            return FGSM(epsilon=args.attack_lr,
                        n_epoch=args.attack_epoch,
                        n_inject_max=args.n_inject_max,
                        n_edge_max=args.n_edge_max,
                        feat_lim_min=args.feat_lim_min,
                        feat_lim_max=args.feat_lim_max,
                        early_stop=args.early_stop,
                        device=device)
        if attack_name == "pgd":
            from grb.attack.injection import PGD
            return PGD(epsilon=args.attack_lr,
                       n_epoch=args.attack_epoch,
                       n_inject_max=args.n_inject_max,
                       n_edge_max=args.n_edge_max,
                       feat_lim_min=args.feat_lim_min,
                       feat_lim_max=args.feat_lim_max,
                       early_stop=args.early_stop,
                       device=device)
        if attack_name == "speit":
            from grb.attack.injection import SPEIT
            return SPEIT(lr=args.attack_lr,
                         n_epoch=args.attack_epoch,
                         n_inject_max=args.n_inject_max,
                         n_edge_max=args.n_edge_max,
                         feat_lim_min=args.feat_lim_min,
                         feat_lim_max=args.feat_lim_max,
                         early_stop=args.early_stop,
                         device=device)
        if attack_name == "tdgia":
            from grb.attack.injection import TDGIA
            return TDGIA(lr=args.attack_lr,
                         n_epoch=args.attack_epoch,
                         n_inject_max=args.n_inject_max,
                         n_edge_max=args.n_edge_max,
                         feat_lim_min=args.feat_lim_min,
                         feat_lim_max=args.feat_lim_max,
                         early_stop=args.early_stop,
                         inject_mode='random',
                         sequential_step=1.0,
                         device=device)
        if attack_name == "tdgia_random":
            from grb.attack.injection.tdgia import TDGIA
            return TDGIA(lr=args.attack_lr,
                         n_epoch=args.attack_epoch,
                         n_inject_max=args.n_inject_max,
                         n_edge_max=args.n_edge_max,
                         feat_lim_min=args.feat_lim_min,
                         feat_lim_max=args.feat_lim_max,
                         early_stop=args.early_stop,
                         inject_mode='random',
                         device=device)
        if attack_name == "tdgia_uniform":
            from grb.attack.injection import TDGIA
            return TDGIA(lr=args.attack_lr,
                         n_epoch=args.attack_epoch,
                         n_inject_max=args.n_inject_max,
                         n_edge_max=args.n_edge_max,
                         feat_lim_min=args.feat_lim_min,
                         feat_lim_max=args.feat_lim_max,
                         early_stop=args.early_stop,
                         inject_mode='uniform',
                         sequential_step=1.0,
                         device=device)
        raise NotImplementedError(
            "Unknown injection attack: {}".format(attack_name))
    else:
        raise NotImplementedError("Unknown attack mode: {}".format(mode))
def build_model_autotrain(model_name):
    """Return ``(model_class, params_search)`` for Optuna-style auto-training.

    Args:
        model_name: One of "gcn", "graphsage", "sgcn", "tagcn", "appnp",
            "gin", "gat".

    Returns:
        Tuple of the model class and a ``params_search(trial)`` callable that
        samples ``(model_params, other_params)`` from an Optuna trial.

    Raises:
        NotImplementedError: If ``model_name`` is not supported. (Previously
            an unknown name silently returned ``None``.)
    """
    def _make_params_search(extra_hparams=()):
        # All models share the same base search space; `extra_hparams` is a
        # sequence of (name, choices) pairs for model-specific categoricals
        # (e.g. "k" for TAGCN/APPNP, "n_heads" for GAT). They are suggested
        # between "n_layers" and "dropout" to preserve the original
        # suggestion order.
        def params_search(trial):
            model_params = {
                "hidden_features": trial.suggest_categorical(
                    "hidden_features", [32, 64, 128, 256]),
                "n_layers": trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
            }
            for name, choices in extra_hparams:
                model_params[name] = trial.suggest_categorical(name, choices)
            model_params["dropout"] = trial.suggest_categorical(
                "dropout", [0.5, 0.6, 0.7, 0.8])
            other_params = {
                "lr": trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
                "n_epoch": 5000,
                "early_stop": True,
                "early_stop_patience": 500,
            }
            return model_params, other_params
        return params_search

    if model_name == "gcn":
        from grb.model.torch import GCN
        return GCN, _make_params_search()
    if model_name == "graphsage":
        from grb.model.torch import GraphSAGE
        return GraphSAGE, _make_params_search()
    if model_name == "sgcn":
        from grb.model.torch import SGCN
        return SGCN, _make_params_search()
    if model_name == "tagcn":
        from grb.model.torch import TAGCN
        return TAGCN, _make_params_search((("k", [2, 3, 4, 5]),))
    if model_name == "appnp":
        from grb.model.torch import APPNP
        return APPNP, _make_params_search((("k", [2, 3, 4, 5]),))
    if model_name == "gin":
        from grb.model.torch import GIN
        return GIN, _make_params_search()
    if model_name == "gat":
        from grb.model.dgl import GAT
        return GAT, _make_params_search((("n_heads", [2, 4, 6, 8]),))
    raise NotImplementedError(
        "Unknown model for auto-training: {}".format(model_name))
| 39.325368 | 100 | 0.465666 | import torch
import torch.nn.functional as F
from grb.evaluator import metric
model_list = ["gcn",
"gcn_ln",
"gcn_at",
"graphsage",
"graphsage_ln",
"graphsage_at",
"sgcn",
"sgcn_ln",
"sgcn_at",
"robustgcn",
"robustgcn_at",
"tagcn",
"tagcn_ln",
"tagcn_at",
"appnp",
"appnp_ln",
"appnp_at",
"gin",
"gin_ln",
"gin_at",
"gat",
"gat_ln",
"gat_at",
"gcnguard",
"gatguard",
"gcnsvd"]
model_list_basic = ["gcn",
"graphsage",
"sgcn",
"tagcn",
"appnp",
"gin",
"gat"]
modification_attack_list = ["dice",
"rand",
"flip",
"fga",
"nea",
"pgd",
"prbcd",
"stack"]
injection_attack_list = ["rand",
"fgsm",
"pgd",
"speit",
"tdgia"]
model_sur_list = ["gcn"]
def build_model(model_name, num_features, num_classes):
if model_name in ["gcn", "gcn_ln", "gcn_at", "gcn_ln_at"]:
from grb.model.torch import GCN
model = GCN(in_features=num_features,
out_features=num_classes,
hidden_features=128,
n_layers=3,
layer_norm=True if "ln" in model_name else False,
dropout=0.7)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["graphsage", "graphsage_ln", "graphsage_at", "graphsage_ln_at"]:
from grb.model.torch import GraphSAGE
model = GraphSAGE(in_features=num_features,
out_features=num_classes,
hidden_features=256,
n_layers=5,
layer_norm=True if "ln" in model_name else False,
dropout=0.5)
train_params = {
"lr" : 0.0001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["sgcn", "sgcn_ln", "sgcn_at", "sgcn_ln_at"]:
from grb.model.torch import SGCN
model = SGCN(in_features=num_features,
out_features=num_classes,
hidden_features=256,
n_layers=4,
k=4,
layer_norm=True if "ln" in model_name else False,
dropout=0.5)
train_params = {
"lr" : 0.01,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["tagcn", "tagcn_ln", "tagcn_at", "tagcn_ln_at"]:
from grb.model.torch import TAGCN
model = TAGCN(in_features=num_features,
out_features=num_classes,
hidden_features=256,
n_layers=3,
k=2,
layer_norm=True if "ln" in model_name else False,
dropout=0.5)
train_params = {
"lr" : 0.005,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["appnp", "appnp_ln", "appnp_at", "appnp_ln_at"]:
from grb.model.torch import APPNP
model = APPNP(in_features=num_features,
out_features=num_classes,
hidden_features=128,
n_layers=3,
k=3,
layer_norm=True if "ln" in model_name else False,
dropout=0.5)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["gin", "gin_ln", "gin_at", "gin_ln_at"]:
from grb.model.torch import GIN
model = GIN(in_features=num_features,
out_features=num_classes,
hidden_features=256,
n_layers=2,
layer_norm=True if "ln" in model_name else False,
dropout=0.6)
train_params = {
"lr" : 0.0001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["gat", "gat_ln", "gat_at", "gat_ln_at"]:
from grb.model.dgl import GAT
model = GAT(in_features=num_features,
out_features=num_classes,
hidden_features=64,
n_layers=3,
n_heads=6,
layer_norm=True if "ln" in model_name else False,
dropout=0.6)
train_params = {
"lr" : 0.005,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["robustgcn", "robustgcn_at"]:
from grb.defense import RobustGCN
model = RobustGCN(in_features=num_features,
out_features=num_classes,
hidden_features=128,
n_layers=3,
dropout=0.5)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["gcnsvd", "gcnsvd_ln"]:
from grb.defense.gcnsvd import GCNSVD
model = GCNSVD(in_features=num_features,
out_features=num_classes,
hidden_features=128,
n_layers=3,
dropout=0.5)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["gcnguard"]:
from grb.defense import GCNGuard
model = GCNGuard(in_features=num_features,
out_features=num_classes,
hidden_features=128,
n_layers=3,
dropout=0.5)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
if model_name in ["gatguard"]:
from grb.defense import GATGuard
model = GATGuard(in_features=num_features,
out_features=num_classes,
hidden_features=64,
n_heads=6,
n_layers=3,
dropout=0.5)
train_params = {
"lr" : 0.001,
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
"train_mode" : "inductive",
}
return model, train_params
def build_optimizer(model, lr):
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
return optimizer
def build_loss():
return F.nll_loss
def build_metric():
return metric.eval_acc
def build_attack(attack_name, device="cpu", args=None, mode="modification"):
if mode == "modification":
if attack_name == "dice":
from grb.attack.modification import DICE
attack = DICE(n_edge_mod=args.n_edge_mod,
ratio_delete=0.6,
device=device)
return attack
if attack_name == "fga":
from grb.attack.modification import FGA
attack = FGA(n_edge_mod=args.n_edge_mod,
device=device)
return attack
if attack_name == "flip":
from grb.attack.modification import FLIP
attack = FLIP(n_edge_mod=args.n_edge_mod,
flip_type=args.flip_type,
mode="descend",
device=device)
return attack
if attack_name == "rand":
from grb.attack.modification import RAND
attack = RAND(n_edge_mod=args.n_edge_mod,
device=device)
return attack
if attack_name == "nea":
from grb.attack.modification import NEA
attack = NEA(n_edge_mod=args.n_edge_mod,
device=device)
return attack
if attack_name == "stack":
from grb.attack.modification import STACK
attack = STACK(n_edge_mod=args.n_edge_mod,
device=device)
return attack
if attack_name == "pgd":
from grb.attack.modification import PGD
attack = PGD(epsilon=args.epsilon,
n_epoch=args.attack_epoch,
n_node_mod=args.n_node_mod,
n_edge_mod=args.n_edge_mod,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
device=device)
return attack
if attack_name == "prbcd":
from grb.attack.modification import PRBCD
attack = PRBCD(epsilon=args.epsilon,
n_epoch=args.attack_epoch,
n_node_mod=args.n_node_mod,
n_edge_mod=args.n_edge_mod,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
device=device)
return attack
elif mode == "injection":
if attack_name == "rand":
from grb.attack.injection import RAND
attack = RAND(n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
device=device)
return attack
elif attack_name == "fgsm":
from grb.attack.injection import FGSM
attack = FGSM(epsilon=args.attack_lr,
n_epoch=args.attack_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
device=device)
return attack
elif attack_name == "pgd":
from grb.attack.injection import PGD
attack = PGD(epsilon=args.attack_lr,
n_epoch=args.attack_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
device=device)
return attack
elif attack_name == "speit":
from grb.attack.injection import SPEIT
attack = SPEIT(lr=args.attack_lr,
n_epoch=args.attack_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
device=device)
return attack
elif attack_name == "tdgia":
from grb.attack.injection import TDGIA
attack = TDGIA(lr=args.attack_lr,
n_epoch=args.attack_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
inject_mode='random',
sequential_step=1.0,
device=device)
return attack
elif attack_name == "tdgia_random":
from grb.attack.injection.tdgia import TDGIA
attack = TDGIA(lr=args.attack_lr,
n_epoch=args.attack_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
inject_mode='random',
device=device)
return attack
elif attack_name == "tdgia_uniform":
from grb.attack.injection import TDGIA
attack = TDGIA(lr=args.attack_lr,
n_epoch=args.attack_epoch,
n_inject_max=args.n_inject_max,
n_edge_max=args.n_edge_max,
feat_lim_min=args.feat_lim_min,
feat_lim_max=args.feat_lim_max,
early_stop=args.early_stop,
inject_mode='uniform',
sequential_step=1.0,
device=device)
return attack
else:
raise NotImplementedError
def build_model_autotrain(model_name):
if model_name == "gcn":
from grb.model.torch import GCN
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return GCN, params_search
if model_name == "graphsage":
from grb.model.torch import GraphSAGE
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return GraphSAGE, params_search
if model_name == "sgcn":
from grb.model.torch import SGCN
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return SGCN, params_search
if model_name == "tagcn":
from grb.model.torch import TAGCN
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"k" : trial.suggest_categorical("k", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return TAGCN, params_search
if model_name == "appnp":
from grb.model.torch import APPNP
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"k" : trial.suggest_categorical("k", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return APPNP, params_search
if model_name == "gin":
from grb.model.torch import GIN
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return GIN, params_search
if model_name == "gat":
from grb.model.dgl import GAT
def params_search(trial):
model_params = {
"hidden_features": trial.suggest_categorical("hidden_features", [32, 64, 128, 256]),
"n_layers" : trial.suggest_categorical("n_layers", [2, 3, 4, 5]),
"n_heads" : trial.suggest_categorical("n_heads", [2, 4, 6, 8]),
"dropout" : trial.suggest_categorical("dropout", [0.5, 0.6, 0.7, 0.8]),
}
other_params = {
"lr" : trial.suggest_categorical("lr", [1e-2, 1e-3, 5e-3, 1e-4]),
"n_epoch" : 5000,
"early_stop" : True,
"early_stop_patience": 500,
}
return model_params, other_params
return GAT, params_search
| true | true |
1c37d3f21b7a001c9055b55e762d87a2946c1496 | 261 | py | Python | Sources/02XXX/2941/2941.py | DDManager/Baekjoon-Online-Judge | 7dd6d76838d3309bfe5bef46f1778c5776ebdf2a | [
"MIT"
] | 1 | 2019-07-02T09:07:58.000Z | 2019-07-02T09:07:58.000Z | Sources/02XXX/2941/2941.py | DDManager/Baekjoon-Online-Judge | 7dd6d76838d3309bfe5bef46f1778c5776ebdf2a | [
"MIT"
] | null | null | null | Sources/02XXX/2941/2941.py | DDManager/Baekjoon-Online-Judge | 7dd6d76838d3309bfe5bef46f1778c5776ebdf2a | [
"MIT"
] | 1 | 2022-02-13T04:17:10.000Z | 2022-02-13T04:17:10.000Z | ##
# BOJ 2941번 Python 3 소스 코드
# 작성자 : 동동매니저 (DDManager)
#
# ※ 실행 결과
# 사용 메모리 : 32,876 KB / 294,912 KB
# 소요 시간 : 112 ms / 5,000 ms
#
# Copyright 2020. DDManager all rights reserved.
##
import re

# Multi-character Croatian letters. "dz=" must come before "z=" so the
# three-character letter wins over its two-character suffix when matching.
_CROATIAN_PATTERN = re.compile("dz=|c=|c-|d-|lj|nj|s=|z=")


def count_croatian_letters(word):
    """Return how many Croatian-alphabet letters ``word`` consists of.

    Each multi-character letter is collapsed to a single placeholder
    character, so the length of the substituted string equals the total
    letter count (BOJ problem 2941).
    """
    return len(_CROATIAN_PATTERN.sub("0", word))


if __name__ == "__main__":
    # Guarded entry point: the original ran input()/print at import time,
    # which made the module impossible to import (e.g. for testing).
    print(count_croatian_letters(input()))
import re
print(len(re.sub("(dz=|c=|c-|d-|lj|nj|s=|z=)","0",input()))) | true | true |
1c37d3f6f4efd7eafe388a457039c5630ca8ffcf | 828 | py | Python | test/functional/bitcoin_cli.py | karthik2883/ABCCoin | e6daef308ba7bc81a59ba3aff8bc503cdece5cc6 | [
"MIT"
] | 1 | 2018-04-25T12:18:41.000Z | 2018-04-25T12:18:41.000Z | test/functional/bitcoin_cli.py | karthik2883/ABCCoin | e6daef308ba7bc81a59ba3aff8bc503cdece5cc6 | [
"MIT"
] | null | null | null | test/functional/bitcoin_cli.py | karthik2883/ABCCoin | e6daef308ba7bc81a59ba3aff8bc503cdece5cc6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-cli"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class TestBitcoinCli(BitcoinTestFramework):
    """Functional test for the `abccoin-cli` command-line RPC wrapper."""
    def set_test_params(self):
        # A single fresh node is enough for an RPC-parity check; no
        # pre-mined chain required.
        self.setup_clean_chain = True
        self.num_nodes = 1
    def run_test(self):
        """Main test logic"""
        self.log.info("Compare responses from getinfo RPC and `abccoin-cli getinfo`")
        # The same call routed through the CLI binary and through the direct
        # RPC interface must yield identical results.
        cli_get_info = self.nodes[0].cli.getinfo()
        rpc_get_info = self.nodes[0].getinfo()
        assert_equal(cli_get_info, rpc_get_info)
# Standard functional-test entry point: hand control to the framework.
if __name__ == '__main__':
    TestBitcoinCli().main()
| 31.846154 | 85 | 0.721014 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class TestBitcoinCli(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
self.log.info("Compare responses from getinfo RPC and `abccoin-cli getinfo`")
cli_get_info = self.nodes[0].cli.getinfo()
rpc_get_info = self.nodes[0].getinfo()
assert_equal(cli_get_info, rpc_get_info)
if __name__ == '__main__':
TestBitcoinCli().main()
| true | true |
1c37d427226bb485f6d7aeb1d6435345e62f6017 | 52 | py | Python | src/allocation/adapters/email.py | jeantardelli/architecture-patterns-with-python | d48c7d6d4a44073b815c7e6770e44cf2e231e35b | [
"MIT"
] | 1 | 2021-04-07T18:04:56.000Z | 2021-04-07T18:04:56.000Z | src/allocation/adapters/email.py | jeantardelli/architecture-patterns-with-python | d48c7d6d4a44073b815c7e6770e44cf2e231e35b | [
"MIT"
] | null | null | null | src/allocation/adapters/email.py | jeantardelli/architecture-patterns-with-python | d48c7d6d4a44073b815c7e6770e44cf2e231e35b | [
"MIT"
] | null | null | null | def send(*args):
print("SENDING EMAIL:", *args)
| 17.333333 | 34 | 0.615385 | def send(*args):
print("SENDING EMAIL:", *args)
| true | true |
1c37d42984d41533828aed23483e53f4a7a0b343 | 1,610 | py | Python | icv/data/core/segmap.py | dmxj/icv | 0b074ec9475f2c70038d2e8b7166414fd5b93e61 | [
"MIT"
] | 5 | 2019-09-10T04:02:19.000Z | 2020-07-24T07:46:08.000Z | icv/data/core/segmap.py | dmxj/icv | 0b074ec9475f2c70038d2e8b7166414fd5b93e61 | [
"MIT"
] | null | null | null | icv/data/core/segmap.py | dmxj/icv | 0b074ec9475f2c70038d2e8b7166414fd5b93e61 | [
"MIT"
] | 1 | 2020-03-20T03:44:04.000Z | 2020-03-20T03:44:04.000Z | # -*- coding: UTF-8 -*-
import numpy as np
class Segmap(object):
    """Segmentation-map helper.

    ``DEFAULT_SEGMENT_COLORS`` is a fixed RGB palette (one tuple per class
    index; index 0 is black, conventionally the background) followed by a
    block of darker variants of the same hues.
    """
    DEFAULT_SEGMENT_COLORS = [
        (0, 0, 0),  # black
        (230, 25, 75),  # red
        (60, 180, 75),  # green
        (255, 225, 25),  # yellow
        (0, 130, 200),  # blue
        (245, 130, 48),  # orange
        (145, 30, 180),  # purple
        (70, 240, 240),  # cyan
        (240, 50, 230),  # magenta
        (210, 245, 60),  # lime
        (250, 190, 190),  # pink
        (0, 128, 128),  # teal
        (230, 190, 255),  # lavender
        (170, 110, 40),  # brown
        (255, 250, 200),  # beige
        (128, 0, 0),  # maroon
        (170, 255, 195),  # mint
        (128, 128, 0),  # olive
        (255, 215, 180),  # coral
        (0, 0, 128),  # navy
        (128, 128, 128),  # grey
        (255, 255, 255),  # white
        # -- darker variants of the hues above --
        (115, 12, 37),  # dark red
        (30, 90, 37),  # dark green
        (127, 112, 12),  # dark yellow
        (0, 65, 100),  # dark blue
        (122, 65, 24),  # dark orange
        (72, 15, 90),  # dark purple
        (35, 120, 120),  # dark cyan
        (120, 25, 115),  # dark magenta
        (105, 122, 30),  # dark lime
        (125, 95, 95),  # dark pink
        (0, 64, 64),  # dark teal
        (115, 95, 127),  # dark lavender
        (85, 55, 20),  # dark brown
        (127, 125, 100),  # dark beige
        (64, 0, 0),  # dark maroon
        (85, 127, 97),  # dark mint
        (64, 64, 0),  # dark olive
        (127, 107, 90),  # dark coral
        (0, 0, 64),  # dark navy
        (64, 64, 64),  # dark grey
    ]
    def __init__(self,arr):
        # NOTE(review): constructor is a stub -- `arr` (presumably the raw
        # segmentation array) is currently ignored; confirm intended use.
        pass
| 28.245614 | 40 | 0.418634 |
import numpy as np
class Segmap(object):
DEFAULT_SEGMENT_COLORS = [
(0, 0, 0),
(230, 25, 75),
(60, 180, 75),
(255, 225, 25),
(0, 130, 200),
(245, 130, 48),
(145, 30, 180),
(70, 240, 240),
(240, 50, 230),
(210, 245, 60),
(250, 190, 190),
(0, 128, 128),
(230, 190, 255),
(170, 110, 40),
(255, 250, 200),
(128, 0, 0),
(170, 255, 195),
(128, 128, 0),
(255, 215, 180),
(0, 0, 128),
(128, 128, 128),
(255, 255, 255),
(115, 12, 37),
(30, 90, 37),
(127, 112, 12),
(0, 65, 100),
(122, 65, 24),
(72, 15, 90),
(35, 120, 120),
(120, 25, 115),
(105, 122, 30),
(125, 95, 95),
(0, 64, 64),
(115, 95, 127),
(85, 55, 20),
(127, 125, 100),
(64, 0, 0),
(85, 127, 97),
(64, 64, 0),
(127, 107, 90),
(0, 0, 64),
(64, 64, 64),
]
def __init__(self,arr):
pass
| true | true |
1c37d4c8dca32f72c89cbd0b0de7d852eb42ca60 | 10,464 | py | Python | tensor2tensor/mesh_tensorflow/mtf_layers_test.py | ReDeiPirati/tensor2tensor | 39f44893b82a5052c9eddba760fc4094d3d706bb | [
"Apache-2.0"
] | 4 | 2019-04-20T23:28:41.000Z | 2021-01-03T03:21:43.000Z | tensor2tensor/mesh_tensorflow/mtf_layers_test.py | ReDeiPirati/tensor2tensor | 39f44893b82a5052c9eddba760fc4094d3d706bb | [
"Apache-2.0"
] | null | null | null | tensor2tensor/mesh_tensorflow/mtf_layers_test.py | ReDeiPirati/tensor2tensor | 39f44893b82a5052c9eddba760fc4094d3d706bb | [
"Apache-2.0"
] | 1 | 2019-01-29T18:44:17.000Z | 2019-01-29T18:44:17.000Z | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Mesh TensorFlow layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensor2tensor.layers import common_layers
from tensor2tensor.mesh_tensorflow import mesh_tensorflow as mtf
from tensor2tensor.mesh_tensorflow import mtf_layers
from tensor2tensor.mesh_tensorflow import placement_mesh_impl
import tensorflow as tf
class MtfLayersTest(parameterized.TestCase, tf.test.TestCase):
  @parameterized.parameters(
      (4, 2),
  )
  @parameterized.parameters(
      (4, True),
      (8, False),
  )
  def testDense(self, units, use_bias):
    """Checks mtf_layers.dense against tf.keras.layers.Dense (shape only)."""
    batch = 2
    channels = 3
    inputs = tf.random_normal([batch, channels])
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")
    batch_dim = mtf.Dimension("batch", batch)
    channels_dim = mtf.Dimension("channels", channels)
    depth_dim = mtf.Dimension("depth", units)
    mtf_inputs = mtf.import_tf_tensor(
        mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
    mtf_outputs = mtf_layers.dense(mtf_inputs,
                                   output_dim=depth_dim,
                                   reduced_dims=[channels_dim],
                                   activation=mtf.relu,
                                   use_bias=use_bias)
    # Lower the mesh graph onto a single-device placement so the mtf ops can
    # be evaluated as ordinary TensorFlow.
    mesh_impl = placement_mesh_impl.PlacementMeshImpl(
        shape=[], layout={}, devices=[""])
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
    expected_outputs = tf.keras.layers.Dense(units=units,
                                             activation=tf.nn.relu,
                                             use_bias=use_bias)(inputs)
    tf_group = lowering.copy_masters_to_slices()
    init = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init)
      sess.run(tf_group)
      actual, expected = sess.run([actual_outputs, expected_outputs])
    # Only shapes are compared: the two layers have independently randomly
    # initialized weights, so values cannot match.
    self.assertEqual(actual.shape, expected.shape)
  def testLayerNorm(self):
    """Checks mtf_layers.layer_norm against common_layers.layer_norm."""
    batch = 2
    channels = 3
    inputs = tf.random_normal([batch, channels])
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")
    batch_dim = mtf.Dimension("batch", batch)
    channels_dim = mtf.Dimension("channels", channels)
    mtf_inputs = mtf.import_tf_tensor(
        mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
    mtf_outputs = mtf_layers.layer_norm(mtf_inputs,
                                        dim=channels_dim)
    # Single-device placement lowering turns the mtf graph into plain TF.
    mesh_impl = placement_mesh_impl.PlacementMeshImpl(
        shape=[], layout={}, devices=[""])
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
    expected_outputs = common_layers.layer_norm(inputs)
    tf_group = lowering.copy_masters_to_slices()
    init = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init)
      sess.run(tf_group)
      actual, expected = sess.run([actual_outputs, expected_outputs])
    # NOTE(review): only shapes are compared; if both implementations start
    # from scale=1/bias=0 the values should agree too -- assertAllClose would
    # be a stronger check. Confirm before tightening.
    self.assertEqual(actual.shape, expected.shape)
  def testWeightsNonzero(self):
    """Checks mtf_layers.weights_nonzero value-for-value against common_layers."""
    inputs = tf.constant([[3, 1, 0], [1, 0, 0]])
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")
    batch_dim = mtf.Dimension("batch", inputs.shape.as_list()[0])
    channels_dim = mtf.Dimension("channels", inputs.shape.as_list()[1])
    mtf_inputs = mtf.import_tf_tensor(
        mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
    mtf_outputs = mtf_layers.weights_nonzero(mtf_inputs)
    # Single-device placement lowering turns the mtf graph into plain TF.
    mesh_impl = placement_mesh_impl.PlacementMeshImpl(
        shape=[], layout={}, devices=[""])
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
    expected_outputs = common_layers.weights_nonzero(inputs)
    tf_group = lowering.copy_masters_to_slices()
    with self.test_session() as sess:
      sess.run(tf_group)
      actual, expected = sess.run([actual_outputs, expected_outputs])
    # Deterministic op (no variables), so exact element-wise equality holds.
    self.assertAllEqual(actual, expected)
  def testDenseReluDense(self):
    """Smoke-tests mtf_layers.dense_relu_dense (output shape == input shape)."""
    batch = 2
    channels = 3
    hidden = 5
    inputs = tf.random_normal([batch, channels])
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")
    batch_dim = mtf.Dimension("batch", batch)
    channels_dim = mtf.Dimension("channels", channels)
    hidden_dim = mtf.Dimension("hidden", hidden)
    mtf_inputs = mtf.import_tf_tensor(
        mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
    mtf_outputs = mtf_layers.dense_relu_dense(mtf_inputs,
                                              hidden_channels=hidden_dim)
    # Single-device placement lowering turns the mtf graph into plain TF.
    mesh_impl = placement_mesh_impl.PlacementMeshImpl(
        shape=[], layout={}, devices=[""])
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
    tf_group = lowering.copy_masters_to_slices()
    init = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init)
      sess.run(tf_group)
      actual = sess.run(actual_outputs)
    # dense_relu_dense projects back to the input channel size, so the
    # output shape must equal the input shape.
    self.assertEqual(actual.shape, inputs.shape)
  @parameterized.parameters(
      (4, 2),
  )
  def testMaskedLocalAttention1D(self, kv_channels, heads):
    """masked_local_attention_1d output must be (batch, length_q, channels)."""
    batch = 2
    length_q = 16
    length_m = 16
    channels = 3
    query = tf.random_normal([batch, length_q, channels])
    memory = tf.random_normal([batch, length_m, channels])
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")
    batch_dim = mtf.Dimension("batch", batch)
    length_q_dim = mtf.Dimension("length_q", length_q)
    length_m_dim = mtf.Dimension("length_m", length_m)
    channels_dim = mtf.Dimension("channels", channels)
    kv_channels_dim = mtf.Dimension("kv_channels", kv_channels)
    heads_dim = mtf.Dimension("heads", heads)
    mtf_query = mtf.import_tf_tensor(
        mesh, query,
        shape=mtf.Shape([batch_dim, length_q_dim, channels_dim]))
    mtf_memory = mtf.import_tf_tensor(
        mesh, memory,
        shape=mtf.Shape([batch_dim, length_m_dim, channels_dim]))
    mtf_outputs = mtf_layers.masked_local_attention_1d(
        mtf_query,
        mtf_memory,
        kv_channels=kv_channels_dim,
        heads=heads_dim,
        block_length=2)
    # Lower onto a trivial single-device placement mesh (no parallelism).
    mesh_impl = placement_mesh_impl.PlacementMeshImpl(
        shape=[], layout={}, devices=[""])
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
    tf_group = lowering.copy_masters_to_slices()
    init = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init)
      sess.run(tf_group)
      actual = sess.run(actual_outputs)
    self.assertEqual(actual.shape, (batch, length_q, channels))
  @parameterized.parameters(
      (2, 4, 5, 7, 3, 1),
  )
  def testDotProductAttention(
      self, batch, heads, length_q, length_kv, depth_k, depth_v):
    """dot_product_attention output must be (batch, heads, length_q, depth_v)."""
    query = tf.random_normal([batch, heads, length_q, depth_k])
    key = tf.random_normal([batch, heads, length_kv, depth_k])
    value = tf.random_normal([batch, heads, length_kv, depth_v])
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")
    batch_dim = mtf.Dimension("batch", batch)
    heads_dim = mtf.Dimension("heads", heads)
    length_q_dim = mtf.Dimension("length_q", length_q)
    length_kv_dim = mtf.Dimension("length_kv", length_kv)
    depth_k_dim = mtf.Dimension("depth_k", depth_k)
    depth_v_dim = mtf.Dimension("depth_v", depth_v)
    mtf_query = mtf.import_tf_tensor(
        mesh, query,
        shape=mtf.Shape(
            [batch_dim, heads_dim, length_q_dim, depth_k_dim]))
    mtf_key = mtf.import_tf_tensor(
        mesh, key,
        shape=mtf.Shape(
            [batch_dim, heads_dim, length_kv_dim, depth_k_dim]))
    mtf_value = mtf.import_tf_tensor(
        mesh, value,
        shape=mtf.Shape(
            [batch_dim, heads_dim, length_kv_dim, depth_v_dim]))
    mtf_outputs = mtf_layers.dot_product_attention(
        mtf_query,
        mtf_key,
        mtf_value,
        mask=None)
    # Lower onto a trivial single-device placement mesh (no parallelism).
    mesh_impl = placement_mesh_impl.PlacementMeshImpl(
        shape=[], layout={}, devices=[""])
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
    tf_group = lowering.copy_masters_to_slices()
    init = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init)
      sess.run(tf_group)
      actual = sess.run(actual_outputs)
    self.assertEqual(actual.shape, (batch, heads, length_q, depth_v))
@parameterized.parameters(
(16, 4),
(32, 8),
)
def testMultiheadAttention(self, kv_channels, heads):
batch = 2
length = 8
channels = 3
query = tf.random_normal([batch, length, channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
length_dim = mtf.Dimension("length", length)
channels_dim = mtf.Dimension("channels", channels)
kv_channels_dim = mtf.Dimension("kv_channels", kv_channels)
heads_dim = mtf.Dimension("heads", heads)
mtf_query = mtf.import_tf_tensor(
mesh, query,
shape=mtf.Shape([batch_dim, length_dim, channels_dim]))
mtf_outputs = mtf_layers.multihead_attention(
mtf_query,
memory_antecedent=None,
mask=None,
kv_channels=kv_channels_dim,
heads=heads_dim)
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
sess.run(tf_group)
actual = sess.run(actual_outputs)
self.assertEqual(actual.shape, query.shape)
if __name__ == "__main__":
  # Delegate discovery and execution to the TensorFlow test runner.
  tf.test.main()
| 35.713311 | 74 | 0.676223 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensor2tensor.layers import common_layers
from tensor2tensor.mesh_tensorflow import mesh_tensorflow as mtf
from tensor2tensor.mesh_tensorflow import mtf_layers
from tensor2tensor.mesh_tensorflow import placement_mesh_impl
import tensorflow as tf
class MtfLayersTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for mtf_layers.
  Each case builds a small mesh-tensorflow graph, lowers it onto a trivial
  single-device placement mesh (shape=[], i.e. no model parallelism), and
  checks the exported TF tensor's shape or values against the plain
  TensorFlow / tensor2tensor reference implementation.
  """
  @parameterized.parameters(
      (4, True),
      (8, False),
  )
  def testDense(self, units, use_bias):
    """mtf_layers.dense output shape must match tf.keras.layers.Dense."""
    batch = 2
    channels = 3
    inputs = tf.random_normal([batch, channels])
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")
    batch_dim = mtf.Dimension("batch", batch)
    channels_dim = mtf.Dimension("channels", channels)
    depth_dim = mtf.Dimension("depth", units)
    mtf_inputs = mtf.import_tf_tensor(
        mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
    mtf_outputs = mtf_layers.dense(mtf_inputs,
                                   output_dim=depth_dim,
                                   reduced_dims=[channels_dim],
                                   activation=mtf.relu,
                                   use_bias=use_bias)
    mesh_impl = placement_mesh_impl.PlacementMeshImpl(
        shape=[], layout={}, devices=[""])
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
    expected_outputs = tf.keras.layers.Dense(units=units,
                                             activation=tf.nn.relu,
                                             use_bias=use_bias)(inputs)
    tf_group = lowering.copy_masters_to_slices()
    init = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init)
      sess.run(tf_group)
      actual, expected = sess.run([actual_outputs, expected_outputs])
    self.assertEqual(actual.shape, expected.shape)
  def testLayerNorm(self):
    """mtf_layers.layer_norm output shape must match common_layers.layer_norm."""
    batch = 2
    channels = 3
    inputs = tf.random_normal([batch, channels])
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")
    batch_dim = mtf.Dimension("batch", batch)
    channels_dim = mtf.Dimension("channels", channels)
    mtf_inputs = mtf.import_tf_tensor(
        mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
    mtf_outputs = mtf_layers.layer_norm(mtf_inputs,
                                        dim=channels_dim)
    mesh_impl = placement_mesh_impl.PlacementMeshImpl(
        shape=[], layout={}, devices=[""])
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
    expected_outputs = common_layers.layer_norm(inputs)
    tf_group = lowering.copy_masters_to_slices()
    init = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init)
      sess.run(tf_group)
      actual, expected = sess.run([actual_outputs, expected_outputs])
    self.assertEqual(actual.shape, expected.shape)
  def testWeightsNonzero(self):
    """mtf_layers.weights_nonzero must match common_layers.weights_nonzero."""
    inputs = tf.constant([[3, 1, 0], [1, 0, 0]])
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")
    batch_dim = mtf.Dimension("batch", inputs.shape.as_list()[0])
    channels_dim = mtf.Dimension("channels", inputs.shape.as_list()[1])
    mtf_inputs = mtf.import_tf_tensor(
        mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
    mtf_outputs = mtf_layers.weights_nonzero(mtf_inputs)
    mesh_impl = placement_mesh_impl.PlacementMeshImpl(
        shape=[], layout={}, devices=[""])
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
    expected_outputs = common_layers.weights_nonzero(inputs)
    tf_group = lowering.copy_masters_to_slices()
    with self.test_session() as sess:
      sess.run(tf_group)
      actual, expected = sess.run([actual_outputs, expected_outputs])
    self.assertAllEqual(actual, expected)
  def testDenseReluDense(self):
    """dense_relu_dense must preserve the input's (batch, channels) shape."""
    batch = 2
    channels = 3
    hidden = 5
    inputs = tf.random_normal([batch, channels])
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")
    batch_dim = mtf.Dimension("batch", batch)
    channels_dim = mtf.Dimension("channels", channels)
    hidden_dim = mtf.Dimension("hidden", hidden)
    mtf_inputs = mtf.import_tf_tensor(
        mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
    mtf_outputs = mtf_layers.dense_relu_dense(mtf_inputs,
                                              hidden_channels=hidden_dim)
    mesh_impl = placement_mesh_impl.PlacementMeshImpl(
        shape=[], layout={}, devices=[""])
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
    tf_group = lowering.copy_masters_to_slices()
    init = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init)
      sess.run(tf_group)
      actual = sess.run(actual_outputs)
    self.assertEqual(actual.shape, inputs.shape)
  @parameterized.parameters(
      (4, 2),
  )
  def testMaskedLocalAttention1D(self, kv_channels, heads):
    """masked_local_attention_1d output must be (batch, length_q, channels)."""
    batch = 2
    length_q = 16
    length_m = 16
    channels = 3
    query = tf.random_normal([batch, length_q, channels])
    memory = tf.random_normal([batch, length_m, channels])
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")
    batch_dim = mtf.Dimension("batch", batch)
    length_q_dim = mtf.Dimension("length_q", length_q)
    length_m_dim = mtf.Dimension("length_m", length_m)
    channels_dim = mtf.Dimension("channels", channels)
    kv_channels_dim = mtf.Dimension("kv_channels", kv_channels)
    heads_dim = mtf.Dimension("heads", heads)
    mtf_query = mtf.import_tf_tensor(
        mesh, query,
        shape=mtf.Shape([batch_dim, length_q_dim, channels_dim]))
    mtf_memory = mtf.import_tf_tensor(
        mesh, memory,
        shape=mtf.Shape([batch_dim, length_m_dim, channels_dim]))
    mtf_outputs = mtf_layers.masked_local_attention_1d(
        mtf_query,
        mtf_memory,
        kv_channels=kv_channels_dim,
        heads=heads_dim,
        block_length=2)
    mesh_impl = placement_mesh_impl.PlacementMeshImpl(
        shape=[], layout={}, devices=[""])
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
    tf_group = lowering.copy_masters_to_slices()
    init = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init)
      sess.run(tf_group)
      actual = sess.run(actual_outputs)
    self.assertEqual(actual.shape, (batch, length_q, channels))
  @parameterized.parameters(
      (2, 4, 5, 7, 3, 1),
  )
  def testDotProductAttention(
      self, batch, heads, length_q, length_kv, depth_k, depth_v):
    """dot_product_attention output must be (batch, heads, length_q, depth_v)."""
    query = tf.random_normal([batch, heads, length_q, depth_k])
    key = tf.random_normal([batch, heads, length_kv, depth_k])
    value = tf.random_normal([batch, heads, length_kv, depth_v])
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")
    batch_dim = mtf.Dimension("batch", batch)
    heads_dim = mtf.Dimension("heads", heads)
    length_q_dim = mtf.Dimension("length_q", length_q)
    length_kv_dim = mtf.Dimension("length_kv", length_kv)
    depth_k_dim = mtf.Dimension("depth_k", depth_k)
    depth_v_dim = mtf.Dimension("depth_v", depth_v)
    mtf_query = mtf.import_tf_tensor(
        mesh, query,
        shape=mtf.Shape(
            [batch_dim, heads_dim, length_q_dim, depth_k_dim]))
    mtf_key = mtf.import_tf_tensor(
        mesh, key,
        shape=mtf.Shape(
            [batch_dim, heads_dim, length_kv_dim, depth_k_dim]))
    mtf_value = mtf.import_tf_tensor(
        mesh, value,
        shape=mtf.Shape(
            [batch_dim, heads_dim, length_kv_dim, depth_v_dim]))
    mtf_outputs = mtf_layers.dot_product_attention(
        mtf_query,
        mtf_key,
        mtf_value,
        mask=None)
    mesh_impl = placement_mesh_impl.PlacementMeshImpl(
        shape=[], layout={}, devices=[""])
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
    tf_group = lowering.copy_masters_to_slices()
    init = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init)
      sess.run(tf_group)
      actual = sess.run(actual_outputs)
    self.assertEqual(actual.shape, (batch, heads, length_q, depth_v))
  @parameterized.parameters(
      (16, 4),
      (32, 8),
  )
  def testMultiheadAttention(self, kv_channels, heads):
    """Self-attention (no memory antecedent) must keep the query's shape."""
    batch = 2
    length = 8
    channels = 3
    query = tf.random_normal([batch, length, channels])
    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")
    batch_dim = mtf.Dimension("batch", batch)
    length_dim = mtf.Dimension("length", length)
    channels_dim = mtf.Dimension("channels", channels)
    kv_channels_dim = mtf.Dimension("kv_channels", kv_channels)
    heads_dim = mtf.Dimension("heads", heads)
    mtf_query = mtf.import_tf_tensor(
        mesh, query,
        shape=mtf.Shape([batch_dim, length_dim, channels_dim]))
    mtf_outputs = mtf_layers.multihead_attention(
        mtf_query,
        memory_antecedent=None,
        mask=None,
        kv_channels=kv_channels_dim,
        heads=heads_dim)
    mesh_impl = placement_mesh_impl.PlacementMeshImpl(
        shape=[], layout={}, devices=[""])
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
    tf_group = lowering.copy_masters_to_slices()
    init = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init)
      sess.run(tf_group)
      actual = sess.run(actual_outputs)
    self.assertEqual(actual.shape, query.shape)
if __name__ == "__main__":
  # Delegate discovery and execution to the TensorFlow test runner.
  tf.test.main()
| true | true |
1c37d5112ebf0b75c7c97a7889e7af24572cb0ae | 13,851 | py | Python | examples/domain_adaptation/digits/mdd.py | wuaodi/Transfer-Learning-Library | 29a946143e63b66a1da9ffa685bfe95f5640028a | [
"MIT"
] | 1 | 2021-04-08T00:13:13.000Z | 2021-04-08T00:13:13.000Z | examples/domain_adaptation/digits/mdd.py | wuaodi/Transfer-Learning-Library | 29a946143e63b66a1da9ffa685bfe95f5640028a | [
"MIT"
] | null | null | null | examples/domain_adaptation/digits/mdd.py | wuaodi/Transfer-Learning-Library | 29a946143e63b66a1da9ffa685bfe95f5640028a | [
"MIT"
] | null | null | null | import random
import time
import warnings
import sys
import argparse
import shutil
import os.path as osp
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim import Adam
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
import torchvision.transforms as T
import torch.nn.functional as F
sys.path.append('../../..')
from dalib.adaptation.mdd import ClassificationMarginDisparityDiscrepancy\
as MarginDisparityDiscrepancy, GeneralModule
import common.vision.datasets.digits as datasets
import common.vision.models.digits as models
from common.vision.transforms import ResizeImage
from common.utils.data import ForeverDataIterator
from common.utils.metric import accuracy, ConfusionMatrix
from common.utils.meter import AverageMeter, ProgressMeter
from common.utils.logger import CompleteLogger
from common.utils.analysis import collect_feature, tsne, a_distance
# Global compute device: GPU when available. Batches and the MDD loss module
# are moved onto it by main()/train()/validate().
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args: argparse.Namespace):
    """Entry point: build the digits datasets and MDD model, then run the
    phase selected by ``args.phase`` ('train', 'test' or 'analysis').
    """
    logger = CompleteLogger(args.log, args.phase)
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    cudnn.benchmark = True
    # Data loading code: grayscale or RGB, normalized to [-1, 1].
    if args.num_channels == 3:
        mode = 'RGB'
        mean = std = [0.5, 0.5, 0.5]
    else:
        mode = 'L'
        mean = std = [0.5, ]
    normalize = T.Normalize(mean=mean, std=std)
    train_transform = T.Compose([
        ResizeImage(args.image_size),
        # T.RandomRotation(10), # TODO need results
        T.ToTensor(),
        normalize
    ])
    val_transform = T.Compose([
        ResizeImage(args.image_size),
        T.ToTensor(),
        normalize
    ])
    source_dataset = datasets.__dict__[args.source]
    train_source_dataset = source_dataset(root=args.source_root, mode=mode, download=True, transform=train_transform)
    train_source_loader = DataLoader(train_source_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    target_dataset = datasets.__dict__[args.target]
    train_target_dataset = target_dataset(root=args.target_root, mode=mode, download=True, transform=train_transform)
    train_target_loader = DataLoader(train_target_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    val_dataset = target_dataset(root=args.target_root, mode=mode, split='test', download=True, transform=val_transform)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
    # Endless iterators: train() draws a fixed number of batches per epoch.
    train_source_iter = ForeverDataIterator(train_source_loader)
    train_target_iter = ForeverDataIterator(train_target_loader)
    # create model: backbone + bottleneck + main head + adversarial head
    print("=> using pre-trained model '{}'".format(args.arch))
    arch = models.__dict__[args.arch]()
    bottleneck = nn.Sequential(
        nn.Flatten(),
        nn.Linear(arch.bottleneck_dim, arch.bottleneck_dim),
        nn.BatchNorm1d(arch.bottleneck_dim),
        nn.ReLU(),
        nn.Dropout(0.5)
    )
    head = arch.head()
    adv_head = arch.head()
    # FIX: move the classifier onto `device`; previously it stayed on CPU while
    # batches and the MDD loss were moved to CUDA, crashing on GPU machines.
    classifier = GeneralModule(arch.backbone(), arch.num_classes, bottleneck,
                               head, adv_head, finetune=False).to(device)
    mdd = MarginDisparityDiscrepancy(args.margin).to(device)
    # define optimizer and lr scheduler
    optimizer = Adam(classifier.get_parameters(), args.lr, betas=args.betas, weight_decay=args.wd)
    # NOTE(review): train() never calls lr_scheduler.step() (the call there is
    # commented out), so this schedule has no effect — confirm intended.
    lr_scheduler = LambdaLR(optimizer, lambda x: args.lr * (1. + args.lr_gamma * float(x)) ** (-args.lr_decay))
    # resume from the best checkpoint
    if args.phase != 'train':
        checkpoint = torch.load(logger.get_checkpoint_path('best'), map_location='cpu')
        classifier.load_state_dict(checkpoint)
    # analyze the model: t-SNE visualization and A-distance
    if args.phase == 'analysis':
        # extract features from both domains
        feature_extractor = torch.nn.Sequential(classifier.backbone, classifier.bottleneck).to(device)
        source_feature = collect_feature(train_source_loader, feature_extractor, device, 10)
        target_feature = collect_feature(val_loader, feature_extractor, device, 10)
        # plot t-SNE
        tSNE_filename = osp.join(logger.visualize_directory, 'TSNE.png')
        tsne.visualize(source_feature, target_feature, tSNE_filename)
        print("Saving t-SNE to", tSNE_filename)
        # calculate A-distance, which is a measure for distribution discrepancy
        A_distance = a_distance.calculate(source_feature, target_feature, device)
        print("A-distance =", A_distance)
        return
    if args.phase == 'test':
        acc1 = validate(val_loader, classifier, args)
        print(acc1)
        return
    # start training
    best_acc1 = 0.
    for epoch in range(args.epochs):
        print(lr_scheduler.get_lr())
        # train for one epoch
        train(train_source_iter, train_target_iter, classifier, mdd, optimizer,
              lr_scheduler, epoch, args)
        # evaluate on validation set
        acc1 = validate(val_loader, classifier, args)
        # remember best acc@1 and save checkpoint
        torch.save(classifier.state_dict(), logger.get_checkpoint_path('latest'))
        if acc1 > best_acc1:
            shutil.copy(logger.get_checkpoint_path('latest'), logger.get_checkpoint_path('best'))
        best_acc1 = max(acc1, best_acc1)
    print("best_acc1 = {:3.1f}".format(best_acc1))
    logger.close()
def train(train_source_iter: ForeverDataIterator, train_target_iter: ForeverDataIterator,
          model, mdd: MarginDisparityDiscrepancy,
          optimizer: Adam, lr_scheduler: LambdaLR, epoch: int, args: argparse.Namespace):
    """Run one epoch (args.iters_per_epoch iterations) of MDD training.
    Each step draws one source and one target batch, minimizes source
    cross-entropy plus the (negated) margin disparity discrepancy, and logs
    running meters every args.print_freq iterations.
    """
    batch_time = AverageMeter('Time', ':4.2f')
    data_time = AverageMeter('Data', ':3.1f')
    losses = AverageMeter('Loss', ':3.2f')
    trans_losses = AverageMeter('Trans Loss', ':3.2f')
    cls_accs = AverageMeter('Cls Acc', ':3.1f')
    tgt_accs = AverageMeter('Tgt Acc', ':3.1f')
    cls_adv_accs = AverageMeter('Cls Adv Acc', ':3.1f')
    tgt_adv_accs = AverageMeter('Tgt Adv Acc', ':3.1f')
    progress = ProgressMeter(
        args.iters_per_epoch,
        [batch_time, data_time, losses, trans_losses, cls_accs, tgt_accs, cls_adv_accs, tgt_adv_accs],
        prefix="Epoch: [{}]".format(epoch))
    # switch to train mode
    model.train()
    mdd.train()
    end = time.time()
    for i in range(args.iters_per_epoch):
        x_s, labels_s = next(train_source_iter)
        x_t, labels_t = next(train_target_iter)
        x_s = x_s.to(device)
        x_t = x_t.to(device)
        labels_s = labels_s.to(device)
        labels_t = labels_t.to(device)
        # measure data loading time
        data_time.update(time.time() - end)
        # compute output: one forward pass over the concatenated domains,
        # then split back into source/target halves for each head
        x = torch.cat((x_s, x_t), dim=0)
        outputs, outputs_adv = model(x)
        y_s, y_t = outputs.chunk(2, dim=0)
        y_s_adv, y_t_adv = outputs_adv.chunk(2, dim=0)
        # compute cross entropy loss on source domain
        cls_loss = F.cross_entropy(y_s, labels_s)
        # compute margin disparity discrepancy between domains
        # for adversarial classifier, minimize negative mdd is equal to maximize mdd
        transfer_loss = -mdd(y_s, y_s_adv, y_t, y_t_adv)
        loss = cls_loss + transfer_loss * args.trade_off
        # presumably advances the model's internal iteration schedule
        # (e.g. gradient-reversal coefficient) — confirm in GeneralModule
        model.step()
        cls_acc = accuracy(y_s, labels_s)[0]
        tgt_acc = accuracy(y_t, labels_t)[0]
        cls_adv_acc = accuracy(y_s_adv, labels_s)[0]
        tgt_adv_acc = accuracy(y_t_adv, labels_t)[0]
        losses.update(loss.item(), x_s.size(0))
        cls_accs.update(cls_acc.item(), x_s.size(0))
        tgt_accs.update(tgt_acc.item(), x_t.size(0))
        cls_adv_accs.update(cls_adv_acc.item(), x_s.size(0))
        tgt_adv_accs.update(tgt_adv_acc.item(), x_t.size(0))
        trans_losses.update(transfer_loss.item(), x_s.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # NOTE(review): the scheduler is deliberately not stepped, so the LR
        # stays at its initial value — confirm this is intended.
        # lr_scheduler.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.display(i)
def validate(val_loader: DataLoader, model, args: argparse.Namespace) -> float:
    """Evaluate `model` on `val_loader` and return the average top-1 accuracy."""
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(val_loader),
        [batch_time, losses, top1, top5],
        prefix='Test: ')
    # Inference mode: freezes dropout / batch-norm statistics.
    model.eval()
    confmat = None
    if args.per_class_eval:
        classes = val_loader.dataset.classes
        confmat = ConfusionMatrix(len(classes))
    with torch.no_grad():
        tic = time.time()
        for batch_idx, (images, target) in enumerate(val_loader):
            images = images.to(device)
            target = target.to(device)
            # The model returns a pair of outputs; only the first is scored.
            logits, _ = model(images)
            batch_loss = F.cross_entropy(logits, target)
            acc1, acc5 = accuracy(logits, target, topk=(1, 5))
            if confmat:
                confmat.update(target, logits.argmax(1))
            n = images.size(0)
            losses.update(batch_loss.item(), n)
            top1.update(acc1.item(), n)
            top5.update(acc5.item(), n)
            batch_time.update(time.time() - tic)
            tic = time.time()
            if batch_idx % args.print_freq == 0:
                progress.display(batch_idx)
    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))
    if confmat:
        print(confmat.format(classes))
    return top1.avg
if __name__ == '__main__':
    architecture_names = sorted(
        name for name in models.__dict__
        if name.islower() and not name.startswith("__")
        and callable(models.__dict__[name])
    )
    dataset_names = sorted(
        name for name in datasets.__dict__
        if not name.startswith("__") and callable(datasets.__dict__[name])
    )
    # FIX: description previously said "Source Only"; this script trains MDD.
    parser = argparse.ArgumentParser(description='MDD for Unsupervised Domain Adaptation')
    # dataset parameters
    parser.add_argument('source_root', help='root path of the source dataset')
    parser.add_argument('target_root', help='root path of the target dataset')
    parser.add_argument('-s', '--source', help='source domain(s)')
    parser.add_argument('-t', '--target', help='target domain(s)')
    parser.add_argument('--image-size', type=int, default=28,
                        help='the size of input image')
    parser.add_argument('--num-channels', default=1, choices=[1, 3],
                        type=int, help='the number of image channels')
    # model parameters
    parser.add_argument('-a', '--arch', metavar='ARCH', default='lenet',
                        choices=architecture_names,
                        help='backbone architecture: ' +
                             ' | '.join(architecture_names) +
                             ' (default: lenet)')
    parser.add_argument('--margin', type=float, default=4., help="margin gamma")
    parser.add_argument('--trade-off', default=1., type=float,
                        help='the trade-off hyper-parameter for transfer loss')
    # training parameters (help texts fixed to match the actual defaults)
    parser.add_argument('-b', '--batch-size', default=128, type=int,
                        metavar='N',
                        help='mini-batch size (default: 128)')
    parser.add_argument('--lr', '--learning-rate', default=1e-4, type=float,
                        metavar='LR', help='initial learning rate', dest='lr')
    parser.add_argument('--lr-gamma', default=0.0002, type=float,
                        help='parameter for lr scheduler')
    parser.add_argument('--lr-decay', default=0.75, type=float, help='parameter for lr scheduler')
    # FIX: type=float so command-line betas parse as numbers, not strings
    parser.add_argument('--betas', default=(0.9, 0.999), type=float, nargs='+', help='betas')
    parser.add_argument('--wd', '--weight-decay', default=0.0, type=float,
                        metavar='W', help='weight decay (default: 0.0)')
    parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
                        help='number of data loading workers (default: 2)')
    parser.add_argument('--epochs', default=100, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('-i', '--iters-per-epoch', default=500, type=int,
                        help='Number of iterations per epoch')
    parser.add_argument('-p', '--print-freq', default=100, type=int,
                        metavar='N', help='print frequency (default: 100)')
    parser.add_argument('--seed', default=None, type=int,
                        help='seed for initializing training. ')
    parser.add_argument('--per-class-eval', action='store_true',
                        help='whether output per-class accuracy during evaluation')
    parser.add_argument("--log", type=str, default='mdd',
                        help="Where to save logs, checkpoints and debugging images.")
    parser.add_argument("--phase", type=str, default='train', choices=['train', 'test', 'analysis'],
                        help="When phase is 'test', only test the model. "
                             "When phase is 'analysis', only analysis the model.")
    args = parser.parse_args()
    print(args)
    main(args)
| 41.10089 | 120 | 0.637571 | import random
import time
import warnings
import sys
import argparse
import shutil
import os.path as osp
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim import Adam
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
import torchvision.transforms as T
import torch.nn.functional as F
sys.path.append('../../..')
from dalib.adaptation.mdd import ClassificationMarginDisparityDiscrepancy\
as MarginDisparityDiscrepancy, GeneralModule
import common.vision.datasets.digits as datasets
import common.vision.models.digits as models
from common.vision.transforms import ResizeImage
from common.utils.data import ForeverDataIterator
from common.utils.metric import accuracy, ConfusionMatrix
from common.utils.meter import AverageMeter, ProgressMeter
from common.utils.logger import CompleteLogger
from common.utils.analysis import collect_feature, tsne, a_distance
# Global compute device: GPU when available. Batches and the MDD loss module
# are moved onto it by main()/train()/validate().
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args: argparse.Namespace):
    """Entry point: build the digits datasets and MDD model, then run the
    phase selected by ``args.phase`` ('train', 'test' or 'analysis').
    """
    logger = CompleteLogger(args.log, args.phase)
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    cudnn.benchmark = True
    # Data loading: grayscale or RGB, normalized to [-1, 1].
    if args.num_channels == 3:
        mode = 'RGB'
        mean = std = [0.5, 0.5, 0.5]
    else:
        mode = 'L'
        mean = std = [0.5, ]
    normalize = T.Normalize(mean=mean, std=std)
    train_transform = T.Compose([
        ResizeImage(args.image_size),
        # FIX: this entry had been mangled to a bare "()," which would crash
        # inside Compose at batch time; PIL images must be converted to tensors.
        T.ToTensor(),
        normalize
    ])
    val_transform = T.Compose([
        ResizeImage(args.image_size),
        T.ToTensor(),
        normalize
    ])
    source_dataset = datasets.__dict__[args.source]
    train_source_dataset = source_dataset(root=args.source_root, mode=mode, download=True, transform=train_transform)
    train_source_loader = DataLoader(train_source_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    target_dataset = datasets.__dict__[args.target]
    train_target_dataset = target_dataset(root=args.target_root, mode=mode, download=True, transform=train_transform)
    train_target_loader = DataLoader(train_target_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    val_dataset = target_dataset(root=args.target_root, mode=mode, split='test', download=True, transform=val_transform)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
    # Endless iterators: train() draws a fixed number of batches per epoch.
    train_source_iter = ForeverDataIterator(train_source_loader)
    train_target_iter = ForeverDataIterator(train_target_loader)
    # Create model: backbone + bottleneck + main head + adversarial head.
    print("=> using pre-trained model '{}'".format(args.arch))
    arch = models.__dict__[args.arch]()
    bottleneck = nn.Sequential(
        nn.Flatten(),
        nn.Linear(arch.bottleneck_dim, arch.bottleneck_dim),
        nn.BatchNorm1d(arch.bottleneck_dim),
        nn.ReLU(),
        nn.Dropout(0.5)
    )
    head = arch.head()
    adv_head = arch.head()
    # FIX: move the classifier onto `device`; previously it stayed on CPU while
    # batches and the MDD loss were moved to CUDA, crashing on GPU machines.
    classifier = GeneralModule(arch.backbone(), arch.num_classes, bottleneck,
                               head, adv_head, finetune=False).to(device)
    mdd = MarginDisparityDiscrepancy(args.margin).to(device)
    # Optimizer and LR schedule.
    optimizer = Adam(classifier.get_parameters(), args.lr, betas=args.betas, weight_decay=args.wd)
    # NOTE(review): train() never steps this scheduler, so the LR stays at its
    # initial value — confirm intended.
    lr_scheduler = LambdaLR(optimizer, lambda x: args.lr * (1. + args.lr_gamma * float(x)) ** (-args.lr_decay))
    # Resume from the best checkpoint for test/analysis phases.
    if args.phase != 'train':
        checkpoint = torch.load(logger.get_checkpoint_path('best'), map_location='cpu')
        classifier.load_state_dict(checkpoint)
    # Analysis phase: t-SNE visualization and A-distance between domains.
    if args.phase == 'analysis':
        feature_extractor = torch.nn.Sequential(classifier.backbone, classifier.bottleneck).to(device)
        source_feature = collect_feature(train_source_loader, feature_extractor, device, 10)
        target_feature = collect_feature(val_loader, feature_extractor, device, 10)
        tSNE_filename = osp.join(logger.visualize_directory, 'TSNE.png')
        tsne.visualize(source_feature, target_feature, tSNE_filename)
        print("Saving t-SNE to", tSNE_filename)
        A_distance = a_distance.calculate(source_feature, target_feature, device)
        print("A-distance =", A_distance)
        return
    if args.phase == 'test':
        acc1 = validate(val_loader, classifier, args)
        print(acc1)
        return
    # Training loop: keep the checkpoint with the best target accuracy.
    best_acc1 = 0.
    for epoch in range(args.epochs):
        print(lr_scheduler.get_lr())
        train(train_source_iter, train_target_iter, classifier, mdd, optimizer,
              lr_scheduler, epoch, args)
        acc1 = validate(val_loader, classifier, args)
        torch.save(classifier.state_dict(), logger.get_checkpoint_path('latest'))
        if acc1 > best_acc1:
            shutil.copy(logger.get_checkpoint_path('latest'), logger.get_checkpoint_path('best'))
        best_acc1 = max(acc1, best_acc1)
    print("best_acc1 = {:3.1f}".format(best_acc1))
    logger.close()
def train(train_source_iter: ForeverDataIterator, train_target_iter: ForeverDataIterator,
          model, mdd: MarginDisparityDiscrepancy,
          optimizer: Adam, lr_scheduler: LambdaLR, epoch: int, args: argparse.Namespace):
    """Run one epoch (args.iters_per_epoch iterations) of MDD training.
    Each step draws one source and one target batch, minimizes source
    cross-entropy plus the (negated) margin disparity discrepancy, and logs
    running meters every args.print_freq iterations.
    NOTE(review): lr_scheduler is accepted but never stepped here, so the LR
    stays at its initial value — confirm intended.
    """
    batch_time = AverageMeter('Time', ':4.2f')
    data_time = AverageMeter('Data', ':3.1f')
    losses = AverageMeter('Loss', ':3.2f')
    trans_losses = AverageMeter('Trans Loss', ':3.2f')
    cls_accs = AverageMeter('Cls Acc', ':3.1f')
    tgt_accs = AverageMeter('Tgt Acc', ':3.1f')
    cls_adv_accs = AverageMeter('Cls Adv Acc', ':3.1f')
    tgt_adv_accs = AverageMeter('Tgt Adv Acc', ':3.1f')
    progress = ProgressMeter(
        args.iters_per_epoch,
        [batch_time, data_time, losses, trans_losses, cls_accs, tgt_accs, cls_adv_accs, tgt_adv_accs],
        prefix="Epoch: [{}]".format(epoch))
    # Switch to train mode.
    model.train()
    mdd.train()
    end = time.time()
    for i in range(args.iters_per_epoch):
        x_s, labels_s = next(train_source_iter)
        x_t, labels_t = next(train_target_iter)
        x_s = x_s.to(device)
        x_t = x_t.to(device)
        labels_s = labels_s.to(device)
        labels_t = labels_t.to(device)
        # Measure data loading time.
        data_time.update(time.time() - end)
        # One forward pass over the concatenated domains, then split back
        # into source/target halves for the main and adversarial heads.
        x = torch.cat((x_s, x_t), dim=0)
        outputs, outputs_adv = model(x)
        y_s, y_t = outputs.chunk(2, dim=0)
        y_s_adv, y_t_adv = outputs_adv.chunk(2, dim=0)
        # Source-domain cross entropy.
        cls_loss = F.cross_entropy(y_s, labels_s)
        # Minimizing the negated MDD maximizes the discrepancy for the
        # adversarial head.
        transfer_loss = -mdd(y_s, y_s_adv, y_t, y_t_adv)
        loss = cls_loss + transfer_loss * args.trade_off
        # Presumably advances the model's internal iteration schedule
        # (e.g. gradient-reversal coefficient) — confirm in GeneralModule.
        model.step()
        cls_acc = accuracy(y_s, labels_s)[0]
        tgt_acc = accuracy(y_t, labels_t)[0]
        cls_adv_acc = accuracy(y_s_adv, labels_s)[0]
        tgt_adv_acc = accuracy(y_t_adv, labels_t)[0]
        losses.update(loss.item(), x_s.size(0))
        cls_accs.update(cls_acc.item(), x_s.size(0))
        tgt_accs.update(tgt_acc.item(), x_t.size(0))
        cls_adv_accs.update(cls_adv_acc.item(), x_s.size(0))
        tgt_adv_accs.update(tgt_adv_acc.item(), x_t.size(0))
        trans_losses.update(transfer_loss.item(), x_s.size(0))
        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Measure elapsed time.
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.display(i)
def validate(val_loader: DataLoader, model, args: argparse.Namespace) -> float:
    """Evaluate `model` on `val_loader` and return the average top-1 accuracy (%).

    Optionally accumulates a per-class confusion matrix when
    ``args.per_class_eval`` is set.
    """
    timer = AverageMeter('Time', ':6.3f')
    loss_meter = AverageMeter('Loss', ':.4e')
    acc1_meter = AverageMeter('Acc@1', ':6.2f')
    acc5_meter = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(val_loader),
        [timer, loss_meter, acc1_meter, acc5_meter],
        prefix='Test: ')

    # evaluation mode disables dropout / uses running BN statistics
    model.eval()

    confmat = None
    if args.per_class_eval:
        classes = val_loader.dataset.classes
        confmat = ConfusionMatrix(len(classes))

    with torch.no_grad():
        tick = time.time()
        for step, (images, target) in enumerate(val_loader):
            images = images.to(device)
            target = target.to(device)

            # model returns (logits, adversarial logits); only logits are scored
            output, _ = model(images)
            loss = F.cross_entropy(output, target)

            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            if confmat:
                confmat.update(target, output.argmax(1))

            n = images.size(0)
            loss_meter.update(loss.item(), n)
            acc1_meter.update(acc1.item(), n)
            acc5_meter.update(acc5.item(), n)

            timer.update(time.time() - tick)
            tick = time.time()

            if step % args.print_freq == 0:
                progress.display(step)

    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
          .format(top1=acc1_meter, top5=acc5_meter))
    if confmat:
        print(confmat.format(classes))

    return acc1_meter.avg
if __name__ == '__main__':
    # collect lowercase callable constructors exposed by the models/datasets modules
    architecture_names = sorted(
        name for name in models.__dict__
        if name.islower() and not name.startswith("__")
        and callable(models.__dict__[name])
    )
    dataset_names = sorted(
        name for name in datasets.__dict__
        if not name.startswith("__") and callable(datasets.__dict__[name])
    )
    parser = argparse.ArgumentParser(description='Source Only for Unsupervised Domain Adaptation')
    parser.add_argument('source_root', help='root path of the source dataset')
    parser.add_argument('target_root', help='root path of the target dataset')
    parser.add_argument('-s', '--source', help='source domain(s)')
    parser.add_argument('-t', '--target', help='target domain(s)')
    parser.add_argument('--image-size', type=int, default=28,
                        help='the size of input image')
    parser.add_argument('--num-channels', default=1, choices=[1, 3],
                        type=int, help='the number of image channels')
    parser.add_argument('-a', '--arch', metavar='ARCH', default='lenet',
                        choices=architecture_names,
                        help='backbone architecture: ' +
                             ' | '.join(architecture_names) +
                             ' (default: lenet)')
    parser.add_argument('--margin', type=float, default=4., help="margin gamma")
    parser.add_argument('--trade-off', default=1., type=float,
                        help='the trade-off hyper-parameter for transfer loss')
    # fix: help text previously claimed default 32 while the actual default is 128
    parser.add_argument('-b', '--batch-size', default=128, type=int,
                        metavar='N',
                        help='mini-batch size (default: 128)')
    parser.add_argument('--lr', '--learning-rate', default=1e-4, type=float,
                        metavar='LR', help='initial learning rate', dest='lr')
    parser.add_argument('--lr-gamma', default=0.0002, type=float,
                        help='parameter for lr scheduler')
    parser.add_argument('--lr-decay', default=0.75, type=float, help='parameter for lr scheduler')
    # fix: without an explicit type, betas given on the command line parsed as strings
    parser.add_argument('--betas', default=(0.9, 0.999), type=float, nargs='+', help='betas')
    # fix: help text previously claimed default 5e-4 while the actual default is 0.0
    parser.add_argument('--wd', '--weight-decay', default=0.0, type=float,
                        metavar='W', help='weight decay (default: 0.0)')
    # fix: help text previously claimed default 4 while the actual default is 2
    parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
                        help='number of data loading workers (default: 2)')
    parser.add_argument('--epochs', default=100, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('-i', '--iters-per-epoch', default=500, type=int,
                        help='Number of iterations per epoch')
    parser.add_argument('-p', '--print-freq', default=100, type=int,
                        metavar='N', help='print frequency (default: 100)')
    parser.add_argument('--seed', default=None, type=int,
                        help='seed for initializing training. ')
    parser.add_argument('--per-class-eval', action='store_true',
                        help='whether output per-class accuracy during evaluation')
    parser.add_argument("--log", type=str, default='mdd',
                        help="Where to save logs, checkpoints and debugging images.")
    parser.add_argument("--phase", type=str, default='train', choices=['train', 'test', 'analysis'],
                        help="When phase is 'test', only test the model."
                             "When phase is 'analysis', only analysis the model.")
    args = parser.parse_args()
    print(args)
    main(args)
| true | true |
1c37d528349c0e2504d4cd5a8e297d8f7bf3ae0e | 894 | py | Python | lib/btc.py | PiDisplay/PiDisplay | b0365ef76e24e7661ba5dcae48dcbb7262c3a57a | [
"MIT"
] | 3 | 2021-06-01T18:51:04.000Z | 2021-06-02T00:40:09.000Z | lib/btc.py | PiDisplay/PiDisplay | b0365ef76e24e7661ba5dcae48dcbb7262c3a57a | [
"MIT"
] | null | null | null | lib/btc.py | PiDisplay/PiDisplay | b0365ef76e24e7661ba5dcae48dcbb7262c3a57a | [
"MIT"
] | 2 | 2021-06-01T19:07:24.000Z | 2021-06-01T19:34:00.000Z | # A few basic helper functions for interfacing with Bitcoin Core
from os import getenv
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
class BtcRPC:
    """Thin convenience wrapper around a Bitcoin Core JSON-RPC connection.

    Credentials and endpoint are read from the BITCOIN_* environment
    variables at construction time.
    """

    def __init__(self):
        rpc_url = "http://%s:%s@%s:%s" % (
            getenv('BITCOIN_RPC_USER'),
            getenv('BITCOIN_RPC_PASS'),
            getenv('BITCOIN_IP'),
            getenv('BITCOIN_RPC_PORT'),
        )
        self.connection = AuthServiceProxy(rpc_url)

    def connection_locked(self):
        """Return True if the node answers RPC calls, False on a JSON-RPC error.

        NOTE(review): returns True when the connection *works*; the name
        suggests the opposite — confirm how callers interpret it.
        """
        try:
            self.get_blockchain_info()
        except JSONRPCException:
            return False
        return True

    def get_blockchain_info(self):
        """Return the raw `getblockchaininfo` RPC response."""
        return self.connection.getblockchaininfo()

    def get_sync_progress(self):
        """Return chain verification progress as a percentage."""
        info = self.connection.getblockchaininfo()
        return info["verificationprogress"] * 100

    def get_connection(self):
        """Expose the underlying RPC proxy object."""
        return self.connection
| 30.827586 | 82 | 0.674497 |
from os import getenv
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
class BtcRPC:
    """Helper wrapping a Bitcoin Core JSON-RPC connection (endpoint from env vars)."""
    def __init__(self):
        # Build the RPC URL from the BITCOIN_* environment variables.
        btcurl = "http://%s:%s@%s:%s" % (getenv('BITCOIN_RPC_USER'), getenv(
            'BITCOIN_RPC_PASS'), getenv('BITCOIN_IP'), getenv('BITCOIN_RPC_PORT'))
        self.connection = AuthServiceProxy(btcurl)
    def connection_locked(self):
        """Return True if the node answers RPC calls, False on a JSON-RPC error.

        NOTE(review): returns True when the connection *works*, which the
        name seems to contradict — confirm caller expectations.
        """
        try:
            self.get_blockchain_info()
            return True
        except JSONRPCException:
            return False
    def get_blockchain_info(self):
        """Return the raw `getblockchaininfo` RPC response."""
        response = self.connection.getblockchaininfo()
        return response
    def get_sync_progress(self):
        """Return chain verification progress as a percentage."""
        response = self.connection.getblockchaininfo()
        return response["verificationprogress"] * 100
    def get_connection(self):
        """Expose the underlying RPC proxy object."""
        return self.connection
| true | true |
1c37d6d656cce791bd21776a49639d19298b0aaa | 245 | py | Python | BI-IOS/semester-project/webapp/beecon/campaigns/libs/cinemas/DataSource.py | josefdolezal/fit-cvut | 6b6abea4232b946246d33290718d6c5007926b63 | [
"MIT"
] | 20 | 2016-05-15T10:39:53.000Z | 2022-03-29T00:06:06.000Z | BI-IOS/semester-project/webapp/beecon/campaigns/libs/cinemas/DataSource.py | josefdolezal/fit-cvut | 6b6abea4232b946246d33290718d6c5007926b63 | [
"MIT"
] | 3 | 2017-05-27T16:44:01.000Z | 2019-01-02T21:02:59.000Z | BI-IOS/semester-project/webapp/beecon/campaigns/libs/cinemas/DataSource.py | josefdolezal/fit-cvut | 6b6abea4232b946246d33290718d6c5007926b63 | [
"MIT"
] | 11 | 2018-08-22T21:16:32.000Z | 2021-04-10T22:42:34.000Z | import requests
class CinemaCity:
    """Minimal client that downloads a cinema schedule page over HTTP."""

    def __init__(self, url):
        self._compile_url(url)

    def movie_schedule(self):
        """Return the raw response body fetched from the configured URL."""
        return requests.get(self.url).content

    def _compile_url(self, url):
        # Currently a straight assignment; kept as a hook for URL building.
        self.url = url
| 16.333333 | 46 | 0.669388 | import requests
class CinemaCity:
    """Minimal client that downloads a cinema schedule page over HTTP."""
    def __init__( self, url ):
        self._compile_url( url )
    def movie_schedule( self ):
        """Return the raw response body fetched from the configured URL."""
        content = requests.get( self.url ).content
        return content
    def _compile_url( self, url ):
        # Currently a straight assignment; kept as a hook for URL building.
        self.url = url
| true | true |
1c37d8ccc994b2bd1d945e8270c804c1e0702049 | 182 | py | Python | 0-notes/job-search/Cracking the Coding Interview/C14Databases/questions/14.6-question.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | [
"MIT"
] | null | null | null | 0-notes/job-search/Cracking the Coding Interview/C14Databases/questions/14.6-question.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | [
"MIT"
] | null | null | null | 0-notes/job-search/Cracking the Coding Interview/C14Databases/questions/14.6-question.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | [
"MIT"
] | null | null | null | # 14.6 Entity-Relationship Diagram
# Draw an entity-relationship diagram for a database with companies, people, and professionals.
# professionals = people who work for companies
| 36.4 | 93 | 0.785714 | true | true | |
1c37da71d4bd83672d5725fe6e8e0b080dc05d5f | 20,444 | py | Python | monai/engines/evaluator.py | themantalope/MONAI | 9378e52b9c2283fa71cf8572b08f274071753053 | [
"Apache-2.0"
] | 3 | 2020-07-02T18:39:36.000Z | 2021-06-16T09:35:53.000Z | monai/engines/evaluator.py | themantalope/MONAI | 9378e52b9c2283fa71cf8572b08f274071753053 | [
"Apache-2.0"
] | 28 | 2020-06-26T12:47:52.000Z | 2020-09-08T00:33:42.000Z | monai/engines/evaluator.py | Nic-Ma/MONAI | f398298b5aadc076102261a687a158f6ac17ad1c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from torch.utils.data import DataLoader
from monai.config import IgniteInfo
from monai.engines.utils import IterationEvents, default_metric_cmp_fn, default_prepare_batch
from monai.engines.workflow import Workflow
from monai.inferers import Inferer, SimpleInferer
from monai.networks.utils import eval_mode, train_mode
from monai.transforms import Transform
from monai.utils import ForwardMode, ensure_tuple, min_version, optional_import
from monai.utils.enums import CommonKeys as Keys
from monai.utils.module import look_up_option
if TYPE_CHECKING:
from ignite.engine import Engine, EventEnum
from ignite.metrics import Metric
else:
Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")
Metric, _ = optional_import("ignite.metrics", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Metric")
EventEnum, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "EventEnum")
__all__ = ["Evaluator", "SupervisedEvaluator", "EnsembleEvaluator"]
class Evaluator(Workflow):
    """
    Base class for all kinds of evaluators, inherits from Workflow.
    Runs a single validation epoch over `val_data_loader` when `run()` is called.

    Args:
        device: an object representing the device on which to run.
        val_data_loader: Ignite engine use data_loader to run, must be Iterable or torch.DataLoader.
        epoch_length: number of iterations for one epoch, default to `len(val_data_loader)`.
        non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously.
        prepare_batch: function to parse image and label for current iteration.
        iteration_update: callable for every iteration, accepting `engine` and `batchdata`;
            if not provided, use `self._iteration()` instead.
        postprocessing: execute additional transformation for the model output data.
        key_val_metric: main metric to compare and save the checkpoint into files.
        additional_metrics: more Ignite metrics that also attach to Ignite Engine.
        metric_cmp_fn: function to compare current key metric with previous best key metric value,
            accepting (current_metric, previous_best) and returning bool; default "greater than".
        val_handlers: Ignite Event-Handlers to attach (CheckpointHandler, StatsHandler, etc.).
        amp: whether to enable auto-mixed-precision evaluation, default is False.
        mode: model forward mode during evaluation, 'eval' or 'train'
            (maps to `model.eval()` or `model.train()`), default to 'eval'.
        event_names: additional custom ignite events that will register to the engine.
        event_to_attr: a dictionary to map an event to a state attribute added to `engine.state`.
        decollate: whether to decollate the batch-first data to a list of data
            after model computation, default to `True`.
    """

    def __init__(
        self,
        device: torch.device,
        val_data_loader: Union[Iterable, DataLoader],
        epoch_length: Optional[int] = None,
        non_blocking: bool = False,
        prepare_batch: Callable = default_prepare_batch,
        iteration_update: Optional[Callable] = None,
        postprocessing: Optional[Transform] = None,
        key_val_metric: Optional[Dict[str, Metric]] = None,
        additional_metrics: Optional[Dict[str, Metric]] = None,
        metric_cmp_fn: Callable = default_metric_cmp_fn,
        val_handlers: Optional[Sequence] = None,
        amp: bool = False,
        mode: Union[ForwardMode, str] = ForwardMode.EVAL,
        event_names: Optional[List[Union[str, EventEnum]]] = None,
        event_to_attr: Optional[dict] = None,
        decollate: bool = True,
    ) -> None:
        super().__init__(
            device=device,
            max_epochs=1,
            data_loader=val_data_loader,
            epoch_length=epoch_length,
            non_blocking=non_blocking,
            prepare_batch=prepare_batch,
            iteration_update=iteration_update,
            postprocessing=postprocessing,
            key_metric=key_val_metric,
            additional_metrics=additional_metrics,
            metric_cmp_fn=metric_cmp_fn,
            handlers=val_handlers,
            amp=amp,
            event_names=event_names,
            event_to_attr=event_to_attr,
            decollate=decollate,
        )
        # fix: normalize *before* comparing. The original compared the raw
        # `mode` argument against ForwardMode members; if ForwardMode is a
        # plain Enum, a valid string such as 'eval' would never match and a
        # spurious ValueError would be raised. Comparing the normalized member
        # is correct in either case.
        mode = look_up_option(mode, ForwardMode)
        if mode == ForwardMode.EVAL:
            self.mode = eval_mode
        elif mode == ForwardMode.TRAIN:
            self.mode = train_mode
        else:
            raise ValueError(f"unsupported mode: {mode}, should be 'eval' or 'train'.")

    def run(self, global_epoch: int = 1) -> None:
        """
        Execute validation/evaluation based on Ignite Engine.

        Args:
            global_epoch: the overall epoch if during a training. evaluator engine can get it from trainer.
        """
        # init env value for current validation process
        self.state.max_epochs = global_epoch
        self.state.epoch = global_epoch - 1
        self.state.iteration = 0
        super().run()

    def get_validation_stats(self) -> Dict[str, float]:
        """Return the best key-metric value and the epoch it was achieved in."""
        return {"best_validation_metric": self.state.best_metric, "best_validation_epoch": self.state.best_metric_epoch}
class SupervisedEvaluator(Evaluator):
    """
    Standard supervised evaluation with image and (optional) label.

    Runs `network` forward over every validation batch and stores IMAGE,
    LABEL and PRED in `engine.state.output`. All constructor arguments other
    than `network` and `inferer` are forwarded unchanged to :class:`Evaluator`;
    `inferer` defaults to a plain single forward pass (`SimpleInferer`).
    """

    def __init__(
        self,
        device: torch.device,
        val_data_loader: Union[Iterable, DataLoader],
        network: torch.nn.Module,
        epoch_length: Optional[int] = None,
        non_blocking: bool = False,
        prepare_batch: Callable = default_prepare_batch,
        iteration_update: Optional[Callable] = None,
        inferer: Optional[Inferer] = None,
        postprocessing: Optional[Transform] = None,
        key_val_metric: Optional[Dict[str, Metric]] = None,
        additional_metrics: Optional[Dict[str, Metric]] = None,
        metric_cmp_fn: Callable = default_metric_cmp_fn,
        val_handlers: Optional[Sequence] = None,
        amp: bool = False,
        mode: Union[ForwardMode, str] = ForwardMode.EVAL,
        event_names: Optional[List[Union[str, EventEnum]]] = None,
        event_to_attr: Optional[dict] = None,
        decollate: bool = True,
    ) -> None:
        super().__init__(
            device=device,
            val_data_loader=val_data_loader,
            epoch_length=epoch_length,
            non_blocking=non_blocking,
            prepare_batch=prepare_batch,
            iteration_update=iteration_update,
            postprocessing=postprocessing,
            key_val_metric=key_val_metric,
            additional_metrics=additional_metrics,
            metric_cmp_fn=metric_cmp_fn,
            val_handlers=val_handlers,
            amp=amp,
            mode=mode,
            event_names=event_names,
            event_to_attr=event_to_attr,
            decollate=decollate,
        )
        self.network = network
        # default to a plain single forward pass when no inferer is given
        self.inferer = inferer if inferer is not None else SimpleInferer()

    def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """
        One validation step: prepare the batch, run inference (optionally under
        autocast), and fire the FORWARD/MODEL completed events.

        Stores IMAGE, LABEL and PRED in `engine.state.output` and returns it.

        Raises:
            ValueError: When ``batchdata`` is None.
        """
        if batchdata is None:
            raise ValueError("Must provide batch data for current iteration.")
        prepared = self.prepare_batch(batchdata, engine.state.device, engine.non_blocking)
        # prepare_batch returns either (inputs, targets) or
        # (inputs, targets, args, kwargs) for inferers that need extra arguments
        if len(prepared) == 2:
            inputs, targets = prepared
            args: Tuple = ()
            kwargs: Dict = {}
        else:
            inputs, targets, args, kwargs = prepared

        # put iteration inputs into engine.state
        engine.state.output = {Keys.IMAGE: inputs, Keys.LABEL: targets}

        # execute the forward pass under the configured eval/train mode
        with self.mode(self.network):
            if self.amp:
                with torch.cuda.amp.autocast():
                    engine.state.output[Keys.PRED] = self.inferer(inputs, self.network, *args, **kwargs)
            else:
                engine.state.output[Keys.PRED] = self.inferer(inputs, self.network, *args, **kwargs)

        engine.fire_event(IterationEvents.FORWARD_COMPLETED)
        engine.fire_event(IterationEvents.MODEL_COMPLETED)
        return engine.state.output
class EnsembleEvaluator(Evaluator):
    """
    Ensemble evaluation for multiple models.

    Runs every network in `networks` over each validation batch and stores the
    prediction of network ``i`` under ``pred_keys[i]`` in `engine.state.output`
    (alongside IMAGE and LABEL). `pred_keys` must match `networks` in length.
    All other constructor arguments are forwarded unchanged to
    :class:`Evaluator`; `inferer` defaults to `SimpleInferer`.
    """

    def __init__(
        self,
        device: torch.device,
        val_data_loader: Union[Iterable, DataLoader],
        networks: Sequence[torch.nn.Module],
        pred_keys: Sequence[str],
        epoch_length: Optional[int] = None,
        non_blocking: bool = False,
        prepare_batch: Callable = default_prepare_batch,
        iteration_update: Optional[Callable] = None,
        inferer: Optional[Inferer] = None,
        postprocessing: Optional[Transform] = None,
        key_val_metric: Optional[Dict[str, Metric]] = None,
        additional_metrics: Optional[Dict[str, Metric]] = None,
        metric_cmp_fn: Callable = default_metric_cmp_fn,
        val_handlers: Optional[Sequence] = None,
        amp: bool = False,
        mode: Union[ForwardMode, str] = ForwardMode.EVAL,
        event_names: Optional[List[Union[str, EventEnum]]] = None,
        event_to_attr: Optional[dict] = None,
        decollate: bool = True,
    ) -> None:
        super().__init__(
            device=device,
            val_data_loader=val_data_loader,
            epoch_length=epoch_length,
            non_blocking=non_blocking,
            prepare_batch=prepare_batch,
            iteration_update=iteration_update,
            postprocessing=postprocessing,
            key_val_metric=key_val_metric,
            additional_metrics=additional_metrics,
            metric_cmp_fn=metric_cmp_fn,
            val_handlers=val_handlers,
            amp=amp,
            mode=mode,
            event_names=event_names,
            event_to_attr=event_to_attr,
            decollate=decollate,
        )
        self.networks = ensure_tuple(networks)
        self.pred_keys = ensure_tuple(pred_keys)
        # default to a plain single forward pass when no inferer is given
        self.inferer = inferer if inferer is not None else SimpleInferer()

    def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """
        One validation step: prepare the batch, run each ensemble member's
        inference (optionally under autocast), then fire the FORWARD/MODEL
        completed events.

        Stores IMAGE, LABEL and one prediction per `pred_keys` entry in
        `engine.state.output` and returns it.

        Raises:
            ValueError: When ``batchdata`` is None.
        """
        if batchdata is None:
            raise ValueError("Must provide batch data for current iteration.")
        prepared = self.prepare_batch(batchdata, engine.state.device, engine.non_blocking)
        # prepare_batch returns either (inputs, targets) or
        # (inputs, targets, args, kwargs) for inferers that need extra arguments
        if len(prepared) == 2:
            inputs, targets = prepared
            args: Tuple = ()
            kwargs: Dict = {}
        else:
            inputs, targets, args, kwargs = prepared

        # put iteration inputs into engine.state
        engine.state.output = {Keys.IMAGE: inputs, Keys.LABEL: targets}

        for idx, net in enumerate(self.networks):
            with self.mode(net):
                if self.amp:
                    with torch.cuda.amp.autocast():
                        engine.state.output[self.pred_keys[idx]] = self.inferer(inputs, net, *args, **kwargs)
                else:
                    engine.state.output[self.pred_keys[idx]] = self.inferer(inputs, net, *args, **kwargs)

        engine.fire_event(IterationEvents.FORWARD_COMPLETED)
        engine.fire_event(IterationEvents.MODEL_COMPLETED)
        return engine.state.output
| 51.496222 | 120 | 0.674134 |
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from torch.utils.data import DataLoader
from monai.config import IgniteInfo
from monai.engines.utils import IterationEvents, default_metric_cmp_fn, default_prepare_batch
from monai.engines.workflow import Workflow
from monai.inferers import Inferer, SimpleInferer
from monai.networks.utils import eval_mode, train_mode
from monai.transforms import Transform
from monai.utils import ForwardMode, ensure_tuple, min_version, optional_import
from monai.utils.enums import CommonKeys as Keys
from monai.utils.module import look_up_option
if TYPE_CHECKING:
from ignite.engine import Engine, EventEnum
from ignite.metrics import Metric
else:
Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")
Metric, _ = optional_import("ignite.metrics", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Metric")
EventEnum, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "EventEnum")
__all__ = ["Evaluator", "SupervisedEvaluator", "EnsembleEvaluator"]
class Evaluator(Workflow):
    """
    Base class for all evaluators; runs one validation epoch over `val_data_loader`.

    Args:
        device: device on which to run.
        val_data_loader: Iterable or torch DataLoader with validation data.
        epoch_length: iterations per epoch, defaults to `len(val_data_loader)`.
        non_blocking: allow asynchronous host-to-device copies.
        prepare_batch: callable that parses image/label for one iteration.
        iteration_update: optional per-iteration callable `(engine, batchdata)`.
        postprocessing: extra transform applied to the model output.
        key_val_metric: main metric used for checkpoint comparison/saving.
        additional_metrics: extra ignite metrics to attach.
        metric_cmp_fn: comparator `(current, best) -> bool`; default greater-than.
        val_handlers: ignite event handlers to attach.
        amp: enable auto mixed precision.
        mode: 'eval' or 'train' forward mode (maps to `model.eval()`/`model.train()`).
        event_names: extra custom ignite events to register.
        event_to_attr: event -> `engine.state` attribute mapping.
        decollate: decollate batch-first output into a list after computation.
    """

    def __init__(
        self,
        device: torch.device,
        val_data_loader: Union[Iterable, DataLoader],
        epoch_length: Optional[int] = None,
        non_blocking: bool = False,
        prepare_batch: Callable = default_prepare_batch,
        iteration_update: Optional[Callable] = None,
        postprocessing: Optional[Transform] = None,
        key_val_metric: Optional[Dict[str, Metric]] = None,
        additional_metrics: Optional[Dict[str, Metric]] = None,
        metric_cmp_fn: Callable = default_metric_cmp_fn,
        val_handlers: Optional[Sequence] = None,
        amp: bool = False,
        mode: Union[ForwardMode, str] = ForwardMode.EVAL,
        event_names: Optional[List[Union[str, EventEnum]]] = None,
        event_to_attr: Optional[dict] = None,
        decollate: bool = True,
    ) -> None:
        super().__init__(
            device=device,
            max_epochs=1,
            data_loader=val_data_loader,
            epoch_length=epoch_length,
            non_blocking=non_blocking,
            prepare_batch=prepare_batch,
            iteration_update=iteration_update,
            postprocessing=postprocessing,
            key_metric=key_val_metric,
            additional_metrics=additional_metrics,
            metric_cmp_fn=metric_cmp_fn,
            handlers=val_handlers,
            amp=amp,
            event_names=event_names,
            event_to_attr=event_to_attr,
            decollate=decollate,
        )
        # fix: normalize before comparing. The original compared the raw `mode`
        # argument against ForwardMode members; if ForwardMode is a plain Enum,
        # a valid string such as 'eval' would never match and a spurious
        # ValueError would be raised. Comparing the normalized member is
        # correct in either case.
        mode = look_up_option(mode, ForwardMode)
        if mode == ForwardMode.EVAL:
            self.mode = eval_mode
        elif mode == ForwardMode.TRAIN:
            self.mode = train_mode
        else:
            raise ValueError(f"unsupported mode: {mode}, should be 'eval' or 'train'.")

    def run(self, global_epoch: int = 1) -> None:
        """Execute one evaluation run; `global_epoch` is the trainer's overall epoch."""
        # init env values for the current validation process
        self.state.max_epochs = global_epoch
        self.state.epoch = global_epoch - 1
        self.state.iteration = 0
        super().run()

    def get_validation_stats(self) -> Dict[str, float]:
        """Return the best key-metric value and the epoch it was achieved in."""
        return {"best_validation_metric": self.state.best_metric, "best_validation_epoch": self.state.best_metric_epoch}
class SupervisedEvaluator(Evaluator):
    """Supervised evaluator: runs one `network` over each validation batch,
    storing IMAGE/LABEL/PRED in `engine.state.output`.

    Everything except `network` and `inferer` is forwarded unchanged to
    :class:`Evaluator`; `inferer` falls back to `SimpleInferer`.
    """

    def __init__(
        self,
        device: torch.device,
        val_data_loader: Union[Iterable, DataLoader],
        network: torch.nn.Module,
        epoch_length: Optional[int] = None,
        non_blocking: bool = False,
        prepare_batch: Callable = default_prepare_batch,
        iteration_update: Optional[Callable] = None,
        inferer: Optional[Inferer] = None,
        postprocessing: Optional[Transform] = None,
        key_val_metric: Optional[Dict[str, Metric]] = None,
        additional_metrics: Optional[Dict[str, Metric]] = None,
        metric_cmp_fn: Callable = default_metric_cmp_fn,
        val_handlers: Optional[Sequence] = None,
        amp: bool = False,
        mode: Union[ForwardMode, str] = ForwardMode.EVAL,
        event_names: Optional[List[Union[str, EventEnum]]] = None,
        event_to_attr: Optional[dict] = None,
        decollate: bool = True,
    ) -> None:
        super().__init__(
            device=device,
            val_data_loader=val_data_loader,
            epoch_length=epoch_length,
            non_blocking=non_blocking,
            prepare_batch=prepare_batch,
            iteration_update=iteration_update,
            postprocessing=postprocessing,
            key_val_metric=key_val_metric,
            additional_metrics=additional_metrics,
            metric_cmp_fn=metric_cmp_fn,
            val_handlers=val_handlers,
            amp=amp,
            mode=mode,
            event_names=event_names,
            event_to_attr=event_to_attr,
            decollate=decollate,
        )
        self.network = network
        self.inferer = inferer if inferer is not None else SimpleInferer()

    def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """Run one validation step and return `engine.state.output`.

        Raises:
            ValueError: when ``batchdata`` is None.
        """
        if batchdata is None:
            raise ValueError("Must provide batch data for current iteration.")
        prepared = self.prepare_batch(batchdata, engine.state.device, engine.non_blocking)
        # either (inputs, targets) or (inputs, targets, args, kwargs)
        if len(prepared) == 2:
            inputs, targets = prepared
            args: Tuple = ()
            kwargs: Dict = {}
        else:
            inputs, targets, args, kwargs = prepared

        engine.state.output = {Keys.IMAGE: inputs, Keys.LABEL: targets}

        with self.mode(self.network):
            if self.amp:
                with torch.cuda.amp.autocast():
                    engine.state.output[Keys.PRED] = self.inferer(inputs, self.network, *args, **kwargs)
            else:
                engine.state.output[Keys.PRED] = self.inferer(inputs, self.network, *args, **kwargs)

        engine.fire_event(IterationEvents.FORWARD_COMPLETED)
        engine.fire_event(IterationEvents.MODEL_COMPLETED)
        return engine.state.output
class EnsembleEvaluator(Evaluator):
    """Ensemble evaluator: runs every model in `networks` per batch, storing
    network ``i``'s prediction under ``pred_keys[i]`` in `engine.state.output`.

    `pred_keys` must match `networks` in length; remaining arguments are
    forwarded unchanged to :class:`Evaluator`.
    """

    def __init__(
        self,
        device: torch.device,
        val_data_loader: Union[Iterable, DataLoader],
        networks: Sequence[torch.nn.Module],
        pred_keys: Sequence[str],
        epoch_length: Optional[int] = None,
        non_blocking: bool = False,
        prepare_batch: Callable = default_prepare_batch,
        iteration_update: Optional[Callable] = None,
        inferer: Optional[Inferer] = None,
        postprocessing: Optional[Transform] = None,
        key_val_metric: Optional[Dict[str, Metric]] = None,
        additional_metrics: Optional[Dict[str, Metric]] = None,
        metric_cmp_fn: Callable = default_metric_cmp_fn,
        val_handlers: Optional[Sequence] = None,
        amp: bool = False,
        mode: Union[ForwardMode, str] = ForwardMode.EVAL,
        event_names: Optional[List[Union[str, EventEnum]]] = None,
        event_to_attr: Optional[dict] = None,
        decollate: bool = True,
    ) -> None:
        super().__init__(
            device=device,
            val_data_loader=val_data_loader,
            epoch_length=epoch_length,
            non_blocking=non_blocking,
            prepare_batch=prepare_batch,
            iteration_update=iteration_update,
            postprocessing=postprocessing,
            key_val_metric=key_val_metric,
            additional_metrics=additional_metrics,
            metric_cmp_fn=metric_cmp_fn,
            val_handlers=val_handlers,
            amp=amp,
            mode=mode,
            event_names=event_names,
            event_to_attr=event_to_attr,
            decollate=decollate,
        )
        self.networks = ensure_tuple(networks)
        self.pred_keys = ensure_tuple(pred_keys)
        self.inferer = inferer if inferer is not None else SimpleInferer()

    def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """Run one validation step over every ensemble member and return
        `engine.state.output`.

        Raises:
            ValueError: when ``batchdata`` is None.
        """
        if batchdata is None:
            raise ValueError("Must provide batch data for current iteration.")
        prepared = self.prepare_batch(batchdata, engine.state.device, engine.non_blocking)
        # either (inputs, targets) or (inputs, targets, args, kwargs)
        if len(prepared) == 2:
            inputs, targets = prepared
            args: Tuple = ()
            kwargs: Dict = {}
        else:
            inputs, targets, args, kwargs = prepared

        engine.state.output = {Keys.IMAGE: inputs, Keys.LABEL: targets}

        for idx, net in enumerate(self.networks):
            with self.mode(net):
                if self.amp:
                    with torch.cuda.amp.autocast():
                        engine.state.output[self.pred_keys[idx]] = self.inferer(inputs, net, *args, **kwargs)
                else:
                    engine.state.output[self.pred_keys[idx]] = self.inferer(inputs, net, *args, **kwargs)

        engine.fire_event(IterationEvents.FORWARD_COMPLETED)
        engine.fire_event(IterationEvents.MODEL_COMPLETED)
        return engine.state.output
| true | true |
1c37daaffae2c398d95511df9e2def643d477990 | 3,678 | py | Python | scripts/figure4/preprocessing_dream5_invitro.py | jiawu/Roller | a70e350905a59c2254dcefda7ab23c6417cf8f7d | [
"MIT"
] | null | null | null | scripts/figure4/preprocessing_dream5_invitro.py | jiawu/Roller | a70e350905a59c2254dcefda7ab23c6417cf8f7d | [
"MIT"
] | 2 | 2015-07-13T18:51:22.000Z | 2015-07-16T15:35:24.000Z | scripts/figure4/preprocessing_dream5_invitro.py | jiawu/Roller | a70e350905a59c2254dcefda7ab23c6417cf8f7d | [
"MIT"
] | null | null | null | import pandas as pd
import pdb
import scipy.stats as stats
def zscore_data(df):
    """Standardize every column of ``df`` (z-score with sample std, ddof=1).

    The 'Time' column is restored to its original values afterwards so the
    time axis itself is never rescaled.
    """
    standardized = stats.zscore(df.values, axis=0, ddof=1)
    result = pd.DataFrame(standardized, index=df.index, columns=df.columns)
    result['Time'] = df['Time']
    return result
# ---------------------------------------------------------------------------
# Build a parsed time-series dataset from DREAM5 network 3 (in vitro) and
# merge it, z-scored, into the Omranian parsed time-series file.
# Fix: removed a leftover pdb.set_trace() breakpoint that halted the script,
# an unused groupby, and dead commented-out experiment selections.
# ---------------------------------------------------------------------------
db_path = '../data/invitro/net3_expression_data.tsv'
my_df = pd.read_csv(db_path, sep='\t')
# Keep only time-series rows; rows without a 'Time' value are dropped.
my_df = my_df[~my_df['Time'].isnull()]
# Hand-picked experiments/perturbations that provide usable 5-point series.
final_df = pd.DataFrame()
final_df = final_df.append(my_df[(my_df['#Experiment'] == 47) & (my_df['Perturbations'].isnull())].iloc[:5])
final_df = final_df.append(my_df[(my_df['#Experiment'] == 47) & (my_df['Perturbations'] == 'P13')].iloc[:5])
final_df = final_df.append(my_df[(my_df['#Experiment'] == 49) & (my_df['Perturbations'].isnull())].iloc[:5])
# Experiment 49's perturbation series have only 4 points: reuse the
# unperturbed first row as the t0 sample for the P16 and P17 series.
temp_t0 = my_df[(my_df['#Experiment'] == 49) & (my_df['Perturbations'].isnull())].iloc[0, :]
final_df = final_df.append(temp_t0)
final_df = final_df.append(my_df[(my_df['#Experiment'] == 49) & (my_df['Perturbations'] == 'P16')].iloc[:4])
final_df = final_df.append(temp_t0)
final_df = final_df.append(my_df[(my_df['#Experiment'] == 49) & (my_df['Perturbations'] == 'P17')].iloc[:4])
final_df = final_df.append(my_df[(my_df['#Experiment'] == 50) & (my_df['Perturbations'] == 'P8') & (my_df['Repeat'] == 1)].iloc[:5])
final_df = final_df.append(my_df[(my_df['#Experiment'] == 50) & (my_df['Perturbations'] == 'P8') & (my_df['Repeat'] == 2)].iloc[:5])
final_df = final_df.append(my_df[(my_df['#Experiment'] == 50) & (my_df['Perturbations'] == 'P8') & (my_df['Repeat'] == 3)].iloc[:5])
final_df = final_df.append(my_df[(my_df['#Experiment'] == 105)].iloc[:30])
# Map anonymous gene ids (G1..G4511) onto lower-cased gene names.
gene_names = pd.read_csv('../data/invitro/net3_gene_ids.tsv', sep='\t')
node_list = ['G%d' % (x) for x in range(1, 4512)]
node_list2 = gene_names['Name'].str.lower().tolist()
unmapped_df = final_df[['Time'] + node_list]
unmapped_df.columns = ['Time'] + node_list2
# Restrict both datasets to their shared genes, keeping 'Time' first.
om_df = pd.read_csv('../data/invitro/iomranian_parsed_timeseries.tsv', sep='\t')
om_df = om_df[om_df['Time'] != 90]
intersecting_genes = set(om_df.columns.tolist()).intersection(set(unmapped_df.columns.tolist()))
intersecting_genes = sorted(list(intersecting_genes))
intersecting_genes.insert(0, intersecting_genes.pop(intersecting_genes.index('Time')))
mapped_df = unmapped_df[intersecting_genes]
norm_df = zscore_data(mapped_df)
# Relabel time so it lines up with the Omranian grid: the 70 selected rows
# form 14 series of 5 points each, at time points 10..50.
x = [10, 20, 30, 40, 50]
t = [b for a in range(14) for b in x]
norm_df['Time'] = t
om_df_parsed = zscore_data(om_df[intersecting_genes])
om_df_parsed = om_df_parsed.append(norm_df)
om_df_parsed.to_csv('../data/invitro/iomranian_parsed_timeseries.tsv', index=False, sep='\t')
import pdb
import scipy.stats as stats
def zscore_data(df):
p = df.values
z = pd.DataFrame(stats.zscore(p,axis=0,ddof=1),index=df.index, columns=df.columns)
z['Time'] = df['Time']
return(z)
db_path = '../data/invitro/net3_expression_data.tsv'
my_df = pd.read_csv(db_path, sep='\t')
my_df = my_df[~my_df['Time'].isnull()]
gp = my_df.groupby(['#Experiment','Time'])
final_df = pd.DataFrame()
erturbations'].isnull())].iloc[:5])
final_df = final_df.append(my_df[ (my_df['#Experiment'] == 47) & (my_df['Perturbations']=='P13')].iloc[:5])
final_df = final_df.append(my_df[ (my_df['#Experiment'] == 49) & (my_df['Perturbations'].isnull())].iloc[:5])
temp_t0 = my_df[ (my_df['#Experiment'] == 49) & (my_df['Perturbations'].isnull())].iloc[0,:]
final_df = final_df.append(temp_t0)
final_df = final_df.append(my_df[ (my_df['#Experiment'] == 49) & (my_df['Perturbations'] == 'P16')].iloc[:4])
final_df = final_df.append(temp_t0)
final_df = final_df.append(my_df[ (my_df['#Experiment'] == 49) & (my_df['Perturbations'] == 'P17')].iloc[:4])
final_df = final_df.append(my_df[ (my_df['#Experiment'] == 50) & (my_df['Perturbations'] =='P8') & (my_df['Repeat'] == 1) ].iloc[:5] )
final_df = final_df.append(my_df[ (my_df['#Experiment'] == 50) & (my_df['Perturbations'] =='P8') & (my_df['Repeat'] == 2) ].iloc[:5] )
final_df = final_df.append(my_df[ (my_df['#Experiment'] == 50) & (my_df['Perturbations'] =='P8') & (my_df['Repeat'] == 3) ].iloc[:5] )
final_df = final_df.append(my_df[ (my_df['#Experiment'] == 105)].iloc[:30] )
gene_names = pd.read_csv('../data/invitro/net3_gene_ids.tsv', sep='\t')
node_list = ['G%d'% (x) for x in range(1, 4512)]
node_list2 = gene_names['Name'].str.lower().tolist()
unmapped_df = final_df[['Time']+node_list]
unmapped_df.columns = ['Time'] + node_list2
om_df = pd.read_csv('../data/invitro/iomranian_parsed_timeseries.tsv', sep='\t')
om_df = om_df[om_df['Time'] != 90]
intersecting_genes = set(om_df.columns.tolist()).intersection(set(unmapped_df.columns.tolist()))
intersecting_genes = sorted(list(intersecting_genes))
intersecting_genes.insert(0, intersecting_genes.pop(intersecting_genes.index('Time')))
mapped_df = unmapped_df[intersecting_genes]
norm_df = zscore_data(mapped_df)
x = [10,20,30,40,50]
t = [b for a in range(14) for b in x]
pdb.set_trace()
norm_df['Time'] = t
om_df_parsed = zscore_data(om_df[intersecting_genes])
om_df_parsed = om_df_parsed.append(norm_df)
om_df_parsed.to_csv('../data/invitro/iomranian_parsed_timeseries.tsv', index=False, sep='\t')
| true | true |
1c37dab1d05e53faea5ed821d46f44a67edb994a | 1,831 | py | Python | apps/users/admin.py | christianalcantara/book_backend | 5c98aad01a1ea7d7985cafa14c6de7eb3d0b48af | [
"MIT"
] | 1 | 2021-02-23T00:55:14.000Z | 2021-02-23T00:55:14.000Z | apps/users/admin.py | christianalcantara/book_backend | 5c98aad01a1ea7d7985cafa14c6de7eb3d0b48af | [
"MIT"
] | 1 | 2021-02-23T00:33:05.000Z | 2021-02-23T00:33:05.000Z | apps/users/admin.py | christianalcantara/book_backend | 5c98aad01a1ea7d7985cafa14c6de7eb3d0b48af | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import Group
from apps.users.forms import UserChangeForm, UserCreationForm
from apps.users.models import User
class UserAdmin(BaseUserAdmin):
    """Admin configuration for the custom ``User`` model.

    Accounts are identified by email (ordering, search and the auth fieldset
    all use ``email``) and managed through the custom creation/change forms.
    """
    form = UserChangeForm  # form used when editing an existing user
    add_form = UserCreationForm  # form used when creating a new user from the admin
    # The fields to be used in displaying the User model.
    # These override the definitions on the base UserAdmin
    # that reference specific fields on auth.User.
    list_display = ["full_name", "email"]
    fieldsets = [
        ["Auth", {"fields": ["email", "password"]}],
        ["Personal info", {"fields": ["last_name", "first_name", "avatar"]}],
        [
            "Settings",
            {
                "fields": [
                    "groups",
                    "is_admin",
                    "is_active",
                    "is_staff",
                    "is_superuser",
                    "is_customer",
                ]
            },
        ],
        # These two fields are read-only (see readonly_fields below).
        ["Important dates", {"fields": ["last_login", "registered_at"]}],
    ]
    # add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
    # overrides get_fieldsets to use this attribute when creating a user.
    add_fieldsets = [
        [
            None,
            {
                "classes": ["wide"],
                "fields": [
                    "email",
                    "first_name",
                    "last_name",
                    "password1",
                    "password2",
                ],
            },
        ],
    ]
    search_fields = ["email"]
    ordering = ["email"]
    # Timestamps are system-managed, never editable through the admin.
    readonly_fields = ["last_login", "registered_at"]
# Register the custom User model with the UserAdmin configuration above.
admin.site.register(User, UserAdmin)
# Hide Django's built-in Group model from the admin index.
admin.site.unregister(Group)
| 30.016393 | 77 | 0.534134 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import Group
from apps.users.forms import UserChangeForm, UserCreationForm
from apps.users.models import User
class UserAdmin(BaseUserAdmin):
form = UserChangeForm
add_form = UserCreationForm
list_display = ["full_name", "email"]
fieldsets = [
["Auth", {"fields": ["email", "password"]}],
["Personal info", {"fields": ["last_name", "first_name", "avatar"]}],
[
"Settings",
{
"fields": [
"groups",
"is_admin",
"is_active",
"is_staff",
"is_superuser",
"is_customer",
]
},
],
["Important dates", {"fields": ["last_login", "registered_at"]}],
]
add_fieldsets = [
[
None,
{
"classes": ["wide"],
"fields": [
"email",
"first_name",
"last_name",
"password1",
"password2",
],
},
],
]
search_fields = ["email"]
ordering = ["email"]
readonly_fields = ["last_login", "registered_at"]
admin.site.register(User, UserAdmin)
admin.site.unregister(Group)
| true | true |
1c37dbc948feee822d32407b551e6cb846df0a97 | 8,535 | py | Python | gslib/addlhelp/encoding.py | stanhu/gsutil | e8403ed5e07caed3027455c7b883fef733612360 | [
"Apache-2.0"
] | 649 | 2015-01-08T01:50:15.000Z | 2022-03-31T08:33:38.000Z | gslib/addlhelp/encoding.py | stanhu/gsutil | e8403ed5e07caed3027455c7b883fef733612360 | [
"Apache-2.0"
] | 798 | 2015-01-02T07:46:09.000Z | 2022-03-31T20:37:19.000Z | gslib/addlhelp/encoding.py | stanhu/gsutil | e8403ed5e07caed3027455c7b883fef733612360 | [
"Apache-2.0"
] | 315 | 2015-01-02T10:26:53.000Z | 2022-03-27T02:18:58.000Z | # -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help about CRC32C and installing crcmod."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
To reduce the chance for `filename encoding interoperability problems
<https://en.wikipedia.org/wiki/Filename#Encoding_indication_interoperability>`_
gsutil uses `UTF-8 <https://en.wikipedia.org/wiki/UTF-8>`_ character encoding
when uploading and downloading files. Because UTF-8 is in widespread (and
growing) use, for most users nothing needs to be done to use UTF-8. Users with
files stored in other encodings (such as
`Latin 1 <https://en.wikipedia.org/wiki/ISO/IEC_8859-1>`_) must convert those
filenames to UTF-8 before attempting to upload the files.
The most common place where users who have filenames that use some other
encoding encounter a gsutil error is while uploading files using the recursive
(-R) option on the gsutil cp , mv, or rsync commands. When this happens you'll
get an error like this:
CommandException: Invalid Unicode path encountered
('dir1/dir2/file_name_with_\\xf6n_bad_chars').
gsutil cannot proceed with such files present.
Please remove or rename this file and try again.
Note that the invalid Unicode characters have been hex-encoded in this error
message because otherwise trying to print them would result in another
error.
If you encounter such an error you can either remove the problematic file(s)
or try to rename them and re-run the command. If you have a modest number of
such files the simplest thing to do is to think of a different name for the
file and manually rename the file (using local filesystem tools). If you have
too many files for that to be practical, you can use a bulk rename tool or
script.
Unicode errors for valid Unicode filepaths can be caused by lack of Python
locale configuration on Linux and Mac OSes. If your file paths are Unicode
and you get encoding errors, ensure the LANG environment variable is set
correctly. Typically, the LANG variable should be set to something like
"en_US.UTF-8" or "de_DE.UTF-8".
Note also that there's no restriction on the character encoding used in file
content - it can be UTF-8, a different encoding, or non-character
data (like audio or video content). The gsutil UTF-8 character encoding
requirement applies only to filenames.
<B>USING UNICODE FILENAMES ON WINDOWS</B>
Windows support for Unicode in the command shell (cmd.exe or powershell) is
somewhat painful, because Windows uses a Windows-specific character encoding
called `cp1252 <https://en.wikipedia.org/wiki/Windows-1252>`_. To use Unicode
characters you need to run this command in the command shell before the first
time you use gsutil in that shell:
chcp 65001
If you neglect to do this before using gsutil, the progress messages while
uploading files with Unicode names or listing buckets with Unicode object
names will look garbled (i.e., with different glyphs than you expect in the
output). If you simply run the chcp command and re-run the gsutil command, the
output should no longer look garbled.
gsutil attempts to translate between cp1252 encoding and UTF-8 in the main
places that Unicode encoding/decoding problems have been encountered to date
(traversing the local file system while uploading files, and printing Unicode
names while listing buckets). However, because gsutil must perform
translation, it is likely there are other erroneous edge cases when using
Windows with Unicode. If you encounter problems, you might consider instead
using cygwin (on Windows) or Linux or macOS - all of which support Unicode.
<B>USING UNICODE FILENAMES ON MACOS</B>
macOS stores filenames in decomposed form (also known as
`NFD normalization <https://en.wikipedia.org/wiki/Unicode_equivalence>`_).
For example, if a filename contains an accented "e" character, that character
will be converted to an "e" followed by an accent before being saved to the
filesystem. As a consequence, it's possible to have different name strings
for files uploaded from an operating system that doesn't enforce decomposed
form (like Ubuntu) from one that does (like macOS).
The following example shows how this behavior could lead to unexpected
results. Say you create a file with non-ASCII characters on Ubuntu. Ubuntu
stores that filename in its composed form. When you upload the file to the
cloud, it is stored as named. But if you use gsutil rysnc to bring the file to
a macOS machine and edit the file, then when you use gsutil rsync to bring
this version back to the cloud, you end up with two different objects, instead
of replacing the original. This is because macOS converted the filename to
a decomposed form, and Cloud Storage sees this as a different object name.
<B>CROSS-PLATFORM ENCODING PROBLEMS OF WHICH TO BE AWARE</B>
Using UTF-8 for all object names and filenames will ensure that gsutil doesn't
encounter character encoding errors while operating on the files.
Unfortunately, it's still possible that files uploaded / downloaded this way
can have interoperability problems, for a number of reasons unrelated to
gsutil. For example:
- Windows filenames are case-insensitive, while Google Cloud Storage, Linux,
and macOS are not. Thus, for example, if you have two filenames on Linux
differing only in case and upload both to Google Cloud Storage and then
subsequently download them to Windows, you will end up with just one file
whose contents came from the last of these files to be written to the
filesystem.
- macOS performs character encoding decomposition based on tables stored in
the OS, and the tables change between Unicode versions. Thus the encoding
used by an external library may not match that performed by the OS. It is
possible that two object names may translate to a single local filename.
- Windows console support for Unicode is difficult to use correctly.
For a more thorough list of such issues see `this presentation
<http://www.i18nguy.com/unicode/filename-issues-iuc33.pdf>`_
These problems mostly arise when sharing data across platforms (e.g.,
uploading data from a Windows machine to Google Cloud Storage, and then
downloading from Google Cloud Storage to a machine running macOS).
Unfortunately these problems are a consequence of the lack of a filename
encoding standard, and users need to be aware of the kinds of problems that
can arise when copying filenames across platforms.
There is one precaution users can exercise to prevent some of these problems:
When using the Windows console specify wildcards or folders (using the -R
option) rather than explicitly named individual files.
<B>CONVERTING FILENAMES TO UNICODE</B>
Open-source tools are available to convert filenames for non-Unicode files.
For example, to convert from latin1 (a common Windows encoding) to Unicode,
you can use
`Windows iconv <http://gnuwin32.sourceforge.net/packages/libiconv.htm>`_.
For Unix-based systems, you can use
`libiconv <https://www.gnu.org/software/libiconv/>`_.
""")
class CommandOptions(HelpProvider):
  """Additional help about filename encoding and interoperability problems."""
  # Help specification binding the topic name and its aliases to the detailed
  # help text above. See help_provider.py for documentation of HelpSpec fields.
  help_spec = HelpProvider.HelpSpec(
      help_name='encoding',
      help_name_aliases=[
          'encodings',
          'utf8',
          'utf-8',
          'latin1',
          'unicode',
          'interoperability',
      ],
      help_type='additional_help',
      help_one_line_summary='Filename encoding and interoperability problems',
      help_text=_DETAILED_HELP_TEXT,
      subcommand_help_text={},
  )
| 49.051724 | 81 | 0.765436 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
To reduce the chance for `filename encoding interoperability problems
<https://en.wikipedia.org/wiki/Filename#Encoding_indication_interoperability>`_
gsutil uses `UTF-8 <https://en.wikipedia.org/wiki/UTF-8>`_ character encoding
when uploading and downloading files. Because UTF-8 is in widespread (and
growing) use, for most users nothing needs to be done to use UTF-8. Users with
files stored in other encodings (such as
`Latin 1 <https://en.wikipedia.org/wiki/ISO/IEC_8859-1>`_) must convert those
filenames to UTF-8 before attempting to upload the files.
The most common place where users who have filenames that use some other
encoding encounter a gsutil error is while uploading files using the recursive
(-R) option on the gsutil cp , mv, or rsync commands. When this happens you'll
get an error like this:
CommandException: Invalid Unicode path encountered
('dir1/dir2/file_name_with_\\xf6n_bad_chars').
gsutil cannot proceed with such files present.
Please remove or rename this file and try again.
Note that the invalid Unicode characters have been hex-encoded in this error
message because otherwise trying to print them would result in another
error.
If you encounter such an error you can either remove the problematic file(s)
or try to rename them and re-run the command. If you have a modest number of
such files the simplest thing to do is to think of a different name for the
file and manually rename the file (using local filesystem tools). If you have
too many files for that to be practical, you can use a bulk rename tool or
script.
Unicode errors for valid Unicode filepaths can be caused by lack of Python
locale configuration on Linux and Mac OSes. If your file paths are Unicode
and you get encoding errors, ensure the LANG environment variable is set
correctly. Typically, the LANG variable should be set to something like
"en_US.UTF-8" or "de_DE.UTF-8".
Note also that there's no restriction on the character encoding used in file
content - it can be UTF-8, a different encoding, or non-character
data (like audio or video content). The gsutil UTF-8 character encoding
requirement applies only to filenames.
<B>USING UNICODE FILENAMES ON WINDOWS</B>
Windows support for Unicode in the command shell (cmd.exe or powershell) is
somewhat painful, because Windows uses a Windows-specific character encoding
called `cp1252 <https://en.wikipedia.org/wiki/Windows-1252>`_. To use Unicode
characters you need to run this command in the command shell before the first
time you use gsutil in that shell:
chcp 65001
If you neglect to do this before using gsutil, the progress messages while
uploading files with Unicode names or listing buckets with Unicode object
names will look garbled (i.e., with different glyphs than you expect in the
output). If you simply run the chcp command and re-run the gsutil command, the
output should no longer look garbled.
gsutil attempts to translate between cp1252 encoding and UTF-8 in the main
places that Unicode encoding/decoding problems have been encountered to date
(traversing the local file system while uploading files, and printing Unicode
names while listing buckets). However, because gsutil must perform
translation, it is likely there are other erroneous edge cases when using
Windows with Unicode. If you encounter problems, you might consider instead
using cygwin (on Windows) or Linux or macOS - all of which support Unicode.
<B>USING UNICODE FILENAMES ON MACOS</B>
macOS stores filenames in decomposed form (also known as
`NFD normalization <https://en.wikipedia.org/wiki/Unicode_equivalence>`_).
For example, if a filename contains an accented "e" character, that character
will be converted to an "e" followed by an accent before being saved to the
filesystem. As a consequence, it's possible to have different name strings
for files uploaded from an operating system that doesn't enforce decomposed
form (like Ubuntu) from one that does (like macOS).
The following example shows how this behavior could lead to unexpected
results. Say you create a file with non-ASCII characters on Ubuntu. Ubuntu
stores that filename in its composed form. When you upload the file to the
cloud, it is stored as named. But if you use gsutil rysnc to bring the file to
a macOS machine and edit the file, then when you use gsutil rsync to bring
this version back to the cloud, you end up with two different objects, instead
of replacing the original. This is because macOS converted the filename to
a decomposed form, and Cloud Storage sees this as a different object name.
<B>CROSS-PLATFORM ENCODING PROBLEMS OF WHICH TO BE AWARE</B>
Using UTF-8 for all object names and filenames will ensure that gsutil doesn't
encounter character encoding errors while operating on the files.
Unfortunately, it's still possible that files uploaded / downloaded this way
can have interoperability problems, for a number of reasons unrelated to
gsutil. For example:
- Windows filenames are case-insensitive, while Google Cloud Storage, Linux,
and macOS are not. Thus, for example, if you have two filenames on Linux
differing only in case and upload both to Google Cloud Storage and then
subsequently download them to Windows, you will end up with just one file
whose contents came from the last of these files to be written to the
filesystem.
- macOS performs character encoding decomposition based on tables stored in
the OS, and the tables change between Unicode versions. Thus the encoding
used by an external library may not match that performed by the OS. It is
possible that two object names may translate to a single local filename.
- Windows console support for Unicode is difficult to use correctly.
For a more thorough list of such issues see `this presentation
<http://www.i18nguy.com/unicode/filename-issues-iuc33.pdf>`_
These problems mostly arise when sharing data across platforms (e.g.,
uploading data from a Windows machine to Google Cloud Storage, and then
downloading from Google Cloud Storage to a machine running macOS).
Unfortunately these problems are a consequence of the lack of a filename
encoding standard, and users need to be aware of the kinds of problems that
can arise when copying filenames across platforms.
There is one precaution users can exercise to prevent some of these problems:
When using the Windows console specify wildcards or folders (using the -R
option) rather than explicitly named individual files.
<B>CONVERTING FILENAMES TO UNICODE</B>
Open-source tools are available to convert filenames for non-Unicode files.
For example, to convert from latin1 (a common Windows encoding) to Unicode,
you can use
`Windows iconv <http://gnuwin32.sourceforge.net/packages/libiconv.htm>`_.
For Unix-based systems, you can use
`libiconv <https://www.gnu.org/software/libiconv/>`_.
""")
class CommandOptions(HelpProvider):
help_spec = HelpProvider.HelpSpec(
help_name='encoding',
help_name_aliases=[
'encodings',
'utf8',
'utf-8',
'latin1',
'unicode',
'interoperability',
],
help_type='additional_help',
help_one_line_summary='Filename encoding and interoperability problems',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
| true | true |
1c37dc2b9a245831e5c847624b92e7308a27130d | 151 | py | Python | rootfs/usr/lib/python3/dist-packages/numpy/distutils/compat.py | kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | 66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2 | [
"MIT"
] | 343 | 2015-01-07T05:58:44.000Z | 2022-03-15T14:55:21.000Z | rootfs/usr/lib/python3/dist-packages/numpy/distutils/compat.py | kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | 66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2 | [
"MIT"
] | 61 | 2015-03-19T18:20:21.000Z | 2019-10-23T12:58:23.000Z | rootfs/usr/lib/python3/dist-packages/numpy/distutils/compat.py | kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | 66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2 | [
"MIT"
] | 66 | 2015-01-20T15:35:05.000Z | 2021-11-25T16:49:41.000Z | """Small modules to cope with python 2 vs 3 incompatibilities inside
numpy.distutils
"""
import sys
def get_exception():
return sys.exc_info()[1]
| 18.875 | 68 | 0.741722 | import sys
def get_exception():
return sys.exc_info()[1]
| true | true |
1c37dcad788bd730e9bf79ccc6bb167532afc494 | 2,963 | py | Python | Stage/backend/Datatables.py | zuoziji/transaction | 7a59817a699d9df32e13d43edda630520af7860d | [
"Apache-2.0"
] | null | null | null | Stage/backend/Datatables.py | zuoziji/transaction | 7a59817a699d9df32e13d43edda630520af7860d | [
"Apache-2.0"
] | 9 | 2021-02-08T20:19:53.000Z | 2022-03-11T23:16:46.000Z | Stage/backend/Datatables.py | zuoziji/transaction | 7a59817a699d9df32e13d43edda630520af7860d | [
"Apache-2.0"
] | 2 | 2019-03-03T14:27:54.000Z | 2019-07-22T09:00:35.000Z | # -*- coding: utf-8 -*-
from config_fh import get_db_engine
# Module-level database connection and cursor, shared by every
# DataTablesServer instance in this process (opened once at import time).
connection = get_db_engine().raw_connection()
cursor = connection.cursor()
class DataTablesServer(object):
    """Server-side processing backend for jQuery DataTables (legacy 1.9 API).

    Reads the draw parameters DataTables sends (``sEcho``, ``sSearch``,
    ``iSortCol_*``, ``iDisplayStart``, ...) from ``request.values``, runs the
    matching SQL query against ``table`` through the module-level cursor, and
    exposes the result via ``output_result()`` in the ``aaData`` format the
    client expects.

    Fixes over the previous version: ``dict.has_key`` (removed in Python 3)
    replaced with ``in``; the ``self.cadinality`` typo in ``__init__`` fixed so
    the attribute read by output_result() is always initialized; client-supplied
    values are sanitized before being interpolated into SQL.
    """

    def __init__(self, request, columns, index, table):
        self.columns = columns  # column names, in display order (server-defined, trusted)
        self.index = index      # key column used for the total-row COUNT()
        self.table = table      # table name (server-defined, trusted)
        self.request_values = request.values
        self.dbh = cursor       # shared module-level DB cursor
        self.resultData = None
        # Total row count; previously misspelled ``cadinality`` so the
        # attribute read by output_result() was only created in run_queries().
        self.cardinality = 0
        # Misspelling kept: this attribute name is used consistently below.
        self.cadinalityFiltered = 0
        self.run_queries()

    def output_result(self):
        """Return the response dict in the DataTables 1.9 server-side format."""
        output = {}
        # Cast through int() so only a numeric echo token is reflected back.
        output['sEcho'] = str(int(self.request_values['sEcho']))
        output['iTotalRecords'] = str(self.cardinality)
        output['iTotalDisplayRecords'] = str(self.cadinalityFiltered)
        aaData_rows = []
        for row in self.resultData:
            # Re-key each result tuple by its column name.
            aaData_rows.append(dict(zip(self.columns, row)))
        output['aaData'] = aaData_rows
        return output

    def run_queries(self):
        """Fetch the requested page plus the filtered and total row counts."""
        dataCursor = self.dbh
        dataCursor.execute("""
            SELECT SQL_CALC_FOUND_ROWS %(columns)s
            FROM %(table)s %(where)s %(order)s %(limit)s""" % dict(
            columns=', '.join(self.columns), table=self.table, where=self.filtering(), order=self.ordering(),
            limit=self.paging()
        ))
        self.resultData = dataCursor.fetchall()
        # FOUND_ROWS() returns the filtered count without re-running the query
        # (requires the SQL_CALC_FOUND_ROWS modifier above).
        cadinalityFilteredCursor = self.dbh
        cadinalityFilteredCursor.execute("""
            SELECT FOUND_ROWS()
        """)
        self.cadinalityFiltered = cadinalityFilteredCursor.fetchone()[0]
        cadinalityCursor = self.dbh
        cadinalityCursor.execute("""SELECT COUNT(%s) FROM %s""" % (self.index, self.table))
        self.cardinality = cadinalityCursor.fetchone()[0]

    def filtering(self):
        """Build a WHERE clause matching the global search term in any column."""
        where_clause = ""
        if 'sSearch' in self.request_values and self.request_values['sSearch'] != "":
            # Escape backslashes and quotes so the user-supplied search term
            # cannot break out of the SQL string literal (injection hardening).
            term = self.request_values['sSearch'].replace('\\', '\\\\').replace("'", "''")
            clauses = ["%s LIKE '%%%s%%'" % (column, term) for column in self.columns]
            where_clause = "WHERE " + " OR ".join(clauses)
        return where_clause

    def ordering(self):
        """Build an ORDER BY clause from the iSortCol_*/sSortDir_* parameters."""
        if (self.request_values['iSortCol_0'] != "") and (int(self.request_values['iSortingCols']) > 0):
            terms = []
            for i in range(int(self.request_values['iSortingCols'])):
                # int() restricts the column reference to our own column list.
                column = self.columns[int(self.request_values['iSortCol_' + str(i)])]
                # Whitelist the sort direction; never interpolate it raw.
                direction = 'desc' if self.request_values['sSortDir_' + str(i)].lower() == 'desc' else 'asc'
                terms.append("%s %s" % (column, direction))
            return "ORDER BY " + ", ".join(terms)
        return ""

    def paging(self):
        """Build a LIMIT clause; iDisplayLength == -1 means 'all rows'."""
        limit_clause = ""
        start = self.request_values['iDisplayStart']
        length = self.request_values['iDisplayLength']
        # Request values arrive as strings, so compare numerically (the old
        # ``!= -1`` string/int comparison was always true, emitting an invalid
        # ``LIMIT start, -1``) and cast to int to prevent SQL injection.
        if start != "" and int(length) != -1:
            limit_clause = "LIMIT %d, %d" % (int(start), int(length))
        return limit_clause
| 38.480519 | 114 | 0.585218 |
from config_fh import get_db_engine
connection = get_db_engine().raw_connection()
cursor = connection.cursor()
class DataTablesServer(object):
    """Server-side processing backend for the jQuery DataTables plugin
    (legacy 1.9-era parameter names: ``sEcho``, ``iDisplayStart``, ...).

    On construction it immediately runs the filtered/sorted/paged data
    query against the module-level MySQL ``cursor`` and stores the
    results; call :meth:`output_result` for the JSON-serialisable
    response dict.

    WARNING: :meth:`filtering` splices the user-supplied ``sSearch``
    value directly into SQL — this is an injection vector and should be
    rewritten with query placeholders. Column/table names must only ever
    come from trusted application code.
    """

    def __init__(self, request, columns, index, table):
        """
        :param request: Flask/Werkzeug request; ``request.values`` holds
            the combined GET/POST DataTables parameters.
        :param columns: ordered list of column names to SELECT and return.
        :param index: column (normally the primary key) used for the
            unfiltered ``COUNT(...)`` query.
        :param table: name of the table to query.
        """
        self.columns = columns
        self.index = index
        self.table = table
        # combined GET/POST parameters sent by the DataTables widget
        self.request_values = request.values
        self.dbh = cursor  # module-level cursor shared by all instances
        self.resultData = None
        self.cadinalityFiltered = 0
        # BUG FIX: this attribute was initialised as ``self.cadinality``
        # (typo) although output_result() and run_queries() use
        # ``self.cardinality``; initialise the spelling that is actually
        # read so an early query failure cannot surface as AttributeError.
        self.cardinality = 0
        self.run_queries()

    def output_result(self):
        """Render the DataTables JSON response structure.

        :returns: dict with the legacy keys ``sEcho``, ``iTotalRecords``,
            ``iTotalDisplayRecords`` and ``aaData`` (one dict per result
            row, keyed by column name).
        """
        output = {}
        # round-trip through int() so a malicious sEcho cannot reflect markup
        output['sEcho'] = str(int(self.request_values['sEcho']))
        output['iTotalRecords'] = str(self.cardinality)
        output['iTotalDisplayRecords'] = str(self.cadinalityFiltered)
        # map each result tuple onto {column_name: value}
        output['aaData'] = [dict(zip(self.columns, row)) for row in self.resultData]
        return output

    def run_queries(self):
        """Execute the three queries that feed :meth:`output_result`.

        1. the paged/filtered/sorted data query (``SQL_CALC_FOUND_ROWS``
           asks MySQL to remember the un-LIMITed row count),
        2. ``SELECT FOUND_ROWS()`` for the filtered cardinality,
        3. ``COUNT(index)`` for the total, unfiltered cardinality.
        """
        dataCursor = self.dbh
        # NOTE(review): clauses are interpolated as raw strings; the
        # columns/table come from our code, but filtering() embeds user
        # input unescaped (see its docstring).
        dataCursor.execute("""
            SELECT SQL_CALC_FOUND_ROWS %(columns)s
            FROM %(table)s %(where)s %(order)s %(limit)s""" % dict(
            columns=', '.join(self.columns), table=self.table, where=self.filtering(), order=self.ordering(),
            limit=self.paging()
        ))
        self.resultData = dataCursor.fetchall()
        cadinalityFilteredCursor = self.dbh
        cadinalityFilteredCursor.execute("""
            SELECT FOUND_ROWS()
        """)
        self.cadinalityFiltered = cadinalityFilteredCursor.fetchone()[0]
        cadinalityCursor = self.dbh
        cadinalityCursor.execute("""SELECT COUNT(%s) FROM %s""" % (self.index, self.table))
        self.cardinality = cadinalityCursor.fetchone()[0]

    def filtering(self):
        """Build the WHERE clause implementing the global text search.

        Every display column is matched with ``LIKE '%<sSearch>%'``.

        WARNING: ``sSearch`` is spliced into the SQL unescaped — SQL
        injection vector; should be rewritten with placeholders.

        :returns: ``"WHERE ... "`` clause, or ``""`` when no search term.
        """
        where = ""
        # BUG FIX: dict.has_key() was removed in Python 3; use ``in``.
        if ('sSearch' in self.request_values) and (self.request_values['sSearch'] != ""):
            where = "WHERE "
            for column in self.columns:
                where += "%s LIKE '%%%s%%' OR " % (column, self.request_values['sSearch'])
            where = where[:-3]  # drop the dangling "OR " (keeps trailing space)
        return where

    def ordering(self):
        """Build the ORDER BY clause from iSortingCols/iSortCol_N/sSortDir_N.

        :returns: ``"ORDER BY col dir[, ...]"`` or ``""`` when no sort
            was requested.
        """
        order = ""
        if (self.request_values['iSortCol_0'] != "") and (int(self.request_values['iSortingCols']) > 0):
            order = "ORDER BY "
            for i in range(int(self.request_values['iSortingCols'])):
                # iSortCol_N is a numeric index into self.columns
                column = self.columns[int(self.request_values['iSortCol_' + str(i)])]
                order += "%s %s, " % (column, self.request_values['sSortDir_' + str(i)])
            order = order[:-2]  # drop the trailing ", "
        return order

    def paging(self):
        """Build the SQL LIMIT clause from iDisplayStart/iDisplayLength.

        :returns: ``"LIMIT <start>, <length>"`` or ``""`` when paging is
            disabled.
        """
        start = self.request_values['iDisplayStart']
        length = self.request_values['iDisplayLength']
        # BUG FIX: request values arrive as *strings*, so the original
        # ``length != -1`` test never matched DataTables' "show all"
        # sentinel "-1" and emitted invalid SQL (LIMIT x, -1). Comparing
        # the string form handles both str and int sentinels.
        if start != "" and str(length) != "-1":
            return "LIMIT %s, %s" % (start, length)
        return ""
| true | true |
1c37deb30c87f7186536d3181d22fa842c9470ef | 119 | py | Python | executiveorder/__init__.py | chrisengelsma/executive_orders | 5f0c7102b9abce3d44b54e5dd4c57bd0bb404037 | [
"MIT"
] | 2 | 2017-03-23T02:26:05.000Z | 2017-08-24T02:07:17.000Z | executiveorder/__init__.py | chrisengelsma/executive_orders | 5f0c7102b9abce3d44b54e5dd4c57bd0bb404037 | [
"MIT"
] | null | null | null | executiveorder/__init__.py | chrisengelsma/executive_orders | 5f0c7102b9abce3d44b54e5dd4c57bd0bb404037 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from executiveorder import *
from util import *
from processors import *
| 19.833333 | 28 | 0.697479 |
from executiveorder import *
from util import *
from processors import *
| true | true |
1c37df03de31a22741263c2f30a64b198279daf0 | 3,874 | py | Python | rs/localization_files/ES.py | alexander-marquardt/lexalink | d554f3a00699c8a4cdf1b28dd033655f929470fa | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2017-02-09T07:12:25.000Z | 2017-02-09T07:12:25.000Z | rs/localization_files/ES.py | alexander-marquardt/lexalink | d554f3a00699c8a4cdf1b28dd033655f929470fa | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | rs/localization_files/ES.py | alexander-marquardt/lexalink | d554f3a00699c8a4cdf1b28dd033655f929470fa | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
################################################################################
# LexaLink Copyright information - do not remove this copyright notice
# Copyright (C) 2012
#
# Lexalink - a free social network and dating platform for the Google App Engine.
#
# Original author: Alexander Marquardt
# Documentation and additional information: http://www.LexaLink.com
# Git source code repository: https://github.com/lexalink/LexaLink.git
#
# Please consider contributing your enhancements and modifications to the LexaLink community,
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# Spain
# Spain: autonomous communities and their provinces/comarcas, used to
# populate location selectors. Structure:
#   [((region_code, region_name), [(sub_code, sub_name), ...]), ...]
# The codes serve only as database index keys, so some entries use
# invented (non-official) codes -- see inline notes below.
ES_regions = [
(( u'AN', u'Andalucía'),
[(u'AL', u'Almería'),
(u'CA', u'Cádiz'),
(u'CO', u'Córdoba'),
(u'GR', u'Granada'),
(u'H', u'Huelva'),
(u'J', u'Jaén'),
(u'MA', u'Málaga'),
(u'SE', u'Sevilla')]),
((u'AR', u'Aragón'),
[(u'HU', u'Huesca'),
(u'TE', u'Teruel'),
(u'Z', u'Zaragoza')]),
((u'O', u'Asturias'),
[(u'O', u'Asturias')]),
# NOTE(review): 'Beleares' looks like a typo for 'Baleares'; it is a
# user-visible runtime string, so it is left unchanged here -- confirm.
((u'IB', u'Beleares'),
# the following region codes are invented (for islands)-- these are not official
# regions. But doesn't matter for purposes of indexing into the database.
[(u'MA', u'Mallorca'),
(u'ME', u'Menorca'),
(u'IB', u'Ibiza'),
(u'FO', u'Formentera')]),
((u'CN', u'Canarias'),
[(u'GC', u'Las Palmas'),
(u'TF', u'Santa Cruz de Tenerife')]),
((u'S', u'Cantabria'),
[(u'S', u'Cantabria')]),
((u'CL', u'Castilla y León'),
[(u'AV', u'Avila'),
(u'BU', u'Burgos'),
(u'LE', u'León'),
(u'P', u'Palencia'),
(u'SA', u'Salamanca'),
(u'SG', u'Segovia'),
(u'SO', u'Soria'),
(u'VA', u'Valladolid'),
(u'ZA', u'Zamora')]),
((u'CM', u'Castilla la Mancha'),
[(u'AB', u'Albacete'),
(u'CR', u'Ciudad Real'),
# NOTE(review): León also appears under Castilla y León above; this
# entry looks like a copy/paste duplicate -- confirm before removing.
(u'LE', u'León'),
(u'CU', u'Cuenca'),
(u'GU', u'Guadalajara'),
(u'TO', u'Toledo')]),
((u'CT', u'Cataluña'),
[(u'B', u'Barcelona'),
(u'GI', u'Girona'),
(u'L', u'Lleida'),
(u'T', u'Tarragona')]),
((u'CE', u'Ceuta'),
[(u'CE', u'Ceuta')]),
((u'EX', u'Extremadura'),
[(u'BA', u'Badajoz'),
(u'CC', u'Cáceres')]),
((u'GA', u'Galicia'),
[(u'C', u'A Coruña'),
(u'LU', u'Lugo'),
(u'OR', u'Orense'),
(u'PO', u'Pontevedra')]),
((u'LO', u'La Rioja'),
[(u'LO', u'La Rioja')]),
((u'M', u'Madrid'),
# Madrid uses comarcas (numbered 1-8) rather than provinces.
[(u'M', u'Área Metropolitana'),
(u'1', u'Comarca de Las Vegas'),
(u'2', u'Comarca Sur'),
(u'3', u'Cuenca Alta del Manzanares'),
(u'4', u'Cuenca del Guadarrama'),
(u'5', u'Cuenca del Henares'),
(u'6', u'Cuenca del Medio Jarama'),
(u'7', u'Sierra Norte'),
(u'8', u'Sierra Oeste'),
]),
((u'ML', u'Melilla'),
[(u'ML', u'Melilla')]),
((u'MU', u'Murcia'),
[(u'MU', u'Murcia')]),
((u'NA', u'Navarra'),
[(u'NA', u'Navarra')]),
((u'PV', u'País Vasco'),
[(u'VI', u'Álava'),
# NOTE(review): 'Giupúzcoa' looks like a typo for 'Guipúzcoa'; it is a
# user-visible runtime string, so it is left unchanged here -- confirm.
(u'SS', u'Giupúzcoa'),
(u'BI', u'Vizcaya')]),
((u'CV', u'Valencia'),
[(u'A', u'Alicante'),
(u'CS', u'Castellón'),
(u'V', u'Valencia')])
]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.