text stringlengths 0 1.05M | meta dict |
|---|---|
# ADAPTED FROM https://github.com/openai/gym-http-api
import requests
import six.moves.urllib.parse as urlparse
import json
import os
import pkg_resources
import sys
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Client(object):
    """HTTP client for a remote gym_http_server instance.

    Wraps the server's REST API (environment creation, reset, step,
    monitoring, submission) behind plain Python methods.  All requests are
    JSON over a persistent ``requests.Session``.
    """

    def __init__(self, remote_base):
        # remote_base: base URL of the gym HTTP server, e.g. "http://host:5000"
        self.remote_base = remote_base
        self.session = requests.Session()
        self.session.headers.update({'Content-type': 'application/json'})
        self.instance_id = None

    def _parse_server_error_or_raise_for_status(self, resp):
        """Decode a JSON response, converting server errors to ServerError.

        Raises ServerError when the server returned a descriptive JSON
        "message" with a non-200 status; otherwise defers to
        ``resp.raise_for_status()``.  Returns the decoded JSON dict.
        """
        j = {}
        try:
            j = resp.json()
        except ValueError:
            # JSON parse failed -- most likely a network/proxy error, not a
            # server error (the server always answers in JSON).  Raise the
            # default HTTP error instead of letting the parse error escape.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            resp.raise_for_status()
        if resp.status_code != 200 and "message" in j:  # descriptive message from server side
            raise ServerError(message=j["message"], status_code=resp.status_code)
        resp.raise_for_status()
        return j

    def _post_request(self, route, data):
        url = urlparse.urljoin(self.remote_base, route)
        logger.info("POST {}\n{}".format(url, json.dumps(data)))
        # Reuse the already-joined URL instead of joining a second time.
        resp = self.session.post(url, data=json.dumps(data))
        return self._parse_server_error_or_raise_for_status(resp)

    def _get_request(self, route):
        url = urlparse.urljoin(self.remote_base, route)
        logger.info("GET {}".format(url))
        resp = self.session.get(url)
        return self._parse_server_error_or_raise_for_status(resp)

    def env_create(self, token, env_id="Run"):
        """Create a remote environment; return its first observation.

        Exits the process with the server's message on a ServerError
        (e.g. bad token or stale client version).
        """
        route = '/v1/envs/'
        data = {'env_id': env_id,
                'token': token,
                'version': pkg_resources.get_distribution("osim-rl").version}
        try:
            resp = self._post_request(route, data)
        except ServerError as e:
            sys.exit(e.message)
        self.instance_id = resp['instance_id']
        self.env_monitor_start("tmp", force=True)
        return self.env_reset()

    def env_reset(self):
        """Reset the remote environment and return the initial observation."""
        route = '/v1/envs/{}/reset/'.format(self.instance_id)
        resp = self._post_request(route, None)
        return resp['observation']

    def env_step(self, action, render=False):
        """Advance one step; return [observation, reward, done, info]."""
        route = '/v1/envs/{}/step/'.format(self.instance_id)
        data = {'action': action, 'render': render}
        resp = self._post_request(route, data)
        return [resp['observation'], resp['reward'], resp['done'], resp['info']]

    def env_monitor_start(self, directory,
                          force=False, resume=False, video_callable=False):
        """Start server-side monitoring, recording into `directory`."""
        route = '/v1/envs/{}/monitor/start/'.format(self.instance_id)
        data = {'directory': directory,
                'force': force,
                'resume': resume,
                'video_callable': video_callable}
        self._post_request(route, data)

    def submit(self):
        """Close the monitor (finalizing the submission), then the env."""
        route = '/v1/envs/{}/monitor/close/'.format(self.instance_id)
        result = self._post_request(route, None)
        if result['reward']:
            print("Your total reward from this submission: %f" % result['reward'])
        else:
            print("There was an error in your submission. Please contact administrators.")
        # env_close() builds its own route; a duplicate, unused assignment
        # that previously sat here was dead code and has been removed.
        self.env_close()

    def env_close(self):
        """Shut down the remote environment instance."""
        route = '/v1/envs/{}/close/'.format(self.instance_id)
        self._post_request(route, None)
class ServerError(Exception):
    """Error reported by the gym HTTP server.

    Carries the server-supplied message; the HTTP status code is attached
    only when one was provided.
    """

    def __init__(self, message, status_code=None):
        super(ServerError, self).__init__()
        self.message = message
        # Attach the attribute only when a code was actually supplied.
        if status_code is not None:
            self.status_code = status_code
| {
"repo_name": "JackieTseng/Learning_to_run",
"path": "osim/http/client.py",
"copies": "1",
"size": "4026",
"license": "mit",
"hash": -6742848888585377000,
"line_mean": 36.6261682243,
"line_max": 98,
"alpha_frac": 0.5956284153,
"autogenerated": false,
"ratio": 3.8823529411764706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.996427779368698,
"avg_score": 0.002740712557898313,
"num_lines": 107
} |
# adapted from https://github.com/pybind/cmake_example
import os
import re
import sys
import platform
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
def get_env():
sp = sys.path[1].split("/")
if "envs" in sp:
return "/".join(sp[:sp.index("envs") + 2])
else:
return "/".join(sp[:-1])
class CMakeExtension(Extension):
    """A setuptools Extension whose build is delegated to CMake.

    The source list is deliberately empty; CMakeBuild only needs the
    project directory to drive the native build.
    """

    def __init__(self, name, sourcedir=''):
        super(CMakeExtension, self).__init__(name, sources=[])
        # Absolute path so CMake can be invoked from the build temp dir.
        self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
    """build_ext command that compiles CMakeExtensions by invoking cmake."""

    def run(self):
        # CMake is a hard requirement; fail early with a clear message.
        try:
            out = subprocess.check_output(['cmake', '--version'])
        except OSError:
            raise RuntimeError("CMake must be installed to build the following extensions: " +
                               ", ".join(e.name for e in self.extensions))
        if platform.system() == "Windows":
            # The Visual Studio generator needs at least CMake 3.1.
            cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
            if cmake_version < '3.1.0':
                raise RuntimeError("CMake >= 3.1.0 is required on Windows")
        for ext in self.extensions:
            self.build_extension(ext)

    def build_extension(self, ext):
        # Directory where setuptools expects the built shared library.
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
        cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
                      '-DPYTHON_EXECUTABLE=' + sys.executable,
                      '-DCMAKE_PREFIX_PATH=' + get_env()]
        cfg = 'Release'
        build_args = ['--config', cfg]
        if platform.system() == "Windows":
            # MSVC is multi-config: set the per-config output dir explicitly.
            cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
            if sys.maxsize > 2 ** 32:
                # 64-bit interpreter: build a 64-bit extension.
                cmake_args += ['-A', 'x64']
            build_args += ['--', '/m']
        else:
            cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
            build_args += ['--', '-j2']
        env = os.environ.copy()
        # Embed the package version into the native build via CXXFLAGS.
        env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
                                                              self.distribution.get_version())
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        # Configure, then build, inside the temporary build directory.
        subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
        subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
# Package metadata; the single extension is built entirely by CMake through
# the custom CMakeBuild command above.
setup(
    name='custom_potential_example',
    version='0.0.1',
    author='Moritz Hoffmann',
    ext_modules=[CMakeExtension('custom_potential_example')],
    cmdclass=dict(build_ext=CMakeBuild),
    zip_safe=False,
)
| {
"repo_name": "readdy/readdy",
"path": "examples/custom_potential/setup.py",
"copies": "1",
"size": "2719",
"license": "bsd-3-clause",
"hash": 5019624706463943000,
"line_mean": 33.858974359,
"line_max": 98,
"alpha_frac": 0.566016918,
"autogenerated": false,
"ratio": 3.8403954802259888,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4906412398225989,
"avg_score": null,
"num_lines": null
} |
# Adapted from https://github.com/rlcode/per/blob/master/SumTree.py
import numpy
# SumTree
# a binary tree data structure where the parent’s value is the sum of its children
class SumTree:
    """Binary sum-tree: every parent node stores the sum of its children.

    Leaves hold per-sample priorities, so ``total()`` is the root sum and
    ``get(s)`` descends from the root to the leaf whose cumulative
    priority interval contains ``s`` -- the core of prioritized replay.
    """
    write = 0  # next data slot to (over)write, wraps circularly

    def __init__(self, capacity):
        self.capacity = capacity
        # Flat full binary tree: capacity leaves + capacity-1 internal nodes.
        self.tree = numpy.zeros(2 * capacity - 1)
        self.data = numpy.zeros(capacity, dtype=object)
        self.n_entries = 0
        # Leaf indices handed out whose priority update is still pending.
        self.pending_idx = set()

    def _propagate(self, idx, change):
        """Push a priority delta from leaf `idx` up to the root."""
        parent = (idx - 1) // 2
        self.tree[parent] += change
        if parent != 0:
            self._propagate(parent, change)

    def _retrieve(self, idx, s):
        """Descend from node `idx` to the leaf covering cumulative mass `s`."""
        while True:
            left = 2 * idx + 1
            if left >= len(self.tree):
                return idx
            if s <= self.tree[left]:
                idx = left
            else:
                s -= self.tree[left]
                idx = left + 1

    def total(self):
        """Sum of all stored priorities (value at the root)."""
        return self.tree[0]

    def add(self, p, data):
        """Store `data` with priority `p`, overwriting the oldest slot when full."""
        idx = self.write + self.capacity - 1
        self.pending_idx.add(idx)
        self.data[self.write] = data
        self.update(idx, p)
        self.write = (self.write + 1) % self.capacity
        if self.n_entries < self.capacity:
            self.n_entries += 1

    def update(self, idx, p):
        """Set leaf `idx` to priority `p`; ignored unless the index is pending."""
        if idx not in self.pending_idx:
            return
        self.pending_idx.remove(idx)
        delta = p - self.tree[idx]
        self.tree[idx] = p
        self._propagate(idx, delta)

    def get(self, s):
        """Return (tree_index, priority, data_index) for cumulative mass `s`."""
        idx = self._retrieve(0, s)
        self.pending_idx.add(idx)
        return (idx, self.tree[idx], idx - self.capacity + 1)
"repo_name": "ShangtongZhang/DeepRL",
"path": "deep_rl/utils/sum_tree.py",
"copies": "1",
"size": "1856",
"license": "mit",
"hash": 6522820849383296000,
"line_mean": 26.6865671642,
"line_max": 82,
"alpha_frac": 0.5604099245,
"autogenerated": false,
"ratio": 3.708,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47684099245,
"avg_score": null,
"num_lines": null
} |
# Adapted from https://github.com/rmcgibbo/npcuda-example and
# https://github.com/cupy/cupy/blob/master/cupy_setup_build.py
import logging
import os
import sys
from distutils import ccompiler, errors, msvccompiler, unixccompiler
from setuptools.command.build_ext import build_ext as setuptools_build_ext
def find_in_path(name, path):
    """Return the absolute path of file `name` within search `path`, or None."""
    # adapted fom http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    for directory in path.split(os.pathsep):
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
def locate_cuda():
    """Locate the CUDA environment on the system
    If a valid cuda installation is found this returns a dict with keys 'home', 'nvcc', 'include',
    and 'lib64' and values giving the absolute path to each directory.
    Starts by looking for the CUDAHOME env variable. If not found, everything is based on finding
    'nvcc' in the PATH.
    If nvcc can't be found, this returns None
    """
    nvcc_bin = "nvcc"
    if sys.platform.startswith("win"):
        nvcc_bin = "nvcc.exe"
    # first check if the CUDAHOME env variable is in use
    nvcc = None
    if "CUDAHOME" in os.environ:
        home = os.environ["CUDAHOME"]
        nvcc = os.path.join(home, "bin", nvcc_bin)
    elif "CUDA_PATH" in os.environ:
        home = os.environ["CUDA_PATH"]
        nvcc = os.path.join(home, "bin", nvcc_bin)
    # otherwise, search the PATH for NVCC
    if not nvcc or not os.path.exists(nvcc):
        nvcc = find_in_path(nvcc_bin, os.environ["PATH"])
    if nvcc is None:
        logging.warning(
            "The nvcc binary could not be located in your $PATH. Either add it to "
            "your path, or set $CUDAHOME to enable CUDA extensions"
        )
        return None
    # Derive the install root from .../bin/nvcc.
    home = os.path.dirname(os.path.dirname(nvcc))
    if not os.path.exists(os.path.join(home, "include")):
        logging.warning("Failed to find cuda include directory, attempting /usr/local/cuda")
        home = "/usr/local/cuda"
    cudaconfig = {
        "home": home,
        "nvcc": nvcc,
        "include": os.path.join(home, "include"),
        "lib64": os.path.join(home, "lib64"),
    }
    # nvcc flags: target several GPU architectures plus a PTX fallback.
    post_args = [
        "-arch=sm_60",
        "-gencode=arch=compute_50,code=sm_50",
        "-gencode=arch=compute_52,code=sm_52",
        "-gencode=arch=compute_60,code=sm_60",
        "-gencode=arch=compute_61,code=sm_61",
        "-gencode=arch=compute_70,code=sm_70",
        "-gencode=arch=compute_70,code=compute_70",
        "--ptxas-options=-v",
        "--extended-lambda",
        "-O2",
    ]
    if sys.platform == "win32":
        # Windows keeps the import libraries under lib/x64, not lib64.
        cudaconfig["lib64"] = os.path.join(home, "lib", "x64")
        post_args += ["-Xcompiler", "/MD"]
    else:
        post_args += ["-c", "--compiler-options", "'-fPIC'"]
    # Verify every discovered path actually exists before committing.
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            logging.warning("The CUDA %s path could not be located in %s", k, v)
            return None
    cudaconfig["post_args"] = post_args
    return cudaconfig
# This code to build .cu extensions with nvcc is taken from cupy:
# https://github.com/cupy/cupy/blob/master/cupy_setup_build.py
class _UnixCCompiler(unixccompiler.UnixCCompiler):
    """UnixCCompiler variant that routes .cu sources through nvcc."""
    src_extensions = list(unixccompiler.UnixCCompiler.src_extensions)
    src_extensions.append(".cu")

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        # For sources other than CUDA C ones, just call the super class method.
        if os.path.splitext(src)[1] != ".cu":
            return unixccompiler.UnixCCompiler._compile(
                self, obj, src, ext, cc_args, extra_postargs, pp_opts
            )
        # For CUDA C source files, compile them with NVCC.
        _compiler_so = self.compiler_so
        try:
            nvcc_path = CUDA["nvcc"]
            post_args = CUDA["post_args"]
            # TODO? base_opts = build.get_compiler_base_options()
            # Temporarily swap the compiler executable for nvcc.
            self.set_executable("compiler_so", nvcc_path)
            return unixccompiler.UnixCCompiler._compile(
                self, obj, src, ext, cc_args, post_args, pp_opts
            )
        finally:
            # Always restore the original compiler, even on failure.
            self.compiler_so = _compiler_so
class _MSVCCompiler(msvccompiler.MSVCCompiler):
    """MSVCCompiler variant that compiles .cu sources with nvcc, the rest with MSVC."""
    _cu_extensions = [".cu"]
    src_extensions = list(unixccompiler.UnixCCompiler.src_extensions)
    src_extensions.extend(_cu_extensions)

    def _compile_cu(
        self,
        sources,
        output_dir=None,
        macros=None,
        include_dirs=None,
        debug=0,
        extra_preargs=None,
        extra_postargs=None,
        depends=None,
    ):
        # Compile CUDA C files, mainly derived from UnixCCompiler._compile().
        macros, objects, extra_postargs, pp_opts, _build = self._setup_compile(
            output_dir, macros, include_dirs, sources, depends, extra_postargs
        )
        compiler_so = CUDA["nvcc"]
        cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
        post_args = CUDA["post_args"]
        for obj in objects:
            try:
                src, _ = _build[obj]
            except KeyError:
                # Object not scheduled for (re)build in this pass; skip it.
                continue
            try:
                self.spawn([compiler_so] + cc_args + [src, "-o", obj] + post_args)
            except errors.DistutilsExecError as e:
                # Re-raise as CompileError so distutils reports it normally.
                raise errors.CompileError(str(e))
        return objects

    def compile(self, sources, **kwargs):
        # Split CUDA C sources and others.
        cu_sources = []
        other_sources = []
        for source in sources:
            if os.path.splitext(source)[1] == ".cu":
                cu_sources.append(source)
            else:
                other_sources.append(source)
        # Compile source files other than CUDA C ones.
        other_objects = msvccompiler.MSVCCompiler.compile(self, other_sources, **kwargs)
        # Compile CUDA C sources.
        cu_objects = self._compile_cu(cu_sources, **kwargs)
        # Return compiled object filenames.
        return other_objects + cu_objects
class cuda_build_ext(setuptools_build_ext):
    """Custom `build_ext` command to include CUDA C source files."""

    def run(self):
        if CUDA is not None:
            def wrap_new_compiler(func):
                def _wrap_new_compiler(*args, **kwargs):
                    try:
                        return func(*args, **kwargs)
                    except errors.DistutilsPlatformError:
                        # Substitute our CUDA-aware compiler for the platform default.
                        if not sys.platform == "win32":
                            CCompiler = _UnixCCompiler
                        else:
                            CCompiler = _MSVCCompiler
                        return CCompiler(None, kwargs["dry_run"], kwargs["force"])
                return _wrap_new_compiler
            ccompiler.new_compiler = wrap_new_compiler(ccompiler.new_compiler)
            # Intentionally causes DistutilsPlatformError in
            # ccompiler.new_compiler() function to hook.
            self.compiler = "nvidia"
        setuptools_build_ext.run(self)
# Probe for CUDA once at import time; fall back to the stock setuptools
# build_ext when no usable installation is found.
CUDA = locate_cuda()
build_ext = cuda_build_ext if CUDA else setuptools_build_ext
| {
"repo_name": "benfred/implicit",
"path": "cuda_setup.py",
"copies": "1",
"size": "7190",
"license": "mit",
"hash": -2543849631468683000,
"line_mean": 33.5673076923,
"line_max": 98,
"alpha_frac": 0.5899860918,
"autogenerated": false,
"ratio": 3.6796315250767657,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47696176168767657,
"avg_score": null,
"num_lines": null
} |
# Adapted from https://github.com/robtandy/randomdict
# As version there has an outstanding bug
from collections import MutableMapping
import random
class RandomDict(MutableMapping):
    """Mutable mapping with O(1) uniform random key/value/item access.

    Key->slot positions live in ``self.keys`` while (key, value) pairs are
    kept densely packed in ``self.values``, so a random entry is a single
    ``random.randint`` draw.  Deletion swaps the last pair into the freed
    slot to preserve density.
    """

    def __init__(self, *args, **kwargs):
        """Create a RandomDict from dicts (*args) and key/value pairs (**kwargs)."""
        self.keys = {}        # key -> index into self.values
        self.values = []      # densely packed (key, value) pairs
        self.last_index = -1  # index of the last occupied slot
        self.update(*args, **kwargs)

    def __setitem__(self, key, val):
        slot = self.keys.get(key)
        if slot is not None:
            self.values[slot] = (key, val)
        else:
            self.last_index += 1
            self.values.append((key, val))
            self.keys[key] = self.last_index

    def __delitem__(self, key):
        if key not in self.keys:
            raise KeyError
        slot = self.keys[key]
        # Pop the last packed pair; unless it IS the deleted entry, move it
        # into the vacated slot so the array stays dense.
        moved_key, moved_val = self.values.pop()
        if slot != self.last_index:
            self.values[slot] = (moved_key, moved_val)
            self.keys[moved_key] = slot
        self.last_index -= 1
        del self.keys[key]

    def __getitem__(self, key):
        if key not in self.keys:
            raise KeyError
        return self.values[self.keys[key]][1]

    def __iter__(self):
        return iter(self.keys)

    def __len__(self):
        return self.last_index + 1

    def random_key(self):
        """ Return a random key from this dictionary in O(1) time """
        if len(self) == 0:
            raise KeyError("RandomDict is empty")
        return self.values[random.randint(0, self.last_index)][0]

    def random_value(self):
        """ Return a random value from this dictionary in O(1) time """
        return self[self.random_key()]

    def random_item(self):
        """ Return a random key-value pair from this dictionary in O(1) time """
        k = self.random_key()
        return k, self[k]
| {
"repo_name": "mattea/mattea-utils",
"path": "matteautils/randomdict.py",
"copies": "1",
"size": "2008",
"license": "apache-2.0",
"hash": -7060658257533913000,
"line_mean": 24.1,
"line_max": 74,
"alpha_frac": 0.6628486056,
"autogenerated": false,
"ratio": 3.065648854961832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4228497460561832,
"avg_score": null,
"num_lines": null
} |
# Adapted from https://github.com/sparticlesteve/cosmoflow-benchmark/blob/master/models/cosmoflow_v1.py
"""Model specification for CosmoFlow This module contains the v1
implementation of the benchmark model. It is deprecated now and being
replaced with the updated, more configurable architecture currently
defined in cosmoflow.py.
"""
import tensorflow as tf
import tensorflow.keras.layers as layers
def scale_1p2(x):
    """Scale the input by a factor of 1.2 (Lambda-layer helper).

    Stretches a tanh activation's output so targets in the range [-1, 1]
    are fully covered.
    """
    return 1.2 * x
def build_model(input_shape, target_size, dropout=0):
    """Build the v1 CosmoFlow Keras model.

    Five Conv3D/LeakyReLU/MaxPool3D stages, then two hidden dense layers
    and a tanh output scaled by 1.2 to cover targets in [-1, 1].
    """
    conv_args = dict(kernel_size=2, padding="valid")
    model = tf.keras.models.Sequential(
        [
            layers.Conv3D(16, input_shape=input_shape, **conv_args),
            layers.LeakyReLU(),
            layers.MaxPool3D(pool_size=2),
            #
            layers.Conv3D(16, **conv_args),
            layers.LeakyReLU(),
            layers.MaxPool3D(pool_size=2),
            #
            layers.Conv3D(16, **conv_args),
            layers.LeakyReLU(),
            layers.MaxPool3D(pool_size=2),
            #
            layers.Conv3D(16, **conv_args),
            layers.LeakyReLU(),
            layers.MaxPool3D(pool_size=2),
            #
            layers.Conv3D(16, **conv_args),
            layers.LeakyReLU(),
            layers.MaxPool3D(pool_size=2),
            #
            layers.Flatten(),
            layers.Dropout(dropout),
            #
            layers.Dense(128),
            layers.LeakyReLU(),
            layers.Dropout(dropout),
            #
            layers.Dense(64),
            layers.LeakyReLU(),
            layers.Dropout(dropout),
            #
            layers.Dense(target_size, activation="tanh"),
            layers.Lambda(scale_1p2),
        ]
    )
    return model
def get_kernel(params):
    """Construct the CosmoFlow 3D CNN model"""
    # params must supply 'input_shape', 'target_size' and 'dropout'.
    model = build_model(params["input_shape"], params["target_size"], params["dropout"])
    model.compile(loss="mse", optimizer="sgd")
    return model
| {
"repo_name": "undertherain/benchmarker",
"path": "benchmarker/kernels/cosmoflow/tensorflow.py",
"copies": "1",
"size": "2148",
"license": "mpl-2.0",
"hash": 4417416568213855000,
"line_mean": 28.8333333333,
"line_max": 103,
"alpha_frac": 0.5856610801,
"autogenerated": false,
"ratio": 3.768421052631579,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4854082132731579,
"avg_score": null,
"num_lines": null
} |
# adapted from https://github.com/tannewt/agohunterdouglas/agohunterdouglas.py
# license from that project included in this repo as well
import time
import socket
import json
import re
import sys
import subprocess
import logging
from colorlog import ColoredFormatter
LOG_LEVEL = logging.ERROR
LOGFORMAT = "%(log_color)s[%(levelname)s] %(asctime)s %(name)s : %(message)s%(reset)s"
LOG = None
def get_log(level=LOG_LEVEL):
    """Return the module-wide colored logger, creating it on first use.

    The logger is cached in the global LOG; `level` only takes effect on
    the first call, when the stream handler is installed.
    """
    global LOG
    if not LOG:
        formatter = ColoredFormatter(LOGFORMAT)
        stream = logging.StreamHandler()
        stream.setFormatter(formatter)
        logging.root.setLevel(level)
        stream.setLevel(level)
        LOG = logging.getLogger(__name__)
        LOG.addHandler(stream)
        LOG.setLevel(level)
    return LOG
HD_GATEWAY_PORT = 522
TIMEOUT = 10
DB_FILE = "hunterdouglas.json"
TEMP_FILE = "input.txt"
DB = {}
def net_com(message, sentinel=None):
    """Send `message` to the gateway and return its reply.

    Dispatches to the raw-socket transport by default, or to the netcat
    fallback when DB['comtype'] is set to something other than 'socket'.
    `sentinel` marks the end of the expected reply.
    """
    check_db()
    content = None
    get_log().debug("sending message: %s", message)
    if not 'comtype' in DB or "socket" == DB['comtype']:
        get_log().debug("using socket communication")
        content = socket_com(message, sentinel)
    else:
        get_log().debug("using netcat communication")
        content = nc_com(message, sentinel)
    get_log().debug("received message:")
    get_log().debug(content)
    return content
def socket_com(message, sentinel=None, sock=None):
    """Send `message` over `sock` (created on demand) and read until `sentinel`.

    Returns the received text, or None when no connection could be made.
    The socket is always closed afterwards -- including one supplied by
    the caller.
    """
    content = None
    try:
        if not sock:
            sock = create_socket()
        # Bug fix: create_socket() returns None when the gateway is
        # unreachable; previously None.sendall raised an uncaught
        # AttributeError here.  Now we simply return None.
        if sock:
            sock.sendall(message)
            content = recv_until(sock, sentinel)
    except socket.error:
        # Best-effort transport: connection errors yield None.
        pass
    finally:
        if sock:
            sock.close()
    return content
def nc_com(message, sentinel=None):
    """Send `message` via an external netcat process (fallback transport).

    The message is staged in TEMP_FILE and piped to nc's stdin.
    `sentinel` is accepted for interface parity with socket_com but is
    unused: nc exits on its own idle timeout (-i2).
    """
    check_db()
    content = None
    lcmd = ['/usr/bin/nc', '-i2', DB['server'], str(HD_GATEWAY_PORT)]
    get_log().debug("executing %s", lcmd)
    with open(TEMP_FILE, 'w+') as fp:
        fp.write(message+"\n")
    with open(TEMP_FILE, 'r') as fp:
        content = subprocess.check_output(lcmd, stderr=subprocess.STDOUT, stdin=fp)
    return content
def check_db():
    """Lazily load the persisted DB the first time it is needed."""
    global DB
    if "rooms" not in DB:
        load_db()
def save_db():
    """Persist the in-memory DB to DB_FILE as JSON."""
    global DB
    with open(DB_FILE, 'w+') as handle:
        json.dump(DB, handle)
def load_db():
    """Load DB from DB_FILE; leave DB untouched when the file is missing."""
    global DB
    try:
        with open(DB_FILE, 'r') as handle:
            DB = json.load(handle)
    except IOError:
        # No saved cache yet -- keep the current (possibly empty) DB.
        pass
def set_server(server):
    """Record the gateway address in the DB and persist it immediately."""
    global DB
    DB['server'] = server
    save_db()
def is_alive(sock):
    """Probe the gateway with a dummy command; a truthy reply means alive."""
    return socket_com("$dmy", "ack", sock)
def create_socket():
    """Open a TCP connection to the gateway and consume its greeting banner.

    Returns a connected socket, or None when the gateway is unreachable.
    """
    global DB
    check_db()
    sock = None
    try:
        sock = socket.create_connection((DB['server'], HD_GATEWAY_PORT), timeout=TIMEOUT)
        # The gateway greets every connection; read past the banner.
        recv_until(sock, 'Shade Controller')
    except socket.error:
        # Bug fix: if create_connection itself failed, `sock` used to be an
        # unbound local and `sock.close()` raised NameError instead of
        # returning None.
        if sock:
            sock.close()
        sock = None
    return sock
def set_multi_shade(internal_ids, hd_value):
    """Apply `hd_value` to several shades.

    Accepts either a list of ids or a space-separated id string; pauses
    two seconds between shades to avoid flooding the gateway.
    """
    if not isinstance(internal_ids, list):
        internal_ids = internal_ids.split(' ')
    for shade_id in internal_ids:
        set_shade(shade_id, hd_value)
        time.sleep(2)
def set_room(internal_id, hd_value):
    """Move every shade that belongs to room `internal_id` to `hd_value`."""
    check_db()
    shade_ids = [DB['shades'][name]['id']
                 for name in DB['shades']
                 if DB['shades'][name]['room'] == internal_id]
    set_multi_shade(shade_ids, hd_value)
    return None
def set_shade(internal_id, hd_value):
    """Set a single shade: "up", "down", or a 0-100 percentage string.

    Percentages are rescaled to the gateway's 0-255 range.  Returns the
    combined gateway response text, or None for an invalid value.
    """
    # Bug fix: an unused socket was opened here via create_socket() and
    # never closed -- net_com() below opens its own connection, so the
    # extra socket only leaked a file descriptor on every call.
    if "up" == hd_value:
        hd_value = 255
    elif "down" == hd_value:
        hd_value = 0
    else:
        if hd_value.isdigit():
            hd_value = min(int(round(int(hd_value)*255.0/100)), 255)
        else:
            hd_value = -1
    if 0 > hd_value or 255 < hd_value:
        return None
    content = net_com("$pss%s-04-%03d" % (internal_id, hd_value), "done")
    return content + net_com("$rls", "act00-00-")
def set_scene(internal_id):
    """Activate the scene with the given gateway id."""
    return net_com("$inm%s-" % (internal_id), "act00-00-")
def recv_until(sock, sentinel=None):
    """Read from `sock` one byte at a time until `sentinel` appears.

    Stops on the sentinel suffix, an empty read (peer closed), or a
    socket timeout.  With sentinel=None, reads until timeout/close.
    Returns everything received so far.
    """
    info = ""
    while True:
        try:
            chunk = sock.recv(1)
        except socket.timeout:
            break
        info += chunk
        # Bug fix: guard against sentinel=None -- str.endswith(None)
        # raises TypeError, which crashed the no-sentinel path.
        if sentinel and info.endswith(sentinel):
            break
        if not chunk:
            break
    return info
def find_by_id(type, id):
    """Return the DB[type] key whose entry's 'id' equals `id`, or None."""
    global DB
    check_db()
    for entry_key in DB[type].keys():
        if DB[type][entry_key]['id'] == id:
            return entry_key
    return None
def find_by_name(kind, name):
    """Return every DB[kind] entry whose search string contains `name`."""
    global DB
    check_db()
    needle = name.lower()
    return [DB[kind][key] for key in DB[kind].keys()
            if needle in DB[kind][key]['search']]
def init(params=None):
    """Connect to the Platinum Gateway and rebuild the room/scene/shade cache.

    params: optional "<server-ip> [alt]" string; 'alt' selects the netcat
    transport.  Returns a human-readable status message.
    """
    global DB
    check_db()
    comtype = 'socket'
    if 'comtype' in DB:
        comtype = DB['comtype']
    if params:
        # <server-ip> <init-method (alt)>
        params = params.split(' ')
        get_log().debug("processing with params : %s", params)
        if(len(params) > 0):
            set_server(params[0])
        if(len(params) > 1 and 'alt' == params[1]):
            comtype = 'alt'
    DB['comtype'] = comtype
    if not 'server' in DB or not DB['server']:
        msg = "Platinum Gateway IP is not set. Please set it using pl_update <ip>"
        return msg
    # Probe connectivity before asking for the full configuration dump.
    sock = create_socket()
    if not sock:
        msg = "Cannot reach Platinum Gateway. Please recheck IP and set"
        return msg
    else:
        sock.close()
    # $dat dumps the gateway's whole configuration as prefixed text lines.
    info = net_com("$dat", "upd01-")
    if not info:
        msg = "Unable to get data about windows and scenes from Gateway"
        return msg
    DB['rooms'] = {}
    DB['scenes'] = {}
    DB['shades'] = {}
    prefix = None
    lines = re.split(r'[\n\r]+', info)
    for line in lines:
        line = line.strip()
        if not prefix:
            # The first line establishes the 2-char prefix used by all records.
            prefix = line[:2]
        elif not line.startswith(prefix):
            continue
        else:
            line = line[2:]
        if line.startswith("$cr"):
            # name of room
            room_id = line[3:5]
            room_name = line.split('-')[-1].strip()
            DB['rooms'][room_name] = {'name':room_name, 'id':room_id, 'search':room_name.lower()}
        elif line.startswith("$cm"):
            # name of scene
            scene_id = line[3:5]
            scene_name = line.split('-')[-1].strip()
            DB['scenes'][scene_name] = {'name':scene_name, 'id':scene_id, 'search':scene_name.lower()}
        elif line.startswith("$cs"):
            # name of a shade
            parts = line.split('-')
            shade_id = line[3:5]
            shade_name = parts[-1].strip()
            room_id = parts[1]
            DB['shades'][shade_name] = {'name':shade_name, 'id':shade_id, 'search':shade_name.lower(), 'room': room_id}
        elif line.startswith("$cp"):
            # state of a shade
            shade_id = line[3:5]
            state = line[-4:-1]
            # Rescale the gateway's 0-255 position onto a 0-16 step value.
            state = str(int((int(state) / 255.) * 16))
            shade = find_by_id('shades',shade_id)
            if shade:
                DB['shades'][shade]['state'] = state
    save_db()
    return "Window Cache Updated"
def main():
    """CLI entry point: enable debug logging, run init() with argv params."""
    get_log(logging.DEBUG)
    # Bug fix: `print init(...)` is Python-2-only syntax.  Calling print()
    # as a function emits identical output for a single argument on
    # Python 2 and makes the module importable under Python 3.
    print(init(" ".join(sys.argv[1:])))

if __name__ == "__main__":
    main()
| {
"repo_name": "schwark/alfred-hunterdouglas",
"path": "hunterdouglas.py",
"copies": "1",
"size": "6552",
"license": "mit",
"hash": -4036965793380962000,
"line_mean": 23.0919117647,
"line_max": 113,
"alpha_frac": 0.6153846154,
"autogenerated": false,
"ratio": 3.169811320754717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42851959361547176,
"avg_score": null,
"num_lines": null
} |
# Adapted from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/learn/python/learn/datasets/mnist.py
import numpy as np
class DataSet(object):
    """In-memory dataset with epoch-based shuffled mini-batching.

    Inputs with more than two dimensions are flattened to (N, -1) and cast
    to float32.  Batches are served from working copies, so the original
    arrays are never reordered.
    """

    def __init__(self, x, labels):
        if len(x.shape) > 2:
            x = np.reshape(x, [x.shape[0], -1])
        assert(x.shape[0] == labels.shape[0])
        x = x.astype(np.float32)
        self._x = x
        self._x_batch = np.copy(x)
        self._labels = labels
        self._labels_batch = np.copy(labels)
        self._num_examples = x.shape[0]
        self._index_in_epoch = 0

    @property
    def x(self):
        """Full (flattened, float32) feature matrix in original order."""
        return self._x

    @property
    def labels(self):
        """Full label vector in original order."""
        return self._labels

    @property
    def num_examples(self):
        """Number of rows in the dataset."""
        return self._num_examples

    def reset_batch(self):
        """Restart batching from the original (unshuffled) order."""
        self._index_in_epoch = 0
        self._x_batch = np.copy(self._x)
        self._labels_batch = np.copy(self._labels)

    def next_batch(self, batch_size):
        """Return the next (x, labels) batch, reshuffling at epoch end."""
        assert batch_size <= self._num_examples
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Epoch exhausted: shuffle the working copies and start over.
            order = np.arange(self._num_examples)
            np.random.shuffle(order)
            self._x_batch = self._x_batch[order, :]
            self._labels_batch = self._labels_batch[order]
            start = 0
            self._index_in_epoch = batch_size
        end = self._index_in_epoch
        return self._x_batch[start:end], self._labels_batch[start:end]
def filter_dataset(X, Y, pos_class, neg_class):
    """
    Filters out elements of X and Y that aren't one of pos_class or neg_class
    then transforms labels of Y so that +1 = pos_class, -1 = neg_class.
    """
    assert(X.shape[0] == Y.shape[0])
    assert(len(Y.shape) == 1)
    Y = Y.astype(int)
    pos_mask = Y == pos_class
    neg_mask = Y == neg_class
    Y[pos_mask] = 1
    Y[neg_mask] = -1
    keep = pos_mask | neg_mask
    return (X[keep, ...], Y[keep])
def find_distances(target, X, theta=None):
    """Distance from each row of X to `target`.

    Euclidean norm by default; with `theta`, the absolute projection of
    each difference vector onto theta instead.
    """
    assert len(X.shape) == 2, "X must be 2D, but it is currently %s" % len(X.shape)
    target = np.reshape(target, -1)
    assert X.shape[1] == len(target), \
        "X (%s) and target (%s) must have same feature dimension" % (X.shape[1], len(target))
    if theta is None:
        return np.linalg.norm(X - target, axis=1)
    theta = np.reshape(theta, -1)
    # Project each difference vector onto theta and take magnitudes.
    return np.abs((X - target).dot(theta))
"repo_name": "kohpangwei/influence-release",
"path": "influence/dataset.py",
"copies": "1",
"size": "2619",
"license": "mit",
"hash": 4394282622930959400,
"line_mean": 27.1720430108,
"line_max": 123,
"alpha_frac": 0.5597556319,
"autogenerated": false,
"ratio": 3.3151898734177214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43749455053177216,
"avg_score": null,
"num_lines": null
} |
# Adapted from https://github.com/tindie/pydiscourse
import logging
import requests
from django.conf import settings
from requests.exceptions import HTTPError
log = logging.getLogger('pydiscourse.client')
NOTIFICATION_WATCHING = 3
NOTIFICATION_TRACKING = 2
NOTIFICATION_NORMAL = 1
NOTIFICATION_MUTED = 0
# Root of the client's exception hierarchy; subclasses requests' HTTPError
# so existing requests error handling also catches Discourse failures.
class DiscourseError(HTTPError):
    """ A generic error while attempting to communicate with Discourse """
# Raised for 5xx-style failures on the Discourse side.
class DiscourseServerError(DiscourseError):
    """ The Discourse Server encountered an error while processing the request """
# Raised for 4xx-style failures caused by the caller's request.
class DiscourseClientError(DiscourseError):
    """ An invalid request has been made """
class DiscourseClient(object):
""" A basic client for the Discourse API that implements the raw API
This class will attempt to remain roughly similar to the discourse_api rails API
"""
    def __init__(self, host, api_username, api_key, timeout=None):
        # host: base URL of the Discourse instance.  The credentials and
        # timeout are used by the _get/_put/_post/_delete request helpers.
        self.host = host
        self.api_username = api_username
        self.api_key = api_key
        self.timeout = timeout
    def user(self, username):
        """Fetch a single user's public profile by username."""
        return self._get('/users/{0}.json'.format(username))['user']
    def create_user(self, name, username, email, password, **kwargs):
        """Register a new user.

        Pass active='true' in kwargs to avoid sending activation emails.
        """
        r = self._get('/users/hp.json')
        # Discourse's honeypot check: the challenge must be echoed reversed.
        challenge = r['challenge'][::-1]  # reverse challenge, discourse security check
        confirmations = r['value']
        return self._post('/users', name=name, username=username, email=email,
            password=password, password_confirmation=confirmations, challenge=challenge, **kwargs)
    def trust_level(self, userid, level):
        """Set a user's trust level (admin endpoint)."""
        return self._put('/admin/users/{0}/trust_level'.format(userid), level=level)
    def suspend(self, userid, duration, reason):
        """Suspend a user for `duration` with the given reason (admin endpoint)."""
        return self._put('/admin/users/{0}/suspend'.format(userid), duration=duration, reason=reason)
    def list_users(self, type, **kwargs):
        """List admin users of the given `type`.

        Optional user search: filter='test@example.com' or filter='scott'.
        """
        return self._get('/admin/users/list/{0}.json'.format(type), **kwargs)
    def update_avatar_from_url(self, username, url, **kwargs):
        """Set a user's avatar from a remote image URL."""
        return self._post('/users/{0}/preferences/avatar'.format(username), file=url, **kwargs)
    def update_avatar_image(self, username, img, **kwargs):
        """Upload a file-like image object as a user's avatar."""
        files = {'file': img}
        return self._post('/users/{0}/preferences/avatar'.format(username), files=files, **kwargs)
    def toggle_gravatar(self, username, state=True, **kwargs):
        """Switch between the uploaded avatar (state=True) and gravatar."""
        url = '/users/{0}/preferences/avatar/toggle'.format(username)
        # The API expects string booleans, not Python bools.
        if bool(state):
            kwargs['use_uploaded_avatar'] = 'true'
        else:
            kwargs['use_uploaded_avatar'] = 'false'
        return self._put(url, **kwargs)
    def pick_avatar(self, username, gravatar=True, generated=False, **kwargs):
        """Choose the user's avatar source.

        NOTE(review): `gravatar` and `generated` are currently ignored;
        the selection must come through **kwargs -- confirm against the
        Discourse API before relying on them.
        """
        url = '/users/{0}/preferences/avatar/pick'.format(username)
        return self._put(url, **kwargs)
    def update_email(self, username, email, **kwargs):
        """Change a user's email address."""
        return self._put('/users/{0}/preferences/email'.format(username), email=email, **kwargs)
    def update_user(self, username, **kwargs):
        """Update arbitrary fields on a user record (passed via kwargs)."""
        return self._put('/users/{0}'.format(username), **kwargs)
    def update_username(self, username, new_username, **kwargs):
        """Rename a user from `username` to `new_username`."""
        return self._put('/users/{0}/preferences/username'.format(username), username=new_username, **kwargs)
    def set_preference(self, username=None, **kwargs):
        """Set preference fields for `username` (defaults to the API user)."""
        if username is None:
            username = self.api_username
        return self._put(u'/users/{0}'.format(username), **kwargs)
    def generate_api_key(self, userid, **kwargs):
        """Generate a new API key for a user (admin endpoint)."""
        return self._post('/admin/users/{0}/generate_api_key'.format(userid), **kwargs)
    def delete_user(self, userid, **kwargs):
        """Delete a user (admin endpoint).

        Supported kwargs (string booleans):
        block_email='true'
        block_ip='false'
        block_urls='false'
        """
        return self._delete('/admin/users/{0}.json'.format(userid), **kwargs)
    def users(self, filter=None, **kwargs):
        """List users matching `filter` (defaults to 'active')."""
        if filter is None:
            filter = 'active'
        return self._get('/admin/users/list/{0}.json'.format(filter), **kwargs)
    def private_messages(self, username=None, **kwargs):
        """List private messages for `username` (defaults to the API user)."""
        if username is None:
            username = self.api_username
        return self._get('/topics/private-messages/{0}.json'.format(username), **kwargs)
    def private_messages_unread(self, username=None, **kwargs):
        """List unread private messages for `username` (defaults to the API user)."""
        if username is None:
            username = self.api_username
        return self._get('/topics/private-messages-unread/{0}.json'.format(username), **kwargs)
    def hot_topics(self, **kwargs):
        """List currently hot topics."""
        return self._get('/hot.json', **kwargs)
    def latest_topics(self, **kwargs):
        """List the latest topics."""
        return self._get('/latest.json', **kwargs)
    def new_topics(self, **kwargs):
        """List new topics for the API user."""
        return self._get('/new.json', **kwargs)
    def topic(self, slug, topic_id, **kwargs):
        """Fetch a single topic by slug and numeric id."""
        return self._get('/t/{0}/{1}.json'.format(slug, topic_id), **kwargs)
    def post(self, topic_id, post_id, **kwargs):
        """Fetch a post within a topic.

        NOTE(review): this hits /t/<topic_id>/<post_id>.json, the same URL
        shape topic() uses for slug/id -- confirm ``post_id`` is meant as a
        post number in that route.
        """
        return self._get('/t/{0}/{1}.json'.format(topic_id, post_id), **kwargs)
    def posts(self, topic_id, post_ids=None, **kwargs):
        """ Get a set of posts from a topic
        post_ids: a list of post ids from the topic stream
        """
        if post_ids:
            # Rails-style array parameter name.
            kwargs['post_ids[]'] = post_ids
        return self._get('/t/{0}/posts.json'.format(topic_id), **kwargs)
    def create_topic(self, title, category, timeout, price, requester_handle, project_id, **kwargs):
        """ Create a new topic
        title: string -- topic title; also linked in the post body
        category: integer -- category id, forwarded when not None
        timeout: rendered verbatim into the post body
        price: USD amount; None is coerced to 0.0
        requester_handle: forum handle @-mentioned in the body
        project_id: used to build the task-feed preview URL

        NOTE(review): ``settings`` is not imported in this excerpt --
        presumably a module-level Django settings import; confirm.
        """
        if category is not None:
            kwargs['category'] = category
        if title is not None:
            kwargs['title'] = title
        if price is None:
            price = 0.0
        preview_url = "%s/task-feed/%d" % (settings.SITE_HOST, project_id)
        return self.create_post(content="**Title**: [%s](%s) \n"
                                "**Requester**: @%s\n"
                                # "**Tasks available** : %d %0A"
                                "**Price** : USD %.2f \n"
                                "**Timeout** : %s \n" % (title, preview_url, requester_handle, price, timeout),
                                **kwargs)
def topic_timings(self, topic_id, time, timings={}, **kwargs):
""" Set time spent reading a post
time: overall time for the topic
timings = { post_number: ms }
A side effect of this is to mark the post as read
"""
kwargs['topic_id'] = topic_id
kwargs['topic_time'] = time
for post_num, timing in timings.items():
kwargs['timings[{0}]'.format(post_num)] = timing
return self._post('/topics/timings', **kwargs)
    def watch_topic(self, topic_id, **kwargs):
        """Set the caller's notification level on a topic to 'watching'.

        NOTIFICATION_WATCHING is a module-level constant not shown in this
        excerpt.
        """
        kwargs['notification_level'] = NOTIFICATION_WATCHING
        return self._post('/t/{0}/notifications'.format(topic_id), **kwargs)
    def topic_posts(self, topic_id, **kwargs):
        """Fetch a topic's posts (same endpoint as posts(), without ids)."""
        return self._get('/t/{0}/posts.json'.format(topic_id), **kwargs)
    def create_post(self, content, **kwargs):
        """ int: topic_id the topic to reply too

        content is sent as the post's raw markdown body.
        """
        return self._post('/posts', raw=content, **kwargs)
    def update_post(self, post_id, content, edit_reason='', **kwargs):
        """Replace a post's raw content, recording an optional edit reason."""
        kwargs['post[raw]'] = content
        kwargs['post[edit_reason]'] = edit_reason
        return self._put('/posts/{0}'.format(post_id), **kwargs)
    def topics_by(self, username, **kwargs):
        """Return the list of topics created by ``username``."""
        url = '/topics/created-by/{0}.json'.format(username)
        return self._get(url, **kwargs)['topic_list']['topics']
def invite_user_to_topic(self, user_email, topic_id):
kwargs = {
'email': user_email,
'topic_id': topic_id,
}
return self._post('/t/{0}/invite.json'.format(topic_id), **kwargs)
    def search(self, term, **kwargs):
        """Full-text search for ``term``."""
        kwargs['term'] = term
        return self._get('/search.json', **kwargs)
def create_category(self, name, color, text_color='FFFFFF', permissions=None, parent=None, **kwargs):
""" permissions - dict of 'everyone', 'admins', 'moderators', 'staff' with values of
"""
kwargs['name'] = name
kwargs['color'] = color
kwargs['text_color'] = text_color
if permissions is None and 'permissions' not in kwargs:
permissions = {'everyone': '1'}
for key, value in permissions.items():
kwargs['permissions[{0}]'.format(key)] = value
if parent:
parent_id = None
for category in self.categories():
if category['name'] == parent:
parent_id = category['id']
continue
if not parent_id:
raise DiscourseClientError(u'{0} not found'.format(parent))
kwargs['parent_category_id'] = parent_id
return self._post('/categories', **kwargs)
    def categories(self, **kwargs):
        """Return the list of all categories."""
        return self._get('/categories.json', **kwargs)['category_list']['categories']
    def category(self, name, parent=None, **kwargs):
        """Fetch one category by name, optionally namespaced as parent/name."""
        if parent:
            name = u'{0}/{1}'.format(parent, name)
        return self._get(u'/category/{0}.json'.format(name), **kwargs)
    def site_settings(self, **kwargs):
        """Update admin site settings; spaces in keys become underscores.

        Issues one PUT per setting and returns None.
        """
        for setting, value in kwargs.items():
            setting = setting.replace(' ', '_')
            self._request('PUT', '/admin/site_settings/{0}'.format(setting), {setting: value})
    def _get(self, path, **kwargs):
        """GET ``path`` with kwargs as query parameters."""
        return self._request('GET', path, kwargs)
    def _put(self, path, **kwargs):
        """PUT ``path`` with kwargs as parameters."""
        return self._request('PUT', path, kwargs)
    def _post(self, path, **kwargs):
        """POST ``path`` with kwargs as parameters."""
        return self._request('POST', path, kwargs)
    def _delete(self, path, **kwargs):
        """DELETE ``path`` with kwargs as parameters."""
        return self._request('DELETE', path, kwargs)
    def _request(self, verb, path, params):
        """Send an authenticated request and return the decoded JSON body.

        Adds api_key/api_username credentials to ``params``. Raises
        DiscourseClientError for 4xx responses, DiscourseServerError for
        other non-OK statuses, and DiscourseError for unexpected redirects
        or non-JSON / undecodable bodies.
        """
        params['api_key'] = self.api_key
        if 'api_username' not in params:
            params['api_username'] = self.api_username
        url = self.host + path
        # Redirects are not followed so a bad key/host surfaces as 302 below.
        response = requests.request(verb, url, allow_redirects=False, params=params, timeout=self.timeout)
        log.debug('response %s: %s', response.status_code, repr(response.text))
        if not response.ok:
            try:
                msg = u','.join(response.json()['errors'])
            except (ValueError, TypeError, KeyError):
                # Body was not JSON, or had no 'errors' list; fall back to
                # the HTTP reason / status line.
                if response.reason:
                    msg = response.reason
                else:
                    msg = u'{0}: {1}'.format(response.status_code, response.text)
            if 400 <= response.status_code < 500:
                raise DiscourseClientError(msg, response=response)
            raise DiscourseServerError(msg, response=response)
        if response.status_code == 302:
            raise DiscourseError('Unexpected Redirect, invalid api key or host?', response=response)
        json_content = 'application/json; charset=utf-8'
        content_type = response.headers['content-type']
        if content_type != json_content:
            # some calls return empty html documents
            # NOTE(review): response.content is bytes on Python 3, so this
            # str comparison only matches on Python 2 -- confirm runtime.
            if response.content == ' ':
                return None
            raise DiscourseError('Invalid Response, expecting "{0}" got "{1}"'.format(
                json_content, content_type), response=response)
        try:
            decoded = response.json()
        except ValueError:
            raise DiscourseError('failed to decode response', response=response)
        if 'errors' in decoded:
            message = decoded.get('message')
            if not message:
                message = u','.join(decoded['errors'])
            raise DiscourseError(message, response=response)
        return decoded
| {
"repo_name": "crowdresearch/crowdsource-platform",
"path": "crowdsourcing/discourse.py",
"copies": "2",
"size": "11818",
"license": "mit",
"hash": 5220444826404106000,
"line_mean": 36.8782051282,
"line_max": 119,
"alpha_frac": 0.5903706211,
"autogenerated": false,
"ratio": 4.023833844058563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5614204465158563,
"avg_score": null,
"num_lines": null
} |
## Adapted from https://github.com/tylin/coco-caption/blob/master/pycocoevalcap/eval.py (by tylin)
import os.path as osp
import sys
this_dir = osp.dirname(osp.realpath(__file__))
sys.path.append(osp.join(this_dir, 'coco-caption/pycocoevalcap'))
from tokenizer.ptbtokenizer import PTBTokenizer
from bleu.bleu import Bleu
from meteor.meteor import Meteor
from rouge.rouge import Rouge
from cider.cider import Cider
from collections import defaultdict
def to_coco(kvs, keys):
    """Re-shape {key: [caption, ...]} into COCO-style {key: [{'caption': c}, ...]}.

    Keys whose caption list is empty are left out of the result, matching
    the defaultdict behavior of only materializing appended-to keys.
    """
    res = defaultdict(list)
    for key in keys:
        captions = kvs[key]
        if captions:
            res[key] = [{'caption': cap} for cap in captions]
    return res
def load_sentences(target_path):
    """Read a TSV of (key, sentence) rows into {key: [sentence, ...]}.

    Only the first two tab-separated fields of each line are used.
    """
    sentences = defaultdict(list)
    with open(target_path) as reader:
        rows = (line.strip().split('\t') for line in reader)
        for fields in rows:
            sentences[fields[0]].append(fields[1])
    return sentences
def load_list(target_path):
    """Return the whitespace-stripped lines of ``target_path`` as a list."""
    with open(target_path) as reader:
        return [line.strip() for line in reader]
def main():
    """Score generated captions (sys.argv[1], TSV key<TAB>sentence) against
    the TGIF ground truth with BLEU, METEOR, ROUGE-L and CIDEr.

    Python 2 script (print statements); scorers come from the bundled
    coco-caption checkout added to sys.path above.
    """
    import sys
    res_path = sys.argv[1]
    gt_path = osp.join(this_dir, 'tgif-v1.0.tsv')
    test_list_path = osp.join(this_dir, 'splits', 'test.txt')
    test_keys = load_list(test_list_path)
    all_sents = load_sentences(gt_path)
    res = load_sentences(res_path)
    # make sure res has and only has single sentence
    # for all testing keys
    gts = {}
    for key in test_keys:
        gts[key] = all_sents[key]
        if key in res:
            res[key] = [res[key][0]]
        else:
            # A missing prediction is scored against the empty string.
            res[key] = [""]
    # =================================================
    # Convert to COCO format
    # =================================================
    gts = to_coco(gts, res.keys())
    res = to_coco(res, res.keys())
    # =================================================
    # Set up scorers
    # =================================================
    print 'tokenization...'
    tokenizer = PTBTokenizer()
    gts = tokenizer.tokenize(gts)
    res = tokenizer.tokenize(res)
    # =================================================
    # Set up scorers
    # =================================================
    print 'setting up scorers...'
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Meteor(),"METEOR"),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr")
    ]
    # =================================================
    # Compute scores
    # =================================================
    # NOTE(review): 'eval' shadows the builtin and is never used.
    eval = {}
    for scorer, method in scorers:
        print 'computing %s score...'%(scorer.method())
        score, scores = scorer.compute_score(gts, res)
        if type(method) == list:
            # Bleu returns one aggregate score per n-gram order.
            for sc, scs, m in zip(score, scores, method):
                print "%s: %0.3f"%(m, sc)
        else:
            print "%s: %0.3f"%(method, score)
# CLI entry point: python eval.py <results.tsv>
if __name__ == "__main__":
    main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| {
"repo_name": "raingo/TGIF-Release",
"path": "data/eval.py",
"copies": "1",
"size": "2975",
"license": "bsd-3-clause",
"hash": -4404713437627284000,
"line_mean": 27.8834951456,
"line_max": 98,
"alpha_frac": 0.5045378151,
"autogenerated": false,
"ratio": 3.6016949152542375,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4606232730354238,
"avg_score": null,
"num_lines": null
} |
# adapted from https://lsandig.org/blog/2014/08/apollon-python/en/
from cmath import sqrt
import math
class Circle(object):
    """
    A circle stored as x/y center coordinates, a complex center point,
    and a radius. Coordinates may themselves be complex (the gasket math
    produces complex intermediates).
    """
    def __init__(self, mx, my, r):
        """
        @param mx: x coordinate of the center
        @param my: y coordinate of the center
        @param r: radius
        """
        self.r = r
        self.mx = mx
        self.my = my
        # Complex-plane representation of the center used by the gasket math.
        self.m = mx + (my * 1j)
    def curvature(self):
        """Return the curvature (1/r) of this circle."""
        return 1/self.r
def outerTangentCircle(circle1, circle2, circle3):
    """
    Given three externally tangent circles, return the fourth circle that
    encloses and is tangent to all three (Descartes' circle theorem,
    negative-curvature root).
    """
    k1 = circle1.curvature()
    k2 = circle2.curvature()
    k3 = circle3.curvature()
    z1, z2, z3 = circle1.m, circle2.m, circle3.m
    k4 = -2 * sqrt(k1 * k2 + k2 * k3 + k1 * k3) + k1 + k2 + k3
    z4 = (-2 * sqrt(k1 * z1 * k2 * z2 + k2 * z2 * k3 * z3 + k1 * z1 * k3 * z3)
          + k1 * z1 + k2 * z2 + k3 * z3) / k4
    return Circle(z4.real, z4.imag, 1 / k4)
def tangentCirclesFromRadii(r2, r3, r4):
    """
    Build three mutually (externally) tangent circles of radii r2, r3 and
    r4, plus the circle enclosing them. The enclosing circle is returned
    first: (enclosing, c2, c3, c4).
    """
    second = Circle(0, 0, r2)
    third = Circle(r2 + r3, 0, r3)
    # Center of the third tangent circle, solved from the two tangency
    # distance constraints.
    cx = (r2 * r2 + r2 * r4 + r2 * r3 - r3 * r4) / (r2 + r3)
    cy = sqrt((r2 + r4) * (r2 + r4) - cx * cx)
    fourth = Circle(cx, cy, r4)
    enclosing = outerTangentCircle(second, third, fourth)
    return (enclosing, second, third, fourth)
def secondSolution(fixed, c1, c2, c3):
    """
    Given four mutually tangent circles, return the other circle tangent
    to the last three.

    @param fixed: the circle tangent to c1..c3 that is excluded from the
        new solution.
    @param c1, c2, c3: the three circles the returned circle must touch.
    """
    kf = fixed.curvature()
    k1 = c1.curvature()
    k2 = c2.curvature()
    k3 = c3.curvature()
    # Vieta jump on Descartes' theorem: the two solutions' curvatures sum
    # to 2*(k1+k2+k3), and likewise for curvature-weighted centers.
    kn = 2 * (k1 + k2 + k3) - kf
    zn = (2 * (k1 * c1.m + k2 * c2.m + k3 * c3.m) - kf * fixed.m) / kn
    return Circle(zn.real, zn.imag, 1 / kn)
def recurse(circles, depth, maxDepth, genCircles):
    """Recursively calculate the smaller circles of the AG up to the
    given depth. Note that for depth n we get 2*3^{n+1} circles.
    New circles are appended to ``genCircles`` in discovery order.
    @param maxDepth: Maximal depth of the recursion.
    @type maxDepth: int
    @param circles: 4-Tuple of circles for which the second
    solutions are calculated
    @type circles: (L{Circle}, L{Circle}, L{Circle}, L{Circle})
    @param depth: Current depth
    @type depth: int
    """
    if( depth == maxDepth):
        return
    (c1, c2, c3, c4) = circles
    if( depth == 0 ):
        # First recursive step, this is the only time we need to
        # calculate 4 new circles.
        #del genCircles[4:]
        cspecial = secondSolution( c1, c2, c3, c4 )
        genCircles.append( cspecial )
        recurse( (cspecial, c2, c3, c4), 1, maxDepth, genCircles )
    # At every level, reflect each of c2..c4 through the other three to
    # get the three new tangent circles, then recurse into each family.
    cn2 = secondSolution( c2, c1, c3, c4 )
    genCircles.append( cn2 )
    cn3 = secondSolution( c3, c1, c2, c4 )
    genCircles.append( cn3 )
    cn4 = secondSolution( c4, c1, c2, c3 )
    genCircles.append( cn4 )
    recurse( (cn2, c1, c3, c4), depth+1, maxDepth, genCircles )
    recurse( (cn3, c1, c2, c4), depth+1, maxDepth, genCircles )
    recurse( (cn4, c1, c2, c3), depth+1, maxDepth, genCircles )
def get_circle(circle, segments):
    """Approximate ``circle`` as a polygon with ``segments`` vertices.

    Returns a list of (x, y, z) tuples sampled on a circle of radius
    ``circle.r`` centered at the ORIGIN -- the center is intentionally not
    applied here; the script collects translations separately in
    ``centers``. z is always 0.
    """
    angle = 2 * math.pi / segments  # angle between vertices, in radians
    # circle.r may be complex (cmath.sqrt results); collapse it to a float
    # the same way the rest of the script does.
    r = float(circle.r.real + circle.r.imag)
    # The original also computed mx/my here but never used them; the dead
    # locals have been removed (behavior unchanged).
    return [(r * math.cos(angle * i), r * math.sin(angle * i), 0)
            for i in range(segments)]
# --- Script body (Blender driver) ---------------------------------------
# NOTE(review): c1, c2, c3, max_depth and segments are not defined in this
# excerpt -- presumably scene parameters defined elsewhere; confirm before
# running standalone. Inputs c1..c3 are curvatures (1/x gives radii).
start = tangentCirclesFromRadii( 1/c1, 1/c2, 1/c3 )
gen_circles =list(start)
recurse(list(start), 0, max_depth, gen_circles)
res_circles = [get_circle(c, segments) for c in gen_circles]
# Real+imag collapse mirrors get_circle's float conversion of complex values.
centers = [(float(circle.mx.real + circle.mx.imag), float(circle.my.real + circle.my.imag), 0) for circle in gen_circles]
radiuses = [float(circle.r.real + circle.r.imag) for circle in gen_circles] | {
"repo_name": "5agado/data-science-learning",
"path": "graphics/blender/apollonian_gaskets.py",
"copies": "1",
"size": "4357",
"license": "apache-2.0",
"hash": -1936496077419181800,
"line_mean": 32.7829457364,
"line_max": 121,
"alpha_frac": 0.6206105118,
"autogenerated": false,
"ratio": 2.8892572944297084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40098678062297083,
"avg_score": null,
"num_lines": null
} |
# Adapted from https://pygame.org/wiki/Spritesheet
import pygame
class spritesheetmatrix(object):
    def __init__(self, filename, rows=3, cols=4, colorkey=None):
        """Load a sprite sheet image and slice it into a rows x cols grid.

        filename: path to the sheet image
        rows/cols: grid dimensions of the sheet
        colorkey: transparency key forwarded to image_at (-1 samples the
        top-left pixel of each cell)
        """
        try:
            self.rows = int(rows)
            self.cols = int(cols)
            self.colorkey = colorkey
            self.sprite_tuples = []
            self.sheet = pygame.image.load(filename)
            dimensions = self.sheet.get_rect().size
            # Per-cell size derived from the full sheet dimensions.
            self.sprite_width = int(dimensions[0] / cols)
            self.sprite_height = int(dimensions[1] / rows)
            self.sheet = self.sheet.convert()
            self.create_rects()
            self.sprites = self.images_at(colorkey)
        except:
            # NOTE(review): bare except also hides non-I/O failures (e.g.
            # convert() without a display) -- consider catching pygame.error.
            print("Unable to load spritesheet img: " + filename)
            raise SystemExit('Problem opening file')
def create_rects(self):
for row in range(0, self.rows):
for col in range(0, self.cols):
self.sprite_tuples.append((col * self.sprite_width, row * self.sprite_height, self.sprite_width, self.sprite_height))
def image_at(self, rectangle, colorkey=None):
rect = pygame.Rect(rectangle)
image = pygame.Surface(rect.size).convert()
image.blit(self.sheet, (0, 0), rect)
if colorkey is not None:
if colorkey is -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, pygame.RLEACCEL)
return image
    def images_at(self, colorkey=None):
        """Extract every grid cell (per self.sprite_tuples) as a Surface."""
        return [self.image_at(rect, colorkey) for rect in self.sprite_tuples]
def load_strip(self, rect, image_count, colorkey = None):
"""Loads a strip of images"""
tups = [(rect[0] + rect[2] * x, rect[1], rect[2], rect[3])
for x in range(image_count)]
return self.images_at(tups, colorkey)
def get_sprite(self, index):
"""Loads a strip of images"""
if index in range (0,self.get_num_sprites()):
return self.sprites[index]
return 0
    def get_sprite_width(self):
        """Width in pixels of a single sprite cell."""
        return self.sprite_width
    def get_sprite_height(self):
        """Height in pixels of a single sprite cell."""
        return self.sprite_height
    def get_num_sprites(self):
        """Total number of sprites in the sheet (rows * cols)."""
        return int(self.rows * self.cols)
    def get_forward_sprites(self):
        """Every cols-th sprite from index 0, i.e. grid column 0.

        Presumably the forward-facing animation frames -- confirm against
        the sheet layout.
        """
        return self.sprites[0::self.cols]
    def get_backward_sprites(self):
        """Every cols-th sprite from index 2 (grid column 2); presumably
        the backward-facing frames."""
        return self.sprites[2::self.cols]
    def get_right_sprites(self):
        """Every cols-th sprite from index 1 (grid column 1); presumably
        the right-facing frames."""
        return self.sprites[1::self.cols]
def get_left_sprites(self):
return self.sprites[3::self.cols] | {
"repo_name": "kzwatkins/spritesheetmatrix",
"path": "spritesheetmatrix.py",
"copies": "1",
"size": "2510",
"license": "apache-2.0",
"hash": 3897192291214126600,
"line_mean": 30.7848101266,
"line_max": 133,
"alpha_frac": 0.5924302789,
"autogenerated": false,
"ratio": 3.696612665684831,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4789042944584831,
"avg_score": null,
"num_lines": null
} |
# Adapted from https://pymotw.com/2/socket/multicast.html
import socket
import struct
import sys
import threading
import time
from mve.utils import eprint
def register(component_name, multicast_ip='224.3.29.71', multicast_port=10000):
    """Discovers and registers with a UDP Server
    Returns a (response_data, server_ip) tuple once a server answers.
    This function issues a message over UDP multicast and retries
    roughly every two seconds until it receives a response. The message
    is in the form of "register:component_name".
    Arguments:
    component_name -- The name of the thing registering
    multicast_ip -- multicast group address (default='224.3.29.71')
    multicast_port -- multicast port (default=10000)
    NOTE(review): sendto() is given a str -- Python 2 only; Python 3
    would require bytes.
    """
    message = 'register:%s' % component_name
    multicast_group = (multicast_ip, multicast_port)
    # Create the datagram socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Set a timeout so the socket does not block indefinitely when trying
    # to receive data.
    sock.settimeout(0.2)
    # Set the time-to-live for messages to 1 so they do not go past the
    # local network segment.
    ttl = struct.pack('b', 1)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
    try:
        # Look for responses from all recipients
        while True:
            # Send data to the multicast group
            eprint('sending "%s"' % message)
            sent = sock.sendto(message, multicast_group)
            eprint('waiting to receive')
            try:
                data, server = sock.recvfrom(16)
                return (data, server[0])
            except socket.timeout:
                eprint('timed out, still waiting')
                time.sleep(2)
            else:
                # NOTE(review): unreachable -- the try block always returns
                # before this else clause can execute.
                eprint('received "%s" from %s' % (data, server))
    finally:
        eprint('closing socket')
        sock.close()
def udp_server(callback, multicast_ip='224.3.29.71', server_port=10000):
    """Listens and responds to UDP registration messages
    This function opens a UDP multicast server and listens forever.
    When it receives a "register:..." message, it passes it to callback.
    If callback returns something other than None, it sends that
    response back to the sender.
    Arguments:
    callback -- the callback for determining responses, takes the form (data, address)
    multicast_ip -- the ip for the multicast group (default='224.3.29.71')
    server_port -- the server port for the multicast (default=10000)
    """
    multicast_group = multicast_ip
    server_address = ('', server_port)
    # Create the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Bind to the server address
    sock.bind(server_address)
    # Tell the operating system to add the socket to the multicast group
    # on all interfaces.
    group = socket.inet_aton(multicast_group)
    mreq = struct.pack('4sL', group, socket.INADDR_ANY)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    # Receive/respond loop
    while True:
        eprint('\nwaiting to receive message')
        data, address = sock.recvfrom(1024)
        # NOTE(review): str prefix check -- Python 2; recvfrom yields bytes
        # on Python 3.
        if data.startswith("register:"):
            eprint('received %s bytes from %s' % (len(data), address[0]))
            eprint(data)
            eprint('sending acknowledgement to', address)
            response = callback(data, address)
            if response is not None:
                sock.sendto(response, address)
            else:
                eprint("response was none. not replying")
        else:
            eprint('received invalid data (%s) from %s' % (data, address[0]))
def start_udp_server(callback, isDaemon=True, multicast_ip='224.3.29.71', server_port=10000):
    """Start the UDP discovery server on a separate thread
    Arguments:
    callback -- function in the form of (data, sender) to determine responses
    isDaemon -- whether python should exit if this is the only thread left
    multicast_ip -- the ip for the multicast group (default='224.3.29.71')
    server_port -- the server port for the multicast (default=10000)
    Returns the started threading.Thread.
    """
    t = threading.Thread(target=udp_server, args=(callback, multicast_ip, server_port))
    # setDaemon() is deprecated; assign the daemon attribute directly.
    t.daemon = isDaemon
    t.start()
    return t
| {
"repo_name": "man-vs-electron/mve",
"path": "lib/mve/udp.py",
"copies": "1",
"size": "4089",
"license": "apache-2.0",
"hash": 8343672617435985000,
"line_mean": 33.075,
"line_max": 93,
"alpha_frac": 0.6400097823,
"autogenerated": false,
"ratio": 4.044510385756676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5184520168056677,
"avg_score": null,
"num_lines": null
} |
# Adapted from https://stanford.edu/~mwaskom/software/seaborn/examples/network_correlations.html
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
sns.set(context="paper", font="monospace")
# Load the datset of correlations between cortical brain networks
# NOTE(review): hard-coded absolute path; the CSV has a 3-row header and
# an index column.
df = pd.read_csv("/Users/willettk/Astronomy/Research/GalaxyZoo/gz_reduction_sandbox/data/full_decals/decals_network.csv",header=[0,1,2],index_col=0)
# Keep only the vote-fraction columns before correlating.
used_types = ["frac",]
used_columns = (df.columns.get_level_values("answer_type").isin(used_types))
df2 = df.loc[:, used_columns]
corrmat = df2.corr()
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(12, 9))
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1.0, square=True)
plt.xticks(rotation=90)
plt.yticks(rotation=0)
plt.xlabel('')
plt.ylabel('')
plt.title('GZ-DECaLS')
# Use matplotlib directly to emphasize known networks (ie, tasks)
# Draw a separator whenever the task number changes between columns.
task_numbers = corrmat.columns.get_level_values("task_no")
for i, task_no in enumerate(task_numbers):
    if i and task_no != task_numbers[i - 1]:
        ax.axhline(len(task_numbers) - i, c="k")
        ax.axvline(i, c="k")
f.tight_layout()
plt.savefig('../plots/heatmap.pdf')
| {
"repo_name": "willettk/decals",
"path": "python/heatmap.py",
"copies": "1",
"size": "1202",
"license": "mit",
"hash": -5203481863561205000,
"line_mean": 29.05,
"line_max": 148,
"alpha_frac": 0.7221297837,
"autogenerated": false,
"ratio": 2.828235294117647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4050365077817647,
"avg_score": null,
"num_lines": null
} |
# Adapted from http://stackoverflow.com/questions/10017859/how-to-build-a-simple-http-post-server
# Thank you!
import sys
import BaseHTTPServer
import cgi
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """POST-only handler that prints urlencoded form fields (Python 2)."""
    def do_POST(self):
        """Parse an x-www-form-urlencoded body, echo it, reply 200."""
        ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
        postvars = {}
        try:
            if ctype == 'application/x-www-form-urlencoded':
                length = int(self.headers.getheader('content-length'))
                postvars = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)
            print "Client posted", postvars
            self.send_response(200)
            # Remnants of a response body; note `body` is undefined here.
            """
            self.send_header("Content-type", "text")
            self.send_header("Content-length", str(len(body))) """
            self.end_headers()
            # self.wfile.write(body)
        except Exception, e:
            print "Error", repr(e)
def httpd(handler_class=MyHandler, server_address = ('127.0.0.1', 8000)):
    """Run a blocking HTTP server until interrupted (Python 2).

    NOTE(review): if HTTPServer() raises before ``srvr`` is bound, the
    KeyboardInterrupt handler's srvr.socket.close() raises NameError.
    """
    try:
        print "Server started"
        srvr = BaseHTTPServer.HTTPServer(server_address, handler_class)
        srvr.serve_forever() # serve_forever
    except KeyboardInterrupt:
        srvr.socket.close()
# CLI entry point: python httpserver_with_post.py <bind_host> <port>
if __name__ == "__main__":
    httpd(server_address = (sys.argv[1], int(sys.argv[2])))
| {
"repo_name": "aaaaalbert/repy-doodles",
"path": "httpserver_with_post.py",
"copies": "1",
"size": "1281",
"license": "unlicense",
"hash": 8718431688572975000,
"line_mean": 31.8461538462,
"line_max": 97,
"alpha_frac": 0.6221701795,
"autogenerated": false,
"ratio": 3.69164265129683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48138128307968303,
"avg_score": null,
"num_lines": null
} |
# Adapted from http://stackoverflow.com/questions/110803/dirty-fields-in-django
from copy import deepcopy
from django.core.exceptions import ValidationError
from django.db.models.signals import post_save, m2m_changed
from .compare import raw_compare, compare_states
from .compat import (is_db_expression, save_specific_fields,
is_deferred, is_buffer, get_m2m_with_model, remote_field)
class DirtyFieldsMixin(object):
    """Model mixin that snapshots field state on init/save so changed
    ("dirty") fields can be reported and selectively saved."""
    # (comparison function, kwargs) used when diffing states.
    compare_function = (raw_compare, {})
    # This mode has been introduced to handle some situations like this one:
    # https://github.com/romgar/django-dirtyfields/issues/73
    ENABLE_M2M_CHECK = False
    def __init__(self, *args, **kwargs):
        super(DirtyFieldsMixin, self).__init__(*args, **kwargs)
        # Re-snapshot the clean state after every save of this model class.
        post_save.connect(
            reset_state, sender=self.__class__,
            dispatch_uid='{name}-DirtyFieldsMixin-sweeper'.format(
                name=self.__class__.__name__))
        if self.ENABLE_M2M_CHECK:
            self._connect_m2m_relations()
        reset_state(sender=self.__class__, instance=self)
    def _connect_m2m_relations(self):
        # Also re-snapshot whenever one of this model's m2m through
        # tables changes.
        for m2m_field, model in get_m2m_with_model(self.__class__):
            m2m_changed.connect(
                reset_state, sender=remote_field(m2m_field).through,
                dispatch_uid='{name}-DirtyFieldsMixin-sweeper-m2m'.format(
                    name=self.__class__.__name__))
    def _as_dict(self, check_relationship, include_primary_key=True):
        """Snapshot local field values as {field_name: deep-copied value}.

        check_relationship: include foreign-key fields too.
        include_primary_key: set False to skip the pk field.
        Deferred fields and unevaluated DB expressions are skipped.
        """
        all_field = {}
        for field in self._meta.fields:
            if field.primary_key and not include_primary_key:
                continue
            if remote_field(field):
                if not check_relationship:
                    continue
            if is_deferred(self, field):
                continue
            field_value = getattr(self, field.attname)
            # If current field value is an expression, we are not evaluating it
            if is_db_expression(field_value):
                continue
            try:
                # Store the converted value for fields with conversion
                field_value = field.to_python(field_value)
            except ValidationError:
                # The current value is not valid so we cannot convert it
                pass
            if is_buffer(field_value):
                # psycopg2 returns uncopyable type buffer for bytea
                field_value = str(field_value)
            # Explanation of copy usage here :
            # https://github.com/romgar/django-dirtyfields/commit/efd0286db8b874b5d6bd06c9e903b1a0c9cc6b00
            all_field[field.name] = deepcopy(field_value)
        return all_field
    def _as_dict_m2m(self):
        """Snapshot m2m relations as {attname: set(related pks)}; empty
        dict for unsaved instances (no pk -> no queryable relations)."""
        if self.pk:
            m2m_fields = dict([
                (f.attname, set([
                    obj.pk for obj in getattr(self, f.attname).all()
                ]))
                for f, model in get_m2m_with_model(self.__class__)
            ])
            return m2m_fields
        return {}
    def get_dirty_fields(self, check_relationship=False, check_m2m=None, verbose=False):
        """Return the fields changed since the last snapshot.

        check_relationship: also compare foreign-key fields.
        check_m2m: mapping {attname: set(pks)} to compare against the m2m
            snapshot; requires ENABLE_M2M_CHECK, else ValueError.
        verbose: keep the full {'saved': ..., 'current': ...} detail per
            field instead of just the saved value.
        """
        if self._state.adding:
            # If the object has not yet been saved in the database, all fields are considered dirty
            # for consistency (see https://github.com/romgar/django-dirtyfields/issues/65 for more details)
            pk_specified = self.pk is not None
            initial_dict = self._as_dict(check_relationship, include_primary_key=pk_specified)
            return initial_dict
        if check_m2m is not None and not self.ENABLE_M2M_CHECK:
            raise ValueError("You can't check m2m fields if ENABLE_M2M_CHECK is set to False")
        modified_fields = compare_states(self._as_dict(check_relationship),
                                         self._original_state,
                                         self.compare_function)
        if check_m2m:
            modified_m2m_fields = compare_states(check_m2m,
                                                 self._original_m2m_state,
                                                 self.compare_function)
            modified_fields.update(modified_m2m_fields)
        if not verbose:
            # Keeps backward compatibility with previous function return
            modified_fields = {key: value['saved'] for key, value in modified_fields.items()}
        return modified_fields
    def is_dirty(self, check_relationship=False, check_m2m=None):
        """True when any watched field differs from the saved snapshot."""
        return {} != self.get_dirty_fields(check_relationship=check_relationship,
                                           check_m2m=check_m2m)
    def save_dirty_fields(self):
        """Persist only the currently dirty fields (relationship-aware)."""
        dirty_fields = self.get_dirty_fields(check_relationship=True)
        save_specific_fields(self, dirty_fields)
def reset_state(sender, instance, **kwargs):
    """Signal receiver: refresh the instance's clean-state snapshot(s)."""
    # original state should hold all possible dirty fields to avoid
    # getting a `KeyError` when checking if a field is dirty or not
    update_fields = kwargs.pop('update_fields', {})
    new_state = instance._as_dict(check_relationship=True)
    if update_fields:
        # Partial save: only refresh the fields that were actually written.
        for field in update_fields:
            instance._original_state[field] = new_state[field]
    else:
        instance._original_state = new_state
    if instance.ENABLE_M2M_CHECK:
        instance._original_m2m_state = instance._as_dict_m2m()
| {
"repo_name": "jdotjdot/django-dirtyfields",
"path": "src/dirtyfields/dirtyfields.py",
"copies": "1",
"size": "5337",
"license": "bsd-3-clause",
"hash": -4575186713593828000,
"line_mean": 39.7404580153,
"line_max": 107,
"alpha_frac": 0.5979014428,
"autogenerated": false,
"ratio": 4.089655172413793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5187556615213793,
"avg_score": null,
"num_lines": null
} |
# Adapted from http://stackoverflow.com/questions/110803/dirty-fields-in-django
from django.db import models
from django.db.models.signals import pre_save, post_save
from django.contrib.contenttypes.models import ContentType
import random, string, hashlib, time
import six
def id_generator():
    """Return a pseudo-random id: md5 hexdigest of the current time
    followed by a random integer suffix between 1 and 100."""
    digest = hashlib.md5(str(time.time()).encode('utf-8')).hexdigest()
    suffix = random.randint(1, 100)
    return digest + str(suffix)
def changed(changes, field):
    """True when ``changes[field]`` exists and its 'new' value differs
    from its 'old' value; False otherwise."""
    entry = changes.get(field)
    if not entry:
        return False
    return True if entry['new'] != entry['old'] else False
class DirtyField(object):
    """Helper that reports which fields of a model instance changed,
    driven by the instance's ``sources`` configuration."""
    def __init__(self, instance):
        self.instance = instance
    def get_m2m_relations(self):
        """Return the instance's ManyToManyField objects."""
        r = []
        for field, model in self.instance._meta.get_m2m_with_model():
            if isinstance(field, models.ManyToManyField):
                r.append(field)
        return r
    def get_source(self, name, value):
        """Resolve the attribute configured under sources[name][value]."""
        return getattr(self.instance, self.instance.sources[name][value])
    def get_dirty_fields(self, source='default'):
        """Map field name -> original value for every changed field."""
        new_state = self.get_source(source, 'lookup')()
        changed_fields = {}
        if self.instance._state.adding:
            # Unsaved instance: every non-default value counts as dirty.
            changed_fields = self.get_field_values(source=source, initial_state=True)
        for key,value in six.iteritems(self.get_source(source, 'state')):
            if value != new_state[key]:
                changed_fields.update({key:value})
        return changed_fields
    def as_value(self, value):
        """Call ``value`` if truthy and callable, else return it as-is."""
        return value() if (value and callable(value)) else value
    def field_has_default_value(self, field_name, source='default'):
        """Return the field object for ``field_name`` when it declares a
        (truthy) default; False otherwise.

        NOTE(review): assumes the 'fields' source yields field objects;
        get_field_values also tolerates plain strings -- confirm.
        """
        # BUG FIX: the 'fields' source resolves to a bound method and must
        # be called (as get_field_values does); iterating the method itself
        # raised TypeError.
        for field in self.get_source(source, 'fields')():
            if field_name == field.name:
                if field.default:
                    return field
        return False
    def get_field_values(self, source='default', initial_state=False):
        """Collect current (or initial-state) non-default field values."""
        changed_fields = {}
        for k in self.get_source(source, 'fields')():
            # The fields source may yield field objects or plain names.
            name = k.name if (not isinstance(k, six.string_types)) else k
            default = k.default if (not isinstance(k, six.string_types)) else None
            field_value = getattr(self.instance, name, None)
            if field_value:
                if initial_state:
                    changed_fields[name] = self.as_value(self.get_source(source, 'state').get(name))
                else:
                    field_value = self.as_value(field_value)
                    default_value = self.as_value(default)
                    if field_value != default_value:
                        changed_fields[name] = field_value
        return changed_fields
    def is_dirty(self, source='default'):
        """Unsaved instances are always dirty; otherwise dirty when any
        field changed."""
        if not self.instance.pk:
            return True
        return {} != self.get_dirty_fields(source=source)
class DirtyFieldMixin(object):
    """Model mixin wiring DirtyField change-tracking into save signals."""
    # Maps a source name to the attributes used for state storage, state
    # lookup, and field enumeration.
    sources = {'default': {'state': '_original_state', 'lookup': '_as_dict', 'fields': '_get_fields'}}
    def __init__(self, *args, **kwargs):
        self.dirtyfield = DirtyField(instance=self)
        self._dirtyfields_copy = {}
        super(DirtyFieldMixin, self).__init__(*args, **kwargs)
        # Unique dispatch uids so each instantiation registers receivers.
        genuid = lambda s: '%s._%s_state_%s'%(self.__class__.__name__, s, id_generator())
        pre_save.connect(self._presave_state,
            sender=self.__class__,
            dispatch_uid=genuid('state'))
        post_save.connect(self._reset_state,
            sender=self.__class__,
            dispatch_uid=genuid('reset'))
        self._reset_state(initialize=True)
    def _as_dict(self, *args, **kwargs):
        """Snapshot local field values as {attname: current value}."""
        fields = dict([
            (f.attname, getattr(self, f.attname))
            for f in self._get_fields()
        ])
        return fields
    def _get_fields(self):
        """Fields tracked by the default source."""
        return self._meta.local_fields
    def _reset_state(self, *args, **kwargs):
        # Refresh the stored clean state for every configured source.
        for source, v in six.iteritems(self.sources):
            setattr(self, v['state'], getattr(self, v['lookup'])(**kwargs))
    def _presave_state(self, sender, instance, **kwargs):
        # Capture pending changes just before the row is written.
        self.update_dirtyfields_copy()
    def get_changes(self, source='default', dirty_fields=None):
        """Return {field: {'old': saved, 'new': current}} for dirty fields."""
        changes = {}
        if dirty_fields is None:
            dirty_fields = self.dirtyfield.get_dirty_fields(source=source)
        for field, old in six.iteritems(dirty_fields):
            field_value = getattr(self, field)
            changes[field] = {'old': old, 'new': field_value}
        return changes
    def update_dirtyfields_copy(self):
        """Snapshot the current change set (called at pre_save)."""
        self._dirtyfields_copy = self.get_changes()
    def get_dirtyfields_copy(self):
        """Changes captured at the most recent pre_save."""
        return self._dirtyfields_copy
class TypedDirtyFieldMixin(DirtyFieldMixin):
    """DirtyFieldMixin that can also report its own ContentType."""
    def get_content_type(self):
        """Return the Django ContentType for this model."""
        return ContentType.objects.get_for_model(self)
| {
"repo_name": "futurice/django-dirtyfield",
"path": "djangodirtyfield/mixin.py",
"copies": "1",
"size": "4723",
"license": "bsd-3-clause",
"hash": 343151871745479360,
"line_mean": 37.0887096774,
"line_max": 102,
"alpha_frac": 0.5991954266,
"autogenerated": false,
"ratio": 3.8872427983539093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9968580846945652,
"avg_score": 0.003571475601651408,
"num_lines": 124
} |
# Adapted from http://stackoverflow.com/questions/110803/dirty-fields-in-django
from django.db.models.signals import post_save
class DirtyFieldsMixin(object):
    """Mixin tracking changed (dirty) local and m2m fields between saves.

    NOTE(review): uses dict.iteritems -- Python 2 only.
    """
    def __init__(self, *args, **kwargs):
        super(DirtyFieldsMixin, self).__init__(*args, **kwargs)
        # Re-snapshot the clean state after every save of this class.
        post_save.connect(
            self._reset_state, sender=self.__class__,
            dispatch_uid='%s._reset_state' % self.__class__.__name__)
        self._reset_state()
    def _as_dict(self):
        """Return a (local_field_values, m2m_pk_sets) snapshot pair."""
        fields = dict([
            (f.attname, getattr(self, f.attname))
            for f in self._meta.local_fields
        ])
        m2m_fields = dict([])
        # Can't access the m2m fields until the model is initialized
        if self.pk:
            m2m_fields = dict([
                (f.attname, set([
                    obj.id for obj in getattr(self, f.attname).all()
                ]))
                for f in self._meta.local_many_to_many
            ])
        return fields, m2m_fields
    def _reset_state(self, *args, **kwargs):
        self._original_state, self._original_m2m_state = self._as_dict()
    def get_dirty_fields(self):
        """Map field name -> original value for every changed field,
        comparing m2m relations as sorted pk collections."""
        new_state, new_m2m_state = self._as_dict()
        changed_fields = dict([
            (key, value)
            for key, value in self._original_state.iteritems()
            if value != new_state[key]
        ])
        changed_m2m_fields = dict([
            (key, value)
            for key, value in self._original_m2m_state.iteritems()
            if sorted(value) != sorted(new_m2m_state[key])
        ])
        changed_fields.update(changed_m2m_fields)
        return changed_fields
    def is_dirty(self):
        # in order to be dirty we need to have been saved at least once, so we
        # check for a primary key and we need our dirty fields to not be empty
        if not self.pk:
            return True
        return {} != self.get_dirty_fields()
| {
"repo_name": "stanhu/django-dirtyfields",
"path": "dirtyfields/dirtyfields.py",
"copies": "1",
"size": "1950",
"license": "bsd-3-clause",
"hash": 7348611343877332000,
"line_mean": 36.5,
"line_max": 80,
"alpha_frac": 0.5461538462,
"autogenerated": false,
"ratio": 3.8844621513944224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9922577951535543,
"avg_score": 0.0016076092117758786,
"num_lines": 52
} |
# Adapted from http://stackoverflow.com/questions/110803/dirty-fields-in-django
from django import VERSION
from django.db.models.signals import post_save
class DirtyFieldsMixin(object):
    """Track field changes since the last save.

    Values are normalised through Field.to_python so database-layer and
    Python-layer representations compare equal.
    """

    def __init__(self, *args, **kwargs):
        super(DirtyFieldsMixin, self).__init__(*args, **kwargs)
        # dispatch_uid keeps the handler registered only once per class.
        post_save.connect(reset_state, sender=self.__class__,
            dispatch_uid='%s-DirtyFieldsMixin-sweeper' % self.__class__.__name__)
        reset_state(sender=self.__class__, instance=self)

    def _as_dict(self):
        # to_python() normalises raw attribute values before comparison.
        return dict((f.name, f.to_python(getattr(self, f.attname))) for f in self._meta.local_fields)

    def get_dirty_fields(self):
        """
        Returns a tuple of field names whose value changed since the snapshot.

        Fix: uses .items() rather than the Python-2-only .iteritems(), which
        raises AttributeError on Python 3; Python 2 behaviour is unchanged.
        """
        new_state = self._as_dict()
        return tuple(k for k, v in self._original_state.items() if v != new_state[k])

    @property
    def is_dirty(self):
        # in order to be dirty we need to have been saved at least once, so we
        # check for a primary key and we need our dirty fields to not be empty
        if self._state.adding:
            return True
        return bool(self.get_dirty_fields())
def reset_state(sender, instance, **kwargs):
    """post_save receiver: re-snapshot the instance's field values."""
    snapshot = instance._as_dict()
    instance._original_state = snapshot
# Django 1.5 added support for updating only specified fields, this fails in
# older versions.
if VERSION >= (1, 5):
    def save(self, *args, **kwargs):
        # Fix: only restrict the UPDATE to dirty fields for existing rows.
        # Passing update_fields while inserting a new object makes Django
        # raise ValueError, so leave kwargs untouched when adding.
        if not self._state.adding:
            kwargs['update_fields'] = self.get_dirty_fields()
        return super(DirtyFieldsMixin, self).save(*args, **kwargs)
    DirtyFieldsMixin.save = save
| {
"repo_name": "georgemarshall/django-dirtyfields",
"path": "src/dirtyfields/dirtyfields.py",
"copies": "1",
"size": "1579",
"license": "bsd-3-clause",
"hash": 2485454510257850000,
"line_mean": 36.5952380952,
"line_max": 101,
"alpha_frac": 0.6402786574,
"autogenerated": false,
"ratio": 3.7595238095238095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48998024669238094,
"avg_score": null,
"num_lines": null
} |
# Adapted from http://stackoverflow.com/questions/110803/dirty-fields-in-django
from django import VERSION
from django.db.models.signals import post_save, pre_save
def reset_instance(instance, *args, **kwargs):
    """
    post_save receiver: refresh the instance's dirty-field snapshot by
    delegating to its _reset_state method.
    """
    instance._reset_state()
class DirtyFieldsMixin(object):
    """
    Gives dirty field tracking ability to models, also implements a save_dirty
    method which updates only the dirty fields using QuerySet.update - useful
    for multi-process or multi-worker setups where save() will actually update
    all fields, potentially overriding changes by other workers while the
    current worker has the object open.
    """
    def __init__(self, *args, **kwargs):
        super(DirtyFieldsMixin, self).__init__(*args, **kwargs)
        # dispatch_uid keeps the post_save handler registered once per class.
        dispatch_uid = '%s-DirtyFieldsMixin-sweeper' % self.__class__.__name__
        post_save.connect(reset_instance, sender=self.__class__,
                          dispatch_uid=dispatch_uid)
        self._reset_state()

    def _reset_state(self, *args, **kwargs):
        # Snapshot current values; called at init and (via signal) after saves.
        self._original_state = self._as_dict()

    def _as_dict(self):
        # For relations, saves all fk values too so that we can update fk by
        # id, e.g. obj.foreignkey_id = 4
        # NOTE(review): self._deferred only exists in older Django (<1.10) —
        # confirm against the pinned Django version.
        if self._deferred:
            return {}
        return dict([(f.name, f.to_python(getattr(self, f.attname))) for f in self._meta.local_fields])

    def get_changed_values(self):
        """Return {field_name: current_value} for every dirty field."""
        return dict([(field, getattr(self, field)) for field in self.dirty_fields])

    @property
    def dirty_fields(self):
        """
        Returns a list of keys that have changed
        """
        if self._deferred:
            raise TypeError('Cant be used with deferred objects')
        new_state = self._as_dict()
        # NOTE(review): .iteritems() is Python-2-only.
        return tuple(k for k, v in self._original_state.iteritems() if v != new_state[k])

    @property
    def is_dirty(self):
        # Unsaved instances are always considered dirty.
        if self._state.adding:
            return True
        return bool(self.dirty_fields)

    def save_dirty(self):
        """
        An alternative to save, instead writing every field again, only updates
        the dirty fields via QuerySet.update
        """
        if not self.pk:
            self.save()
            updated = 1
        else:
            changed_values = self.get_changed_values()
            if len(changed_values.keys()) == 0:
                return False
            pre_save.send(sender=self.__class__, instance=self)
            # Detect if updating relationship field_ids directly
            # If related field object itself has changed then the field_id
            # also changes, in which case we detect and ignore the field_id
            # change, otherwise we'll reload the object again later unnecessarily
            rel_fields = dict([(f.column, f) for f in self._meta.fields if f.rel])
            updated_rel_ids = []
            # NOTE(review): deleting from changed_values while iterating
            # .keys() is only safe on Python 2, where keys() returns a list.
            for field_name in changed_values.keys():
                if field_name in rel_fields.keys():
                    rel_field = rel_fields[field_name]
                    value = changed_values[rel_field.column]
                    obj_value = getattr(self, rel_field.name).pk
                    del changed_values[rel_field.column]
                    changed_values[rel_field.name] = value
                    if value != obj_value:
                        updated_rel_ids.append(rel_field.column)
            # Maps db column names back to field names if they differ
            field_map = dict([(f.column, f.name) for f in self._meta.fields if f.db_column])
            for field_from, field_to in field_map.iteritems():
                if field_from in changed_values:
                    changed_values[field_to] = changed_values[field_from]
                    del changed_values[field_from]
            updated = self.__class__.objects.filter(pk=self.pk).update(**changed_values)
            # Reload updated relationships
            for field_name in updated_rel_ids:
                field = rel_fields[field_name]
                field_pk = getattr(self, field_name)
                rel_obj = field.related.parent_model.objects.get(pk=field_pk)
                setattr(self, field.name, rel_obj)
            self._reset_state()
            post_save.send(sender=self.__class__, instance=self, created=False)
        return updated == 1
# Django 1.5 added support for updating only specified fields, this fails in
# older versions.
if VERSION >= (1, 5):
    def save(self, *args, **kwargs):
        # update_fields is only valid for UPDATEs; skip it while inserting.
        if not self._state.adding:
            kwargs['update_fields'] = self.dirty_fields
        return super(DirtyFieldsMixin, self).save(*args, **kwargs)
    DirtyFieldsMixin.save = save
| {
"repo_name": "mattcaldwell/django-dirtyfields",
"path": "dirtyfields/dirtyfields.py",
"copies": "1",
"size": "4724",
"license": "bsd-3-clause",
"hash": -2903478219675516000,
"line_mean": 39.3760683761,
"line_max": 103,
"alpha_frac": 0.5990685859,
"autogenerated": false,
"ratio": 4.165784832451499,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006740445776220878,
"num_lines": 117
} |
# Adapted from http://stackoverflow.com/questions/110803/dirty-fields-in-django
from django.db.models.signals import post_save
from django.db.models.fields.related import ManyToManyField
class DirtyFieldsMixin(object):
    """Track changed fields; optionally also compare relationship fields.

    check_relationship is the class-level default for whether FK/one-to-one
    values participate in the comparison (see get_dirty_fields / is_dirty).
    """
    check_relationship = False

    def __init__(self, *args, **kwargs):
        super(DirtyFieldsMixin, self).__init__(*args, **kwargs)
        # dispatch_uid keeps the post_save handler unique per class.
        post_save.connect(
            reset_state, sender=self.__class__,
            dispatch_uid='{name}-DirtyFieldsMixin-sweeper'.format(
                name=self.__class__.__name__))
        reset_state(sender=self.__class__, instance=self)

    def _as_dict(self):
        """Return {name: value} for local non-relation fields only."""
        all_field = {}
        for field in self._meta.local_fields:
            if not field.rel:
                all_field[field.name] = getattr(self, field.name)
        return all_field

    def get_dirty_fields(self, check_relationship=None):
        """Return {field_name: original_value} for each changed field.

        Fix: uses .items() instead of the Python-2-only .iteritems(), which
        raises AttributeError on Python 3; Python 2 behaviour is unchanged.
        """
        if check_relationship is None:
            check_relationship = self.check_relationship
        if check_relationship:
            # We want to check every field, including foreign keys and
            # one-to-one fields,
            new_state = entire_model_to_dict(self)
        else:
            new_state = self._as_dict()
        all_modify_field = {}
        for key, value in new_state.items():
            original_value = self._original_state[key]
            if value != original_value:
                all_modify_field[key] = original_value
        return all_modify_field

    def is_dirty(self, check_relationship=None, fieldslist=None):
        """True if any field changed; *fieldslist* restricts which names count."""
        if check_relationship is None:
            check_relationship = self.check_relationship
        # in order to be dirty we need to have been saved at least once, so we
        # check for a primary key and we need our dirty fields to not be empty
        if not self.pk:
            return True
        df = self.get_dirty_fields(check_relationship=check_relationship)
        if fieldslist:
            return bool([k for k in df if k in fieldslist])
        else:
            return bool(df)
class DirtyFieldsWithRelationshipChecksMixin(DirtyFieldsMixin):
    # Convenience subclass: get_dirty_fields()/is_dirty() also compare
    # relationship (FK / one-to-one) values by default.
    check_relationship = True
def reset_state(sender, instance, **kwargs):
    """post_save receiver: snapshot every field (relations included) so FK
    changes are detectable later."""
    instance._original_state = entire_model_to_dict(instance)
def entire_model_to_dict(instance, fields=None, exclude=None):
    """Serialise every concrete and virtual field of *instance* to a dict.

    fields  -- optional whitelist of field names to include
    exclude -- optional blacklist of field names to skip
    """
    opts = instance._meta
    data = {}
    all_fields = list(opts.concrete_fields) + list(opts.virtual_fields)
    for field in all_fields:
        name = field.name
        if fields and name not in fields:
            continue
        if exclude and name in exclude:
            continue
        data[name] = field.value_from_object(instance)
    return data
| {
"repo_name": "ivelum/django-dirtyfields",
"path": "src/dirtyfields/dirtyfields.py",
"copies": "1",
"size": "2624",
"license": "bsd-3-clause",
"hash": 168473684608636770,
"line_mean": 33.5263157895,
"line_max": 79,
"alpha_frac": 0.6261432927,
"autogenerated": false,
"ratio": 4.074534161490683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 76
} |
# Adapted from http://stackoverflow.com/questions/110803/dirty-fields-in-django
from django.db.models.signals import post_save
import copy
class DirtyFieldsMixin(object):
    """Dirty-field tracking for local non-relation fields.

    The post_save handler re-snapshots the instance with shallow copies so
    later in-place mutation of a value cannot corrupt the snapshot.
    """

    def __init__(self, *args, **kwargs):
        super(DirtyFieldsMixin, self).__init__(*args, **kwargs)
        uid = '{}-DirtyFieldsMixin-sweeper'.format(self.__class__.__name__)
        post_save.connect(reset_state, sender=self.__class__, dispatch_uid=uid)
        reset_state(sender=self.__class__, instance=self)

    def _as_dict(self, do_copy=False):
        """Map local non-relation field names to their values; shallow-copy
        each value when *do_copy* is true."""
        snapshot = {}
        for field in self._meta.local_fields:
            if field.rel:
                continue
            value = getattr(self, field.name)
            snapshot[field.name] = copy.copy(value) if do_copy else value
        return snapshot

    def get_dirty_fields(self):
        """Return {field_name: original_value} for every changed field."""
        current = self._as_dict()
        return {
            name: original
            for name, original in self._original_state.items()
            if original != current[name]
        }

    def is_dirty(self):
        # An unsaved instance (no primary key) is always considered dirty.
        if not self.pk:
            return True
        return self.get_dirty_fields() != {}
def reset_state(sender, instance, **kwargs):
    """post_save receiver: snapshot field values (with copies) as the new
    original state."""
    instance._original_state = instance._as_dict(True)
| {
"repo_name": "ActivKonnect/django-dirtyfields",
"path": "src/dirtyfields/dirtyfields.py",
"copies": "1",
"size": "1477",
"license": "bsd-3-clause",
"hash": 2182352157063341800,
"line_mean": 34.1666666667,
"line_max": 79,
"alpha_frac": 0.5666892349,
"autogenerated": false,
"ratio": 4.068870523415978,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010385955507906728,
"num_lines": 42
} |
# Adapted from http://stackoverflow.com/questions/110803/dirty-fields-in-django
from django.db.models.signals import post_save
class DirtyFieldsMixin(object):
    """Minimal dirty-field tracking for local non-relation fields."""

    def __init__(self, *args, **kwargs):
        super(DirtyFieldsMixin, self).__init__(*args, **kwargs)
        post_save.connect(reset_state, sender=self.__class__,
            dispatch_uid='%s-DirtyFieldsMixin-sweeper' % self.__class__.__name__)
        reset_state(sender=self.__class__, instance=self)

    def _as_dict(self):
        """Map each local non-relation field name to its current value."""
        return {f.name: getattr(self, f.name)
                for f in self._meta.local_fields if not f.rel}

    def get_dirty_fields(self):
        """Return {field_name: original_value} for fields changed since the
        last snapshot."""
        current = self._as_dict()
        return {
            name: original
            for name, original in self._original_state.items()
            if original != current[name]
        }

    def is_dirty(self):
        # Unsaved instances (no primary key) are always considered dirty.
        if not self.pk:
            return True
        return self.get_dirty_fields() != {}
def reset_state(sender, instance, **kwargs):
    """post_save receiver: refresh the instance's original-state snapshot."""
    instance._original_state = instance._as_dict()
| {
"repo_name": "akadan47/django-dirtyfields",
"path": "src/dirtyfields/dirtyfields.py",
"copies": "1",
"size": "1161",
"license": "bsd-3-clause",
"hash": 4384129551136021000,
"line_mean": 37.7,
"line_max": 109,
"alpha_frac": 0.6347975883,
"autogenerated": false,
"ratio": 3.6857142857142855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9813800458191853,
"avg_score": 0.0013422831644866614,
"num_lines": 30
} |
# Adapted from http://stackoverflow.com/questions/110803/dirty-fields-in-django
import copy
from django.db.models.signals import post_save
import django.dispatch
def _iter_fields(obj):
for field in obj._meta.local_fields:
if not field.rel:
yield field, field.to_python(getattr(obj, field.name))
def _to_dict(obj):
    """Snapshot *obj*'s field values, shallow-copying each value so later
    in-place mutation cannot alter the snapshot."""
    return dict(
        (field.name, copy.copy(value))
        for field, value in _iter_fields(obj)
    )
def _changes(obj, new_state):
return {
key: value
for key, value in obj._original_state.iteritems()
if new_state[key] != value
}
# Custom signal emitted from _reset_state whenever a save produced actual
# field changes; receivers get the pre-save snapshot and the diff.
dirty_save = django.dispatch.Signal(
    providing_args=["instance", "original_data", "changes"])
def _reset_state(sender, instance, **kwargs):
    """post_save receiver: emit dirty_save when fields actually changed, then
    refresh the instance's snapshot."""
    current = _to_dict(instance)
    diff = _changes(instance, current)
    if diff:
        dirty_save.send(
            sender=sender,
            original_data=instance._original_state,
            changes=diff
        )
    instance._original_state = current
class DirtyFieldsMixin(object):
    """Wires model instances up for change tracking via the dirty_save signal."""

    def __init__(self, *args, **kwargs):
        super(DirtyFieldsMixin, self).__init__(*args, **kwargs)
        # Initial snapshot must exist before the first _reset_state call.
        self._original_state = _to_dict(self)
        uid = '{0}-DirtyFieldsMixin-sweeper'.format(self.__class__.__name__)
        post_save.connect(_reset_state, sender=self.__class__, dispatch_uid=uid)
        _reset_state(sender=self.__class__, instance=self)
| {
"repo_name": "public/django-dirtyfields",
"path": "src/dirtyfields/dirtyfields.py",
"copies": "1",
"size": "1499",
"license": "bsd-3-clause",
"hash": 1744444564312487200,
"line_mean": 23.5737704918,
"line_max": 79,
"alpha_frac": 0.6010673783,
"autogenerated": false,
"ratio": 3.7196029776674937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9820670355967493,
"avg_score": 0,
"num_lines": 61
} |
# Adapted from http://stackoverflow.com/questions/110803
# and https://github.com/callowayproject/django-dirtyfields
# and https://github.com/smn/django-dirtyfields
import copy
from django import VERSION
from django.conf import settings
from django.db import router
from django.db.models import signals
def stale_copy(value):
    """Return *value* itself for primitives, a deep copy otherwise.

    Deep-copying matters for mutable field values such as JSONField payloads,
    where the snapshot would otherwise share the live object reference.
    """
    # Check order preserved: basestring (Python-2-only name) is evaluated
    # only after the int/bool checks fail.
    for primitive_type in (int, bool):
        if isinstance(value, primitive_type):
            return value
    if isinstance(value, basestring):
        return value
    return copy.deepcopy(value)
class StaleFieldsMixin(object):
    """
    Gives stale field tracking ability to models, also implements a save_stale
    method which updates only the stale fields using QuerySet.update - useful
    for multi-process or multi-worker setups where save() will actually update
    all fields, potentially overriding changes by other workers while the
    current worker has the object open.
    """
    # Marker used by auto_add_to_model to skip classes already patched.
    _is_stale_enabled = True
    # Class-level default; replaced per instance by _reset_stale_state().
    _original_state = {}

    def _reset_stale_state(self):
        # Snapshot current values; triggered by post_init and post_save.
        self._original_state = self._as_dict()

    def _as_dict(self):
        # For relations, saves all fk values too so that we can update fk by
        # id, e.g. obj.foreignkey_id = 4
        # NOTE(review): self._deferred only exists in Django < 1.10.
        if self._deferred:
            return {}
        return {f.name: stale_copy(f.to_python(getattr(self, f.attname))) for f in self._meta.fields}

    def get_changed_values(self):
        """Return {field_name: new_value} for fields differing from the snapshot."""
        new_state = self._as_dict()
        # NOTE(review): .iteritems() is Python-2-only.
        return {name: new_state[name] for name, old_value in self._original_state.iteritems() if old_value != new_state[name]}

    @property
    def stale_fields(self):
        """
        Returns a list of keys that have changed
        """
        if self._deferred:
            raise TypeError('Cant be used with deferred objects')
        return tuple(self.get_changed_values().keys())
    # Backwards-compatible alias for the dirty-fields API.
    dirty_fields = stale_fields

    @property
    def is_stale(self):
        # Unsaved instances are always considered stale.
        if self._state.adding:
            return True
        return bool(self.stale_fields)
    is_dirty = is_stale

    def save_stale(self, raw=False, using=None, signal_anyways=False):
        """
        An alternative to save, instead writing every field again, only updates
        the stale fields via QuerySet.update
        """
        updated = 0
        if not self.pk:
            self.save(using=using)
            updated = 1
        else:
            # some copied from django/db/models/base.py
            using = using or router.db_for_write(self.__class__, instance=self)
            changed_values = self.get_changed_values()
            if changed_values or signal_anyways:
                signals.pre_save.send(sender=self.__class__, instance=self, raw=raw, using=using)
            # nothing has changed, don't do anything at all
            if changed_values:
                # detect if updating relationship field_ids directly
                # if related field object itself has changed then the field_id
                # also changes, in which case we detect and ignore the field_id
                # change, otherwise we'll reload the object again later unnecessarily
                rel_fields = {f.column: f for f in self._meta.fields if f.rel}
                updated_rel_ids = []
                # NOTE(review): deleting keys while iterating .keys() is only
                # safe on Python 2, where keys() returns a list.
                for field_name in changed_values.keys():
                    if field_name in rel_fields.keys():
                        rel_field = rel_fields[field_name]
                        value = changed_values[rel_field.column]
                        obj_value = getattr(self, rel_field.name).pk
                        del changed_values[rel_field.column]
                        changed_values[rel_field.name] = value
                        if value != obj_value:
                            updated_rel_ids.append(rel_field.column)
                # maps db column names back to field names if they differ
                field_map = {f.column: f.name for f in self._meta.fields if f.db_column}
                for field_from, field_to in field_map.iteritems():
                    if field_from in changed_values:
                        changed_values[field_to] = changed_values[field_from]
                        del changed_values[field_from]
                # apply auto_now values if present
                for field in self._meta.fields:
                    if hasattr(field, 'auto_now') and field.auto_now and field.name not in changed_values:
                        new_value = field.pre_save(self, False)
                        changed_values[field.name] = new_value
                        setattr(self, field.name, new_value)
                updated = self.__class__.objects.filter(pk=self.pk).update(**changed_values)
                # Reload updated relationships
                for field_name in updated_rel_ids:
                    field = rel_fields[field_name]
                    field_pk = getattr(self, field_name)
                    rel_obj = field.related.parent_model.objects.get(pk=field_pk)
                    setattr(self, field.name, rel_obj)
            if changed_values or signal_anyways:
                signals.post_save.send(sender=self.__class__, instance=self, created=False, raw=raw, using=using)
        return updated == 1
    save_dirty = save_stale
def reset_instance(sender, instance, **kwargs):
    """
    post_save/post_init receiver: refresh the stale-state snapshot on models
    that support it (the hasattr guard skips non-mixin models).
    """
    if hasattr(instance, '_reset_stale_state'):
        instance._reset_stale_state()
# Global registration: every model's post_save/post_init refreshes the
# snapshot (reset_instance itself filters out non-stale models).
signals.post_save.connect(reset_instance)
signals.post_init.connect(reset_instance)
def get_raw_method(method):
    """
    Allows you to attach other class methods or random
    functions to other classes properly.
    """
    import types
    if type(method) == types.FunctionType:
        # Plain function: wrap so add_to_class doesn't bind it as a method.
        return staticmethod(method)
    if type(method) == types.MethodType:
        # Bound/unbound method: take the underlying function.
        return method.__func__
    return method
def auto_add_to_model(sender, **kwargs):
    """
    class_prepared receiver: grafts the StaleFieldsMixin API onto every model
    class when settings.AUTO_STALE_FIELDS is enabled.
    """
    attrs = ['_original_state', '_reset_stale_state', '_as_dict',
             'get_changed_values', 'stale_fields', 'is_stale',
             'save_stale', 'dirty_fields', 'is_dirty', 'save_dirty',
             '_is_stale_enabled']
    # Fix: sender is a model *class*, so subclass membership (issubclass),
    # not isinstance, is the correct test. The effect is unchanged because
    # the _is_stale_enabled class attribute also short-circuits subclasses
    # and previously-patched classes.
    if not (issubclass(sender, StaleFieldsMixin) or getattr(sender, '_is_stale_enabled', False)):
        for attr in attrs:
            method = get_raw_method(getattr(StaleFieldsMixin, attr))
            sender.add_to_class(attr, method)
# Opt-in global activation: patch every model class as it is prepared.
if getattr(settings, 'AUTO_STALE_FIELDS', False):
    signals.class_prepared.connect(auto_add_to_model)
# Django 1.5 added support for updating only specified fields, this fails in
# older versions.
if VERSION >= (1, 5):
    def save(self, *args, **kwargs):
        # update_fields is only valid for UPDATEs; skip it while inserting.
        if not self._state.adding:
            kwargs['update_fields'] = self.stale_fields
        return super(StaleFieldsMixin, self).save(*args, **kwargs)
    StaleFieldsMixin.save = save
| {
"repo_name": "zapier/django-stalefields",
"path": "stalefields/stalefields.py",
"copies": "1",
"size": "6898",
"license": "bsd-3-clause",
"hash": -6044791152309148000,
"line_mean": 38.4171428571,
"line_max": 126,
"alpha_frac": 0.6064076544,
"autogenerated": false,
"ratio": 4.137972405518896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5244380059918896,
"avg_score": null,
"num_lines": null
} |
# Adapted from https://www.kaggle.com/c/cdiscount-image-classification-challenge/discussion/41021
import os
import cv2
import time
import glob
import numpy as np
import pandas as pd
# Import torch-related functions
import torch
import torch.nn.functional as F
from torch.autograd import Variable
# Dataset
PROJECT_NAME = "Cdiscount Image Classification"
PROJECT_FOLDER_PATH = os.path.join(os.path.expanduser("~"), "Documents/Dataset", PROJECT_NAME)
HENGCHERKENG_FOLDER_PATH = os.path.join(PROJECT_FOLDER_PATH, "HengCherKeng")
EXTRACTED_DATASET_FOLDER_PATH = os.path.join(PROJECT_FOLDER_PATH, "extracted")
TEST_FOLDER_PATH = os.path.join(EXTRACTED_DATASET_FOLDER_PATH, "test")
SUBMISSION_FOLDER_PATH = os.path.join(PROJECT_FOLDER_PATH, "submission")
# Add the HengCherKeng folder to the path
import sys
sys.path.append(HENGCHERKENG_FOLDER_PATH)
from inception_v3 import Inception3 # @UnresolvedImport pylint: disable=import-error
from excited_inception_v3 import SEInception3 # @UnresolvedImport pylint: disable=import-error
MODEL_NAME_TO_MODEL_DETAILS_DICT = {"Inception3": (Inception3, "LB=0.69565_inc3_00075000_model.pth"),
"SEInception3": (SEInception3, "LB=0.69673_se-inc3_00026000_model.pth")}
MODEL_NAME = "SEInception3"
MODEL_FUNCTION, MODEL_FILE_NAME = MODEL_NAME_TO_MODEL_DETAILS_DICT[MODEL_NAME]
# Hyperparameters for the neural network
HEIGHT, WIDTH = 180, 180
NUM_CLASSES = 5270
# Save top N predictions to disk
TOP_N_PREDICTIONS = 5
# Save predictions to disk when there are N entries
SAVE_EVERY_N_ENTRIES = 1000
def pytorch_image_to_tensor_transform(image):
    """Convert a BGR uint8 image (H, W, C) to a CHW float tensor scaled to
    [0, 1] and normalised per channel with the listed mean/std constants."""
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = image.transpose((2, 0, 1))  # HWC -> CHW
    tensor = torch.from_numpy(image).float().div(255)  # @UndefinedVariable
    for channel in range(3):
        tensor[channel] = (tensor[channel] - mean[channel]) / std[channel]
    return tensor
def image_to_tensor_transform(image):
    """Convert *image* to a tensor normalised with per-channel mean 0.5 /
    std 0.5, by re-scaling the output of pytorch_image_to_tensor_transform."""
    tensor = pytorch_image_to_tensor_transform(image)
    means = (0.485, 0.456, 0.406)
    stds = (0.229, 0.224, 0.225)
    for channel in range(3):
        tensor[channel] = tensor[channel] * (stds[channel] / 0.5) + (means[channel] - 0.5) / 0.5
    return tensor
def append_entries_to_file(entry_list, file_path):
    """Append *entry_list* rows to *file_path* as header-less CSV; floats are
    written with two decimal places."""
    pd.DataFrame(entry_list).to_csv(
        file_path, header=None, index=False, mode="a",
        float_format="%.2f", encoding="utf-8")
def load_text_file(file_path, sep=",", header="infer", usecols=None, quoting=0, chunksize=1e4, encoding="utf-8"):
    """Stream a delimited text file row by row; reads in chunks of
    *chunksize* rows to bound memory usage."""
    chunk_iterator = pd.read_csv(file_path, sep=sep, header=header,
                                 usecols=usecols, quoting=quoting,
                                 chunksize=chunksize, encoding=encoding)
    for chunk in chunk_iterator:
        for row in chunk.itertuples(index=False):
            yield row
def get_predictions_for_each_product(prediction_file_path_list):
    """Merge per-image prediction rows, read in lockstep from each file in
    *prediction_file_path_list*, into one record per product.

    Yields (product_id, [label_index, prob, label_index, prob, ...]).
    Assumes rows of a product are consecutive and that the files are
    row-aligned (they are written image-by-image in sorted order by run()) —
    TODO confirm for externally supplied files.
    """
    accumulated_product_id = None
    accumulated_label_index_and_prob_value_list = []
    prediction_file_data_generator_list = [load_text_file(prediction_file_path, header=None) for prediction_file_path in prediction_file_path_list]
    for data_tuple in zip(*prediction_file_data_generator_list):
        # Unpack the values
        # data[0:2] identify the image (per the writer in run(), presumably
        # product id + image index); data[2:] are the (label, prob) pairs.
        product_id = data_tuple[0][0]
        label_index_and_prob_value_list = []
        for data in data_tuple:
            label_index_and_prob_value_list += list(data[2:])
        # Append the record if product_id is the same
        if product_id == accumulated_product_id:
            accumulated_label_index_and_prob_value_list += label_index_and_prob_value_list
            continue
        # Yield the accumulated records
        if len(accumulated_label_index_and_prob_value_list) > 0:
            yield accumulated_product_id, accumulated_label_index_and_prob_value_list
        # Update the accumulated records
        accumulated_product_id = product_id
        accumulated_label_index_and_prob_value_list = label_index_and_prob_value_list
    # Yield the accumulated records
    # (flush the final product once the input is exhausted)
    if len(accumulated_label_index_and_prob_value_list) > 0:
        yield accumulated_product_id, accumulated_label_index_and_prob_value_list
def get_submission_from_prediction(prediction_file_path_list, label_index_to_category_id_dict, ensemble_func):
    """For each product, ensemble the per-image class probabilities with
    *ensemble_func* (e.g. np.mean) and yield (product_id, category_id).

    Fix: np.int — a deprecated alias of the builtin int, removed in
    NumPy 1.24 — is replaced with int; the produced values are identical.
    """
    for product_id, label_index_and_prob_value_list in get_predictions_for_each_product(prediction_file_path_list):
        # Entries alternate label_index, prob_value, label_index, ...
        label_index_to_prob_value_list_dict = {}
        label_index_list = label_index_and_prob_value_list[0::2]
        prob_value_list = label_index_and_prob_value_list[1::2]
        for label_index, prob_value in zip(label_index_list, prob_value_list):
            if label_index not in label_index_to_prob_value_list_dict:
                label_index_to_prob_value_list_dict[label_index] = []
            label_index_to_prob_value_list_dict[label_index].append(prob_value)
        # Reduce each label's probability list to a single score, then pick
        # the label with the highest ensembled score.
        label_index_and_chosen_prob_value_array = np.array([(label_index, ensemble_func(prob_value_list)) for label_index, prob_value_list in label_index_to_prob_value_list_dict.items()])
        chosen_label_index = label_index_and_chosen_prob_value_array[np.argmax(label_index_and_chosen_prob_value_array[:, 1]), 0].astype(int)
        chosen_category_id = label_index_to_category_id_dict[chosen_label_index]
        yield product_id, chosen_category_id
def run():
    """End-to-end inference: load the chosen checkpoint, score every test
    image on the GPU, stream top-N predictions to a CSV, then build one
    submission file per ensembling strategy."""
    print("Creating folders ...")
    os.makedirs(SUBMISSION_FOLDER_PATH, exist_ok=True)
    print("Loading {} ...".format(MODEL_NAME))
    net = MODEL_FUNCTION(in_shape=(3, HEIGHT, WIDTH), num_classes=NUM_CLASSES)
    net.load_state_dict(torch.load(os.path.join(HENGCHERKENG_FOLDER_PATH, MODEL_FILE_NAME)))
    net.cuda().eval()
    # Timestamped file name; spaces/colons replaced to stay filesystem-safe.
    prediction_file_path = os.path.join(SUBMISSION_FOLDER_PATH, "{}_prediction_{}.csv".format(MODEL_NAME, time.strftime("%c")).replace(" ", "_").replace(":", "_"))
    open(prediction_file_path, "w").close()  # create/truncate before appending
    print("Prediction will be saved to {}".format(prediction_file_path))
    entry_list = []
    image_file_path_list = sorted(glob.glob(os.path.join(TEST_FOLDER_PATH, "*/*.jpg")))
    for image_file_path in image_file_path_list:
        # Read image
        image = cv2.imread(image_file_path)
        x = image_to_tensor_transform(image)
        # volatile=True: inference-only graph (pre-0.4 PyTorch autograd API)
        x = Variable(x.unsqueeze(0), volatile=True).cuda()
        # Inference
        logits = net(x)
        probs = F.softmax(logits)
        probs = probs.cpu().data.numpy().reshape(-1)
        # Get the top N predictions
        top_n_index_array = probs.argsort()[-TOP_N_PREDICTIONS:][::-1]
        top_n_prob_array = probs[top_n_index_array]
        # Append the results
        # File stem split on "_" supplies the row's id columns (presumably
        # product id + image index — verify against the dataset layout).
        entry = [tuple(os.path.basename(image_file_path).split(".")[0].split("_"))] + [*zip(top_n_index_array, top_n_prob_array)]
        entry_list.append([np.int64(item) if isinstance(item, str) else item for item_list in entry for item in item_list])
        # Save predictions to disk
        if len(entry_list) >= SAVE_EVERY_N_ENTRIES:
            append_entries_to_file(entry_list, prediction_file_path)
            entry_list = []
    # Save predictions to disk
    if len(entry_list) > 0:
        append_entries_to_file(entry_list, prediction_file_path)
        entry_list = []
    print("Loading label_index_to_category_id_dict ...")
    label_index_to_category_id_dict = dict(pd.read_csv(os.path.join(HENGCHERKENG_FOLDER_PATH, "label_index_to_category_id.csv"), header=None).itertuples(index=False))
    print("Generating submission files from prediction files ...")
    for ensemble_name, ensemble_func in zip(["min", "max", "mean", "median"], [np.min, np.max, np.mean, np.median]):
        submission_file_path = os.path.join(SUBMISSION_FOLDER_PATH, "ensembling_{}_{}.csv".format(ensemble_name, time.strftime("%c")).replace(" ", "_").replace(":", "_"))
        with open(submission_file_path, "w") as submission_file_object:
            submission_file_object.write("_id,category_id\n")
        print("Submission will be saved to {}".format(submission_file_path))
        entry_list = []
        for entry in get_submission_from_prediction([prediction_file_path], label_index_to_category_id_dict, ensemble_func):
            # Append the results
            entry_list.append(entry)
            # Save submissions to disk
            if len(entry_list) >= SAVE_EVERY_N_ENTRIES:
                append_entries_to_file(entry_list, submission_file_path)
                entry_list = []
        # Save submissions to disk
        if len(entry_list) > 0:
            append_entries_to_file(entry_list, submission_file_path)
            entry_list = []
    print("All done!")

if __name__ == "__main__":
    run()
| {
"repo_name": "nixingyang/Kaggle-Face-Verification",
"path": "Cdiscount Image Classification/inference_HengCherKeng.py",
"copies": "1",
"size": "8676",
"license": "mit",
"hash": 7654389912968058000,
"line_mean": 45.6451612903,
"line_max": 187,
"alpha_frac": 0.6680497925,
"autogenerated": false,
"ratio": 3.2240802675585285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43921300600585284,
"avg_score": null,
"num_lines": null
} |
# Adapted from: https://www.tensorflow.org/beta/tutorials/distribute/multi_worker_with_estimator
from __future__ import absolute_import, division, print_function, unicode_literals
def main_fun(args, ctx):
    """Per-executor entry point for TFEstimator (InputMode.SPARK).

    Trains a distributed MNIST Estimator fed from a Spark RDD and exports a
    saved_model from the chief worker.

    args: parsed command-line namespace (batch_size, learning_rate, epochs,
          model_dir, export_dir, ...).
    ctx:  TFNode context with cluster info (mgr, num_workers, job_name).
    """
    import numpy as np
    import tensorflow as tf
    import tensorflow_datasets as tfds
    from tensorflowonspark import TFNode

    tfds.disable_progress_bar()

    class StopFeedHook(tf.estimator.SessionRunHook):
        """SessionRunHook to terminate InputMode.SPARK RDD feeding if the training loop exits before the entire RDD is consumed."""

        def __init__(self, feed):
            self.feed = feed

        def end(self, session):
            self.feed.terminate()
            self.feed.next_batch(1)

    BATCH_SIZE = args.batch_size
    LEARNING_RATE = args.learning_rate

    tf_feed = TFNode.DataFeed(ctx.mgr)

    def rdd_generator():
        """Yield (image, label) examples pulled one at a time from the Spark feed."""
        while not tf_feed.should_stop():
            batch = tf_feed.next_batch(1)
            if len(batch) > 0:
                example = batch[0]
                image = np.array(example[0]).astype(np.float32) / 255.0
                image = np.reshape(image, (28, 28, 1))
                label = np.array(example[1]).astype(np.float32)
                label = np.reshape(label, (1,))
                yield (image, label)
            else:
                return

    def input_fn(mode, input_context=None):
        if mode == tf.estimator.ModeKeys.TRAIN:
            # Note: Spark is responsible for sharding/repeating/shuffling the data via RDD
            ds = tf.data.Dataset.from_generator(rdd_generator, (tf.float32, tf.float32), (tf.TensorShape([28, 28, 1]), tf.TensorShape([1])))
            return ds.batch(BATCH_SIZE)
        else:
            # read evaluation data from tensorflow_datasets directly
            def scale(image, label):
                image = tf.cast(image, tf.float32) / 255.0
                return image, label

            # FIX: with_info=True makes tfds.load return a (datasets, info) tuple,
            # so it must be unpacked before indexing (previously `mnist['test']`
            # indexed the tuple and raised a TypeError); matches mnist_tf.py.
            datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True)
            ds = datasets['test']
            if input_context:
                ds = ds.shard(input_context.num_input_pipelines, input_context.input_pipeline_id)
            return ds.map(scale).batch(BATCH_SIZE)

    def serving_input_receiver_fn():
        """Receiver for export: float32 [None, 28, 28, 1] image batch keyed 'conv2d_input'."""
        features = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 28, 28, 1], name='conv2d_input')
        receiver_tensors = {'conv2d_input': features}
        return tf.estimator.export.ServingInputReceiver(receiver_tensors, receiver_tensors)

    def model_fn(features, labels, mode):
        """Simple CNN; returns an EstimatorSpec for PREDICT/EVAL/TRAIN."""
        # FIX: the final layer must emit raw logits (no softmax) because the loss
        # below is built with from_logits=True; consistent with mnist_tf.py.
        model = tf.keras.Sequential([
            tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
            tf.keras.layers.MaxPooling2D(),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(10)
        ])
        logits = model(features, training=False)

        if mode == tf.estimator.ModeKeys.PREDICT:
            predictions = {'logits': logits}
            return tf.estimator.EstimatorSpec(mode, predictions=predictions)

        optimizer = tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=LEARNING_RATE)
        # Per-example losses summed then scaled by 1/BATCH_SIZE (distribution-safe reduction).
        loss = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(labels, logits)
        loss = tf.reduce_sum(input_tensor=loss) * (1. / BATCH_SIZE)
        if mode == tf.estimator.ModeKeys.EVAL:
            return tf.estimator.EstimatorSpec(mode, loss=loss)

        return tf.estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            train_op=optimizer.minimize(
                loss, tf.compat.v1.train.get_or_create_global_step()))

    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
    config = tf.estimator.RunConfig(train_distribute=strategy, save_checkpoints_steps=100)

    classifier = tf.estimator.Estimator(
        model_fn=model_fn, model_dir=args.model_dir, config=config)

    # exporter = tf.estimator.FinalExporter("serving", serving_input_receiver_fn=serving_input_receiver_fn)

    # Note: MultiWorkerMirroredStrategy (CollectiveAllReduceStrategy) is synchronous,
    # so we need to ensure that all workers complete training before any of them run out of data from the RDD.
    # And given that Spark RDD partitions (and partition sizes) can be non-evenly divisible by num_workers,
    # we'll just stop training at 90% of the total expected number of steps.
    steps = 60000 * args.epochs / args.batch_size
    steps_per_worker = steps / ctx.num_workers
    # FIX: max_steps is a step count and must be an int, not a float.
    max_steps_per_worker = int(steps_per_worker * 0.9)

    tf.estimator.train_and_evaluate(
        classifier,
        train_spec=tf.estimator.TrainSpec(input_fn=input_fn, max_steps=max_steps_per_worker, hooks=[StopFeedHook(tf_feed)]),
        eval_spec=tf.estimator.EvalSpec(input_fn=input_fn)
        # eval_spec=tf.estimator.EvalSpec(input_fn=input_fn, exporters=exporter)
    )

    if ctx.job_name == 'chief':
        print("Exporting saved_model to {}".format(args.export_dir))
        classifier.export_saved_model(args.export_dir, serving_input_receiver_fn)
if __name__ == "__main__":
    from pyspark.context import SparkContext
    from pyspark.conf import SparkConf
    from pyspark.sql import SparkSession
    from pyspark.sql.functions import udf
    from pyspark.sql.types import IntegerType
    from tensorflowonspark import dfutil
    from tensorflowonspark.pipeline import TFEstimator, TFModel
    import argparse

    sc = SparkContext(conf=SparkConf().setAppName("mnist_estimator"))
    spark = SparkSession(sc)

    executors = sc._conf.get("spark.executor.instances")
    # default to one executor when spark.executor.instances is unset (e.g. local mode)
    num_executors = int(executors) if executors is not None else 1

    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", help="number of records per batch", type=int, default=64)
    parser.add_argument("--buffer_size", help="size of shuffle buffer", type=int, default=10000)
    parser.add_argument("--cluster_size", help="number of nodes in the cluster", type=int, default=num_executors)
    parser.add_argument("--epochs", help="number of epochs", type=int, default=3)
    parser.add_argument("--format", help="example format: (csv|tfr)", choices=["csv", "tfr"], default="csv")
    parser.add_argument("--images_labels", help="path to MNIST images and labels in parallelized format")
    parser.add_argument("--learning_rate", help="learning rate", type=float, default=1e-3)
    parser.add_argument("--mode", help="train|inference", choices=["train", "inference"], default="train")
    parser.add_argument("--model_dir", help="path to save checkpoint", default="mnist_model")
    parser.add_argument("--export_dir", help="path to export saved_model", default="mnist_export")
    parser.add_argument("--output", help="HDFS path to save predictions", type=str, default="predictions")
    parser.add_argument("--tensorboard", help="launch tensorboard process", action="store_true")
    args = parser.parse_args()
    print("args:", args)

    if args.format == 'tfr':
        # load TFRecords as a DataFrame
        df = dfutil.loadTFRecords(sc, args.images_labels)
    else:  # args.format == 'csv':
        # create RDD of input data
        def parse(ln):
            # CSV row: label in column 0, flattened pixel values after it
            vec = [int(x) for x in ln.split(',')]
            return (vec[1:], vec[0])

        images_labels = sc.textFile(args.images_labels).map(parse)
        df = spark.createDataFrame(images_labels, ['image', 'label'])

    df.show()

    if args.mode == 'train':
        # Spark ML-style estimator: fit() launches main_fun on the cluster.
        estimator = TFEstimator(main_fun, args) \
            .setInputMapping({'image': 'image', 'label': 'label'}) \
            .setModelDir(args.model_dir) \
            .setExportDir(args.export_dir) \
            .setClusterSize(args.cluster_size) \
            .setTensorboard(args.tensorboard) \
            .setEpochs(args.epochs) \
            .setBatchSize(args.batch_size) \
            .setGraceSecs(60)
        model = estimator.fit(df)
    else:  # args.mode == 'inference':
        # using a trained/exported model
        model = TFModel(args) \
            .setInputMapping({'image': 'conv2d_input'}) \
            .setOutputMapping({'logits': 'prediction'}) \
            .setSignatureDefKey('serving_default') \
            .setExportDir(args.export_dir) \
            .setBatchSize(args.batch_size)

        def argmax_fn(l):
            # index of the largest logit == predicted digit class
            return max(range(len(l)), key=lambda i: l[i])

        argmax = udf(argmax_fn, IntegerType())
        preds = model.transform(df).withColumn('argmax', argmax('prediction'))
        preds.show()
        preds.write.json(args.output)
| {
"repo_name": "yahoo/TensorFlowOnSpark",
"path": "examples/mnist/estimator/mnist_pipeline.py",
"copies": "1",
"size": "8062",
"license": "apache-2.0",
"hash": 119764033514177900,
"line_mean": 40.3435897436,
"line_max": 134,
"alpha_frac": 0.6875465145,
"autogenerated": false,
"ratio": 3.5452946350043977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47328411495043976,
"avg_score": null,
"num_lines": null
} |
# Adapted from: https://www.tensorflow.org/beta/tutorials/distribute/multi_worker_with_estimator
def main_fun(args, ctx):
    """Per-executor entry point for TFCluster.run (InputMode.SPARK).

    Trains a distributed MNIST Estimator fed from a Spark RDD and exports a
    saved_model from the chief worker.

    args: parsed command-line namespace (batch_size, learning_rate, epochs,
          model_dir, export_dir, ...).
    ctx:  TFNode context with cluster info (mgr, num_workers, job_name).
    """
    import numpy as np
    import tensorflow as tf
    import tensorflow_datasets as tfds
    from tensorflowonspark import TFNode

    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
    tfds.disable_progress_bar()

    class StopFeedHook(tf.estimator.SessionRunHook):
        """SessionRunHook to terminate InputMode.SPARK RDD feeding if the training loop exits before the entire RDD is consumed."""

        def __init__(self, feed):
            self.feed = feed

        def end(self, session):
            self.feed.terminate()
            self.feed.next_batch(1)

    BATCH_SIZE = args.batch_size
    LEARNING_RATE = args.learning_rate

    tf_feed = TFNode.DataFeed(ctx.mgr)

    def rdd_generator():
        """Yield (image, label) examples pulled one at a time from the Spark feed."""
        while not tf_feed.should_stop():
            batch = tf_feed.next_batch(1)
            if len(batch) > 0:
                example = batch[0]
                image = np.array(example[0]).astype(np.float32) / 255.0
                image = np.reshape(image, (28, 28, 1))
                label = np.array(example[1]).astype(np.float32)
                label = np.reshape(label, (1,))
                yield (image, label)
            else:
                return

    def input_fn(mode, input_context=None):
        if mode == tf.estimator.ModeKeys.TRAIN:
            # Note: Spark is responsible for sharding/repeating/shuffling the data via RDD
            ds = tf.data.Dataset.from_generator(rdd_generator, (tf.float32, tf.float32), (tf.TensorShape([28, 28, 1]), tf.TensorShape([1])))
            return ds.batch(BATCH_SIZE)
        else:
            # FIX: removed a leftover debug `raise Exception("I'm evaluating...")`
            # that made this evaluation branch unreachable and crashed the eval
            # phase of train_and_evaluate. Evaluation data is read from
            # tensorflow_datasets directly, as in mnist_pipeline.py.
            def scale(image, label):
                image = tf.cast(image, tf.float32) / 255.0
                return image, label

            # FIX: with_info=True makes tfds.load return a (datasets, info) tuple,
            # which must be unpacked before indexing (matches mnist_tf.py).
            datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True)
            ds = datasets['test']
            if input_context:
                ds = ds.shard(input_context.num_input_pipelines, input_context.input_pipeline_id)
            return ds.map(scale).batch(BATCH_SIZE)

    def serving_input_receiver_fn():
        """Receiver for export: float32 [None, 28, 28, 1] image batch keyed 'conv2d_input'."""
        features = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 28, 28, 1], name='features')
        receiver_tensors = {'conv2d_input': features}
        return tf.estimator.export.ServingInputReceiver(receiver_tensors, receiver_tensors)

    def model_fn(features, labels, mode):
        """Simple CNN; returns an EstimatorSpec for PREDICT/EVAL/TRAIN."""
        # FIX: the final layer must emit raw logits (no softmax) because the loss
        # below is built with from_logits=True; consistent with mnist_tf.py.
        model = tf.keras.Sequential([
            tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
            tf.keras.layers.MaxPooling2D(),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(10)
        ])
        logits = model(features, training=False)

        if mode == tf.estimator.ModeKeys.PREDICT:
            predictions = {'logits': logits}
            return tf.estimator.EstimatorSpec(mode, predictions=predictions)

        optimizer = tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=LEARNING_RATE)
        # Per-example losses summed then scaled by 1/BATCH_SIZE (distribution-safe reduction).
        loss = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(labels, logits)
        loss = tf.reduce_sum(input_tensor=loss) * (1. / BATCH_SIZE)
        if mode == tf.estimator.ModeKeys.EVAL:
            return tf.estimator.EstimatorSpec(mode, loss=loss)

        return tf.estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            train_op=optimizer.minimize(
                loss, tf.compat.v1.train.get_or_create_global_step()))

    config = tf.estimator.RunConfig(train_distribute=strategy, save_checkpoints_steps=100)

    classifier = tf.estimator.Estimator(
        model_fn=model_fn, model_dir=args.model_dir, config=config)

    # exporter = tf.estimator.FinalExporter("serving", serving_input_receiver_fn=serving_input_receiver_fn)

    # Note: MultiWorkerMirroredStrategy (CollectiveAllReduceStrategy) is synchronous,
    # so we need to ensure that all workers complete training before any of them run out of data from the RDD.
    # And given that Spark RDD partitions (and partition sizes) can be non-evenly divisible by num_workers,
    # we'll just stop training at 90% of the total expected number of steps.
    steps = 60000 * args.epochs / args.batch_size
    steps_per_worker = steps / ctx.num_workers
    # FIX: max_steps is a step count and must be an int, not a float.
    max_steps_per_worker = int(steps_per_worker * 0.9)

    tf.estimator.train_and_evaluate(
        classifier,
        train_spec=tf.estimator.TrainSpec(input_fn=input_fn, max_steps=max_steps_per_worker, hooks=[StopFeedHook(tf_feed)]),
        eval_spec=tf.estimator.EvalSpec(input_fn=input_fn)
        # eval_spec=tf.estimator.EvalSpec(input_fn=input_fn, exporters=exporter)
    )

    if ctx.job_name == 'chief':
        print("Exporting saved_model to {}".format(args.export_dir))
        classifier.export_saved_model(args.export_dir, serving_input_receiver_fn)
if __name__ == "__main__":
    from pyspark.context import SparkContext
    from pyspark.conf import SparkConf
    from tensorflowonspark import TFCluster
    import argparse

    sc = SparkContext(conf=SparkConf().setAppName("mnist_estimator"))
    executors = sc._conf.get("spark.executor.instances")
    # default to one executor when spark.executor.instances is unset (e.g. local mode)
    num_executors = int(executors) if executors is not None else 1

    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", help="number of records per batch", type=int, default=64)
    parser.add_argument("--buffer_size", help="size of shuffle buffer", type=int, default=10000)
    parser.add_argument("--cluster_size", help="number of nodes in the cluster", type=int, default=num_executors)
    parser.add_argument("--epochs", help="number of epochs", type=int, default=3)
    parser.add_argument("--images_labels", help="path to MNIST images and labels in parallelized format")
    parser.add_argument("--learning_rate", help="learning rate", type=float, default=1e-3)
    parser.add_argument("--model_dir", help="path to save checkpoint", default="mnist_model")
    parser.add_argument("--export_dir", help="path to export saved_model", default="mnist_export")
    parser.add_argument("--tensorboard", help="launch tensorboard process", action="store_true")
    args = parser.parse_args()
    print("args:", args)

    # create RDD of input data
    def parse(ln):
        # CSV row: label in column 0, flattened pixel values after it
        vec = [int(x) for x in ln.split(',')]
        return (vec[1:], vec[0])

    images_labels = sc.textFile(args.images_labels).map(parse)

    # InputMode.SPARK: Spark partitions are pushed into the TF workers via DataFeed.
    cluster = TFCluster.run(sc, main_fun, args, args.cluster_size, num_ps=0, tensorboard=args.tensorboard, input_mode=TFCluster.InputMode.SPARK, log_dir=args.model_dir, master_node='chief')
    cluster.train(images_labels, args.epochs)
    cluster.shutdown(grace_secs=60)  # allow time for the chief to export model after data feeding
| {
"repo_name": "yahoo/TensorFlowOnSpark",
"path": "examples/mnist/estimator/mnist_spark.py",
"copies": "1",
"size": "6581",
"license": "apache-2.0",
"hash": -260550140453707550,
"line_mean": 41.4580645161,
"line_max": 187,
"alpha_frac": 0.6998936332,
"autogenerated": false,
"ratio": 3.5211342964151955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9657267055752631,
"avg_score": 0.012752174772512875,
"num_lines": 155
} |
# Adapted from: https://www.tensorflow.org/beta/tutorials/distribute/multi_worker_with_estimator
def main_fun(args, ctx):
    """Per-executor entry point for TFCluster.run (InputMode.TENSORFLOW).

    Trains/evaluates a distributed MNIST Estimator; every worker loads its own
    data via tensorflow_datasets, and the chief exports a saved_model.

    args: parsed command-line namespace (batch_size, buffer_size,
          learning_rate, epochs, model_dir, export_dir, ...).
    ctx:  TFNode context with cluster info (job_name, ...).
    """
    import tensorflow_datasets as tfds
    import tensorflow as tf

    BUFFER_SIZE = args.buffer_size
    BATCH_SIZE = args.batch_size
    LEARNING_RATE = args.learning_rate

    def input_fn(mode, input_context=None):
        """Return a batched (image, label) Dataset; sharded per input pipeline when distributed."""
        datasets, info = tfds.load(name='mnist',
                                   with_info=True,
                                   as_supervised=True)
        mnist_dataset = (datasets['train'] if mode == tf.estimator.ModeKeys.TRAIN else
                         datasets['test'])

        def scale(image, label):
            # normalize pixel values to [0, 1]
            image = tf.cast(image, tf.float32)
            image /= 255
            return image, label

        if input_context:
            mnist_dataset = mnist_dataset.shard(input_context.num_input_pipelines,
                                                input_context.input_pipeline_id)
        return mnist_dataset.repeat(args.epochs).map(scale).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)

    def serving_input_receiver_fn():
        """Receiver for export: float32 [None, 28, 28, 1] image batch keyed 'conv2d_input'."""
        features = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 28, 28, 1], name='features')
        receiver_tensors = {'conv2d_input': features}
        return tf.estimator.export.ServingInputReceiver(receiver_tensors, receiver_tensors)

    def model_fn(features, labels, mode):
        """Simple CNN emitting raw logits; returns an EstimatorSpec for PREDICT/EVAL/TRAIN."""
        model = tf.keras.Sequential([
            tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
            tf.keras.layers.MaxPooling2D(),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(10)
        ])
        logits = model(features, training=False)

        if mode == tf.estimator.ModeKeys.PREDICT:
            predictions = {'logits': logits}
            return tf.estimator.EstimatorSpec(mode, predictions=predictions)

        optimizer = tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=LEARNING_RATE)
        # Per-example losses summed then scaled by 1/BATCH_SIZE (distribution-safe reduction).
        loss = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(labels, logits)
        loss = tf.reduce_sum(input_tensor=loss) * (1. / BATCH_SIZE)
        if mode == tf.estimator.ModeKeys.EVAL:
            return tf.estimator.EstimatorSpec(mode, loss=loss)

        return tf.estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            train_op=optimizer.minimize(
                loss, tf.compat.v1.train.get_or_create_global_step()))

    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
    config = tf.estimator.RunConfig(train_distribute=strategy, save_checkpoints_steps=100)

    classifier = tf.estimator.Estimator(
        model_fn=model_fn, model_dir=args.model_dir, config=config)

    # exporter = tf.estimator.FinalExporter("serving", serving_input_receiver_fn=serving_input_receiver_fn)

    tf.estimator.train_and_evaluate(
        classifier,
        train_spec=tf.estimator.TrainSpec(input_fn=input_fn),
        eval_spec=tf.estimator.EvalSpec(input_fn=input_fn)
        # eval_spec=tf.estimator.EvalSpec(input_fn=input_fn, exporters=exporter)
    )

    if ctx.job_name == 'chief':
        print("========== exporting saved_model to {}".format(args.export_dir))
        classifier.export_saved_model(args.export_dir, serving_input_receiver_fn)
if __name__ == "__main__":
    # tf.app.run()
    from pyspark.context import SparkContext
    from pyspark.conf import SparkConf
    from tensorflowonspark import TFCluster
    import argparse

    sc = SparkContext(conf=SparkConf().setAppName("mnist_estimator"))
    executors = sc._conf.get("spark.executor.instances")
    # default to one executor when spark.executor.instances is unset (e.g. local mode)
    num_executors = int(executors) if executors is not None else 1

    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", help="number of records per batch", type=int, default=64)
    parser.add_argument("--buffer_size", help="size of shuffle buffer", type=int, default=10000)
    parser.add_argument("--cluster_size", help="number of nodes in the cluster", type=int, default=num_executors)
    parser.add_argument("--epochs", help="number of epochs", type=int, default=3)
    parser.add_argument("--learning_rate", help="learning rate", type=float, default=1e-4)
    parser.add_argument("--model_dir", help="path to save checkpoint", default="mnist_model")
    parser.add_argument("--export_dir", help="path to export saved_model", default="mnist_export")
    parser.add_argument("--tensorboard", help="launch tensorboard process", action="store_true")
    args = parser.parse_args()
    print("args:", args)

    # InputMode.TENSORFLOW: each worker reads its own data (no Spark feeding);
    # eval_node=True reserves one executor as the dedicated evaluator.
    cluster = TFCluster.run(sc, main_fun, args, args.cluster_size, num_ps=0, tensorboard=args.tensorboard, input_mode=TFCluster.InputMode.TENSORFLOW, log_dir=args.model_dir, master_node='chief', eval_node=True)
    cluster.shutdown(grace_secs=60)
| {
"repo_name": "yahoo/TensorFlowOnSpark",
"path": "examples/mnist/estimator/mnist_tf.py",
"copies": "1",
"size": "4659",
"license": "apache-2.0",
"hash": 8661494634531110000,
"line_mean": 42.1388888889,
"line_max": 208,
"alpha_frac": 0.6915647135,
"autogenerated": false,
"ratio": 3.5976833976833977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4789248111183398,
"avg_score": null,
"num_lines": null
} |
# Adapted from: https://www.tensorflow.org/beta/tutorials/distribute/multi_worker_with_keras
from __future__ import absolute_import, division, print_function, unicode_literals
def main_fun(args, ctx):
    """Example demonstrating loading TFRecords directly from disk (e.g. HDFS) without tensorflow_datasets."""
    import tensorflow as tf
    from tensorflowonspark import compat

    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()

    BUFFER_SIZE = args.buffer_size
    BATCH_SIZE = args.batch_size
    NUM_WORKERS = args.cluster_size

    # parser for TFRecords downloaded by tensorflow_datasets
    # these are images + labels, where the images are just serialized PNGs
    def parse_tfds(x):
        feature_def = {"label": tf.io.FixedLenFeature(1, tf.int64), "image": tf.io.VarLenFeature(tf.string)}
        example = tf.io.parse_single_example(x, feature_def)
        image = tf.io.decode_image(example['image'].values[0]) / 255
        image.set_shape([28, 28, 1])  # fix for https://github.com/tensorflow/tensorflow/issues/24520
        label = example['label']
        return (image, label)

    # parser for TFRecords generated by ${TFoS_HOME}/examples/mnist/mnist_data_setup.py
    # these are images + labels, where the images are a flattened arrays of ints
    def parse_tfos(example_proto):
        feature_def = {"label": tf.io.FixedLenFeature(10, tf.int64),
                       "image": tf.io.FixedLenFeature(28 * 28 * 1, tf.int64)}
        features = tf.io.parse_single_example(example_proto, feature_def)
        image = tf.cast(features['image'], tf.float32) / 255
        image = tf.reshape(image, (28, 28, 1))
        # labels are stored one-hot (10 ints); convert to a class index
        label = tf.math.argmax(features['label'], output_type=tf.int32)
        return (image, label)

    # Dataset for input data
    # tfds: /path/to/tensorflow_datasets/mnist/1.0.0/mnist-train.tfrecord*
    # tfos: /path/to/mnist/tfr/train/part-r-*
    image_pattern = ctx.absolute_path(args.images_labels)
    ds = tf.data.Dataset.list_files(image_pattern)
    ds = ds.repeat(args.epochs).shuffle(BUFFER_SIZE)
    ds = ds.interleave(tf.data.TFRecordDataset)

    if args.data_format == 'tfds':
        train_datasets_unbatched = ds.map(parse_tfds)
    else:  # 'tfos'
        train_datasets_unbatched = ds.map(parse_tfos)

    def build_and_compile_cnn_model():
        """Simple CNN with softmax outputs, compiled for sparse-label cross-entropy."""
        model = tf.keras.Sequential([
            tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
            tf.keras.layers.MaxPooling2D(),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(10, activation='softmax')
        ])
        model.compile(
            loss=tf.keras.losses.sparse_categorical_crossentropy,
            optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),
            metrics=['accuracy'])
        return model

    # single node
    # single_worker_model = build_and_compile_cnn_model()
    # single_worker_model.fit(x=train_datasets, epochs=3)

    # Here the batch size scales up by number of workers since
    # `tf.data.Dataset.batch` expects the global batch size. Previously we used 64,
    # and now this becomes 128.
    GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
    train_datasets = train_datasets_unbatched.batch(GLOBAL_BATCH_SIZE)

    # this fails
    # callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath=args.model_dir)]
    tf.io.gfile.makedirs(args.model_dir)
    filepath = args.model_dir + "/weights-{epoch:04d}"
    callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath=filepath, verbose=1, save_weights_only=True)]

    # Note: if you part files have an uneven number of records, you may see an "Out of Range" exception
    # at less than the expected number of steps_per_epoch, because the executor with least amount of records will finish first.
    # NOTE(review): this is a float under Python 3 true division; confirm Keras
    # accepts a non-integer steps_per_epoch here.
    steps_per_epoch = 60000 / GLOBAL_BATCH_SIZE

    with strategy.scope():
        multi_worker_model = build_and_compile_cnn_model()

    multi_worker_model.fit(x=train_datasets, epochs=args.epochs, steps_per_epoch=steps_per_epoch, callbacks=callbacks)

    # export the model from the chief worker only (last arg gates the write)
    compat.export_saved_model(multi_worker_model, args.export_dir, ctx.job_name == 'chief')
if __name__ == '__main__':
    import argparse
    from pyspark.context import SparkContext
    from pyspark.conf import SparkConf
    from tensorflowonspark import TFCluster

    sc = SparkContext(conf=SparkConf().setAppName("mnist_keras"))
    executors = sc._conf.get("spark.executor.instances")
    # default to one executor when spark.executor.instances is unset (e.g. local mode)
    num_executors = int(executors) if executors is not None else 1

    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", help="number of records per batch", type=int, default=64)
    parser.add_argument("--buffer_size", help="size of shuffle buffer", type=int, default=10000)
    parser.add_argument("--cluster_size", help="number of nodes in the cluster", type=int, default=num_executors)
    parser.add_argument("--data_format", help="data format (tfos|tfds)", type=str, choices=["tfos", "tfds"], default="tfos")
    parser.add_argument("--epochs", help="number of epochs", type=int, default=3)
    parser.add_argument("--images_labels", help="HDFS path to MNIST image_label files in parallelized format")
    parser.add_argument("--model_dir", help="path to save model/checkpoint", default="mnist_model")
    parser.add_argument("--export_dir", help="path to export saved_model", default="mnist_export")
    parser.add_argument("--tensorboard", help="launch tensorboard process", action="store_true")
    args = parser.parse_args()
    print("args:", args)

    # InputMode.TENSORFLOW: workers read the TFRecord files themselves (no Spark feeding).
    cluster = TFCluster.run(sc, main_fun, args, args.cluster_size, num_ps=0, tensorboard=args.tensorboard, input_mode=TFCluster.InputMode.TENSORFLOW, master_node='chief')
    cluster.shutdown()
| {
"repo_name": "yahoo/TensorFlowOnSpark",
"path": "examples/mnist/keras/mnist_tf_ds.py",
"copies": "1",
"size": "5540",
"license": "apache-2.0",
"hash": 6679479221067892000,
"line_mean": 45.9491525424,
"line_max": 168,
"alpha_frac": 0.7151624549,
"autogenerated": false,
"ratio": 3.4155363748458694,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46306988297458695,
"avg_score": null,
"num_lines": null
} |
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary#Cached_Properties
import itertools
import time
from .decorators import wraps
from .python_compat import iteritems
from logging import getLogger
from types import MethodType, FunctionType
logger = getLogger(__name__)
class cached_property(object):
    """Decorator for read-only properties evaluated only once.

    It can be used to create a cached property like this::

        import random

        # the class containing the property must be a new-style class
        class MyClass(object):
            @cached_property
            def randint(self):
                # will only be evaluated once per instance
                return random.randint(0, 100)

    The value is cached in the '_cache' attribute of the object instance that
    has the property getter method wrapped by this decorator. The '_cache'
    attribute value is a dictionary keyed by property name.
    (Note: the previous docstring mentioned time-based expiry and two-element
    tuples; this class never expires values automatically.)

    To expire a cached property value manually just do::

        del inst._cache[<property name>]
    """

    def __init__(self, fget, doc=None):
        super(cached_property, self).__init__()
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__
        self.__module__ = fget.__module__

    def __get__(self, inst, owner):
        # FIX: accessing the property on the class (inst is None) must return
        # the descriptor itself, per the descriptor protocol convention;
        # previously this fell through and called fget(None).
        if inst is None:
            return self
        try:
            value = inst._cache[self.__name__]
        except (KeyError, AttributeError):
            # miss (or no cache dict yet): compute, then store
            value = self.fget(inst)
            try:
                cache = inst._cache
            except AttributeError:
                cache = inst._cache = {}
            cache[self.__name__] = value
        return value
# Monotonic id generator: each @cached_method decoration gets a unique id so
# that different methods never collide in an instance's shared _cache dict.
_cached_method_id_allocator = itertools.count()
def _get_instancemethod_cache_entry(method_id, *args, **kwargs):
if len(args) + len(kwargs) == 0:
return method_id
try:
kwargs_keys = list(kwargs.keys())
kwargs_keys.sort()
key = (method_id,) + args + tuple([kwargs[key] for key in kwargs_keys])
_ = {key: None}
return key
except TypeError:
return None
def cached_method(func):
    """Decorator that caches a method's return value each time it is called.

    Subsequent calls with the same (hashable) arguments return the cached
    value instead of re-evaluating. Results are stored in the owning
    instance's ``_cache`` dict under a per-decoration id plus the arguments.
    """
    method_id = next(_cached_method_id_allocator)

    @wraps(func)
    def wrapper(inst, *args, **kwargs):
        key = _get_instancemethod_cache_entry(method_id, *args, **kwargs)
        if key is None:
            # unhashable arguments cannot be used as cache keys
            logger.debug("Passed arguments to {0} are mutable, so the returned value will not be cached".format(func.__name__))
            return func(inst, *args, **kwargs)
        try:
            return inst._cache[key]
        except (KeyError, AttributeError):
            pass  # miss, or the instance has no cache dict yet
        value = func(inst, *args, **kwargs)
        if not hasattr(inst, '_cache'):
            inst._cache = {}
        inst._cache[key] = value
        return value

    wrapper.__cached_method__ = True
    wrapper.__method_id__ = method_id
    return wrapper
class cached_method_with_custom_cache(object):
    """Decorator factory like ``cached_method``, but results are stored in a
    caller-supplied cache class (one cache instance per decorated method name)."""

    def __init__(self, cache_class=None):
        # default to a plain dict when no custom cache class is supplied
        if cache_class is None:
            cache_class = dict
        self.cache_class = cache_class

    def __call__(self, func):
        """Decorator that caches a method's return value each time it is called.
        If called later with the same arguments, the cached value is returned, and
        not re-evaluated.
        decorated class must implement inst.init_cache() which creates inst._cache dictionary.
        """
        method_id = next(_cached_method_id_allocator)

        @wraps(func)
        def callee(inst, *args, **kwargs):
            key = _get_instancemethod_cache_entry(method_id, *args, **kwargs)
            func_name = func.__name__
            if key is None:
                # unhashable arguments: fall through to a direct (uncached) call
                logger.debug("Passed arguments to {0} are mutable, so the returned value will not be cached".format(func_name))
                return func(inst, *args, **kwargs)
            try:
                return inst._cache[func_name][key]
            except (KeyError, AttributeError):
                value = func(inst, *args, **kwargs)
                if not hasattr(inst, "_cache"):
                    # NOTE(review): despite the docstring above, the instance cache
                    # is also created lazily here (as a CacheData) when missing.
                    inst._cache = CacheData()
                if inst._cache.get(func_name, None) is None:
                    # cache class creator returns a dict-like per-method cache
                    inst._cache[func_name] = self.cache_class()
                inst._cache[func_name][key] = value
                return value

        callee.__cached_method__ = True
        callee.__method_id__ = method_id
        return callee
def _get_function_cache_entry(args, kwargs):
    """Build a hashable cache key for a plain-function call: positional args as
    a tuple plus keyword items as an order-insensitive frozenset."""
    return (tuple(args), frozenset(iteritems(kwargs)))
def cached_function(func):
    """Decorator that caches a function's return value each time it is called.
    If called later with the same arguments, the cached value is returned, and
    not re-evaluated.
    """
    @wraps(func)
    def callee(*args, **kwargs):
        key = _get_function_cache_entry(args, kwargs)
        try:
            value = func._cache[key]
        except (KeyError, AttributeError):
            value = func(*args, **kwargs)
            # The cache dict is normally created at decoration time (below);
            # this guard only matters if someone deleted func._cache afterwards.
            if not hasattr(func, '_cache'):
                setattr(func, '_cache', {})
            func._cache[key] = value
        return value
    # Expose the same cache dict on both the wrapper and the wrapped function.
    # NOTE(review): if func._cache is ever deleted and recreated above, the two
    # references diverge — callee._cache keeps pointing at the original dict.
    callee._cache = func._cache = dict()
    callee.__cached_method__ = True
    return callee
def clear_cache(self):
    """Drop every memoized value stored on *self*; no-op when nothing is cached."""
    if hasattr(self, '_cache'):
        self._cache.clear()
def clear_cached_entry(self, *args, **kwargs):
    """Evict the cache entry for one specific argument set of a cached callable.

    *self* is either a bound method decorated with @cached_method (the entry is
    popped from the owning instance's _cache) or a function decorated with
    @cached_function; anything else is silently ignored.
    """
    if isinstance(self, MethodType) and getattr(self, '__cached_method__', False):
        method = self
        # 'im_self' is the Python 2 name for the bound instance, '__self__' the Python 3 one
        self = getattr(method, 'im_self', getattr(method, '__self__', None))
        if self is None:
            # unbound method: nothing to evict
            return
        key = _get_instancemethod_cache_entry(method.__method_id__, *args, **kwargs)
    elif isinstance(self, FunctionType) and getattr(self, '__cached_method__', False):
        key = _get_function_cache_entry(args, kwargs)
    else:
        return
    # pop with a default so a missing entry (or missing cache) is not an error
    _ = getattr(self, '_cache', {}).pop(key, None)
def populate_cache(self, attributes_to_skip=()):
    """Eagerly evaluate all of *self*'s lazy cached properties and methods.

    There are two special cases:
    - Some attributes may not be available and raise exceptions.
      If you wish to skip these, pass their names in attributes_to_skip.
    - Cached methods are called without arguments; a TypeError (raised when a
      cached method actually requires arguments) is caught and logged.

    FIX: the default for attributes_to_skip was a mutable list literal ([]),
    a classic shared-default pitfall; an immutable tuple behaves identically
    for the membership test below.
    """
    from inspect import getmembers
    for key, value in getmembers(self):
        if key in attributes_to_skip:
            continue
        # only members produced by the caching decorators carry this marker
        if hasattr(value, "__cached_method__"):
            logger.debug("getting attribute %s from %s", repr(key), repr(self))
            try:
                _ = value()
            except TypeError as e:
                logger.exception(e)
class LazyImmutableDict(object):
    """Read-only mapping whose values are produced lazily.

    Use this when the key set is known up-front but computing each value is
    expensive: keys mapped to None are filled in on first access by
    _create_value(), which subclasses must implement.
    """

    def __init__(self, dict):
        self._dict = dict

    def __getitem__(self, key):
        cached = self._dict[key]
        if cached is None:
            # first access: compute and memoize the real value
            cached = self._create_value(key)
            self._dict[key] = cached
        return cached

    def keys(self):
        return self._dict.keys()

    def __contains__(self, key):
        return key in self._dict

    def has_key(self, key):
        # Python 2-style spelling kept for backward compatibility
        return key in self._dict

    def __len__(self):
        return len(self._dict)

    def _create_value(self, key):
        """Compute the value for *key*; subclasses must override."""
        raise NotImplementedError()
class CacheData(dict):
    """Dict whose entries can be collectively invalidated without being deleted.

    After ``invalidate()`` the stored values remain, but reading any key raises
    KeyError until that key is written again — which lets callers reuse the
    plain try/except-KeyError caching idiom to refresh stale entries.
    """

    def __init__(self):
        super(CacheData, self).__init__()
        self._is_valid = set()  # keys whose values are currently fresh

    def __getitem__(self, key):
        if key not in self._is_valid:
            # FIX: log message was garbled ("cache found invalidate., updating").
            logger.debug("cache invalidated, updating cache for {0}".format(key))
            # FIX: include the key in the exception for easier debugging
            # (callers catching KeyError are unaffected).
            raise KeyError(key)
        return dict.__getitem__(self, key)

    def __setitem__(self, key, value):
        ret_val = dict.__setitem__(self, key, value)
        self._is_valid.add(key)  # writing a key (re)validates it
        return ret_val

    def invalidate(self):
        """Mark every entry stale; stored values remain until overwritten."""
        logger.debug("Invalidate cache")
        self._is_valid = set()
class TimerCacheData(CacheData):
    """CacheData whose entries additionally expire poll_time seconds after being set."""

    def __init__(self, poll_time):
        super(TimerCacheData, self).__init__()
        # seconds each entry stays fresh after a write
        self.poll_time = poll_time

    def __getitem__(self, key):
        # base class stores (deadline, value) pairs and enforces validity
        next_poll_time, value = CacheData.__getitem__(self, key)
        if time.time() > next_poll_time:
            # expired: signal a miss so the caller recomputes and re-stores
            raise KeyError
        return value

    def __setitem__(self, key, value):
        # store the expiry deadline alongside the value
        next_poll_time = time.time() + self.poll_time
        ret_val = CacheData.__setitem__(self, key, (next_poll_time, value))
        return ret_val
| {
"repo_name": "Infinidat/infi.pyutils",
"path": "infi/pyutils/lazy.py",
"copies": "1",
"size": "9213",
"license": "bsd-3-clause",
"hash": -6182288365175713000,
"line_mean": 35.5595238095,
"line_max": 127,
"alpha_frac": 0.599044828,
"autogenerated": false,
"ratio": 4.16500904159132,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.526405386959132,
"avg_score": null,
"num_lines": null
} |
# adapted from http://www.nightmare.com/rushing/python/countmin.py
# estimate top k from a stream using a 'count-min' sketch and heap.
# based on https://github.com/ezyang/ocaml-cminsketch
# use gen-data.py to generate a data file
# usage
# python gen-data.py > dat.txt
# python countmin.py dat.txt
import heapq
import math
import random
import sys
# Bit width of a native Python 2 int (bin() of sys.maxint yields '0b' plus
# int_size-1 one-bits, hence the -1), and a mask of int_size one-bits;
# both are used by the multiply-shift hash below.
int_size = len (bin (sys.maxint)) - 1
int_mask = (1 << int_size) - 1
def log2 (x):
    """Return the base-2 logarithm of x."""
    return math.log (x, 2.0)
def multiply_shift (m, a, x):
    """Multiply-shift universal hash: map x into m bits using odd multiplier a.

    Multiplies by a, keeps the low int_size bits, then takes the top m of them.
    """
    return ((a * x)&int_mask) >> (int_size - m)
def random_odd_int():
    """Return a random odd integer (multiply-shift hashing requires odd multipliers)."""
    n = int (random.getrandbits (int_size-2))
    # shift left and set the low bit to force oddness
    return n<<1|1
class sketch:
    """Count-min sketch plus a k-entry min-heap tracking the heaviest keys.

    NOTE(review): Python 2 code (sys.maxint here; list-sort on dict.values()
    in get_ranking; print statements elsewhere in this module).
    """

    def __init__ (self, k, depth, width):
        # round up the width to a power of 2
        m = int (math.ceil (log2 (float (width))))
        rounded_width = 1 << m
        self.k = k           # number of top keys to track
        self.lg_width = m    # log2 of the rounded table width
        # depth x rounded_width counter matrix, one row per hash function
        self.count = [ [0] * rounded_width for x in range (depth) ]
        self.hash_functions = [ random_odd_int() for x in range (depth) ]
        self.heap = []       # min-heap of [estimate, key] entries
        self.map = {}        # key -> heap entry, for O(1) membership/update

    def update (self, key, c):
        """Add c to key's counters; refresh the top-k heap with the pre-update
        minimum estimate across rows."""
        ix = abs (hash (key))
        est = sys.maxint
        for i in range (len (self.hash_functions)):
            hf = self.hash_functions[i]
            j = multiply_shift (self.lg_width, hf, ix)
            x = self.count[i][j]
            est = min (est, x)
            self.count[i][j] = (x + c)
        self.update_heap (key, est)

    def get (self, key):
        """Return the count-min estimate for key (minimum over all rows)."""
        ix = abs (hash (key))
        r = sys.maxint
        for i in range (len (self.hash_functions)):
            hf = self.hash_functions[i]
            j = multiply_shift (self.lg_width, hf, ix)
            r = min (r, self.count[i][j])
        return r

    def update_heap (self, key, est):
        """Maintain the bounded top-k heap/map given a new estimate for key."""
        if not self.heap or self.heap[0][0] < est:
            probe = self.map.get (key, None)
            if probe is None:
                if len(self.map) < self.k:
                    # still growing...
                    entry = [est, key]
                    heapq.heappush (self.heap, entry)
                    self.map[key] = entry
                else:
                    # push this guy out (evict the current minimum)
                    entry = [est, key]
                    [oest, okey] = heapq.heappushpop (self.heap, entry)
                    del self.map[okey]
                    self.map[key] = entry
            else:
                # key already tracked: bump its estimate in place, then
                # restore the heap invariant
                probe[0] = est
                heapq.heapify (self.heap)
        else:
            # estimate too small to enter the top-k: ignore
            pass

    def get_ranking (self):
        """Return key -> rank (0 = largest estimate) for the tracked top-k keys.

        NOTE(review): relies on Python 2 where dict.values() is a sortable list.
        """
        vals = self.map.values()
        vals.sort()
        vals.reverse()
        r = {}
        for i in range (len (vals)):
            r[vals[i][1]] = i
        return r
def make_sketch (epsilon, delta, k=200):
    """Build a sketch sized for additive error epsilon with failure
    probability at most delta, using the standard count-min sizing
    depth = ceil(ln(1/delta)), width = ceil(e/epsilon).

    k (new, backward-compatible default) is the number of top keys to
    track; sketch.__init__ requires it as its first argument.
    """
    assert (epsilon > 0.0)
    assert (delta < 1.0)
    assert (delta > 0)
    # Originally called undefined helpers int_ceil() and log() (NameError)
    # and passed only two of sketch's three constructor arguments.
    depth = int (math.ceil (math.log (1.0 / delta)))
    width = int (math.ceil (math.e / epsilon))
    return sketch (k, depth, width)
def test_file (path):
    """Feed every '<id> <count>' line of the data file into a sketch.

    Returns (sketch, check) where check is an exact id -> count dict
    used as ground truth by compare().

    NOTE(review): reads the module-level global count_bytes, which is only
    assigned in the __main__ block below -- calling this after a plain
    import raises NameError.  When count_bytes is truthy each id is
    weighted by its count column, otherwise each line counts as 1.
    """
    check = {}
    # k=200 tracked keys, depth 20, width 200 (rounded up to 256)
    s = sketch (200, 20, 200)
    f = open (path, 'rb')
    while 1:
        line = f.readline()
        if not line:
            break
        parts = line.split()
        count = int (parts[1])
        id = parts[0]
        check.setdefault (id, 0)
        if count_bytes:
            s.update (id, count)
            check[id] += count
        else:
            s.update (id, 1)
            check[id] += 1
    return s, check
def compare (s, check):
    """Print true vs. estimated counts (Python 2 print statements).

    For each id, ascending by true count: the exact count, the sketch
    estimate with its relative error, and the id's 1-based rank among the
    sketch's tracked top-k keys (0 if not tracked).
    """
    l = check.items()
    # flip (id, count) -> (count, id) so sorting orders by true count
    l = [ (y, x) for (x, y) in l ]
    l.sort()
    s_rank = s.get_ranking()
    print '%20s %10s %18s %10s' % ("value","true","est (err)","rank")
    for bytes, id in l:
        n0 = s.get (id)
        n = check[id]
        if s.map.has_key (id):
            n2 = s_rank[id] + 1
        else:
            n2 = 0
        print '%20s %10d %10d (%.2f) %10d' % (id, n, n0, (n0 - n)/float(n), n2)
if __name__ == '__main__':
    import sys
    # -b: weight each id by its count column instead of counting lines.
    # count_bytes is a module-level global read later by test_file().
    if '-b' in sys.argv:
        sys.argv.remove ('-b')
        count_bytes = 1
    else:
        count_bytes = 0
    path = sys.argv[1]
    s, c = test_file (path)
    compare (s, c)
| {
"repo_name": "mitdbg/asciiclass",
"path": "lectures/lec10/countmin.py",
"copies": "3",
"size": "4043",
"license": "mit",
"hash": 957780748457707100,
"line_mean": 26.3175675676,
"line_max": 79,
"alpha_frac": 0.4907247094,
"autogenerated": false,
"ratio": 3.2112787926926134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5202003502092614,
"avg_score": null,
"num_lines": null
} |
# adapted from http://www.pygame.org/wiki/OBJFileLoader
import os
import cv2
import numpy as np
from visnav.algo import tools
def MTL(filename):
    """Parse a Wavefront .mtl material library.

    Returns a dict mapping material name -> property dict.  The 'map_Kd'
    (diffuse texture) value is kept as the raw filename string; every
    other property's values are converted to a list of floats.
    Raises ValueError if a property appears before any 'newmtl'.
    """
    materials = {}
    current = None
    for line in open(filename, "r"):
        if line.startswith('#'):
            continue
        tokens = line.split()
        if not tokens:
            continue
        keyword = tokens[0]
        if keyword == 'newmtl':
            current = materials[tokens[1]] = {}
        elif current is None:
            raise ValueError("mtl file doesn't start with newmtl stmt")
        elif keyword == 'map_Kd':
            # texture filename: keep as-is (loading is done elsewhere)
            current[keyword] = tokens[1]
        else:
            current[keyword] = list(map(float, tokens[1:]))
    return materials
class ShapeModel:
    """Triangle mesh loaded from a Wavefront OBJ file or a plain dict.

    Array layout after loading:
      faces     uint32, shape (nf*3, 3): one row per face corner holding
                (vertex idx, normal idx, texcoord idx); normals are
                per-face, so the normal index equals the face index
      vertices  float32, shape (nv, 3)
      normals   float32, shape (nf, 3) face normals (see recalc_norms)
      texcoords float32, shape (nt, 2)
    texfile is the path of the diffuse texture (from the .mtl), if any.
    """
    def __init__(self, fname=None, data=None):
        # either load from an OBJ file or from a serialized dict; with
        # neither given, the caller fills the attributes manually
        self.vertices = None
        self.normals = None
        self.texcoords = None
        self.faces = None
        self.texfile = None
        self._tex = None
        if fname is not None:
            self.from_file(fname)
        elif data is not None:
            self.from_dict(data)
    def from_file(self, fname, swapyz=False):
        """Loads a Wavefront OBJ file.

        swapyz swaps the y and z components of vertices and normals.
        Only triangle faces are accepted.  File-provided normals ('vn')
        are parsed but discarded: recalc_norms() recomputes per-face
        normals at the end.
        """
        vertices = []
        normals = []
        texcoords = []
        faces = []
        self.texfile = None
        dir = os.path.abspath(os.path.dirname(fname))
        #material = None
        for line in open(fname, "r"):
            if line.startswith('#'): continue
            values = line.split()
            if not values: continue
            if values[0] == 'v':
                v = list(map(float, values[1:4]))
                if swapyz:
                    v = v[0], v[2], v[1]
                vertices.append(v)
            elif values[0] == 'vn':
                v = list(map(float, values[1:4]))
                if swapyz:
                    v = v[0], v[2], v[1]
                normals.append(v)
            elif values[0] == 'vt':
                txc = list(map(float, values[1:3]))
                assert len(txc) == 2, 'wrong length texture coordinates'
                texcoords.append(txc)
            elif values[0] in ('usemtl', 'usemat'):
                pass
                # material = values[1]
            elif values[0] == 'mtllib':
                # only the first material of the library is used
                mtl = MTL(os.path.join(dir, values[1]))
                material = tuple(mtl.values())[0]
                if 'map_Kd' in material:
                    self.texfile = os.path.join(dir, material['map_Kd'])
            elif values[0] == 'f':
                fvert = []
                ftext = []
                # norm = []
                # face corners look like v, v/vt or v/vt/vn; OBJ indices
                # are 1-based, hence the -1
                for v in values[1:]:
                    w = v.split('/')
                    fvert.append(int(w[0])-1)
                    if len(w) >= 2 and len(w[1]) > 0:
                        ftext.append(int(w[1])-1)
                    # if len(w) >= 3 and len(w[2]) > 0:
                    #     norm.append(int(w[2]))
                    # else:
                    #     norm.append(0)
                #self.faces.append((face, norms, texcoords, material))
                if len(fvert) == 3:
                    assert len(ftext) == 0 or len(fvert) == len(ftext), 'Some tex coords missing!'
                    # normals are calculated for each face => same indices as faces
                    faces.append((fvert, len(faces), ftext)) # v idx, n idx, t idx
                    # self.triangles.append(tuple(face))
                else:
                    assert False, 'Not a triangle!'
        nf = len(faces)
        faces = ShapeModel._face_massage(faces)
        self.faces = np.array(faces, dtype=np.uint32)
        assert self.faces.shape == (nf*3, 3),\
            'wrong shape "faces" array %s should be (nf*3, 3)' % (self.faces.shape,)
        self.vertices = np.array(vertices, dtype=np.float32)
        assert self.vertices.shape[1:] == (3,),\
            'wrong shape "vertices" array %s should be (-1, 3)' % (self.vertices.shape,)
        self.texcoords = np.array(texcoords, dtype=np.float32)
        assert len(self.texcoords) == 0 or self.texcoords.shape[1:] == (2,),\
            'wrong shape "texcoords" array %s should be (-1, 2)' % (self.texcoords.shape,)
        self.recalc_norms()
        assert self.normals.shape[1:] == (3,), \
            'wrong shape "normals" array %s should be (-1, 3)' % (self.normals.shape,)
    @staticmethod
    def _face_massage(faces):
        # (n faces, v&n&t, 3 x vertices) => (nf*3, v&n&t)
        # '(txs or 0) and txs[j]' yields 0 when the face has no texcoords,
        # txs[j] otherwise
        faces = [(vx, i, (txs or 0) and txs[j])
                 for i, (vxs, nrm, txs) in enumerate(faces)
                 for j, vx in enumerate(vxs)]
        return faces
    def from_dict(self, data):
        """Populate the model from a dict as produced by as_dict()."""
        self.faces = data['faces']
        self.vertices = data['vertices']
        self.normals = data.get('normals', [])
        self.texcoords = data.get('texcoords', [])
        self.texfile = data.get('texfile', None)
        self.tex = data.get('tex', None)
        # backwards compatibility
        # old dicts stored faces in the nested (vxs, nrm, txs) form with
        # 1-based texcoord indices; flatten and rebase them
        if not isinstance(self.faces, np.ndarray):
            nf = len(self.faces)
            self.faces = np.array(ShapeModel._face_massage(self.faces), dtype=np.uint32)
            self.faces[:, 2] -= 1  # tx idxs started from 1
            assert self.faces.shape == (nf * 3, 3),\
                'wrong shape "faces" array %s should be (nf*3, 3)' % (self.faces.shape,)
            self.vertices = np.array(self.vertices, dtype=np.float32)
            assert self.vertices.shape[1:] == (3,),\
                'wrong shape "vertices" array %s should be (-1, 3)' % (self.vertices.shape,)
            self.texcoords = np.array(self.texcoords, dtype=np.float32)
            assert self.texcoords.shape[1:] == (2,),\
                'wrong shape "texcoords" array %s should be (-1, 2)' % (self.texcoords.shape,)
            self.normals = np.array(self.normals, dtype=np.float32)
            assert len(self.normals) == 0 or self.normals.shape[1:] == (3,), \
                'wrong shape "normals" array %s should be (-1, 3)' % (self.normals.shape,)
        if len(self.normals) == 0:
            self.recalc_norms()
        # enforce canonical dtypes regardless of what the dict contained
        self.faces = self.faces.astype(np.uint32)
        self.vertices = self.vertices.astype(np.float32)
        self.texcoords = self.texcoords.astype(np.float32)
        self.normals = self.normals.astype(np.float32)
    def as_dict(self):
        """Serialize the model into a plain dict (see from_dict)."""
        return {'faces': self.faces, 'vertices': self.vertices, 'normals': self.normals,
                'texcoords': self.texcoords, 'texfile': self.texfile, 'tex': self.tex}
    def recalc_norms(self):
        """
        Recalculate normals so that each vertex of a face has the normal of the face. For optional smooth normals,
        would need to average normals across the faces each unique vertex belongs to and set faces[:, 1] = faces[:, 0]
        """
        # reshape faces to be (nf, 3v, v&n&t)
        f, v = self.faces.reshape((-1, 3, 3)), self.vertices
        v1, v2, v3 = v[f[:, 0, 0]], v[f[:, 1, 0]], v[f[:, 2, 0]]
        # cross product of two edges gives the (unnormalized) face normal
        n = np.cross(v2 - v1, v3 - v1)
        self.normals = n / np.linalg.norm(n, axis=1).reshape((-1, 1))
    def pack_all(self):
        """Interleave vertex, normal and texcoord data per face corner as
        float32 bytes (for upload to a GPU vertex buffer)."""
        f, v, n, t = self.faces, self.vertices, self.normals, self.texcoords
        # substitute zero texcoords when the model has none
        t = t if len(t) else np.zeros((len(f), 2), dtype=np.float32)
        return np.hstack((v[f[:, 0], :], n[f[:, 1], :], t[f[:, 2], :])).astype(np.float32).tobytes()
    def pack_simple(self):
        """Vertex positions only, per face corner, as float32 bytes."""
        f, v = self.faces, self.vertices
        return v[f[:, 0], :].astype(np.float32).tobytes()
    def texture_to_vertex_map(self):
        """Map each texcoord index to a vertex index (-1 if unused).

        When several corners share a texcoord, the last face wins.
        """
        tx2vx = np.ones((len(self.texcoords),), dtype=np.int64) * -1
        for v, n, t in self.faces:
            tx2vx[t] = v
        return tx2vx
    def export_smooth_faces(self):
        """
        compatible output for a moderngl ext obj
        """
        assert False, 'not supported anymore'
        # norms = np.zeros((len(self.vertices), 3))
        # for f, n, t in self.faces:
        #     norms[f[0]] += self.normals[n]
        #     norms[f[1]] += self.normals[n]
        #     norms[f[2]] += self.normals[n]
        # norms = norms / np.linalg.norm(norms, axis=1).reshape((-1, 1))
        # faces = [(vx + 1, (txs or None) and txs[i]+1, vx + 1)
        #          for vxs, nrm, txs in self.faces
        #          for i, vx in enumerate(vxs)]
        # return self.vertices, [(tx, ty, 0) for tx, ty in self.texcoords], norms, faces
    def export_angular_faces(self):
        """
        compatible output for a moderngl ext obj
        """
        # pad texcoords to 3 components and rebase indices to 1-based
        texcoords = np.hstack((self.texcoords, np.zeros((len(self.texcoords), 1), dtype=np.float32)))
        return self.vertices, texcoords, self.normals, self.faces[:, (0, 2, 1)] + 1
    def load_texture(self, normalize=True):
        """Load texfile as a grayscale float32 image, caching the result.

        Returns None when the model has no texture file.
        """
        if self.tex is not None:
            return self.tex
        if self.texfile is None:
            return None
        self._tex = cv2.imread(self.texfile, cv2.IMREAD_GRAYSCALE).astype('f4')
        if normalize:
            self._tex /= np.max(self._tex)  # normalize so that max relative albedo is 1
        return self._tex
    @property
    def tex(self):
        # cached grayscale texture image, or None if not loaded
        return self._tex
    @tex.setter
    def tex(self, new_tex):
        self._tex = None if new_tex is None else new_tex.astype('f4')
| {
"repo_name": "oknuutti/visnav-py",
"path": "visnav/iotools/objloader.py",
"copies": "1",
"size": "9954",
"license": "mit",
"hash": -6774647881378110000,
"line_mean": 39.962962963,
"line_max": 118,
"alpha_frac": 0.5134619249,
"autogenerated": false,
"ratio": 3.532292405961675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4545754330861675,
"avg_score": null,
"num_lines": null
} |
# Adapted from Jinja2. Jinja2 is (c) 2017 by the Jinja Team, licensed under the BSD license.
from typing import Union
binary_prefixes = ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
decimal_prefixes = ['kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']


def filesizeformat(value: Union[int, float], binary: bool = False) -> str:
    """Format `value` (a byte count) as a human-readable file size,
    e.g. 13.0 kB, 4.1 MB, 102 Bytes.

    Decimal prefixes (kB, MB, ...) are used by default; pass
    ``binary=True`` for binary prefixes (KiB, MiB, ...).
    """
    num_bytes = float(value)
    base = 1024 if binary else 1000
    prefixes = binary_prefixes if binary else decimal_prefixes
    if num_bytes == 1:
        return '1 Byte'
    if num_bytes < base:
        return f'{num_bytes} Bytes'
    # Walk the prefixes until the next unit is larger than the value; if
    # the value exceeds even the largest unit, fall through and use that
    # largest unit anyway.
    for exponent, prefix in enumerate(prefixes, start=2):
        unit = base ** exponent
        if num_bytes < unit:
            break
    return f'{base * num_bytes / unit:.1f} {prefix}'
| {
"repo_name": "valohai/valohai-cli",
"path": "valohai_cli/utils/file_size_format.py",
"copies": "1",
"size": "1054",
"license": "mit",
"hash": -4481390251797432300,
"line_mean": 39.5384615385,
"line_max": 92,
"alpha_frac": 0.5948766603,
"autogenerated": false,
"ratio": 3.346031746031746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9438840581683277,
"avg_score": 0.000413564929693962,
"num_lines": 26
} |
# Adapted from Joe Birch's post: https://blog.bitrise.io/automating-code-review-tasks-for-multi-module-android-projects
# This takes all of our ktlint output XML files and combines them into one `ktlint-report.xml` file.
# This way we can pass one file into danger-checkstyle_format
import sys
import os.path
from xml.etree import ElementTree
# Root of the merged XML tree (set once the first report is parsed).
first = None
# Report locations, relative to this script's parent directory.
parentDirectory = "../"
ktlintMain = "/build/reports/ktlint/ktlintMainSourceSetCheck/ktlintMainSourceSetCheck.xml"
ktlintTest = "/build/reports/ktlint/ktlintTestSourceSetCheck/ktlintTestSourceSetCheck.xml"
ktlintAndroidTest = "/build/reports/ktlint/ktlintAndroidTestSourceSetCheck/ktlintAndroidTestSourceSetCheck.xml"
detekt = "/build/reports/detekt/detekt.xml"
lint = "/build/reports/lint-results.xml"
module_list = [
    "app",
    "database",
    "analytics"
]
# One candidate report path per (module, report type) pair, in the same
# order the original nested appends produced.
file_list = [
    parentDirectory + module + report
    for module in module_list
    for report in (ktlintMain, ktlintTest, ktlintAndroidTest, detekt)
]
print(file_list)
ktlintFile = 'ktlint-report-orig.xml'
editedKtlintFile = 'ktlint-report.xml'
# Merge every report file that exists into one XML tree rooted at the
# first report found; Element.extend appends the other roots' children.
for filename in file_list:
    if os.path.isfile(filename):
        data = ElementTree.parse(filename).getroot()
        if first is None:
            first = data
        else:
            first.extend(data)
if first is not None:
    # encoding='unicode' makes tostring() return str rather than bytes,
    # which is required for writing to a text-mode file (plain tostring()
    # returns bytes on Python 3 and would raise TypeError here).  The
    # file is also closed deterministically via the context manager.
    with open(ktlintFile, 'w+') as f:
        f.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n")
        f.write(ElementTree.tostring(first, encoding='unicode'))
# Post-process the merged report: strip every word in delete_list
# (currently empty -- kept as a hook for future filtering) and write the
# result to the final report file.  Guarding on the merged file's
# existence avoids a FileNotFoundError when no input reports were found,
# and the context manager closes both files (the original leaked them).
delete_list = []
if os.path.isfile(ktlintFile):
    with open(ktlintFile) as fin, open(editedKtlintFile, "w+") as fout:
        for line in fin:
            for word in delete_list:
                line = line.replace(word, "")
            fout.write(line)
            print(line)
"repo_name": "AdamMc331/CashCaretaker",
"path": "scripts/combine_ktlint_reports.py",
"copies": "1",
"size": "1791",
"license": "mit",
"hash": -7852619629714822000,
"line_mean": 29.8965517241,
"line_max": 119,
"alpha_frac": 0.7213847013,
"autogenerated": false,
"ratio": 3.1039861351819757,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4325370836481976,
"avg_score": null,
"num_lines": null
} |
# Adapted from Keras source code
# License: https://github.com/fchollet/keras/blob/master/LICENSE
import itertools
from keras.layers.containers import Graph, Sequential
from keras.layers.core import Merge
# Import a pydot implementation, preferring the maintained fork, and fail
# fast at import time if graphviz is not available, since nothing below
# can work without it.
try:
    # pydot-ng is a fork of pydot that is better maintained
    import pydot_ng as pydot
except ImportError:
    # fall back on pydot if necessary
    import pydot
if not pydot.find_graphviz():
    raise RuntimeError("Failed to import pydot. You must install pydot"
                       " and graphviz for `pydotprint` to work.")
def layer_typename(layer):
    """Return the fully-qualified class name of `layer`, e.g.
    'keras.layers.core.Dense'."""
    cls = type(layer)
    return cls.__module__ + "." + cls.__name__
def get_layer_to_name(model):
    """Returns a dict mapping layer to their name in the model.

    Only Graph models carry names; any other model yields an empty dict.
    """
    if isinstance(model, Graph):
        named = itertools.chain(
            model.nodes.items(), model.inputs.items(), model.outputs.items()
        )
        return {layer: name for name, layer in named}
    return {}
class ModelToDot(object):
    """
    This is a helper class which visits a keras model (Sequential or Graph) and
    returns a pydot.Graph representation.
    This is implemented as a class because we need to maintain various states.
    Use it as ```ModelToDot()(model)```
    Keras models can have an arbitrary number of inputs and outputs. A given
    layer can have multiple inputs but has a single output. We therefore
    explore the model by starting at its output and crawling "up" the tree.
    """
    def _pydot_node_for_layer(self, layer, label):
        """
        Returns the pydot.Node corresponding to the given layer.
        `label` specify the name of the layer (only used if the layer isn't yet
        associated with a pydot.Node)
        """
        # Check if this already exists (will be the case for nodes that
        # serve as input to more than one layer)
        if layer in self.layer_to_pydotnode:
            node = self.layer_to_pydotnode[layer]
        else:
            layer_id = 'layer%d' % self.idgen
            self.idgen += 1
            label = label + " (" + layer_typename(layer) + ")"
            if self.show_shape:
                # Build the label that will actually contain a table with the
                # input/output
                outputlabels = str(layer.output_shape)
                if hasattr(layer, 'input_shape'):
                    inputlabels = str(layer.input_shape)
                elif hasattr(layer, 'input_shapes'):
                    inputlabels = ', '.join(
                        [str(ishape) for ishape in layer.input_shapes])
                else:
                    inputlabels = ''
                # record-shaped node: name on top, input/output table below
                label = "%s\n|{input:|output:}|{{%s}|{%s}}" % (
                    label, inputlabels, outputlabels)
            node = pydot.Node(layer_id, label=label)
            self.g.add_node(node)
            self.layer_to_pydotnode[layer] = node
        return node
    def _process_layer(self, layer, layer_to_name=None, connect_to=None):
        """
        Process a layer, adding its node to the graph and creating edges to its
        outputs.
        `connect_to` specify where the output of the current layer will be
        connected
        `layer_to_name` is a dict mapping layer to their name in the Graph
        model. Should be {} when processing a Sequential model
        """
        # The layer can be a container layer, in which case we can recurse
        is_graph = isinstance(layer, Graph)
        is_seq = isinstance(layer, Sequential)
        if self.recursive and (is_graph or is_seq):
            # We got a container layer, recursively transform it
            if is_graph:
                child_layers = layer.outputs.values()
            else:
                child_layers = [layer.layers[-1]]
            for l in child_layers:
                self._process_layer(l, layer_to_name=get_layer_to_name(layer),
                                    connect_to=connect_to)
        else:
            # This is a simple layer.
            label = layer_to_name.get(layer, '')
            layer_node = self._pydot_node_for_layer(layer, label=label)
            if connect_to is not None:
                self.g.add_edge(pydot.Edge(layer_node, connect_to))
            # Proceed upwards to the parent(s). Only Merge layers have more
            # than one parent
            if isinstance(layer, Merge):  # Merge layer
                for l in layer.layers:
                    self._process_layer(l, layer_to_name,
                                        connect_to=layer_node)
            elif hasattr(layer, 'previous') and layer.previous is not None:
                self._process_layer(layer.previous, layer_to_name,
                                    connect_to=layer_node)
    def __call__(self, model, recursive=True, show_shape=False,
                 connect_to=None):
        """Build and return the pydot.Dot graph for `model`.

        recursive  -- descend into container (Graph/Sequential) layers
        show_shape -- include input/output shapes in each node's label
        connect_to -- optional pydot node the model's outputs feed into
        """
        self.idgen = 0
        # Maps keras layer to the pydot.Node representing them
        self.layer_to_pydotnode = {}
        self.recursive = recursive
        self.show_shape = show_shape
        self.g = pydot.Dot()
        self.g.set('rankdir', 'TB')
        self.g.set('concentrate', True)
        self.g.set_node_defaults(shape='record')
        if hasattr(model, 'outputs'):
            # Graph
            for name, l in model.outputs.items():
                self._process_layer(l, get_layer_to_name(model),
                                    connect_to=connect_to)
        else:
            # Sequential container
            self._process_layer(model.layers[-1], {}, connect_to=connect_to)
        return self.g
def to_graph(model, **kwargs):
    """Convert a keras model into a pydot graph.

    `recursive` controls whether we recursively explore container layers
    `show_shape` controls whether the shape is shown in the graph
    """
    converter = ModelToDot()
    return converter(model, **kwargs)
def plot(model, to_file='model.png', **kwargs):
    """Render `model` as a PNG image written to `to_file`.

    Extra keyword arguments are forwarded to to_graph().
    """
    to_graph(model, **kwargs).write_png(to_file)
| {
"repo_name": "jisraeli/dragonn",
"path": "dragonn/visualize_util.py",
"copies": "2",
"size": "5953",
"license": "mit",
"hash": 5907687212018057000,
"line_mean": 36.9171974522,
"line_max": 79,
"alpha_frac": 0.5797077104,
"autogenerated": false,
"ratio": 4.108350586611456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5688058297011457,
"avg_score": null,
"num_lines": null
} |
# Adapted from Kevin Keraudren's code at https://github.com/kevin-keraudren/randomforest-python
import numpy as np
from tree import *
import os
from glob import glob
import shutil
import itertools
import multiprocessing as mp
from weakLearner import WeakLearner, AxisAligned
def _grow_trees(params):
thread_id, points, responses, labels, tree = params
tree.grow(points, responses, labels)
return tree
class Forest:
    """Random forest of decision trees with optional multiprocess training.

    Labels are collected from the training responses; prediction is by
    majority vote (or a normalized vote histogram when soft=True).
    Python 2 only (itertools.izip).
    """
    def __init__( self,
                  ntrees=20,
                  tree_params={ 'max_depth' : 10,
                                'min_sample_count' : 5,
                                'test_count' : 100,
                                'test_class' : AxisAligned() },
                  nprocs=1):
        # NOTE(review): tree_params is a mutable default argument shared
        # by every Forest constructed without one -- safe only as long as
        # it is never mutated.
        self.ntrees = ntrees
        self.tree_params = tree_params
        self.trees=[]
        self.labels = []
        self.nprocs = nprocs
    def __len__(self):
        # number of trees in the forest
        return self.ntrees
    def grow(self,points,responses):
        """Train ntrees trees on (points, responses).

        With nprocs > 1, training fans out over a multiprocessing pool;
        each worker receives a pickled copy of a template Tree.
        """
        for r in responses:
            if r not in self.labels:
                self.labels.append(r)
        if self.nprocs == 1:
            for i in range(self.ntrees):
                self.trees.append( Tree( self.tree_params ) )
                self.trees[i].grow( points, responses, self.labels )
        else:
            thread_ids = np.arange(self.ntrees)
            grow_input = itertools.izip(
                thread_ids,
                itertools.repeat(points),
                itertools.repeat(responses),
                itertools.repeat(self.labels),
                itertools.repeat(Tree(self.tree_params))
            )
            pool = mp.Pool(processes=self.nprocs)
            results = pool.map(func=_grow_trees, iterable=grow_input)
            self.trees = list(results)
    def predict(self, point, soft=False):
        """Classify a point.

        soft=True returns a {label: vote fraction} dict; otherwise the
        label with the most votes is returned.
        """
        r = {}
        for c in self.labels:
            r[c] = 0.0
        for i in range(self.ntrees):
            response = int(self.trees[i].predict(point))
            r[response] += 1
        if soft:
            for c in self.labels:
                r[c] /= self.ntrees
            return r
        else:
            response = None
            max_count = -1
            for c in self.labels:
                if r[c] > max_count:
                    response = c
                    max_count = r[c]
            return response
    def save(self,folder):
        """Write each tree to its own file in `folder` (recreated fresh)."""
        if os.path.exists(folder):
            shutil.rmtree(folder)
        os.makedirs(folder)
        # zero-padded filename template sized to the tree count
        template = '%0'+str(int(np.log10(self.ntrees))) + 'd.data'
        for i in range(self.ntrees):
            filename = template % i
            self.trees[i].save(folder + '/' + filename)
        return
    def load(self,folder,test=WeakLearner()):
        """Load every tree file found in `folder`.

        `test` is the weak-learner instance used to deserialize node
        tests; labels are taken from the first loaded tree.
        """
        self.trees = []
        for f in glob(folder+'/*'):
            self.trees.append( Tree() )
            self.trees[-1].load( f, test )
        self.ntrees = len(self.trees)
        self.labels = self.trees[0].labels
        return
| {
"repo_name": "grantathon/computer_vision_machine_learning",
"path": "project/randomforest/forest.py",
"copies": "1",
"size": "2984",
"license": "mit",
"hash": 686679297107518600,
"line_mean": 25.8828828829,
"line_max": 95,
"alpha_frac": 0.5154155496,
"autogenerated": false,
"ratio": 4.016150740242261,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5031566289842261,
"avg_score": null,
"num_lines": null
} |
# Adapted from lightning
import pickle
import numpy as np
from numpy.testing import (assert_almost_equal, assert_array_equal,
assert_equal)
from modl.utils.randomkit import RandomState
def test_random():
    """randint/binomial sample means match the known seed-0 values."""
    rng = RandomState(seed=0)
    draws = [rng.randint(10) for _ in range(10000)]
    assert_almost_equal(np.mean(draws), 5.018)
    draws = [rng.binomial(1000, 0.8) for _ in range(10000)]
    assert_almost_equal(np.mean(draws), 799.8564)
def test_shuffle():
    """In-place shuffle is reproducible for seed 0."""
    indices = np.arange(10)
    RandomState(seed=0).shuffle(indices)
    assert_array_equal(indices, [2, 8, 4, 9, 1, 6, 7, 3, 0, 5])
def test_shuffle_with_trace():
    """shuffle_with_trace shuffles all arrays in lockstep and returns
    the permutation applied to the first one."""
    forward = np.arange(10)
    backward = np.arange(9, -1, -1)
    rng = RandomState(seed=0)
    perm = rng.shuffle_with_trace([forward, backward])
    assert_array_equal(forward, [2, 8, 4, 9, 1, 6, 7, 3, 0, 5])
    assert_array_equal(backward, [7, 1, 5, 0, 8, 3, 2, 6, 9, 4])
    assert_array_equal(forward, perm)
def test_permutation():
    """permutation(n) is reproducible for seed 0."""
    assert_array_equal(RandomState(seed=0).permutation(10),
                       [2, 8, 4, 9, 1, 6, 7, 3, 0, 5])
def test_random_state_pickle():
    """A RandomState pickled after one draw reproduces the same value."""
    rng = RandomState(seed=0)
    expected = rng.randint(5)
    clone = pickle.loads(pickle.dumps(rng))
    assert_equal(expected, clone.randint(5))
| {
"repo_name": "arthurmensch/modl",
"path": "modl/utils/randomkit/tests/test_random.py",
"copies": "1",
"size": "1362",
"license": "bsd-2-clause",
"hash": 1798569976198477800,
"line_mean": 27.9787234043,
"line_max": 67,
"alpha_frac": 0.6292217327,
"autogenerated": false,
"ratio": 2.861344537815126,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39905662705151257,
"avg_score": null,
"num_lines": null
} |
# Adapted from Matlab to Python by Jon Crall
# Original Matlab Source:
# http://www.mathworks.se/matlabcentral/fileexchange/36657-fast-bilateral-filter/content/FastBilateralFilter/shiftableBF.m
# These are shorthands I used to help with porting 1 based to 0 based
# [k] = 2:end
# [-k-1] = fliplr(1:end-1)
# [-k] = fliplr(2:end)
''' Filtering operation
function shiftableBF:
inImg : grayscale image
sigmaS : width of spatial Gaussian
sigmaR : width of range Gaussian
[-w, w]^2 : domain of spatial Gaussian
tol : truncation error
Author: Kunal N. Chaudhury
Date: March 1, 2012
Converted to python on March 25 2013 by Jon Crall
Reference:
[1] K.N. Chaudhury, D. Sage, and M. Unser, 'Fast O(1) bilateral filtering using
trigonometric range kernels,' IEEE Transactions on Image Processing, vol. 20,
no. 11, 2011.
[2] K.N. Chaudhury, 'Acceleration of the shiftable O(1) algorithm for bilateral filtering
and non-local means,' arXiv:1203.5128v1. '''
from pylab import find
from scipy.misc import comb
from scipy.ndimage import convolve
from scipy.signal import gaussian
from skimage.color import rgb2gray
import numpy as np
import sys
def maxFilter(inImg, w):
    ''' Computes the maximum 'local' dynamic range
    inImg : grayscale image
    [-w, w]^2 : search window (w must be odd)
    T : maximum local dynamic range

    Uses blockwise running maxima from the left (L) and right (R) so each
    row/column is processed in O(n) independent of w; the window maximum
    at k is max(R[k-sym], L[k+sym]).  Python 2 only (xrange).
    NOTE(review): p and q are floats (sym = (w-1)/2.), so R[p]/L[q] index
    arrays with floats -- rejected by modern numpy; verify before reuse.
    '''
    T = -1
    sym = (w - 1.)/2.
    (m, n) = inImg.shape
    # pad so both dimensions are multiples of w (mirror the edges)
    pad1 = int(w*np.ceil(float(m)/float(w)) - m)
    pad2 = int(w*np.ceil(float(n)/float(w)) - n)
    inImg2 = np.pad(inImg, ((0, pad1), (0,pad2)), mode='symmetric')
    template = inImg2.copy()
    m = m + pad1
    n = n + pad2
    # scan along row
    for ii in xrange(0,m):
        L = np.zeros(n)  # From the Left
        R = np.zeros(n)  # From the Right
        L[0] = template[ii, 0]
        R[n-1] = template[ii, n-1]
        for k in xrange(1, n):
            if k % w == 0:
                # block boundary: restart the running maxima
                L[k] = template[ii, k ]
                R[-k-1] = template[ii, -k-1]
            else:
                L[k] = max(L[k-1], template[ii, k])
                R[-k-1] = max(R[ -k], template[ii, -k-1])
        for k in xrange(0, n):
            p = k-sym; q = k+sym
            # window max; -1 stands in for out-of-range ends
            template[ii, k] = max\
                ( R[p] if p >= 0 else -1,
                  L[q] if q < n else -1)
    # scan along column
    for jj in xrange(0, n):
        L = np.zeros(m)
        R = np.zeros(m)
        L[0] = template[0, jj]
        R[m-1] = template[m-1, jj]
        for k in xrange(1, m):
            if k % w == 0:
                L[k] = template[k, jj]
                R[-k-1] = template[-k-1, jj]
            else:
                L[k] = max(L[k-1], template[ k , jj])
                R[-k-1] = max(R[ -k], template[-k-1, jj])
        for k in xrange(0, m):
            p = k-sym; q = k+sym
            # local dynamic range = window max minus the center pixel;
            # keep the largest seen anywhere in the image
            temp = max\
                (R[p] if p >= 0 else -1,
                 L[q] if q < m else -1) - inImg2[k, jj]
            if temp > T:
                T = temp
    return T
# These take a long time: (chip, 1.6, 10, 7, 0)
# These are almost as good: (chip, 1.6, 200, 7, 0)
# SigmaR is the real time sucker. The bigger it is the faster it goes
def shiftableBF(inImg, sigmaS=1.6, sigmaR=200, w=7, tol=0):
    '''
    Shiftable O(1) bilateral filter using a raised-cosine range kernel.

    inImg - expects a grayscale numpy array with dtype=uint8
    np.asarray(Image.open(imname).convert('L'))
    sigmaS : width of the spatial Gaussian
    sigmaR : width of the range kernel (larger => faster, fewer terms)
    w      : spatial window size (forced odd)
    tol    : truncation error used to drop small raised-cosine terms

    Returns the filtered image, scaled and cast to match the input.
    '''
    # BUGFIX: was `inMax = inImg.max` (the bound method, never called), so
    # both range checks below compared a method object against 1 and the
    # input was never rescaled to 0..255 as intended.
    inMax = inImg.max()
    nChan = 1 if len(inImg.shape) == 2 else inImg.shape[2]
    inTyp = inImg.dtype
    if nChan == 4: inImg = inImg[:,:,0:3]  # remove alpha
    if nChan > 1: inImg = rgb2gray(inImg)  # remove color
    if inMax <= 1: inImg *= 255.  # force to range 0,255
    if w % 2 == 0:
        w = w + 1
    # separable spatial Gaussian -> normalized 2D filter kernel
    g = gaussian(w,sigmaS)
    g = g.reshape((w,1)) / sum(g)
    filt = g*np.transpose(g)
    # set range interval and the order of raised cosine
    # T = maxFilter(inImg, w)  # exact local dynamic range -- slow;
    # a fixed upper bound is used instead
    T = 192
    #print 'T= %r' % T
    N = float(np.ceil( 0.405 * (float(T) / float(sigmaR))**2 ))
    #print 'N= %r' % N
    gamma = 1. / (np.sqrt(N) * sigmaR)
    #print 'gamma= %r' % gamma
    twoN = 2**N
    #print 'N^2= %r' % twoN
    # compute truncation: M is the number of leading/trailing binomial
    # terms dropped while keeping the total error within tol
    if tol == 0:
        M = 0
    else:
        if sigmaR > 40:
            #print "SigmaR > 4, setting M to 0"
            M = 0
        elif sigmaR > 10:
            #print "SigmaR > 10, adjusting to tolerence"
            sumCoeffs = 0
            M = -1
            #print "Trying N choose from from 0 to %d " % int(np.round(N/2)+1)
            for k in xrange(0,int(np.round(N/2)+1)):
                sumCoeffs += comb(N,k)/twoN
                if sumCoeffs > tol/2.:
                    #print "sumCoeeffs %d tol/2 = %f" % (sumCoeffs, tol/2)
                    M = k
                    break
            if M == -1:
                #print "Setting to 0"
                M = 0
        else:
            #print "40 > SigmaR > 10, adjusting to tolerence"
            M = np.ceil( 0.5 * ( N - np.sqrt(4 * N * np.log10(2./tol)) ) )
    #print 'M = %r' % M
    # main filter: accumulate numerator (outImg1) and denominator
    # (outImg2) over the retained raised-cosine terms; each term needs
    # only four spatial convolutions
    (m, n) = inImg.shape
    outImg1 = np.zeros((m, n))
    outImg2 = np.zeros((m, n))
    outImg = np.zeros((m, n))
    #print (M, N-M+1)
    #sys.stdout.flush()
    for k in np.arange(M, N-M+1):
        coeff = comb(N,k) / twoN
        cosImg = np.cos( (2.*k-N) * gamma * inImg )
        sinImg = np.sin( (2.*k-N) * gamma * inImg )
        phi1 = convolve(np.multiply(inImg, cosImg), filt)
        phi2 = convolve(np.multiply(inImg, sinImg), filt)
        phi3 = convolve(cosImg, filt)
        phi4 = convolve(sinImg, filt)
        outImg1 += coeff * np.add(np.multiply(cosImg, phi1), np.multiply(sinImg, phi2))
        outImg2 += coeff * np.add(np.multiply(cosImg, phi3), np.multiply(sinImg, phi4))
    # avoid division by zero: where the denominator is tiny, fall back to
    # the input pixel (arrays are flattened for 1D indexing, then restored)
    inImg.shape = inImg.size
    outImg.shape = outImg.size
    outImg1.shape = outImg1.size
    outImg2.shape = outImg2.size
    idx1 = find( outImg2 < 0.0001)
    idx2 = find( outImg2 > 0.0001)
    outImg[ idx1 ] = inImg[idx1]
    outImg[ idx2 ] = np.divide(outImg1[idx2], outImg2[idx2])
    inImg.shape = (m,n)
    outImg.shape = (m,n)
    outImg1.shape = (m,n)
    outImg2.shape = (m,n)
    # keep output consistent with input
    if outImg.max() <= 1 and inMax > 1:
        outImg *= 255.
    elif inMax <= 1 and outImg.max() > 1:
        outImg /= 255.
    if outImg.dtype != inTyp:
        outImg = np.array(outImg,dtype=inTyp)
    return outImg
| {
"repo_name": "SU-ECE-17-7/hotspotter",
"path": "hstpl/other/shiftableBF.py",
"copies": "2",
"size": "6577",
"license": "apache-2.0",
"hash": 2168533983182901000,
"line_mean": 31.7213930348,
"line_max": 122,
"alpha_frac": 0.5187775582,
"autogenerated": false,
"ratio": 2.890989010989011,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4409766569189011,
"avg_score": null,
"num_lines": null
} |
from ntlm import HTTPNtlmAuthHandler
import xml.etree.ElementTree as et
import uuid, urllib2, urlparse
class SoapService:
    """Minimal SOAP 1.1 client for a TFS web service (Python 2).

    Requests are assembled with ElementTree and sent over urllib2 using
    NTLM authentication.  Subclasses override _getHeader() to inject a
    SOAP header element.
    """
    NS_SOAP_ENV = "{http://schemas.xmlsoap.org/soap/envelope/}"
    NS_XSI = "{http://www.w3.org/2001/XMLSchema-instance}"
    NS_XSD = "{http://www.w3.org/2001/XMLSchema}"
    def __init__(self, url, username, password):
        # the ClientService endpoint path is appended to the given base URL
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        self.host = netloc
        self.url = '%s://%s%s/WorkItemTracking/v1.0/ClientService.asmx' % (scheme, netloc, path)
        self.username = username
        self.password = password
    def _appendElement(self, name, value, parent):
        # child element with text content
        element = et.SubElement(parent, name)
        element.text = value
    def _appendNullElement(self, name, parent):
        # child element marked xsi:nil="true" (SOAP null)
        element = et.Element(name, {SoapService.NS_XSI + 'nil': 'true'})
        parent.append(element)
    def _buildHeader(self, envelope):
        # wrap the subclass-provided header (if any) in soap:Header
        header = self._getHeader()
        if header is None:
            return
        envHeader = et.SubElement(envelope, SoapService.NS_SOAP_ENV + 'Header')
        envHeader.append(header)
    def _buildMessage(self, bodyMessage):
        # full soap:Envelope with optional Header and the given Body child
        envelope = et.Element(SoapService.NS_SOAP_ENV + 'Envelope')
        self._buildHeader(envelope)
        body = et.SubElement(envelope, SoapService.NS_SOAP_ENV + 'Body')
        body.append(bodyMessage)
        return envelope
    def _send(self, action, body):
        """POST the SOAP body under the given SOAPAction; return the parsed
        response XML, raising HTTPError on a non-200 status.

        NOTE(review): installs a fresh NTLM opener globally on every call
        and reads the status via response.fp -- confirm against the ntlm
        package in use.
        """
        passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, self.url, self.username, self.password)
        proxy = urllib2.ProxyHandler({})
        ntlm = HTTPNtlmAuthHandler.HTTPNtlmAuthHandler(passman)
        opener = urllib2.build_opener(proxy, ntlm)
        urllib2.install_opener(opener)
        message = self._buildMessage(body)
        xmlMessage = et.tostring(message)
        headers = {'Host':self.host,
                   'Content-Type':'text/xml; charset=utf-8',
                   'Content-Length':len(xmlMessage),
                   'SOAPAction':action}
        request = urllib2.Request(self.url, xmlMessage, headers)
        response = urllib2.urlopen(request)
        output = response.read()
        if response.fp.status == 200:
            return et.XML(output)
        raise urllib2.HTTPError(response.geturl(), response.fp.status, response.fp.reason, None, response.fp)
    def _getHeader(self):
        # hook: subclasses return a header Element, or None for no header
        return None
class TfsClientService(SoapService):
    """Client for the TFS work-item tracking ClientService SOAP endpoint."""
    WIT_URL = "http://schemas.microsoft.com/TeamFoundation/2005/06/WorkItemTracking/ClientServices/03"
    NS_WIT = '{%s}' % WIT_URL
    # columns TFS recomputes server-side on every update
    _default_computedColumns = {'System.RevisedDate':None, 'System.ChangedDate':None, 'System.PersonId':None}
    def __init__(self, url, username, password, columns, computedColumns):
        # caller-supplied columns/computedColumns extend the defaults
        SoapService.__init__(self, url, username, password)
        self.columns = {}
        if columns:
            self.columns.update(columns)
        self.computedColumns = TfsClientService._default_computedColumns
        if computedColumns:
            self.computedColumns.update(computedColumns)
    def _buildComputedColumns(self, parent):
        # <ComputedColumns><ComputedColumn Column="..."/>...</ComputedColumns>
        cols = et.SubElement(parent, 'ComputedColumns')
        for name, value in self.computedColumns.iteritems():
            et.SubElement(cols, 'ComputedColumn', {'Column':name})
    def _buildColumns(self, parent):
        # <Columns><Column Column="..."><Value>...</Value></Column>...</Columns>
        cols = et.SubElement(parent, 'Columns')
        for name, value in self.columns.iteritems():
            col = et.SubElement(cols, 'Column', {'Column':name})
            et.SubElement(col, 'Value').text = value
    def _buildElement(self, method):
        # request body element in the work-item tracking namespace
        return et.Element(method, {'xmlns': TfsClientService.WIT_URL})
    def _getAction(self, method):
        # quoted SOAPAction header value for the given method
        return '"%s/%s"' % (TfsClientService.WIT_URL, method)
    def _getHeader(self):
        # RequestHeader carrying a fresh uuid request id
        requestHeader = self._buildElement('RequestHeader')
        self._appendElement('Id', 'uuid:%s' % uuid.uuid4(), requestHeader)
        return requestHeader
    def _getColumns(self, table):
        """Zip a result table's column names with its first row's values.

        Returns a dict of column name -> cell text, or None when the
        table has no rows.  Cells may carry a 'k' attribute giving their
        column index; columns skipped that way are filled with None.
        """
        columnData = {}
        columns = table.findall(str.format('./{0}columns//{0}c', TfsClientService.NS_WIT))
        values = table.findall(str.format('./{0}rows/{0}r//{0}f', TfsClientService.NS_WIT))
        if len(values) == 0:
            return None
        i = 0
        for field in values:
            fieldIndex = field.get('k', None)
            if fieldIndex:
                for j in range(i, int(fieldIndex)):
                    key = columns[j].find('./%sn' % TfsClientService.NS_WIT).text
                    columnData[key] = None
                # NOTE(review): index is assigned but never used, i is not
                # advanced, and this field's own value is never stored --
                # this branch looks incomplete; verify with real payloads.
                index = int(fieldIndex)
            else:
                key = columns[i].find('./%sn' % TfsClientService.NS_WIT).text
                columnData[key] = field.text
                i += 1
        return columnData
    def getWorkItem(self, id):
        """Fetch the latest revision of a work item.

        Returns the WorkItemInfo table as a column dict, or None if no
        such table is present in the response.
        """
        method = 'GetWorkItem'
        body = self._buildElement(method)
        self._appendElement('workItemId', str(id), body)
        self._appendElement('revisionId', '0', body)
        self._appendElement('minimumRevisionId', '0', body)
        self._appendNullElement('asOfDate', body)
        self._appendElement('useMaster', "true", body)
        response = self._send(self._getAction(method), body)
        tables = response.findall(str.format('./{0}Body/{1}GetWorkItemResponse/{1}workItem//{1}table', SoapService.NS_SOAP_ENV, TfsClientService.NS_WIT))
        for t in tables:
            if t.get('name') == 'WorkItemInfo':
                return self._getColumns(t)
        return None
    def addWorkItemComment(self, id, rev, comment):
        """Append `comment` to the work item's System.History field.

        `rev` must be the item's current revision number (TFS optimistic
        concurrency check).
        """
        method = 'Update'
        body = self._buildElement(method)
        package = et.SubElement(body, 'package')
        subPackage = et.SubElement(package, 'Package', {'xmlns':''})
        update = et.SubElement(subPackage, 'UpdateWorkItem', {'ObjectType':'WorkItem', 'WorkItemID':str(id), 'Revision':str(rev)})
        et.SubElement(update, 'InsertText', {'FieldName':'System.History', 'FieldDisplayName':'History'}).text = comment
        self._buildComputedColumns(update)
        self._buildColumns(update)
        self._send(self._getAction(method), body)
| {
"repo_name": "madzak/tfs-git-hook",
"path": "lib/tfs.py",
"copies": "1",
"size": "6355",
"license": "bsd-2-clause",
"hash": -3186517757871922000,
"line_mean": 38.2283950617,
"line_max": 153,
"alpha_frac": 0.6207710464,
"autogenerated": false,
"ratio": 3.767042086544161,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4887813132944161,
"avg_score": null,
"num_lines": null
} |
elif x[0] == 'if': # (if test conseq alt)
if len(x) == 4:
(_, test, conseq, alt) = x
elif len(x) == 3:
(_, test, conseq) = x
alt = None
if eval(test, env):
return eval(conseq, env)
elif alt:
return eval(alt, env)
elif x[0] == 'set!': # (set! var exp)
(_, var, exp) = x
env.find(var)[var] = eval(exp, env)
elif x[0] == 'setparam':
(_, param, value) = x
try:
env.parameters[param] = env.find(value)[value]
except:
env.parameters[param] = value
elif x[0] == 'define': # (define var exp)
(_, var, exp) = x
try:
env[var] = eval(exp, env)
except Exception as e:
logger.warning('Failed to evaluate definition of "{}": {}'.format(var, e))
logger.debug(traceback.format_exc())
env[var] = None
elif x[0] == 'sort':
seq = [link for link in eval(x[1], env) if link is not None and link.output]
logger.debug(seq)
if len(seq) == 1: return seq
try: pred = x[2]
except: pred = '<'
try:
k = x[3]
assert k == ':key'
lam = x[4]
eval(['define', 'sort_func', lam], env)
except: lam = None
rev = pred == '>'
if lam:
l = sorted(seq, key=lambda n: eval(['sort_func', n], env), reverse=rev)
else:
l = sorted(seq, reverse=rev)
return l
elif x[0] == 'lambda': # (lambda (var*) exp)
(_, vars, exp) = x
return lambda *args: eval(exp, Env(vars, args, env))
elif x[0] == 'upload': # (upload exp) Store each intermediate for return
(_, exp) = x
try:
val = eval(exp, env)
results = val
except Exception as e:
logger.warn('Failed to evaluate upload of "{}": {}'. format(to_string(exp), e))
logger.debug(traceback.format_exc())
env.errors.append(e)
env.exceptions.append(traceback.format_exc())
results = None
if type(results) is list:
for r in results:
env.emissions.append(r)
elif results:
env.emissions.append(results)
return results
elif x[0] == 'get':
(_, key, exp) = x
chain = eval(exp, env)
assert type(chain) is WaspLink
val = chain.get_value(key)
if isinstance(val, asmtypes.FileSet):
chain['default_output'] = val
return chain
else: # A value
return val
elif x[0] == 'all_files': ## Gets all data from module directory
(_, exp) = x
chain = eval(exp, env)
assert type(chain) is WaspLink
all_files = utils.ls_recursive(chain['outpath'])
module = chain['module']
chain['default_output'] = asmtypes.set_factory('misc', all_files,
name='{}.all_files'.format(module),
keep_name=True)
return chain
elif x[0] == 'tar': ## Tar outputs from WaspLink(s)
bare_exp, kwargs = extract_kwargs(x)
wlinks = [eval(exp, env) for exp in bare_exp[1:]]
### Format tarball name
if 'name' in kwargs:
tar_name = '{}.tar.gz'.format(kwargs['name'])
else: # Generate Tar Name
tar_name = '{}.tar.gz'.format('_'.join([w['module'] for w in wlinks]))
### Tag the tarball fileset
tag = kwargs.get('tag')
tags = [tag] if tag else []
### Create new link
chain = WaspLink('tar', wlinks)
filelist = []
for w in wlinks:
filelist += w.files
chain['default_output'] = asmtypes.set_factory(
'tar', utils.tar_list(env.outpath, filelist, tar_name),
name=tar_name, keep_name=True, tags=tags)
return chain
elif x[0] == 'begin': # (begin exp*) Return each intermediate
inner_env = Env(outer=env)
val = []
for exp in x[1:]:
try:
ret = eval(exp, inner_env)
if ret:val.append(ret)
except Exception as e:
if list(e):
logger.warning('Failed to eval "{}": {}'.format(to_string(exp), e))
logger.debug(traceback.format_exc())
env.errors.append(e)
env.exceptions.append(traceback.format_exc())
if val:
return val if len(val) > 1 else val[0]
elif x[0] == 'print':
for exp in x[1:]:
print eval(exp, env)
elif x[0] == 'prog': # same as begin, but use same env
val = []
for exp in x[1:]:
try:
ret = eval(exp, env)
if ret: val.append(ret)
except Exception as e:
if list(e):
logger.warning('Failed to eval "{}": {}'.format(to_string(exp), e))
logger.debug(traceback.format_exc())
env.errors.append(e)
env.exceptions.append(traceback.format_exc())
if val:
return val if len(val) > 1 else val[0]
else: # (proc exp*)
exps = [eval(exp, env) for exp in x]
proc = exps.pop(0)
env.next_stage(x[0])
try: ## Assembly functions
return proc(*exps, env=env)
except TypeError as e: ## Built-in functions
logger.debug(traceback.format_exc())
return proc(*exps)
################ parse, read, and user interaction
def extract_kwargs(exp):
    "Find :keys in top level exp"
    kwargs = {}
    stripped = []
    idx = 0
    while idx < len(exp):
        token = exp[idx]
        if token[0] == ':':
            # ':name value' pair -> keyword argument; consume both tokens.
            kwargs[token[1:]] = exp[idx + 1]
            idx += 2
        else:
            stripped.append(token)
            idx += 1
    return stripped, kwargs
def read(s):
    "Read a Scheme expression from a string."
    return read_from(tokenize(s))
parse = read  # `parse` is simply an alias for `read`
def tokenize(s):
    "Convert a string into a list of tokens."
    # Pad parentheses with spaces so split() isolates them as tokens.
    padded = s.replace('(', ' ( ')
    padded = padded.replace(')', ' ) ')
    return padded.split()
def read_from(tokens):
    """Read one expression from a (mutated) list of tokens.

    Raises SyntaxError on premature end of input. Previously an
    unterminated '(' leaked an IndexError from `tokens[0]` instead of
    the SyntaxError this parser raises for every other malformed input.
    """
    if len(tokens) == 0:
        raise SyntaxError('unexpected EOF while reading')
    token = tokens.pop(0)
    if '(' == token:
        L = []
        while True:
            if not tokens:
                raise SyntaxError('unexpected EOF while reading')
            if tokens[0] == ')':
                break
            L.append(read_from(tokens))
        tokens.pop(0) # pop off ')'
        return L
    elif ')' == token:
        raise SyntaxError('unexpected )')
    else:
        return atom(token)
def atom(token):
    "Numbers become numbers; every other token is a symbol."
    # Try the numeric interpretations in order of strictness.
    for cast in (int, float):
        try:
            return cast(token)
        except ValueError:
            pass
    return Symbol(token)
def to_string(exp):
    "Convert a Python object back into a Lisp-readable string."
    if isa(exp, list):
        return '(' + ' '.join(to_string(item) for item in exp) + ')'
    return str(exp)
def repl(prompt='lis.py> '):
    "A prompt-read-eval-print loop."
    # NOTE: Python 2 only (raw_input and the print statement).
    while True:
        val = eval(parse(raw_input(prompt)))
        if val is not None:
            print to_string(val)
def run(exp, env):
    """Parse a Wasp expression string and evaluate it in `env`."""
    # Record how many plugin stages the expression mentions so progress
    # can be tracked via env.global_data['stages'].
    env.global_data['stages'] = sum(exp.count(plugin) for plugin in env.plugins)
    return eval(parse(exp), env=env)
class WaspLink(dict):
    """A dict-based node in the recursive Wasp pipeline chain.

    Keys: 'link' (parent WaspLink(s) or None), 'module' (plugin name),
    'default_output' (FileSet, list of FileSets, or ''), 'data'
    (FileSetContainer of file outputs), 'info' (non-file outputs).
    """
    def __init__(self, module=None, link=None):
        self['link'] = link
        self['module'] = module
        self['default_output'] = ''
        self['data'] = None
        self['info'] = {}
    @property
    def files(self):
        """ Return default results of current link """
        out = self['default_output']
        if type(out) is list:
            # Several filesets: flatten all their files into one list.
            return [f for fset in out for f in fset.files]
        return self['default_output'].files
    @property
    def output(self):
        # Shorthand accessor for the link's default output.
        return self['default_output']
    def insert_output(self, output, default_type, module_name):
        """ Parses the output dict of a completed module and stores the
        data and information within the WaspLink object """
        filesets = []
        for outtype, outvalue in output.items():
            name = '{}_{}'.format(module_name, outtype)
            if not type(outvalue) is list:
                outvalue = [outvalue]
            ## Store default output
            if default_type == outtype:
                if isinstance(outvalue[0], asmtypes.FileSet):
                    for out in outvalue:
                        out['tags'].append(module_name)
                    self['default_output'] = outvalue
                else: # Files
                    self['default_output'] = asmtypes.set_factory(outtype, [asmtypes.FileInfo(f) for f in outvalue],
                                                                  name=name)
                    self['default_output']['tags'].append(module_name)
            ## Store all outputs and values
            outputs = []
            are_files = False
            for out in outvalue:
                try:
                    if os.path.exists(out): # These are files, convert to FileInfo format
                        outputs.append(asmtypes.FileInfo(out))
                        are_files = True
                    else:
                        raise Exception('Not a file')
                except Exception as e: # Not a file
                    # Any non-path value means the whole group is plain data.
                    outputs = outvalue
                    break
            if are_files:
                filesets.append(asmtypes.set_factory(outtype, outputs, name=name))
            else:
                # Unwrap single-element groups for convenient lookup later.
                self['info'][outtype] = outputs if not len(outputs) == 1 else outputs[0]
        self['data'] = asmtypes.FileSetContainer(filesets)
    def get_value(self, key):
        # Prefer scalar info values; otherwise return the first matching fileset.
        if key in self['info']:
            return self['info'][key]
        return self['data'].find_type(key)[0]
    def traverse(self):
        # Debug-log this link's output name, then recurse into parent links.
        if self['link']:
            logger.debug("traverse: {}".format(self['default_output']['name']))
            for i,wlink in enumerate(self['link']):
                logger.debug('link: {}'.format(i))
                wlink.traverse()
    def find_module(self, module):
        """ Traverses the chain to find module """
        if self['module'] == module:
            return self
        # NOTE(review): the unconditional return inside the loop means only
        # the FIRST parent link is searched — confirm whether a full
        # traversal of all links was intended.
        for wlink in self['link']:
            return wlink.find_module(module)
class WaspEngine():
    """Evaluates Wasp expressions against the assembly plugin manager.

    Seeds the evaluation environment with one callable per plugin plus the
    READS / CONTIGS starting links and scoring helper functions.
    """
    def __init__(self, plugin_manager, job_data, meta=None):
        self.constants_reads = 'READS'
        self.constants_contigs = 'CONTIGS'
        self.pmanager = plugin_manager
        # Global eval environment: one wrapper function per plugin name.
        self.assembly_env = add_globals(Env(job_data=job_data, meta=meta))
        self.assembly_env.update({k:self.get_wasp_func(k, job_data) for k in self.pmanager.plugins})
        self.assembly_env.plugins = self.pmanager.plugins
        self.job_data = job_data
        reads_link = WaspLink()
        contigs_link = WaspLink()
        if 'initial_data' not in job_data:
            job_data['initial_data'] = asmtypes.FileSetContainer(job_data.wasp_data().referencesets +
                                                                 job_data.wasp_data().readsets +
                                                                 job_data.wasp_data().contigsets)
        reads_link['default_output'] = list(job_data['initial_data'].readsets)
        contigs_link['default_output'] = list(job_data['initial_data'].contigsets)
        self.assembly_env.update({self.constants_reads: reads_link})
        self.assembly_env.update({self.constants_contigs: contigs_link})
        # Expose scoring/inspection helpers to Wasp expressions.
        self.assembly_env.update({'arast_score': wf.arast_score,
                                  'has_paired': wf.has_paired,
                                  'has_short_reads_only': wf.has_short_reads_only,
                                  'n50': wf.n50})
    def run_expression(self, exp, job_data=None):
        """Evaluate a Wasp expression string; record outputs/errors in job_data."""
        if not job_data:
            job_data = self.job_data
        ## Run Wasp expression
        # NOTE: `unicode` exists only on Python 2; this module targets py2.
        if type(exp) is str or type(exp) is unicode:
            w_chain = run(exp, self.assembly_env)
        ## Record results into job_data
        if type(w_chain) is not list: # Single
            w_chain = [w_chain]
        for w in self.assembly_env.emissions + w_chain:
            try:
                job_data.add_results(w['default_output'])
            except:
                logger.warn('Output not added: {}'.format(w))
        job_data['tracebacks'] = [str(e) for e in self.assembly_env.exceptions]
        job_data['errors'] = [str(e) for e in self.assembly_env.errors]
        return w_chain[0]
    def get_wasp_func(self, module, job_data):
        """Return a closure that runs plugin `module` on the given input links."""
        def run_module(*inlinks, **kwargs):
            # WaspLinks keep track of the recursive pipelines
            env = kwargs['env']
            ## Flatten inlinks if lists are present
            links = []
            for link in inlinks:
                if type(link) is list:
                    links += link
                else:
                    links.append(link)
            wlink = WaspLink(module, links)
            self.pmanager.run_proc(module, wlink, job_data, env.parameters)
            return wlink
        return run_module
###### Utility
def pipelines_to_exp(pipes, job_id):
    """
    Convert pipeline mode into Wasp expression
    """
    # Assume that these modules will use initial reads
    add_reads = ['sspace', 'reapr', 'bwa', 'bowtie2']
    all_pipes = []
    for pipe in pipes:
        exp = 'READS'
        params = []
        for m in pipe:
            if m[0] == '?':
                # '?key=value' tokens collect parameters for the next module.
                params.append(m[1:].split('='))
            else:
                if params:
                    # Wrap pending setparam forms around the expression so far.
                    setparams = ' '.join(['(setparam {} {})'.format(p[0], p[1]) for p in params])
                    exp = '(begin {} {})'.format(setparams, exp)
                    params = []
                if m in add_reads:
                    exp = '({} {} READS)'.format(m, exp)
                else:
                    exp = '({} {})'.format(m, exp)
        #### Flush params
        if params:
            setparams = ' '.join(['(setparam {} {})'.format(p[0], p[1]) for p in params])
            exp = '(begin {} {})'.format(setparams, exp)
            params = []
        #exp = '(upload {})'.format(exp)
        #all_pipes.append(exp)
        all_pipes.append(exp)
    #### Check for duplicates and redefine
    # Hoist sub-expressions shared between pipeline pairs into (define valN ...)
    # forms so each shared stage is evaluated only once.
    val_num = 0
    replacements = []
    defs = []
    reversed_pairs = set()
    lces = set()
    for pipe1, pipe2 in itertools.permutations(all_pipes, 2):
        reversed_pairs.add((pipe2, pipe1))
        if not (pipe1, pipe2) in reversed_pairs:
            for n in get_orphans(pipe1):
                for m in get_orphans(pipe2):
                    if n == m and to_string(n) not in lces:
                        lce = to_string(n)
                        lces.add(lce)
                        replacements.append((lce.strip(), 'val{}'.format(val_num)))
                        defs.append('(define val{} {})'.format(val_num, lce.strip()))
                        val_num += 1
    #### Replace defined expressions
    for replacement in replacements:
        for i, pipe in enumerate(all_pipes):
            all_pipes[i] = pipe.replace(*replacement)
    #### Form final expression
    # Upload all pipelines ranked by assembly score, then tar the QUAST analysis.
    ranked_upload = '(upload (sort (list {}) > :key (lambda (c) (arast_score c))))'.format(' '.join(all_pipes))
    final_exp = '(begin {} (tar (all_files (quast {})) :name {}_analysis :tag quast))'.format(' '.join(defs), ranked_upload, job_id)
    return final_exp
def _has_sibling(exp_list, exp):
if not exp_list:
return False
siblings = 0
for e in exp_list:
if type(e) is list:
siblings += 1
return exp in exp_list and siblings > 1
def orphans(exp_list, parent=None):
    "Recursively yield expression lists whose parent has no competing sub-lists."
    for position, element in enumerate(exp_list):
        if type(element) is list:
            for nested in orphans(element, parent=exp_list):
                yield nested
        elif position == 0 and not _has_sibling(parent, exp_list):
            yield exp_list
def get_orphans(pipe_string):
    "Parse a pipeline string and collect its orphan sub-expressions."
    return list(orphans(read_from(tokenize(pipe_string))))
| {
"repo_name": "kbase/assembly",
"path": "lib/assembly/wasp.py",
"copies": "1",
"size": "19952",
"license": "mit",
"hash": 4523150534895927300,
"line_mean": 34.9495495495,
"line_max": 132,
"alpha_frac": 0.5081696071,
"autogenerated": false,
"ratio": 3.90755973364669,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9879094182746366,
"avg_score": 0.007327031600064643,
"num_lines": 555
} |
elif x[0] == 'if': # (if test conseq alt)
(_, test, conseq, alt) = x
return eval((conseq if eval(test, env) else alt), env)
elif x[0] == 'set!': # (set! var exp)
(_, var, exp) = x
env.find(var)[var] = eval(exp, env)
elif x[0] == 'setparam':
(_, param, value) = x
try:
env.parameters[param] = env.find(value)[value]
except:
env.parameters[param] = value
elif x[0] == 'define': # (define var exp)
(_, var, exp) = x
try: env[var] = eval(exp, env)
except Exception as e:
print ' [!] Failed to evaluate definition of "{}": {}'.format(var, e)
print traceback.format_exc()
env.errors.append(e)
env.exceptions.append(traceback.format_exc())
env[var] = None
elif x[0] == 'sort':
seq = [link for link in eval(x[1], env) if link is not None and link.output]
logging.debug(seq)
if len(seq) == 1: return seq
try: pred = x[2]
except: pred = '<'
try:
k = x[3]
assert k == ':key'
lam = x[4]
eval(['define', 'sort_func', lam], env)
except: lam = None
rev = pred == '>'
if lam:
l = sorted(seq, key=lambda n: eval(['sort_func', n], env), reverse=rev)
else:
l = sorted(seq, reverse=rev)
return l
elif x[0] == 'lambda': # (lambda (var*) exp)
(_, vars, exp) = x
return lambda *args: eval(exp, Env(vars, args, env))
elif x[0] == 'upload': # (upload exp) Store each intermediate for return
(_, exp) = x
try:
val = eval(exp, env)
results = val
except Exception as e:
print ' [!]: {} -- {}'.format(to_string(exp), e)
print traceback.format_exc()
env.errors.append(e)
env.exceptions.append(traceback.format_exc())
results = None
if type(results) is list:
for r in results:
env.emissions.append(r)
elif results:
env.emissions.append(results)
return results
elif x[0] == 'get':
(_, key, exp) = x
chain = eval(exp, env)
assert type(chain) is WaspLink
val = chain.get_value(key)
if isinstance(val, asmtypes.FileSet):
chain['default_output'] = val
return chain
else: # A value
return val
elif x[0] == 'all_files': ## Gets all data from module directory
(_, exp) = x
chain = eval(exp, env)
assert type(chain) is WaspLink
all_files = utils.ls_recursive(chain['outpath'])
module = chain['module']
chain['default_output'] = asmtypes.set_factory('misc', all_files,
name='{}.all_files'.format(module),
keep_name=True)
return chain
elif x[0] == 'tar': ## Tar outputs from WaspLink(s)
bare_exp, kwargs = extract_kwargs(x)
wlinks = [eval(exp, env) for exp in bare_exp[1:]]
### Format tarball name
if 'name' in kwargs:
tar_name = '{}.tar.gz'.format(kwargs['name'])
else: # Generate Tar Name
tar_name = '{}.tar.gz'.format('_'.join([w['module'] for w in wlinks]))
### Tag the tarball fileset
tag = kwargs.get('tag')
tags = [tag] if tag else []
### Create new link
chain = WaspLink('tar', wlinks)
filelist = []
for w in wlinks:
filelist += w.files
chain['default_output'] = asmtypes.set_factory(
'tar', utils.tar_list(env.outpath, filelist, tar_name),
name=tar_name, keep_name=True, tags=tags)
return chain
elif x[0] == 'begin': # (begin exp*) Return each intermediate
inner_env = Env(outer=env)
val = []
for exp in x[1:]:
try:
ret = eval(exp, inner_env)
if ret:val.append(ret)
except Exception as e:
if list(e):
print(traceback.format_exc())
env.errors.append(e)
env.exceptions.append(traceback.format_exc())
if val:
return val if len(val) > 1 else val[0]
elif x[0] == 'prog': # same as begin, but use same env
val = []
for exp in x[1:]:
try:
ret = eval(exp, env)
if ret:val.append(ret)
except Exception as e:
if list(e):
print(traceback.format_exc())
env.errors.append(e)
env.exceptions.append(traceback.format_exc())
if val:
return val if len(val) > 1 else val[0]
else: # (proc exp*)
exps = [eval(exp, env) for exp in x]
proc = exps.pop(0)
env.next_stage(x[0])
try: ## Assembly functions
return proc(*exps, env=env)
except TypeError as e: ## Built-in functions
logging.info(traceback.format_exc())
return proc(*exps)
################ parse, read, and user interaction
def extract_kwargs(exp):
    "Find :keys in top level exp"
    kwargs = {}
    stripped = []
    pos = 0
    while pos < len(exp):
        item = exp[pos]
        if item[0] == ':':
            # ':name value' pair -> keyword argument; consume both items.
            kwargs[item[1:]] = exp[pos + 1]
            pos += 2
        else:
            stripped.append(item)
            pos += 1
    return stripped, kwargs
def read(s):
    "Read a Scheme expression from a string."
    return read_from(tokenize(s))
parse = read  # `parse` is simply an alias for `read`
def tokenize(s):
    "Convert a string into a list of tokens."
    # Surround parentheses with spaces so split() yields them as tokens.
    spaced = s.replace('(', ' ( ').replace(')', ' ) ')
    return spaced.split()
def read_from(tokens):
    """Read one expression from a (mutated) list of tokens.

    Raises SyntaxError on premature end of input. Previously an
    unterminated '(' leaked an IndexError from `tokens[0]` instead of
    the SyntaxError this parser raises for every other malformed input.
    """
    if len(tokens) == 0:
        raise SyntaxError('unexpected EOF while reading')
    token = tokens.pop(0)
    if '(' == token:
        L = []
        while True:
            if not tokens:
                raise SyntaxError('unexpected EOF while reading')
            if tokens[0] == ')':
                break
            L.append(read_from(tokens))
        tokens.pop(0) # pop off ')'
        return L
    elif ')' == token:
        raise SyntaxError('unexpected )')
    else:
        return atom(token)
def atom(token):
    "Numbers become numbers; every other token is a symbol."
    # Attempt numeric interpretations in order of strictness.
    for converter in (int, float):
        try:
            return converter(token)
        except ValueError:
            pass
    return Symbol(token)
def to_string(exp):
    "Convert a Python object back into a Lisp-readable string."
    if isa(exp, list):
        return '(' + ' '.join(to_string(item) for item in exp) + ')'
    return str(exp)
def repl(prompt='lis.py> '):
    "A prompt-read-eval-print loop."
    # NOTE: Python 2 only (raw_input and the print statement).
    while True:
        val = eval(parse(raw_input(prompt)))
        if val is not None: print to_string(val)
def run(exp, env):
    """Parse a Wasp expression string and evaluate it in `env`."""
    # Record how many plugin stages the expression mentions so progress
    # can be tracked via env.global_data['stages'].
    env.global_data['stages'] = sum(exp.count(plugin) for plugin in env.plugins)
    return eval(parse(exp), env=env)
class WaspLink(dict):
    """A dict-based node in the recursive Wasp pipeline chain.

    Keys: 'link' (parent WaspLink(s) or None), 'module' (plugin name),
    'default_output' (FileSet, list of FileSets, or ''), 'data'
    (FileSetContainer of file outputs), 'info' (non-file outputs).
    """
    def __init__(self, module=None, link=None):
        self['link'] = link
        self['module'] = module
        self['default_output'] = ''
        self['data'] = None
        self['info'] = {}
    @property
    def files(self):
        """ Return default results of current link """
        out = self['default_output']
        if type(out) is list:
            # Several filesets: flatten all their files into one list.
            return [f for fset in out for f in fset.files]
        return self['default_output'].files
    @property
    def output(self):
        # Shorthand accessor for the link's default output.
        return self['default_output']
    def insert_output(self, output, default_type, module_name):
        """ Parses the output dict of a completed module and stores the
        data and information within the WaspLink object """
        filesets = []
        for outtype, outvalue in output.items():
            name = '{}_{}'.format(module_name, outtype)
            if not type(outvalue) is list:
                outvalue = [outvalue]
            ## Store default output
            if default_type == outtype:
                if isinstance(outvalue[0], asmtypes.FileSet):
                    for out in outvalue:
                        out['tags'].append(module_name)
                    self['default_output'] = outvalue
                else: # Files
                    self['default_output'] = asmtypes.set_factory(outtype, [asmtypes.FileInfo(f) for f in outvalue],
                                                                  name=name)
                    self['default_output']['tags'].append(module_name)
            ## Store all outputs and values
            outputs = []
            are_files = False
            for out in outvalue:
                try:
                    if os.path.exists(out): # These are files, convert to FileInfo format
                        outputs.append(asmtypes.FileInfo(out))
                        are_files = True
                    else:
                        raise Exception('Not a file')
                except Exception as e: # Not a file
                    # Any non-path value means the whole group is plain data.
                    outputs = outvalue
                    break
            if are_files:
                filesets.append(asmtypes.set_factory(outtype, outputs, name=name))
            else:
                # Unwrap single-element groups for convenient lookup later.
                self['info'][outtype] = outputs if not len(outputs) == 1 else outputs[0]
        self['data'] = asmtypes.FileSetContainer(filesets)
    def get_value(self, key):
        # Prefer scalar info values; otherwise return the first matching fileset.
        if key in self['info']:
            return self['info'][key]
        return self['data'].find_type(key)[0]
    def traverse(self):
        # Print this link's output name, then recurse into parent links.
        # NOTE: Python 2 print statements.
        if self['link']:
            print self['default_output']['name']
            for i,wlink in enumerate(self['link']):
                print 'link ', i
                wlink.traverse()
    def find_module(self, module):
        """ Traverses the chain to find module """
        if self['module'] == module:
            return self
        # NOTE(review): the unconditional return inside the loop means only
        # the FIRST parent link is searched — confirm whether a full
        # traversal of all links was intended.
        for wlink in self['link']:
            return wlink.find_module(module)
class WaspEngine():
    """Evaluates Wasp expressions against the assembly plugin manager.

    Seeds the evaluation environment with one callable per plugin plus the
    READS starting link and scoring helper functions.
    """
    def __init__(self, plugin_manager, job_data, meta=None):
        self.constants_reads = 'READS'
        self.constants_contigs = 'CONTIGS'
        self.pmanager = plugin_manager
        # Global eval environment: one wrapper function per plugin name.
        self.assembly_env = add_globals(Env(job_data=job_data, meta=meta))
        self.assembly_env.update({k:self.get_wasp_func(k, job_data) for k in self.pmanager.plugins})
        self.assembly_env.plugins = self.pmanager.plugins
        self.job_data = job_data
        init_link = WaspLink()
        if 'initial_data' not in job_data:
            job_data['initial_data'] = asmtypes.FileSetContainer(job_data.wasp_data().referencesets +
                                                                 job_data.wasp_data().readsets)
        init_link['default_output'] = list(job_data['initial_data'].readsets)
        self.assembly_env.update({self.constants_reads: init_link})
        # Expose scoring/inspection helpers to Wasp expressions.
        self.assembly_env.update({'arast_score': wf.arast_score,
                                  'has_paired': wf.has_paired,
                                  'n50': wf.n50})
    def run_expression(self, exp, job_data=None):
        """Evaluate a Wasp expression string; record outputs/errors in job_data."""
        if not job_data:
            job_data = self.job_data
        ## Run Wasp expression
        # NOTE: `unicode` exists only on Python 2; this module targets py2.
        if type(exp) is str or type(exp) is unicode:
            w_chain = run(exp, self.assembly_env)
        ## Record results into job_data
        if type(w_chain) is not list: # Single
            w_chain = [w_chain]
        for w in self.assembly_env.emissions + w_chain:
            try:
                job_data.add_results(w['default_output'])
            except: print 'Output', w
        job_data['tracebacks'] = [str(e) for e in self.assembly_env.exceptions]
        job_data['errors'] = [str(e) for e in self.assembly_env.errors]
        return w_chain[0]
    def get_wasp_func(self, module, job_data):
        """Return a closure that runs plugin `module` on the given input links."""
        def run_module(*inlinks, **kwargs):
            # WaspLinks keep track of the recursive pipelines
            env = kwargs['env']
            ## Flatten inlinks if lists are present
            links = []
            for link in inlinks:
                if type(link) is list:
                    links += link
                else:
                    links.append(link)
            wlink = WaspLink(module, links)
            self.pmanager.run_proc(module, wlink, job_data, env.parameters)
            return wlink
        return run_module
###### Utility
def pipelines_to_exp(pipes, job_id):
    """
    Convert pipeline mode into Wasp expression
    """
    # Assume that these modules will use initial reads
    add_reads = ['sspace', 'reapr', 'bwa', 'bowtie2']
    all_pipes = []
    for pipe in pipes:
        exp = 'READS'
        params = []
        for m in pipe:
            if m[0] == '?':
                # '?key=value' tokens collect parameters for the next module.
                params.append(m[1:].split('='))
            else:
                if params:
                    # Wrap pending setparam forms around the expression so far.
                    setparams = ' '.join(['(setparam {} {})'.format(p[0], p[1]) for p in params])
                    exp = '(begin {} {})'.format(setparams, exp)
                    params = []
                if m in add_reads:
                    exp = '({} {} READS)'.format(m, exp)
                else:
                    exp = '({} {})'.format(m, exp)
        #### Flush params
        if params:
            setparams = ' '.join(['(setparam {} {})'.format(p[0], p[1]) for p in params])
            exp = '(begin {} {})'.format(setparams, exp)
            params = []
        #exp = '(upload {})'.format(exp)
        #all_pipes.append(exp)
        all_pipes.append(exp)
    #### Check for duplicates and redefine
    # Hoist sub-expressions shared between pipeline pairs into (define valN ...)
    # forms so each shared stage is evaluated only once.
    val_num = 0
    replacements = []
    defs = []
    reversed_pairs = set()
    lces = set()
    for pipe1, pipe2 in itertools.permutations(all_pipes, 2):
        reversed_pairs.add((pipe2, pipe1))
        if not (pipe1, pipe2) in reversed_pairs:
            for n in get_orphans(pipe1):
                for m in get_orphans(pipe2):
                    if n == m and to_string(n) not in lces:
                        lce = to_string(n)
                        lces.add(lce)
                        replacements.append((lce.strip(), 'val{}'.format(val_num)))
                        defs.append('(define val{} {})'.format(val_num, lce.strip()))
                        val_num += 1
    #### Replace defined expressions
    for replacement in replacements:
        for i, pipe in enumerate(all_pipes):
            all_pipes[i] = pipe.replace(*replacement)
    #### Form final expression
    # Upload all pipelines ranked by assembly score, then tar the QUAST analysis.
    ranked_upload = '(upload (sort (list {}) > :key (lambda (c) (arast_score c))))'.format(' '.join(all_pipes))
    final_exp = '(begin {} (tar (all_files (quast {})) :name {}_analysis :tag quast))'.format(' '.join(defs), ranked_upload, job_id)
    return final_exp
def _has_sibling(exp_list, exp):
if not exp_list:
return False
siblings = 0
for e in exp_list:
if type(e) is list:
siblings += 1
return exp in exp_list and siblings > 1
def orphans(exp_list, parent=None):
    "Recursively yield expression lists whose parent has no competing sub-lists."
    for position, element in enumerate(exp_list):
        if type(element) is list:
            for nested in orphans(element, parent=exp_list):
                yield nested
        elif position == 0 and not _has_sibling(parent, exp_list):
            yield exp_list
def get_orphans(pipe_string):
    "Parse a pipeline string and collect its orphan sub-expressions."
    return list(orphans(read_from(tokenize(pipe_string))))
| {
"repo_name": "levinas/assembly",
"path": "lib/assembly/wasp.py",
"copies": "2",
"size": "19138",
"license": "mit",
"hash": -8035814847772044000,
"line_mean": 35.0414312618,
"line_max": 132,
"alpha_frac": 0.5064270039,
"autogenerated": false,
"ratio": 3.9161039492531207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5422530953153121,
"avg_score": null,
"num_lines": null
} |
"""Adapted from Nematode: https://github.com/demelin/nematode """
import sys
import tensorflow as tf
from tensorflow.python.ops.init_ops import glorot_uniform_initializer
# ModuleNotFoundError is new in 3.6; older versions will throw SystemError
if sys.version_info < (3, 6):
ModuleNotFoundError = SystemError
try:
from .tf_utils import get_shape_list
from .transformer_layers import FeedForwardLayer, matmul_nd
except (ModuleNotFoundError, ImportError) as e:
from tf_utils import get_shape_list
from transformer_layers import FeedForwardLayer, matmul_nd
class MultiHeadAttentionLayer(object):
""" Defines the multi-head, multiplicative attention mechanism;
based on the tensor2tensor library implementation. """
def __init__(self,
reference_dims,
hypothesis_dims,
total_key_dims,
total_value_dims,
output_dims,
num_heads,
float_dtype,
dropout_attn,
drophead,
training,
name=None):
# Set attributes
self.reference_dims = reference_dims
self.hypothesis_dims = hypothesis_dims
self.total_key_dims = total_key_dims
self.total_value_dims = total_value_dims
self.output_dims = output_dims
self.num_heads = num_heads
self.float_dtype = float_dtype
self.training = training
self.name = name
# Check if the specified hyper-parameters are consistent
if total_key_dims % num_heads != 0:
raise ValueError('Specified total attention key dimensions {:d} must be divisible by the number of '
'attention heads {:d}'.format(total_key_dims, num_heads))
if total_value_dims % num_heads != 0:
raise ValueError('Specified total attention value dimensions {:d} must be divisible by the number of '
'attention heads {:d}'.format(total_value_dims, num_heads))
if dropout_attn > 0:
self.dropout_attn = tf.keras.layers.Dropout(rate=dropout_attn)
else:
self.dropout_attn = None
if drophead > 0:
self.drophead = tf.keras.layers.Dropout(rate=drophead, noise_shape=[None, None, 1, 1])
else:
self.drophead = None
# Instantiate parameters
with tf.compat.v1.variable_scope(self.name):
self.queries_projection = FeedForwardLayer(self.hypothesis_dims,
self.total_key_dims,
float_dtype,
dropout_rate=0.,
activation=None,
use_bias=False,
use_layer_norm=False,
training=self.training,
name='queries_projection')
self.keys_projection = FeedForwardLayer(self.reference_dims,
self.total_key_dims,
float_dtype,
dropout_rate=0.,
activation=None,
use_bias=False,
use_layer_norm=False,
training=self.training,
name='keys_projection')
self.values_projection = FeedForwardLayer(self.reference_dims,
self.total_value_dims,
float_dtype,
dropout_rate=0.,
activation=None,
use_bias=False,
use_layer_norm=False,
training=self.training,
name='values_projection')
self.context_projection = FeedForwardLayer(self.total_value_dims,
self.output_dims,
float_dtype,
dropout_rate=0.,
activation=None,
use_bias=False,
use_layer_norm=False,
training=self.training,
name='context_projection')
def _compute_attn_inputs(self, query_context, memory_context):
""" Computes query, key, and value tensors used by the attention function for the calculation of the
time-dependent context representation. """
queries = self.queries_projection.forward(query_context)
keys = self.keys_projection.forward(memory_context)
values = self.values_projection.forward(memory_context)
return queries, keys, values
def _split_among_heads(self, inputs):
""" Splits the attention inputs among multiple heads. """
# Retrieve the depth of the input tensor to be split (input is 3d)
inputs_dims = get_shape_list(inputs)
inputs_depth = inputs_dims[-1]
# Assert the depth is compatible with the specified number of attention heads
if isinstance(inputs_depth, int) and isinstance(self.num_heads, int):
assert inputs_depth % self.num_heads == 0, \
('Attention inputs depth {:d} is not evenly divisible by the specified number of attention heads {:d}'
.format(inputs_depth, self.num_heads))
split_inputs = tf.reshape(inputs, inputs_dims[:-1] + [self.num_heads, inputs_depth // self.num_heads])
return split_inputs
def _merge_from_heads(self, split_inputs):
""" Inverts the _split_among_heads operation. """
# Transpose split_inputs to perform the merge along the last two dimensions of the split input
split_inputs = tf.transpose(a=split_inputs, perm=[0, 2, 1, 3])
# Retrieve the depth of the tensor to be merged
split_inputs_dims = get_shape_list(split_inputs)
split_inputs_depth = split_inputs_dims[-1]
# Merge the depth and num_heads dimensions of split_inputs
merged_inputs = tf.reshape(split_inputs, split_inputs_dims[:-2] + [self.num_heads * split_inputs_depth])
return merged_inputs
def _dot_product_attn(self, queries, keys, values, attn_mask, scaling_on):
    """ Defines the dot-product attention function; see Vasvani et al.(2017), Eq.(1).

    Args:
        queries/keys/values: 4D tensors, shape [batch_size, time_steps, num_heads, num_features].
        attn_mask: optional additive mask, or None.
        scaling_on: if True, logits are scaled by sqrt(key depth).
    Returns:
        Attention-weighted value tensor with heads on axis 1.
    """
    # query/ key/ value have shape = [batch_size, time_steps, num_heads, num_features]
    # Tile keys and values tensors to match the number of decoding beams; ignored if already done by fusion module
    # NOTE(review): integer division assumes the queries' batch axis is an
    # exact multiple of the keys' batch axis — confirm this holds for callers.
    num_beams = get_shape_list(queries)[0] // get_shape_list(keys)[0]
    keys = tf.cond(pred=tf.greater(num_beams, 1), true_fn=lambda: tf.tile(keys, [num_beams, 1, 1, 1]), false_fn=lambda: keys)
    values = tf.cond(pred=tf.greater(num_beams, 1), true_fn=lambda: tf.tile(values, [num_beams, 1, 1, 1]), false_fn=lambda: values)
    # Transpose split inputs
    # Move heads forward: [batch, heads, time, features]
    queries = tf.transpose(a=queries, perm=[0, 2, 1, 3])
    values = tf.transpose(a=values, perm=[0, 2, 1, 3])
    # Keys are transposed inline to [batch, heads, features, time], so the
    # matmul yields logits of shape [batch, heads, query_time, key_time]
    attn_logits = tf.matmul(queries, tf.transpose(a=keys, perm=[0, 2, 3, 1]))
    # Scale attention_logits by key dimensions to prevent softmax saturation, if specified
    if scaling_on:
        # keys is still un-transposed here, so [-1] is the per-head feature depth
        key_dims = get_shape_list(keys)[-1]
        normalizer = tf.sqrt(tf.cast(key_dims, self.float_dtype))
        attn_logits /= normalizer
    # Optionally mask out positions which should not be attended to
    # attention mask should have shape=[batch, num_heads, query_length, key_length]
    # attn_logits has shape=[batch, num_heads, query_length, key_length]
    if attn_mask is not None:
        # Tile the mask across beams to match the tiled keys/values
        attn_mask = tf.cond(pred=tf.greater(num_beams, 1),
                            true_fn=lambda: tf.tile(attn_mask, [num_beams, 1, 1, 1]),
                            false_fn=lambda: attn_mask)
        # The mask is additive: disallowed positions carry large negative values
        attn_logits += attn_mask
    # Calculate attention weights
    attn_weights = tf.nn.softmax(attn_logits)
    # Optionally apply dropout:
    if self.dropout_attn is not None:
        attn_weights = self.dropout_attn(attn_weights, training=self.training)
    # Optionally apply DropHead:
    if self.drophead is not None:
        attn_weights = self.drophead(attn_weights, training=self.training)
    # Weigh attention values
    weighted_memories = tf.matmul(attn_weights, values)
    return weighted_memories
def forward(self, query_context, memory_context, attn_mask, layer_memories):
    """ Runs multi-head attention over the given contexts and returns the
    projected context vectors along with the (possibly updated) layer memories. """
    # Self-attention: query and memory share the same context
    memory_context = query_context if memory_context is None else memory_context
    # Project inputs into queries, keys, and values
    queries, keys, values = self._compute_attn_inputs(query_context, memory_context)
    # Incremental decoding: prepend cached keys/values and refresh the cache
    if layer_memories is not None:
        keys = tf.concat([layer_memories['keys'], keys], axis=1)
        values = tf.concat([layer_memories['values'], values], axis=1)
        layer_memories['keys'] = keys
        layer_memories['values'] = values
    # Head split -> scaled dot-product attention -> head merge
    attended = self._dot_product_attn(self._split_among_heads(queries),
                                      self._split_among_heads(keys),
                                      self._split_among_heads(values),
                                      attn_mask,
                                      scaling_on=True)
    merged = self._merge_from_heads(attended)
    # Final output projection
    return self.context_projection.forward(merged), layer_memories
class SingleHeadAttentionLayer(object):
    """ Single-head attention module.

    Supports Bahdanau-style 'additive' attention and Luong-style
    'multiplicative' attention over a reference (key/value) sequence.
    """

    def __init__(self,
                 reference_dims,
                 hypothesis_dims,
                 hidden_dims,
                 float_dtype,
                 dropout_attn,
                 training,
                 name,
                 attn_type='multiplicative'):
        # Declare attributes
        self.reference_dims = reference_dims      # depth of the attended-over (key/value) states
        self.hypothesis_dims = hypothesis_dims    # depth of the query states
        self.hidden_dims = hidden_dims            # projection size used for scoring
        self.float_dtype = float_dtype
        self.attn_type = attn_type
        self.training = training
        self.name = name
        assert attn_type in ['additive', 'multiplicative'], 'Attention type {:s} is not supported.'.format(attn_type)
        if dropout_attn > 0:
            self.dropout_attn = tf.keras.layers.Dropout(rate=dropout_attn)
        else:
            self.dropout_attn = None
        # Instantiate parameters
        with tf.compat.v1.variable_scope(self.name):
            # Only additive attention needs a query projection and a score weight
            self.queries_projection = None
            self.attn_weight = None
            if attn_type == 'additive':
                self.queries_projection = FeedForwardLayer(self.hypothesis_dims,
                                                           self.hidden_dims,
                                                           float_dtype,
                                                           dropout_rate=0.,
                                                           activation=None,
                                                           use_bias=False,
                                                           use_layer_norm=False,
                                                           training=self.training,
                                                           name='queries_projection')
                self.attn_weight = tf.compat.v1.get_variable(name='attention_weight',
                                                             shape=self.hidden_dims,
                                                             dtype=float_dtype,
                                                             initializer=glorot_uniform_initializer(),
                                                             trainable=True)
            self.keys_projection = FeedForwardLayer(self.reference_dims,
                                                    self.hidden_dims,
                                                    float_dtype,
                                                    dropout_rate=0.,
                                                    activation=None,
                                                    use_bias=False,
                                                    use_layer_norm=False,
                                                    training=self.training,
                                                    name='keys_projection')

    def _compute_attn_inputs(self, query_context, memory_context):
        """ Computes query, key, and value tensors used by the attention function for the calculation of the
        time-dependent context representation. """
        # Queries are only projected for additive attention; multiplicative
        # attention consumes the raw query states
        queries = query_context
        if self.attn_type == 'additive':
            queries = self.queries_projection.forward(query_context)
        keys = self.keys_projection.forward(memory_context)
        # Values are the un-projected memory states
        values = memory_context
        return queries, keys, values

    def _additive_attn(self, queries, keys, values, attn_mask):
        """ Uses additive attention to compute contextually enriched source-side representations. """
        # Account for beam-search
        # NOTE(review): tiling is unconditional here (unlike the multi-head
        # layer's tf.cond guard) — assumes num_beams >= 1.
        num_beams = get_shape_list(queries)[0] // get_shape_list(keys)[0]
        keys = tf.tile(keys, [num_beams, 1, 1])
        values = tf.tile(values, [num_beams, 1, 1])

        def _logits_fn(query):
            """ Computes time-step-wise attention scores. """
            # Bahdanau-style score: sum over features of w * tanh(keys + query)
            query = tf.expand_dims(query, 1)
            return tf.reduce_sum(input_tensor=self.attn_weight * tf.nn.tanh(keys + query), axis=-1)

        # Obtain attention scores
        transposed_queries = tf.transpose(a=queries, perm=[1, 0, 2])  # time-major
        attn_logits = tf.map_fn(_logits_fn, transposed_queries)
        # Back to batch-major: [batch, query_time, key_time]
        attn_logits = tf.transpose(a=attn_logits, perm=[1, 0, 2])
        if attn_mask is not None:
            # Transpose and tile the mask
            attn_logits += tf.tile(tf.squeeze(attn_mask, 1),
                                   [get_shape_list(queries)[0] // get_shape_list(attn_mask)[0], 1, 1])
        # Compute the attention weights
        attn_weights = tf.nn.softmax(attn_logits, axis=-1, name='attn_weights')
        # Optionally apply dropout
        if self.dropout_attn is not None:
            attn_weights = self.dropout_attn(attn_weights, training=self.training)
        # Obtain context vectors
        weighted_memories = tf.matmul(attn_weights, values)
        return weighted_memories

    def _multiplicative_attn(self, queries, keys, values, attn_mask):
        """ Uses multiplicative attention to compute contextually enriched source-side representations. """
        # Account for beam-search
        num_beams = get_shape_list(queries)[0] // get_shape_list(keys)[0]
        keys = tf.tile(keys, [num_beams, 1, 1])
        values = tf.tile(values, [num_beams, 1, 1])
        # Use multiplicative attention
        # Score is a plain dot product between queries and keys
        transposed_keys = tf.transpose(a=keys, perm=[0, 2, 1])
        attn_logits = tf.matmul(queries, transposed_keys)
        if attn_mask is not None:
            # Transpose and tile the mask
            attn_logits += tf.tile(tf.squeeze(attn_mask, 1),
                                   [get_shape_list(queries)[0] // get_shape_list(attn_mask)[0], 1, 1])
        # Compute the attention weights
        attn_weights = tf.nn.softmax(attn_logits, axis=-1, name='attn_weights')
        # Optionally apply dropout
        if self.dropout_attn is not None:
            attn_weights = self.dropout_attn(attn_weights, training=self.training)
        # Obtain context vectors
        weighted_memories = tf.matmul(attn_weights, values)
        return weighted_memories

    def forward(self, query_context, memory_context, attn_mask, layer_memories):
        """ Propagates the input information through the attention layer.

        Returns the attention-weighted memories and the (possibly updated)
        layer_memories dict used for incremental decoding.
        """
        # The context for the query and the referenced memory is identical in case of self-attention
        if memory_context is None:
            memory_context = query_context
        # Get attention inputs
        queries, keys, values = self._compute_attn_inputs(query_context, memory_context)
        # Recall and update memories (analogous to the RNN state) - decoder only
        if layer_memories is not None:
            keys = tf.concat([layer_memories['keys'], keys], axis=1)
            values = tf.concat([layer_memories['values'], values], axis=1)
            layer_memories['keys'] = keys
            layer_memories['values'] = values
        # Obtain weighted layer hidden representations
        if self.attn_type == 'additive':
            weighted_memories = self._additive_attn(queries, keys, values, attn_mask)
        else:
            weighted_memories = self._multiplicative_attn(queries, keys, values, attn_mask)
        return weighted_memories, layer_memories
class FineGrainedAttentionLayer(SingleHeadAttentionLayer):
    """ Defines the fine-grained attention layer; based on
    "Fine-grained attention mechanism for neural machine translation.", Choi et al, 2018.

    Unlike the parent class, attention weights are computed per feature
    dimension (the softmax runs over key positions for each feature
    separately, axis=-2), yielding feature-wise context vectors.
    """

    def __init__(self,
                 reference_dims,
                 hypothesis_dims,
                 hidden_dims,
                 float_dtype,
                 dropout_attn,
                 training,
                 name,
                 attn_type='multiplicative'):
        # All parameters are created by the parent constructor; only the
        # attention functions are overridden below
        super(FineGrainedAttentionLayer, self).__init__(reference_dims, hypothesis_dims, hidden_dims, float_dtype,
                                                        dropout_attn, training, name, attn_type)

    def _additive_attn(self, queries, keys, values, attn_mask):
        """ Uses additive attention to compute contextually enriched source-side representations. """
        # Account for beam-search
        num_beams = get_shape_list(queries)[0] // get_shape_list(keys)[0]
        keys = tf.tile(keys, [num_beams, 1, 1])
        values = tf.tile(values, [num_beams, 1, 1])

        def _logits_fn(query):
            """ Computes time-step-wise attention scores. """
            # No reduction over features: scores stay 3D per query step
            query = tf.expand_dims(query, 1)
            return self.attn_weight * tf.nn.tanh(keys + query)

        # Obtain attention scores
        transposed_queries = tf.transpose(a=queries, perm=[1, 0, 2])  # time-major
        # attn_logits has shape=[time_steps_q, batch_size, time_steps_k, num_features]
        attn_logits = tf.map_fn(_logits_fn, transposed_queries)
        if attn_mask is not None:
            transposed_mask = \
                tf.transpose(a=tf.tile(attn_mask, [get_shape_list(queries)[0] // get_shape_list(attn_mask)[0], 1, 1, 1]),
                             perm=[2, 0, 3, 1])
            attn_logits += transposed_mask
        # Compute the attention weights
        # Softmax over key positions (axis=-2), separately for each feature
        attn_weights = tf.nn.softmax(attn_logits, axis=-2, name='attn_weights')
        # Optionally apply dropout
        if self.dropout_attn is not None:
            attn_weights = self.dropout_attn(attn_weights, training=self.training)
        # Obtain context vectors
        # Feature-wise weighted sum over key positions
        expanded_values = tf.expand_dims(values, axis=1)
        weighted_memories = \
            tf.reduce_sum(input_tensor=tf.multiply(tf.transpose(a=attn_weights, perm=[1, 0, 2, 3]), expanded_values), axis=2)
        return weighted_memories

    def _multiplicative_attn(self, queries, keys, values, attn_mask):
        """ Uses multiplicative attention to compute contextually enriched source-side representations. """
        # Account for beam-search
        num_beams = get_shape_list(queries)[0] // get_shape_list(keys)[0]
        keys = tf.tile(keys, [num_beams, 1, 1])
        values = tf.tile(values, [num_beams, 1, 1])

        def _logits_fn(query):
            """ Computes time-step-wise attention scores. """
            # Element-wise product (no sum): per-feature multiplicative scores
            query = tf.expand_dims(query, 1)
            return tf.multiply(keys, query)

        # Obtain attention scores
        transposed_queries = tf.transpose(a=queries, perm=[1, 0, 2])  # time-major
        # attn_logits has shape=[time_steps_q, batch_size, time_steps_k, num_features]
        attn_logits = tf.map_fn(_logits_fn, transposed_queries)
        if attn_mask is not None:
            transposed_mask = \
                tf.transpose(a=tf.tile(attn_mask, [get_shape_list(queries)[0] // get_shape_list(attn_mask)[0], 1, 1, 1]),
                             perm=[2, 0, 3, 1])
            attn_logits += transposed_mask
        # Compute the attention weights
        # Softmax over key positions (axis=-2), separately for each feature
        attn_weights = tf.nn.softmax(attn_logits, axis=-2, name='attn_weights')
        # Optionally apply dropout
        if self.dropout_attn is not None:
            attn_weights = self.dropout_attn(attn_weights, training=self.training)
        # Obtain context vectors
        expanded_values = tf.expand_dims(values, axis=1)
        weighted_memories = \
            tf.reduce_sum(input_tensor=tf.multiply(tf.transpose(a=attn_weights, perm=[1, 0, 2, 3]), expanded_values), axis=2)
        return weighted_memories

    def _attn(self, queries, keys, values, attn_mask):
        """ For each encoder layer, weighs and combines time-step-wise hidden representation into a single layer
        context state. -- DEPRECATED, SINCE IT'S SLOW AND PROBABLY NOT ENTIRELY CORRECT """
        # Account for beam-search
        num_beams = tf.shape(input=queries)[0] // tf.shape(input=keys)[0]
        keys = tf.tile(keys, [num_beams, 1, 1])
        values = tf.tile(values, [num_beams, 1, 1])

        def _logits_fn(query):
            """ Computes position-wise attention scores. """
            query = tf.expand_dims(query, 1)
            # return tf.squeeze(self.attn_weight * (tf.nn.tanh(keys + query + norm_bias)), axis=2)
            return self.attn_weight * tf.nn.tanh(keys + query)  # 4D output

        def _weighting_fn(step_weights):
            """ Computes position-wise context vectors. """
            # step_weights = tf.expand_dims(step_weights, 2)
            return tf.reduce_sum(input_tensor=tf.multiply(step_weights, values), axis=1)

        # Obtain attention scores
        transposed_queries = tf.transpose(a=queries, perm=[1, 0, 2])
        attn_logits = tf.map_fn(_logits_fn, transposed_queries)  # multiple queries per step are possible
        if attn_mask is not None:
            # attn_logits has shape=[batch, query_lengh, key_length, attn_features]
            transposed_mask = \
                tf.transpose(a=tf.tile(attn_mask, [tf.shape(input=queries)[0] // tf.shape(input=attn_mask)[0], 1, 1, 1]),
                             perm=[2, 0, 3, 1])
            attn_logits += transposed_mask
        # Compute the attention weights
        attn_weights = tf.nn.softmax(attn_logits, axis=-2, name='attn_weights')
        # Optionally apply dropout
        if self.dropout_attn is not None:
            attn_weights = self.dropout_attn(attn_weights, training=self.training)
        # Obtain context vectors
        # Second map_fn pass makes this slow; kept for reference only
        weighted_memories = tf.map_fn(_weighting_fn, attn_weights)
        weighted_memories = tf.transpose(a=weighted_memories, perm=[1, 0, 2])
        return weighted_memories
| {
"repo_name": "EdinburghNLP/nematus",
"path": "nematus/transformer_attention_modules.py",
"copies": "1",
"size": "24243",
"license": "bsd-3-clause",
"hash": -4614031530207206000,
"line_mean": 49.2966804979,
"line_max": 135,
"alpha_frac": 0.5527368725,
"autogenerated": false,
"ratio": 4.252411857568847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017506754108377112,
"num_lines": 482
} |
"""Adapted from Nematode: https://github.com/demelin/nematode """
import sys
import tensorflow as tf
import numpy
# ModuleNotFoundError is new in 3.6; older versions will throw SystemError
if sys.version_info < (3, 6):
ModuleNotFoundError = SystemError
try:
from . import model_inputs
from . import mrt_utils as mru
from .sampling_utils import SamplingUtils
from . import tf_utils
from .transformer_blocks import AttentionBlock, FFNBlock
from .transformer_layers import \
EmbeddingLayer, \
MaskedCrossEntropy, \
get_right_context_mask, \
get_positional_signal
except (ModuleNotFoundError, ImportError) as e:
import model_inputs
import mrt_utils as mru
from sampling_utils import SamplingUtils
import tf_utils
from transformer_blocks import AttentionBlock, FFNBlock
from transformer_layers import \
EmbeddingLayer, \
MaskedCrossEntropy, \
get_right_context_mask, \
get_positional_signal
INT_DTYPE = tf.int32
FLOAT_DTYPE = tf.float32
class Transformer(object):
    """ The main transformer model class.

    Builds the full training graph (encoder, decoder, loss) at construction
    time from the supplied config and exposes the resulting loss tensors as
    properties.
    """

    def __init__(self, config):
        # Set attributes
        self.config = config
        # Only factor 0 of the source vocabularies is used (see _convert_inputs)
        self.source_vocab_size = config.source_vocab_sizes[0]
        self.target_vocab_size = config.target_vocab_size
        self.name = 'transformer'
        # Placeholders
        self.inputs = model_inputs.ModelInputs(config)
        # Convert from time-major to batch-major, handle factors
        self.source_ids, \
        self.source_mask, \
        self.target_ids_in, \
        self.target_ids_out, \
        self.target_mask = self._convert_inputs(self.inputs)
        self.training = self.inputs.training
        self.scores = self.inputs.scores
        self.index = self.inputs.index
        # Build the common parts of the graph.
        # NOTE(review): this scope is also named '{:s}_loss', identical to the
        # training-specific scope below — confirm the duplication is intended.
        with tf.compat.v1.name_scope('{:s}_loss'.format(self.name)):
            # (Re-)generate the computational graph
            self.dec_vocab_size = self._build_graph()
        # Build the training-specific parts of the graph.
        with tf.compat.v1.name_scope('{:s}_loss'.format(self.name)):
            # Encode source sequences
            with tf.compat.v1.name_scope('{:s}_encode'.format(self.name)):
                enc_output, cross_attn_mask = self.enc.encode(
                    self.source_ids, self.source_mask)
            # Decode into target sequences
            with tf.compat.v1.name_scope('{:s}_decode'.format(self.name)):
                logits = self.dec.decode_at_train(self.target_ids_in,
                                                  enc_output,
                                                  cross_attn_mask)
            # Instantiate loss layer(s)
            loss_layer = MaskedCrossEntropy(self.dec_vocab_size,
                                            self.config.label_smoothing,
                                            INT_DTYPE,
                                            FLOAT_DTYPE,
                                            time_major=False,
                                            name='loss_layer')
            # Calculate loss
            masked_loss, sentence_loss, batch_loss = \
                loss_layer.forward(logits, self.target_ids_out, self.target_mask, self.training)
            if self.config.print_per_token_pro:
                # e**(-(-log(probability))) = probability
                self._print_pro = tf.math.exp(-masked_loss)
            # Undo the per-sentence length normalization to get total loss per sentence
            sent_lens = tf.reduce_sum(input_tensor=self.target_mask, axis=1, keepdims=False)
            self._loss_per_sentence = sentence_loss * sent_lens
            self._loss = tf.reduce_mean(input_tensor=self._loss_per_sentence, keepdims=False)
            # calculate expected risk
            # NOTE(review): _risk is only defined when loss_function == 'MRT';
            # accessing the `risk` property otherwise raises AttributeError.
            if self.config.loss_function == 'MRT':
                # self._loss_per_sentence is negative log probability of the output sentence, each element represents
                # the loss of each sample pair.
                self._risk = mru.mrt_cost(self._loss_per_sentence, self.scores, self.index, self.config)
            self.sampling_utils = SamplingUtils(config)

    def _build_graph(self):
        """ Defines the model graph.

        Instantiates embedding layers (optionally tied), the encoder, and the
        decoder; returns the decoder-side vocabulary size.
        """
        with tf.compat.v1.variable_scope('{:s}_model'.format(self.name)):
            # Instantiate embedding layer(s)
            if not self.config.tie_encoder_decoder_embeddings:
                enc_vocab_size = self.source_vocab_size
                dec_vocab_size = self.target_vocab_size
            else:
                assert self.source_vocab_size == self.target_vocab_size, \
                    'Input and output vocabularies should be identical when tying embedding tables.'
                enc_vocab_size = dec_vocab_size = self.source_vocab_size
            encoder_embedding_layer = EmbeddingLayer(enc_vocab_size,
                                                     self.config.embedding_size,
                                                     self.config.state_size,
                                                     FLOAT_DTYPE,
                                                     name='encoder_embedding_layer')
            if not self.config.tie_encoder_decoder_embeddings:
                decoder_embedding_layer = EmbeddingLayer(dec_vocab_size,
                                                         self.config.embedding_size,
                                                         self.config.state_size,
                                                         FLOAT_DTYPE,
                                                         name='decoder_embedding_layer')
            else:
                decoder_embedding_layer = encoder_embedding_layer
            # Optionally tie the output projection to the decoder embeddings
            if not self.config.tie_decoder_embeddings:
                softmax_projection_layer = EmbeddingLayer(dec_vocab_size,
                                                          self.config.embedding_size,
                                                          self.config.state_size,
                                                          FLOAT_DTYPE,
                                                          name='softmax_projection_layer')
            else:
                softmax_projection_layer = decoder_embedding_layer
            # Instantiate the component networks
            self.enc = TransformerEncoder(self.config,
                                          encoder_embedding_layer,
                                          self.training,
                                          'encoder')
            self.dec = TransformerDecoder(self.config,
                                          decoder_embedding_layer,
                                          softmax_projection_layer,
                                          self.training,
                                          'decoder')
        return dec_vocab_size

    @property
    def loss_per_sentence(self):
        """ Total (length-unnormalized) cross-entropy loss per sentence. """
        return self._loss_per_sentence

    @property
    def loss(self):
        """ Mean of the per-sentence losses over the batch. """
        return self._loss

    @property
    def risk(self):
        """ MRT expected risk; only set when config.loss_function == 'MRT'. """
        return self._risk

    @property
    def print_pro(self):
        """ Per-token probabilities; only set when config.print_per_token_pro. """
        return self._print_pro

    def _convert_inputs(self, inputs):
        """ Converts the raw model inputs to the batch-major tensors used by
        the transformer graph. """
        # Convert from time-major to batch-major. Note that we take factor 0
        # from x and ignore any other factors.
        source_ids = tf.transpose(a=inputs.x[0], perm=[1,0])
        source_mask = tf.transpose(a=inputs.x_mask, perm=[1,0])
        target_ids_out = tf.transpose(a=inputs.y, perm=[1,0])
        target_mask = tf.transpose(a=inputs.y_mask, perm=[1,0])
        # target_ids_in is a bit more complicated since we need to insert
        # the special <GO> symbol (with value 1) at the start of each sentence
        max_len, batch_size = tf.shape(input=inputs.y)[0], tf.shape(input=inputs.y)[1]
        go_symbols = tf.fill(value=1, dims=[1, batch_size])
        tmp = tf.concat([go_symbols, inputs.y], 0)
        # Drop the final time step so input and output lengths match
        tmp = tmp[:-1, :]
        target_ids_in = tf.transpose(a=tmp, perm=[1,0])
        return (source_ids, source_mask, target_ids_in, target_ids_out,
                target_mask)
class TransformerEncoder(object):
    """ The encoder module used within the transformer model.

    Builds a stack of self-attention + feed-forward blocks at construction
    time; `encode` runs source tokens through the stack.
    """

    def __init__(self,
                 config,
                 embedding_layer,
                 training,
                 name):
        # Set attributes
        self.config = config
        self.embedding_layer = embedding_layer
        self.training = training
        self.name = name
        # Track layers
        self.encoder_stack = dict()    # layer_id -> {'self_attn': ..., 'ffn': ...}
        self.is_final_layer = False
        # Create nodes
        self._build_graph()

    def _embed(self, index_sequence):
        """ Embeds source-side indices to obtain the corresponding dense tensor representations. """
        # Embed input tokens
        return self.embedding_layer.embed(index_sequence)

    def _build_graph(self):
        """ Defines the model graph.

        Instantiates embedding dropout and one (self-attention, FFN) block
        pair per encoder layer under per-layer variable scopes.
        """
        # Initialize layers
        with tf.compat.v1.variable_scope(self.name):
            if self.config.transformer_dropout_embeddings > 0:
                self.dropout_embedding = tf.keras.layers.Dropout(rate=self.config.transformer_dropout_embeddings)
            else:
                self.dropout_embedding = None
            for layer_id in range(1, self.config.transformer_enc_depth + 1):
                layer_name = 'layer_{:d}'.format(layer_id)
                # Check if constructed layer is final
                if layer_id == self.config.transformer_enc_depth:
                    self.is_final_layer = True
                # Specify ffn dimensions sequence
                ffn_dims = [self.config.transformer_ffn_hidden_size, self.config.state_size]
                with tf.compat.v1.variable_scope(layer_name):
                    # Build layer blocks (see layers.py)
                    self_attn_block = AttentionBlock(self.config,
                                                     FLOAT_DTYPE,
                                                     self_attention=True,
                                                     training=self.training)
                    ffn_block = FFNBlock(self.config,
                                         ffn_dims,
                                         FLOAT_DTYPE,
                                         is_final=self.is_final_layer,
                                         training=self.training)
                    # Maintain layer-wise dict entries for easier data-passing (may change later)
                    self.encoder_stack[layer_id] = dict()
                    self.encoder_stack[layer_id]['self_attn'] = self_attn_block
                    self.encoder_stack[layer_id]['ffn'] = ffn_block

    def encode(self, source_ids, source_mask):
        """ Encodes source-side input tokens into meaningful, contextually-enriched representations.

        Returns the final encoder states and the cross-attention mask that the
        decoder applies over source positions.
        """

        def _prepare_source():
            """ Pre-processes inputs to the encoder and generates the corresponding attention masks."""
            # Embed
            source_embeddings = self._embed(source_ids)
            # Obtain length and depth of the input tensors
            _, time_steps, depth = tf_utils.get_shape_list(source_embeddings)
            # Transform input mask into attention mask
            # Padding positions (mask == 0) receive a large negative additive bias
            inverse_mask = tf.cast(tf.equal(source_mask, 0.0), dtype=FLOAT_DTYPE)
            attn_mask = inverse_mask * -1e9
            # Expansion to shape [batch_size, 1, 1, time_steps] is needed for compatibility with attention logits
            attn_mask = tf.expand_dims(tf.expand_dims(attn_mask, 1), 1)
            # Differentiate between self-attention and cross-attention masks for further, optional modifications
            self_attn_mask = attn_mask
            cross_attn_mask = attn_mask
            # Add positional encodings
            positional_signal = get_positional_signal(time_steps, depth, FLOAT_DTYPE)
            source_embeddings += positional_signal
            # Apply dropout
            if self.dropout_embedding is not None:
                source_embeddings = self.dropout_embedding(source_embeddings, training=self.training)
            return source_embeddings, self_attn_mask, cross_attn_mask

        with tf.compat.v1.variable_scope(self.name):
            # Prepare inputs to the encoder, get attention masks
            enc_inputs, self_attn_mask, cross_attn_mask = _prepare_source()
            # Propagate inputs through the encoder stack
            enc_output = enc_inputs
            for layer_id in range(1, self.config.transformer_enc_depth + 1):
                enc_output, _ = self.encoder_stack[layer_id]['self_attn'].forward(enc_output, None, self_attn_mask)
                enc_output = self.encoder_stack[layer_id]['ffn'].forward(enc_output)
        return enc_output, cross_attn_mask
class TransformerDecoder(object):
    """ The decoder module used within the transformer model.

    Builds a stack of (self-attention, cross-attention, FFN) blocks;
    `decode_at_train` performs teacher-forced parallel decoding.
    """

    def __init__(self,
                 config,
                 embedding_layer,
                 softmax_projection_layer,
                 training,
                 name,
                 from_rnn=False):
        # Set attributes
        self.config = config
        self.embedding_layer = embedding_layer
        self.softmax_projection_layer = softmax_projection_layer
        self.training = training
        self.name = name
        self.from_rnn = from_rnn
        # If the decoder is used in a hybrid system, adjust parameters accordingly
        # (RNN encoders produce time-major outputs)
        self.time_dim = 0 if from_rnn else 1
        # Track layers
        self.decoder_stack = dict()    # layer_id -> {'self_attn', 'cross_attn', 'ffn'}
        self.is_final_layer = False
        # Create nodes
        self._build_graph()

    def _embed(self, index_sequence):
        """ Embeds target-side indices to obtain the corresponding dense tensor representations. """
        return self.embedding_layer.embed(index_sequence)

    def _build_graph(self):
        """ Defines the model graph.

        Instantiates embedding dropout and one (self-attention,
        cross-attention, FFN) block triple per decoder layer.
        """
        # Initialize layers
        with tf.compat.v1.variable_scope(self.name):
            if self.config.transformer_dropout_embeddings > 0:
                self.dropout_embedding = tf.keras.layers.Dropout(rate=self.config.transformer_dropout_embeddings)
            else:
                self.dropout_embedding = None
            for layer_id in range(1, self.config.transformer_dec_depth + 1):
                layer_name = 'layer_{:d}'.format(layer_id)
                # Check if constructed layer is final
                if layer_id == self.config.transformer_dec_depth:
                    self.is_final_layer = True
                # Specify ffn dimensions sequence
                ffn_dims = [self.config.transformer_ffn_hidden_size, self.config.state_size]
                with tf.compat.v1.variable_scope(layer_name):
                    # Build layer blocks (see layers.py)
                    self_attn_block = AttentionBlock(self.config,
                                                     FLOAT_DTYPE,
                                                     self_attention=True,
                                                     training=self.training)
                    cross_attn_block = AttentionBlock(self.config,
                                                      FLOAT_DTYPE,
                                                      self_attention=False,
                                                      training=self.training,
                                                      from_rnn=self.from_rnn)
                    ffn_block = FFNBlock(self.config,
                                         ffn_dims,
                                         FLOAT_DTYPE,
                                         is_final=self.is_final_layer,
                                         training=self.training)
                    # Maintain layer-wise dict entries for easier data-passing (may change later)
                    self.decoder_stack[layer_id] = dict()
                    self.decoder_stack[layer_id]['self_attn'] = self_attn_block
                    self.decoder_stack[layer_id]['cross_attn'] = cross_attn_block
                    self.decoder_stack[layer_id]['ffn'] = ffn_block

    def decode_at_train(self, target_ids, enc_output, cross_attn_mask):
        """ Returns the probability distribution over target-side tokens conditioned on the output of the encoder;
        performs decoding in parallel at training time. """

        def _decode_all(target_embeddings):
            """ Decodes the encoder-generated representations into target-side logits in parallel. """
            # Propagate inputs through the encoder stack
            dec_output = target_embeddings
            for layer_id in range(1, self.config.transformer_dec_depth + 1):
                # Masked self-attention prevents attending to future positions
                dec_output, _ = self.decoder_stack[layer_id]['self_attn'].forward(dec_output, None, self_attn_mask)
                dec_output, _ = \
                    self.decoder_stack[layer_id]['cross_attn'].forward(dec_output, enc_output, cross_attn_mask)
                dec_output = self.decoder_stack[layer_id]['ffn'].forward(dec_output)
            return dec_output

        def _prepare_targets():
            """ Pre-processes target token ids before they're passed on as input to the decoder
            for parallel decoding. """
            # Embed target_ids
            target_embeddings = self._embed(target_ids)
            target_embeddings += positional_signal
            if self.dropout_embedding is not None:
                target_embeddings = self.dropout_embedding(target_embeddings, training=self.training)
            return target_embeddings

        def _decoding_function():
            """ Generates logits for target-side tokens. """
            # Embed the model's predictions up to the current time-step; add positional information, mask
            target_embeddings = _prepare_targets()
            # Pass encoder context and decoder embeddings through the decoder
            dec_output = _decode_all(target_embeddings)
            # Project decoder stack outputs and apply the soft-max non-linearity
            full_logits = self.softmax_projection_layer.project(dec_output)
            return full_logits

        with tf.compat.v1.variable_scope(self.name):
            # Transpose encoder information in hybrid models
            if self.from_rnn:
                enc_output = tf.transpose(a=enc_output, perm=[1, 0, 2])
                cross_attn_mask = tf.transpose(a=cross_attn_mask, perm=[3, 1, 2, 0])
            # Causal mask over target positions
            self_attn_mask = get_right_context_mask(tf.shape(input=target_ids)[-1])
            positional_signal = get_positional_signal(tf.shape(input=target_ids)[-1],
                                                      self.config.embedding_size,
                                                      FLOAT_DTYPE)
            logits = _decoding_function()
        return logits
| {
"repo_name": "EdinburghNLP/nematus",
"path": "nematus/transformer.py",
"copies": "1",
"size": "18824",
"license": "bsd-3-clause",
"hash": 8521944491262957000,
"line_mean": 45.7096774194,
"line_max": 117,
"alpha_frac": 0.5450488738,
"autogenerated": false,
"ratio": 4.5711510441962115,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5616199917996212,
"avg_score": null,
"num_lines": null
} |
"""Adapted from Nematode: https://github.com/demelin/nematode """
import sys
import tensorflow as tf
# ModuleNotFoundError is new in 3.6; older versions will throw SystemError
if sys.version_info < (3, 6):
ModuleNotFoundError = SystemError
try:
from . import tf_utils
from .transformer import INT_DTYPE, FLOAT_DTYPE
from .transformer_layers import get_positional_signal
except (ModuleNotFoundError, ImportError) as e:
import tf_utils
from transformer import INT_DTYPE, FLOAT_DTYPE
from transformer_layers import get_positional_signal
class EncoderOutput:
    """ Lightweight container bundling the encoder's output states with the
    cross-attention mask derived from the source padding. """

    def __init__(self, enc_output, cross_attn_mask):
        # Both values are stored verbatim for later consumption by the
        # sampler's decoding functions.
        self.cross_attn_mask = cross_attn_mask
        self.enc_output = enc_output
class ModelAdapter:
    """Implements model-specific functionality needed by the *Sampler classes.

    The BeamSearchSampler and RandomSampler classes need to work with RNN and
    Transformer models, which have different interfaces (and obviously
    different architectures). This class hides the Transformer-specific details
    behind a common interace (see rnn_inference.ModelAdapter for the RNN
    counterpart).
    """

    def __init__(self, model, config, scope):
        # model: a built Transformer instance; scope: TF name scope for all ops
        self._model = model
        self._config = config
        self._scope = scope

    @property
    def model(self):
        """ The wrapped Transformer model. """
        return self._model

    @property
    def config(self):
        """ The model configuration object. """
        return self._config

    @property
    def target_vocab_size(self):
        """ Size of the decoder-side vocabulary. """
        return self._model.dec.embedding_layer.get_vocab_size()

    @property
    def batch_size(self):
        # inputs.x is time-major, so the last axis is the batch dimension
        return tf.shape(input=self._model.inputs.x)[-1]

    def encode(self):
        """ Runs the model's encoder and wraps its outputs in an EncoderOutput. """
        with tf.compat.v1.name_scope(self._scope):
            enc_output, cross_attn_mask = self._model.enc.encode(
                self._model.source_ids, self._model.source_mask)
        return EncoderOutput(enc_output, cross_attn_mask)

    def generate_decoding_function(self, encoder_output):
        """ Returns a single-step decoding closure over the given encoder output. """
        with tf.compat.v1.name_scope(self._scope):
            # Generate a positional signal for the longest possible output.
            positional_signal = get_positional_signal(
                self._config.translation_maxlen,
                self._config.embedding_size,
                FLOAT_DTYPE)
            decoder = self._model.dec
            if self.config.transformer_dropout_embeddings > 0:
                dropout = tf.keras.layers.Dropout(rate=self.config.transformer_dropout_embeddings)
            else:
                dropout = None

        def _decoding_function(step_target_ids, current_time_step, memories):
            """Single-step decoding function.

            Args:
                step_target_ids: Tensor with shape (batch_size)
                current_time_step: scalar Tensor.
                memories: dictionary (see top-level class description)

            Returns:
                Tuple of (step logits, updated memories).
            """
            with tf.compat.v1.name_scope(self._scope):
                # TODO Is this necessary?
                vocab_ids = tf.reshape(step_target_ids, [-1, 1])
                # Look up embeddings for target IDs.
                target_embeddings = decoder._embed(vocab_ids)
                # Add positional signal.
                # Slice out the signal for the current (1-based) time step only
                signal_slice = positional_signal[
                    :, current_time_step-1:current_time_step, :]
                target_embeddings += signal_slice
                # Optionally, apply dropout to embeddings.
                if dropout is not None:
                    target_embeddings = dropout(
                        target_embeddings,
                        training=decoder.training)
                # Propagate values through the decoder stack.
                # NOTE: No self-attention mask is applied at decoding, as
                # future information is unavailable.
                layer_output = target_embeddings
                for layer_id in range(1, self.config.transformer_dec_depth+1):
                    layer = decoder.decoder_stack[layer_id]
                    mem_key = 'layer_{:d}'.format(layer_id)
                    # Self-attention consumes and refreshes the cached keys/values
                    layer_output, memories[mem_key] = \
                        layer['self_attn'].forward(
                            layer_output, None, None, memories[mem_key])
                    layer_output, _ = layer['cross_attn'].forward(
                        layer_output, encoder_output.enc_output,
                        encoder_output.cross_attn_mask)
                    layer_output = layer['ffn'].forward(layer_output)
                # Return prediction at the final time-step to be consistent
                # with the inference pipeline.
                dec_output = layer_output[:, -1, :]
                # Project decoder stack outputs and apply the soft-max
                # non-linearity.
                step_logits = \
                    decoder.softmax_projection_layer.project(dec_output)
                return step_logits, memories

        return _decoding_function

    def generate_initial_memories(self, batch_size, beam_size):
        """ Builds empty (zero-length) per-layer key/value caches, tiled across beams. """
        with tf.compat.v1.name_scope(self._scope):
            state_size = self.config.state_size
            memories = {}
            for layer_id in range(1, self.config.transformer_dec_depth + 1):
                memories['layer_{:d}'.format(layer_id)] = { \
                    'keys': tf.tile(tf.zeros([batch_size, 0, state_size]),
                                    [beam_size, 1, 1]),
                    'values': tf.tile(tf.zeros([batch_size, 0, state_size]),
                                      [beam_size, 1, 1])
                }
            return memories

    def get_memory_invariants(self, memories):
        """Generate shape invariants for memories.

        Args:
            memories: dictionary (see top-level class description)

        Returns:
            Dictionary of shape invariants with same structure as memories.
        """
        with tf.compat.v1.name_scope(self._scope):
            invariants = dict()
            for layer_id in memories.keys():
                layer_mems = memories[layer_id]
                # Every dimension is left unconstrained (keys/values grow over time)
                invariants[layer_id] = {
                    key: tf.TensorShape(
                        [None]*len(tf_utils.get_shape_list(layer_mems[key])))
                    for key in layer_mems.keys()
                }
            return invariants

    def gather_memories(self, memories, gather_coordinates):
        """ Gathers layer-wise memory tensors for selected beam entries.

        Args:
            memories: dictionary (see top-level class description)
            gather_coordinates: Tensor with shape [batch_size_x, beam_size, 2]

        Returns:
            Dictionary containing gathered memories.
        """
        with tf.compat.v1.name_scope(self._scope):
            shapes = { gather_coordinates: ('batch_size_x', 'beam_size', 2) }
            tf_utils.assert_shapes(shapes)
            coords_shape = tf.shape(input=gather_coordinates)
            batch_size_x, beam_size = coords_shape[0], coords_shape[1]

            def gather_attn(attn):
                # TODO Specify second and third?
                shapes = { attn: ('batch_size', None, None) }
                tf_utils.assert_shapes(shapes)
                attn_dims = tf_utils.get_shape_list(attn)
                # Unflatten the beam-tiled batch axis, select the surviving
                # beam entries via gather_nd, then restore the original layout
                new_shape = [beam_size, batch_size_x] + attn_dims[1:]
                tmp = tf.reshape(attn, new_shape)
                flat_tensor = tf.transpose(a=tmp, perm=[1, 0, 2, 3])
                tmp = tf.gather_nd(flat_tensor, gather_coordinates)
                tmp = tf.transpose(a=tmp, perm=[1, 0, 2, 3])
                gathered_values = tf.reshape(tmp, attn_dims)
                return gathered_values

            gathered_memories = dict()
            for layer_key in memories.keys():
                layer_dict = memories[layer_key]
                gathered_memories[layer_key] = dict()
                for attn_key in layer_dict.keys():
                    attn_tensor = layer_dict[attn_key]
                    gathered_memories[layer_key][attn_key] = \
                        gather_attn(attn_tensor)
            return gathered_memories
| {
"repo_name": "EdinburghNLP/nematus",
"path": "nematus/transformer_inference.py",
"copies": "1",
"size": "8088",
"license": "bsd-3-clause",
"hash": -5883049761063383000,
"line_mean": 39.0396039604,
"line_max": 94,
"alpha_frac": 0.5675074184,
"autogenerated": false,
"ratio": 4.265822784810126,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5333330203210126,
"avg_score": null,
"num_lines": null
} |
"""Adapted from Nematode: https://github.com/demelin/nematode """
import tensorflow as tf
from tensorflow.python.ops.init_ops import glorot_uniform_initializer
from transformer_layers import \
get_shape_list, \
FeedForwardLayer, \
matmul_nd
class MultiHeadAttentionLayer(object):
    """ Defines the multi-head, multiplicative attention mechanism;
    based on the tensor2tensor library implementation. """
    def __init__(self,
                 reference_dims,
                 hypothesis_dims,
                 total_key_dims,
                 total_value_dims,
                 output_dims,
                 num_heads,
                 float_dtype,
                 dropout_attn,
                 training,
                 name=None):
        """Creates the query/key/value/output projections.

        Args:
            reference_dims: depth of the attended-to (memory) representations.
            hypothesis_dims: depth of the query representations.
            total_key_dims: key depth summed across all attention heads.
            total_value_dims: value depth summed across all attention heads.
            output_dims: depth of the projected attention output.
            num_heads: number of parallel attention heads.
            float_dtype: floating-point dtype for layer parameters.
            dropout_attn: dropout rate applied to attention weights.
            training: enables dropout when true.
            name: variable-scope name for the layer's parameters.
        Raises:
            ValueError: if key or value depth is not divisible by num_heads.
        """
        # Set attributes
        self.reference_dims = reference_dims
        self.hypothesis_dims = hypothesis_dims
        self.total_key_dims = total_key_dims
        self.total_value_dims = total_value_dims
        self.output_dims = output_dims
        self.num_heads = num_heads
        self.float_dtype = float_dtype
        self.dropout_attn = dropout_attn
        self.training = training
        self.name = name
        # Check if the specified hyper-parameters are consistent:
        # each head gets an equal slice of the key/value dimensions.
        if total_key_dims % num_heads != 0:
            raise ValueError('Specified total attention key dimensions {:d} must be divisible by the number of '
                             'attention heads {:d}'.format(total_key_dims, num_heads))
        if total_value_dims % num_heads != 0:
            raise ValueError('Specified total attention value dimensions {:d} must be divisible by the number of '
                             'attention heads {:d}'.format(total_value_dims, num_heads))
        # Instantiate parameters; all four projections are purely linear
        # (no bias, no activation, no layer norm), per the transformer paper.
        with tf.variable_scope(self.name):
            self.queries_projection = FeedForwardLayer(self.hypothesis_dims,
                                                       self.total_key_dims,
                                                       float_dtype,
                                                       dropout_rate=0.,
                                                       activation=None,
                                                       use_bias=False,
                                                       use_layer_norm=False,
                                                       training=self.training,
                                                       name='queries_projection')
            self.keys_projection = FeedForwardLayer(self.reference_dims,
                                                    self.total_key_dims,
                                                    float_dtype,
                                                    dropout_rate=0.,
                                                    activation=None,
                                                    use_bias=False,
                                                    use_layer_norm=False,
                                                    training=self.training,
                                                    name='keys_projection')
            self.values_projection = FeedForwardLayer(self.reference_dims,
                                                      self.total_value_dims,
                                                      float_dtype,
                                                      dropout_rate=0.,
                                                      activation=None,
                                                      use_bias=False,
                                                      use_layer_norm=False,
                                                      training=self.training,
                                                      name='values_projection')
            self.context_projection = FeedForwardLayer(self.total_value_dims,
                                                       self.output_dims,
                                                       float_dtype,
                                                       dropout_rate=0.,
                                                       activation=None,
                                                       use_bias=False,
                                                       use_layer_norm=False,
                                                       training=self.training,
                                                       name='context_projection')
    def _compute_attn_inputs(self, query_context, memory_context):
        """ Computes query, key, and value tensors used by the attention function for the calculation of the
        time-dependent context representation. """
        # Queries come from the hypothesis side, keys/values from the memory.
        queries = self.queries_projection.forward(query_context)
        keys = self.keys_projection.forward(memory_context)
        values = self.values_projection.forward(memory_context)
        return queries, keys, values
    def _split_among_heads(self, inputs):
        """ Splits the attention inputs among multiple heads.

        Reshapes [..., depth] into [..., num_heads, depth // num_heads];
        leading dimensions are left untouched.
        """
        # Retrieve the depth of the input tensor to be split (input is 3d)
        inputs_dims = get_shape_list(inputs)
        inputs_depth = inputs_dims[-1]
        # Assert the depth is compatible with the specified number of attention heads
        # (only possible when both values are statically known ints).
        if isinstance(inputs_depth, int) and isinstance(self.num_heads, int):
            assert inputs_depth % self.num_heads == 0, \
                ('Attention inputs depth {:d} is not evenly divisible by the specified number of attention heads {:d}'
                 .format(inputs_depth, self.num_heads))
        split_inputs = tf.reshape(inputs, inputs_dims[:-1] + [self.num_heads, inputs_depth // self.num_heads])
        return split_inputs
    def _merge_from_heads(self, split_inputs):
        """ Inverts the _split_among_heads operation.

        Input has shape [batch, num_heads, time, head_depth] (head-major);
        output is [batch, time, num_heads * head_depth].
        """
        # Transpose split_inputs to perform the merge along the last two dimensions of the split input
        split_inputs = tf.transpose(split_inputs, [0, 2, 1, 3])
        # Retrieve the depth of the tensor to be merged
        split_inputs_dims = get_shape_list(split_inputs)
        split_inputs_depth = split_inputs_dims[-1]
        # Merge the depth and num_heads dimensions of split_inputs
        merged_inputs = tf.reshape(split_inputs, split_inputs_dims[:-2] + [self.num_heads * split_inputs_depth])
        return merged_inputs
    def _dot_product_attn(self, queries, keys, values, attn_mask, scaling_on):
        """ Defines the dot-product attention function; see Vasvani et al.(2017), Eq.(1). """
        # query/ key/ value have shape = [batch_size, time_steps, num_heads, num_features]
        # Tile keys and values tensors to match the number of decoding beams; ignored if already done by fusion module
        # (at beam-search time queries carry a batch of size beam_size * batch).
        num_beams = get_shape_list(queries)[0] // get_shape_list(keys)[0]
        keys = tf.cond(tf.greater(num_beams, 1), lambda: tf.tile(keys, [num_beams, 1, 1, 1]), lambda: keys)
        values = tf.cond(tf.greater(num_beams, 1), lambda: tf.tile(values, [num_beams, 1, 1, 1]), lambda: values)
        # Transpose split inputs to head-major layout [batch, heads, time, feat];
        # keys are transposed directly to [batch, heads, feat, time] inside the matmul below.
        queries = tf.transpose(queries, [0, 2, 1, 3])
        values = tf.transpose(values, [0, 2, 1, 3])
        attn_logits = tf.matmul(queries, tf.transpose(keys, [0, 2, 3, 1]))
        # Scale attention_logits by key dimensions to prevent softmax saturation, if specified
        if scaling_on:
            key_dims = get_shape_list(keys)[-1]
            normalizer = tf.sqrt(tf.cast(key_dims, self.float_dtype))
            attn_logits /= normalizer
        # Optionally mask out positions which should not be attended to
        # attention mask should have shape=[batch, num_heads, query_length, key_length]
        # attn_logits has shape=[batch, num_heads, query_length, key_length]
        # The mask is additive (large negative values at disallowed positions).
        if attn_mask is not None:
            attn_mask = tf.cond(tf.greater(num_beams, 1),
                                lambda: tf.tile(attn_mask, [num_beams, 1, 1, 1]),
                                lambda: attn_mask)
            attn_logits += attn_mask
        # Calculate attention weights
        attn_weights = tf.nn.softmax(attn_logits)
        # Optionally apply dropout:
        if self.dropout_attn > 0.0:
            attn_weights = tf.layers.dropout(attn_weights, rate=self.dropout_attn, training=self.training)
        # Weigh attention values
        weighted_memories = tf.matmul(attn_weights, values)
        return weighted_memories
    def forward(self, query_context, memory_context, attn_mask, layer_memories):
        """ Propagates the input information through the attention layer.

        Args:
            query_context: query-side inputs, [batch, time_q, depth].
            memory_context: attended-to inputs, or None for self-attention.
            attn_mask: optional additive mask broadcastable to the logits.
            layer_memories: optional {'keys', 'values'} cache used (and
                updated in place) during incremental decoding.
        Returns:
            Tuple of (projected context vectors, updated layer_memories).
        """
        # The context for the query and the referenced memory is identical in case of self-attention
        if memory_context is None:
            memory_context = query_context
        # Get attention inputs
        queries, keys, values = self._compute_attn_inputs(query_context, memory_context)
        # Recall and update memories (analogous to the RNN state) - decoder only;
        # cached keys/values are extended along the time axis.
        if layer_memories is not None:
            keys = tf.concat([layer_memories['keys'], keys], axis=1)
            values = tf.concat([layer_memories['values'], values], axis=1)
            layer_memories['keys'] = keys
            layer_memories['values'] = values
        # Split attention inputs among attention heads
        split_queries = self._split_among_heads(queries)
        split_keys = self._split_among_heads(keys)
        split_values = self._split_among_heads(values)
        # Apply attention function
        split_weighted_memories = self._dot_product_attn(split_queries, split_keys, split_values, attn_mask,
                                                         scaling_on=True)
        # Merge head output
        weighted_memories = self._merge_from_heads(split_weighted_memories)
        # Feed through a dense layer
        projected_memories = self.context_projection.forward(weighted_memories)
        return projected_memories, layer_memories
class SingleHeadAttentionLayer(object):
    """ Single-head attention module.

    Supports 'additive' (Bahdanau-style) and 'multiplicative' (Luong-style)
    attention; values are the raw memory context (no value projection).
    """
    def __init__(self,
                 reference_dims,
                 hypothesis_dims,
                 hidden_dims,
                 float_dtype,
                 dropout_attn,
                 training,
                 name,
                 attn_type='multiplicative'):
        """Creates the attention parameters.

        Args:
            reference_dims: depth of the attended-to (memory) representations.
            hypothesis_dims: depth of the query representations.
            hidden_dims: depth of the attention hidden space.
            float_dtype: floating-point dtype for layer parameters.
            dropout_attn: dropout rate applied to attention weights.
            training: enables dropout when true.
            name: variable-scope name for the layer's parameters.
            attn_type: 'additive' or 'multiplicative'.
        """
        # Declare attributes
        self.reference_dims = reference_dims
        self.hypothesis_dims = hypothesis_dims
        self.hidden_dims = hidden_dims
        self.float_dtype = float_dtype
        self.dropout_attn = dropout_attn
        self.attn_type = attn_type
        self.training = training
        self.name = name
        assert attn_type in ['additive', 'multiplicative'], 'Attention type {:s} is not supported.'.format(attn_type)
        # Instantiate parameters; the query projection and the additive
        # scoring vector are only needed for additive attention.
        with tf.variable_scope(self.name):
            self.queries_projection = None
            self.attn_weight = None
            if attn_type == 'additive':
                self.queries_projection = FeedForwardLayer(self.hypothesis_dims,
                                                           self.hidden_dims,
                                                           float_dtype,
                                                           dropout_rate=0.,
                                                           activation=None,
                                                           use_bias=False,
                                                           use_layer_norm=False,
                                                           training=self.training,
                                                           name='queries_projection')
                self.attn_weight = tf.get_variable(name='attention_weight',
                                                   shape=self.hidden_dims,
                                                   dtype=float_dtype,
                                                   initializer=glorot_uniform_initializer(),
                                                   trainable=True)
            self.keys_projection = FeedForwardLayer(self.reference_dims,
                                                    self.hidden_dims,
                                                    float_dtype,
                                                    dropout_rate=0.,
                                                    activation=None,
                                                    use_bias=False,
                                                    use_layer_norm=False,
                                                    training=self.training,
                                                    name='keys_projection')
    def _compute_attn_inputs(self, query_context, memory_context):
        """ Computes query, key, and value tensors used by the attention function for the calculation of the
        time-dependent context representation. """
        # Multiplicative attention uses raw queries; additive projects them
        # into the shared hidden space first. Values are the unprojected memory.
        queries = query_context
        if self.attn_type == 'additive':
            queries = self.queries_projection.forward(query_context)
        keys = self.keys_projection.forward(memory_context)
        values = memory_context
        return queries, keys, values
    def _additive_attn(self, queries, keys, values, attn_mask):
        """ Uses additive attention to compute contextually enriched source-side representations. """
        # Account for beam-search: queries may carry beam_size * batch entries.
        num_beams = get_shape_list(queries)[0] // get_shape_list(keys)[0]
        keys = tf.tile(keys, [num_beams, 1, 1])
        values = tf.tile(values, [num_beams, 1, 1])
        def _logits_fn(query):
            """ Computes time-step-wise attention scores. """
            # score = sum_d(w_d * tanh(key_d + query_d)); see Bahdanau et al.
            query = tf.expand_dims(query, 1)
            return tf.reduce_sum(self.attn_weight * tf.nn.tanh(keys + query), axis=-1)
        # Obtain attention scores
        transposed_queries = tf.transpose(queries, [1, 0, 2])  # time-major
        attn_logits = tf.map_fn(_logits_fn, transposed_queries)
        attn_logits = tf.transpose(attn_logits, [1, 0, 2])
        if attn_mask is not None:
            # Transpose and tile the mask (additive mask with -inf-like values)
            attn_logits += tf.tile(tf.squeeze(attn_mask, 1),
                                   [get_shape_list(queries)[0] // get_shape_list(attn_mask)[0], 1, 1])
        # Compute the attention weights
        attn_weights = tf.nn.softmax(attn_logits, axis=-1, name='attn_weights')
        # Optionally apply dropout
        if self.dropout_attn > 0.0:
            attn_weights = tf.layers.dropout(attn_weights, rate=self.dropout_attn, training=self.training)
        # Obtain context vectors
        weighted_memories = tf.matmul(attn_weights, values)
        return weighted_memories
    def _multiplicative_attn(self, queries, keys, values, attn_mask):
        """ Uses multiplicative attention to compute contextually enriched source-side representations. """
        # Account for beam-search: queries may carry beam_size * batch entries.
        num_beams = get_shape_list(queries)[0] // get_shape_list(keys)[0]
        keys = tf.tile(keys, [num_beams, 1, 1])
        values = tf.tile(values, [num_beams, 1, 1])
        # Use multiplicative attention: logits are query-key dot products.
        transposed_keys = tf.transpose(keys, [0, 2, 1])
        attn_logits = tf.matmul(queries, transposed_keys)
        if attn_mask is not None:
            # Transpose and tile the mask
            attn_logits += tf.tile(tf.squeeze(attn_mask, 1),
                                   [get_shape_list(queries)[0] // get_shape_list(attn_mask)[0], 1, 1])
        # Compute the attention weights
        attn_weights = tf.nn.softmax(attn_logits, axis=-1, name='attn_weights')
        # Optionally apply dropout
        if self.dropout_attn > 0.0:
            attn_weights = tf.layers.dropout(attn_weights, rate=self.dropout_attn, training=self.training)
        # Obtain context vectors
        weighted_memories = tf.matmul(attn_weights, values)
        return weighted_memories
    def forward(self, query_context, memory_context, attn_mask, layer_memories):
        """ Propagates the input information through the attention layer.

        Args:
            query_context: query-side inputs, [batch, time_q, depth].
            memory_context: attended-to inputs, or None for self-attention.
            attn_mask: optional additive mask.
            layer_memories: optional {'keys', 'values'} cache (updated here).
        Returns:
            Tuple of (weighted context vectors, updated layer_memories).
        """
        # The context for the query and the referenced memory is identical in case of self-attention
        if memory_context is None:
            memory_context = query_context
        # Get attention inputs
        queries, keys, values = self._compute_attn_inputs(query_context, memory_context)
        # Recall and update memories (analogous to the RNN state) - decoder only
        if layer_memories is not None:
            keys = tf.concat([layer_memories['keys'], keys], axis=1)
            values = tf.concat([layer_memories['values'], values], axis=1)
            layer_memories['keys'] = keys
            layer_memories['values'] = values
        # Obtain weighted layer hidden representations
        if self.attn_type == 'additive':
            weighted_memories = self._additive_attn(queries, keys, values, attn_mask)
        else:
            weighted_memories = self._multiplicative_attn(queries, keys, values, attn_mask)
        return weighted_memories, layer_memories
class FineGrainedAttentionLayer(SingleHeadAttentionLayer):
    """ Defines the fine-grained attention layer; based on
    "Fine-grained attention mechanism for neural machine translation.", Choi et al, 2018.

    Unlike the parent class, attention weights are computed per feature
    dimension (4-D logits), so each feature of a value vector can receive a
    different weight; the softmax runs over the key-time axis.
    """
    def __init__(self,
                 reference_dims,
                 hypothesis_dims,
                 hidden_dims,
                 float_dtype,
                 dropout_attn,
                 training,
                 name,
                 attn_type='multiplicative'):
        # Parameters are identical to the parent's; only the attention
        # functions are overridden.
        super(FineGrainedAttentionLayer, self).__init__(reference_dims, hypothesis_dims, hidden_dims, float_dtype,
                                                        dropout_attn, training, name, attn_type)
    def _additive_attn(self, queries, keys, values, attn_mask):
        """ Uses additive attention to compute contextually enriched source-side representations. """
        # Account for beam-search: queries may carry beam_size * batch entries.
        num_beams = get_shape_list(queries)[0] // get_shape_list(keys)[0]
        keys = tf.tile(keys, [num_beams, 1, 1])
        values = tf.tile(values, [num_beams, 1, 1])
        def _logits_fn(query):
            """ Computes time-step-wise attention scores. """
            # No reduce_sum here (cf. parent class): scores stay feature-wise.
            query = tf.expand_dims(query, 1)
            return self.attn_weight * tf.nn.tanh(keys + query)
        # Obtain attention scores
        transposed_queries = tf.transpose(queries, [1, 0, 2])  # time-major
        # attn_logits has shape=[time_steps_q, batch_size, time_steps_k, num_features]
        attn_logits = tf.map_fn(_logits_fn, transposed_queries)
        if attn_mask is not None:
            transposed_mask = \
                tf.transpose(tf.tile(attn_mask, [get_shape_list(queries)[0] // get_shape_list(attn_mask)[0], 1, 1, 1]),
                             [2, 0, 3, 1])
            attn_logits += transposed_mask
        # Compute the attention weights; softmax normalizes over key time
        # steps (axis -2), independently per feature dimension.
        attn_weights = tf.nn.softmax(attn_logits, axis=-2, name='attn_weights')
        # Optionally apply dropout
        if self.dropout_attn > 0.0:
            attn_weights = tf.layers.dropout(attn_weights, rate=self.dropout_attn, training=self.training)
        # Obtain context vectors: element-wise weighting of values, summed
        # over key time steps.
        expanded_values = tf.expand_dims(values, axis=1)
        weighted_memories = \
            tf.reduce_sum(tf.multiply(tf.transpose(attn_weights, [1, 0, 2, 3]), expanded_values), axis=2)
        return weighted_memories
    def _multiplicative_attn(self, queries, keys, values, attn_mask):
        """ Uses multiplicative attention to compute contextually enriched source-side representations. """
        # Account for beam-search: queries may carry beam_size * batch entries.
        num_beams = get_shape_list(queries)[0] // get_shape_list(keys)[0]
        keys = tf.tile(keys, [num_beams, 1, 1])
        values = tf.tile(values, [num_beams, 1, 1])
        def _logits_fn(query):
            """ Computes time-step-wise attention scores. """
            # Element-wise product instead of a dot product: keeps a separate
            # score per feature dimension.
            query = tf.expand_dims(query, 1)
            return tf.multiply(keys, query)
        # Obtain attention scores
        transposed_queries = tf.transpose(queries, [1, 0, 2])  # time-major
        # attn_logits has shape=[time_steps_q, batch_size, time_steps_k, num_features]
        attn_logits = tf.map_fn(_logits_fn, transposed_queries)
        if attn_mask is not None:
            transposed_mask = \
                tf.transpose(tf.tile(attn_mask, [get_shape_list(queries)[0] // get_shape_list(attn_mask)[0], 1, 1, 1]),
                             [2, 0, 3, 1])
            attn_logits += transposed_mask
        # Compute the attention weights; softmax over key time steps (axis -2)
        attn_weights = tf.nn.softmax(attn_logits, axis=-2, name='attn_weights')
        # Optionally apply dropout
        if self.dropout_attn > 0.0:
            attn_weights = tf.layers.dropout(attn_weights, rate=self.dropout_attn, training=self.training)
        # Obtain context vectors
        expanded_values = tf.expand_dims(values, axis=1)
        weighted_memories = \
            tf.reduce_sum(tf.multiply(tf.transpose(attn_weights, [1, 0, 2, 3]), expanded_values), axis=2)
        return weighted_memories
    def _attn(self, queries, keys, values, attn_mask):
        """ For each encoder layer, weighs and combines time-step-wise hidden representation into a single layer
        context state. -- DEPRECATED, SINCE IT'S SLOW AND PROBABLY NOT ENTIRELY CORRECT """
        # Account for beam-search
        num_beams = tf.shape(queries)[0] // tf.shape(keys)[0]
        keys = tf.tile(keys, [num_beams, 1, 1])
        values = tf.tile(values, [num_beams, 1, 1])
        def _logits_fn(query):
            """ Computes position-wise attention scores. """
            query = tf.expand_dims(query, 1)
            # return tf.squeeze(self.attn_weight * (tf.nn.tanh(keys + query + norm_bias)), axis=2)
            return self.attn_weight * tf.nn.tanh(keys + query)  # 4D output
        def _weighting_fn(step_weights):
            """ Computes position-wise context vectors. """
            # step_weights = tf.expand_dims(step_weights, 2)
            return tf.reduce_sum(tf.multiply(step_weights, values), axis=1)
        # Obtain attention scores
        transposed_queries = tf.transpose(queries, [1, 0, 2])
        attn_logits = tf.map_fn(_logits_fn, transposed_queries)  # multiple queries per step are possible
        if attn_mask is not None:
            # attn_logits has shape=[batch, query_lengh, key_length, attn_features]
            transposed_mask = \
                tf.transpose(tf.tile(attn_mask, [tf.shape(queries)[0] // tf.shape(attn_mask)[0], 1, 1, 1]),
                             [2, 0, 3, 1])
            attn_logits += transposed_mask
        # Compute the attention weights
        attn_weights = tf.nn.softmax(attn_logits, axis=-2, name='attn_weights')
        # Optionally apply dropout
        if self.dropout_attn > 0.0:
            attn_weights = tf.layers.dropout(attn_weights, rate=self.dropout_attn, training=self.training)
        # Obtain context vectors
        weighted_memories = tf.map_fn(_weighting_fn, attn_weights)
        weighted_memories = tf.transpose(weighted_memories, [1, 0, 2])
        return weighted_memories
| {
"repo_name": "rsennrich/nematus",
"path": "nematus/transformer_attention_modules.py",
"copies": "1",
"size": "23162",
"license": "bsd-3-clause",
"hash": 462439565097547800,
"line_mean": 49.4618736383,
"line_max": 119,
"alpha_frac": 0.5470598394,
"autogenerated": false,
"ratio": 4.2868776605589485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5333937499958948,
"avg_score": null,
"num_lines": null
} |
"""Adapted from Nematode: https://github.com/demelin/nematode """
import tensorflow as tf
import numpy
import model_inputs
from transformer_layers import \
EmbeddingLayer, \
MaskedCrossEntropy, \
get_shape_list, \
get_right_context_mask, \
get_positional_signal
from transformer_blocks import AttentionBlock, FFNBlock
from transformer_inference import greedy_search, beam_search
class Transformer(object):
    """ The main transformer model class.

    Builds placeholders, the encoder/decoder sub-networks, and the training
    loss graph at construction time.
    """
    def __init__(self, config):
        """Constructs the full training graph for the given configuration."""
        # Set attributes
        self.config = config
        # Only the first source factor's vocabulary is used here.
        self.source_vocab_size = config.source_vocab_sizes[0]
        self.target_vocab_size = config.target_vocab_size
        self.name = 'transformer'
        self.int_dtype = tf.int32
        self.float_dtype = tf.float32
        # Placeholders
        self.inputs = model_inputs.ModelInputs(config)
        # Convert from time-major to batch-major, handle factors
        self.source_ids, \
        self.source_mask, \
        self.target_ids_in, \
        self.target_ids_out, \
        self.target_mask = self._convert_inputs(self.inputs)
        self.training = self.inputs.training
        # Build the common parts of the graph.
        # NOTE(review): this scope is named '_loss' even though it wraps the
        # shared graph construction; presumably intentional for variable-name
        # compatibility -- confirm before renaming.
        with tf.name_scope('{:s}_loss'.format(self.name)):
            # (Re-)generate the computational graph
            self.dec_vocab_size = self._build_graph()
        # Build the training-specific parts of the graph.
        with tf.name_scope('{:s}_loss'.format(self.name)):
            # Encode source sequences
            with tf.name_scope('{:s}_encode'.format(self.name)):
                enc_output, cross_attn_mask = self.enc.encode(
                    self.source_ids, self.source_mask)
            # Decode into target sequences
            with tf.name_scope('{:s}_decode'.format(self.name)):
                logits = self.dec.decode_at_train(self.target_ids_in,
                                                  enc_output,
                                                  cross_attn_mask)
            # Instantiate loss layer(s)
            loss_layer = MaskedCrossEntropy(self.dec_vocab_size,
                                            self.config.label_smoothing,
                                            self.int_dtype,
                                            self.float_dtype,
                                            time_major=False,
                                            name='loss_layer')
            # Calculate loss
            masked_loss, sentence_loss, batch_loss = \
                loss_layer.forward(logits, self.target_ids_out, self.target_mask, self.training)
            # Convert the per-token average back into a per-sentence total
            # by multiplying with the (unpadded) sentence lengths.
            sent_lens = tf.reduce_sum(self.target_mask, axis=1, keepdims=False)
            self._loss_per_sentence = sentence_loss * sent_lens
            self._loss = tf.reduce_mean(self._loss_per_sentence, keepdims=False)
    def _build_graph(self):
        """ Defines the model graph.

        Instantiates embedding/projection layers (optionally tied) and the
        encoder/decoder sub-networks.

        Returns:
            The decoder-side vocabulary size.
        """
        with tf.variable_scope('{:s}_model'.format(self.name)):
            # Instantiate embedding layer(s)
            if not self.config.tie_encoder_decoder_embeddings:
                enc_vocab_size = self.source_vocab_size
                dec_vocab_size = self.target_vocab_size
            else:
                assert self.source_vocab_size == self.target_vocab_size, \
                    'Input and output vocabularies should be identical when tying embedding tables.'
                enc_vocab_size = dec_vocab_size = self.source_vocab_size
            encoder_embedding_layer = EmbeddingLayer(enc_vocab_size,
                                                     self.config.embedding_size,
                                                     self.config.state_size,
                                                     self.float_dtype,
                                                     name='encoder_embedding_layer')
            if not self.config.tie_encoder_decoder_embeddings:
                decoder_embedding_layer = EmbeddingLayer(dec_vocab_size,
                                                         self.config.embedding_size,
                                                         self.config.state_size,
                                                         self.float_dtype,
                                                         name='decoder_embedding_layer')
            else:
                # Weight tying: decoder shares the encoder's embedding table.
                decoder_embedding_layer = encoder_embedding_layer
            if not self.config.tie_encoder_decoder_embeddings:
                softmax_projection_layer = EmbeddingLayer(dec_vocab_size,
                                                          self.config.embedding_size,
                                                          self.config.state_size,
                                                          self.float_dtype,
                                                          name='softmax_projection_layer')
            else:
                # Three-way tying: output projection reuses the embeddings.
                softmax_projection_layer = decoder_embedding_layer
            # Instantiate the component networks
            self.enc = TransformerEncoder(self.config,
                                          encoder_embedding_layer,
                                          self.training,
                                          self.float_dtype,
                                          'encoder')
            self.dec = TransformerDecoder(self.config,
                                          decoder_embedding_layer,
                                          softmax_projection_layer,
                                          self.training,
                                          self.int_dtype,
                                          self.float_dtype,
                                          'decoder')
            return dec_vocab_size
    @property
    def loss_per_sentence(self):
        # Per-sentence total cross-entropy (summed over tokens).
        return self._loss_per_sentence
    @property
    def loss(self):
        # Scalar mean of the per-sentence losses over the batch.
        return self._loss
    def _convert_inputs(self, inputs):
        """Converts time-major placeholders into batch-major tensors and
        builds the shifted decoder input sequence."""
        # Convert from time-major to batch-major. Note that we take factor 0
        # from x and ignore any other factors.
        source_ids = tf.transpose(inputs.x[0], perm=[1,0])
        source_mask = tf.transpose(inputs.x_mask, perm=[1,0])
        target_ids_out = tf.transpose(inputs.y, perm=[1,0])
        target_mask = tf.transpose(inputs.y_mask, perm=[1,0])
        # target_ids_in is a bit more complicated since we need to insert
        # the special <GO> symbol (with value 1) at the start of each sentence
        # (max_len is currently unused; kept from the unpacking).
        max_len, batch_size = tf.shape(inputs.y)[0], tf.shape(inputs.y)[1]
        go_symbols = tf.fill(value=1, dims=[1, batch_size])
        tmp = tf.concat([go_symbols, inputs.y], 0)
        # Drop the final time step so the input stays max_len long (shift-right).
        tmp = tmp[:-1, :]
        target_ids_in = tf.transpose(tmp, perm=[1,0])
        return (source_ids, source_mask, target_ids_in, target_ids_out,
                target_mask)
class TransformerEncoder(object):
    """ The encoder module used within the transformer model. """
    def __init__(self,
                 config,
                 embedding_layer,
                 training,
                 float_dtype,
                 name):
        """Builds the encoder layer stack.

        Args:
            config: model configuration object.
            embedding_layer: EmbeddingLayer used for source tokens.
            training: enables dropout when true.
            float_dtype: floating-point dtype for layer parameters.
            name: variable-scope name for the encoder's parameters.
        """
        # Set attributes
        self.config = config
        self.embedding_layer = embedding_layer
        self.training = training
        self.float_dtype = float_dtype
        self.name = name
        # Track layers
        self.encoder_stack = dict()
        self.is_final_layer = False
        # Create nodes
        self._build_graph()
    def _embed(self, index_sequence):
        """ Embeds source-side indices to obtain the corresponding dense tensor representations. """
        # Embed input tokens
        return self.embedding_layer.embed(index_sequence)
    def _build_graph(self):
        """ Defines the model graph.

        Populates self.encoder_stack with one {'self_attn', 'ffn'} dict per
        layer, keyed by 1-based layer index.
        """
        # Initialize layers
        with tf.variable_scope(self.name):
            for layer_id in range(1, self.config.transformer_enc_depth + 1):
                layer_name = 'layer_{:d}'.format(layer_id)
                # Check if constructed layer is final
                if layer_id == self.config.transformer_enc_depth:
                    self.is_final_layer = True
                # Specify ffn dimensions sequence
                ffn_dims = [self.config.transformer_ffn_hidden_size, self.config.state_size]
                with tf.variable_scope(layer_name):
                    # Build layer blocks (see layers.py)
                    self_attn_block = AttentionBlock(self.config,
                                                     self.float_dtype,
                                                     self_attention=True,
                                                     training=self.training)
                    ffn_block = FFNBlock(self.config,
                                         ffn_dims,
                                         self.float_dtype,
                                         is_final=self.is_final_layer,
                                         training=self.training)
                    # Maintain layer-wise dict entries for easier data-passing (may change later)
                    self.encoder_stack[layer_id] = dict()
                    self.encoder_stack[layer_id]['self_attn'] = self_attn_block
                    self.encoder_stack[layer_id]['ffn'] = ffn_block
    def encode(self, source_ids, source_mask):
        """ Encodes source-side input tokens into meaningful, contextually-enriched representations.

        Args:
            source_ids: batch-major token-id Tensor.
            source_mask: batch-major 0/1 padding mask.
        Returns:
            Tuple of (encoder output states, cross-attention mask for the
            decoder).
        """
        def _prepare_source():
            """ Pre-processes inputs to the encoder and generates the corresponding attention masks."""
            # Embed
            source_embeddings = self._embed(source_ids)
            # Obtain length and depth of the input tensors
            _, time_steps, depth = get_shape_list(source_embeddings)
            # Transform input mask into attention mask: padded positions get
            # a large negative additive bias so softmax assigns them ~0.
            inverse_mask = tf.cast(tf.equal(source_mask, 0.0), dtype=self.float_dtype)
            attn_mask = inverse_mask * -1e9
            # Expansion to shape [batch_size, 1, 1, time_steps] is needed for compatibility with attention logits
            attn_mask = tf.expand_dims(tf.expand_dims(attn_mask, 1), 1)
            # Differentiate between self-attention and cross-attention masks for further, optional modifications
            self_attn_mask = attn_mask
            cross_attn_mask = attn_mask
            # Add positional encodings
            positional_signal = get_positional_signal(time_steps, depth, self.float_dtype)
            source_embeddings += positional_signal
            # Apply dropout
            if self.config.transformer_dropout_embeddings > 0:
                source_embeddings = tf.layers.dropout(source_embeddings,
                                                      rate=self.config.transformer_dropout_embeddings, training=self.training)
            return source_embeddings, self_attn_mask, cross_attn_mask
        with tf.variable_scope(self.name):
            # Prepare inputs to the encoder, get attention masks
            enc_inputs, self_attn_mask, cross_attn_mask = _prepare_source()
            # Propagate inputs through the encoder stack
            enc_output = enc_inputs
            for layer_id in range(1, self.config.transformer_enc_depth + 1):
                enc_output, _ = self.encoder_stack[layer_id]['self_attn'].forward(enc_output, None, self_attn_mask)
                enc_output = self.encoder_stack[layer_id]['ffn'].forward(enc_output)
        return enc_output, cross_attn_mask
class TransformerDecoder(object):
    """ The decoder module used within the transformer model. """
    def __init__(self,
                 config,
                 embedding_layer,
                 softmax_projection_layer,
                 training,
                 int_dtype,
                 float_dtype,
                 name,
                 from_rnn=False):
        """Builds the decoder layer stack.

        Args:
            config: model configuration object.
            embedding_layer: EmbeddingLayer used for target tokens.
            softmax_projection_layer: layer projecting decoder states to
                vocabulary logits (may be tied to the embeddings).
            training: enables dropout when true.
            int_dtype: integer dtype for token ids.
            float_dtype: floating-point dtype for layer parameters.
            name: variable-scope name for the decoder's parameters.
            from_rnn: True when the encoder is an RNN (hybrid model), in
                which case encoder outputs are time-major.
        """
        # Set attributes
        self.config = config
        self.embedding_layer = embedding_layer
        self.softmax_projection_layer = softmax_projection_layer
        self.training = training
        self.int_dtype = int_dtype
        self.float_dtype = float_dtype
        self.name = name
        self.from_rnn = from_rnn
        # If the decoder is used in a hybrid system, adjust parameters accordingly
        self.time_dim = 0 if from_rnn else 1
        # Track layers
        self.decoder_stack = dict()
        self.is_final_layer = False
        # Create nodes
        self._build_graph()
    def _embed(self, index_sequence):
        """ Embeds target-side indices to obtain the corresponding dense tensor representations. """
        return self.embedding_layer.embed(index_sequence)
    def _get_initial_memories(self, batch_size, beam_size):
        """ Initializes decoder memories used for accelerated inference.

        Each layer's key/value cache starts with zero time steps and is
        replicated across beam entries.
        """
        initial_memories = dict()
        for layer_id in range(1, self.config.transformer_dec_depth + 1):
            initial_memories['layer_{:d}'.format(layer_id)] = \
                {'keys': tf.tile(tf.zeros([batch_size, 0, self.config.state_size]), [beam_size, 1, 1]),
                 'values': tf.tile(tf.zeros([batch_size, 0, self.config.state_size]), [beam_size, 1, 1])}
        return initial_memories
    def _build_graph(self):
        """ Defines the model graph.

        Populates self.decoder_stack with one
        {'self_attn', 'cross_attn', 'ffn'} dict per layer, keyed by 1-based
        layer index.
        """
        # Initialize layers
        with tf.variable_scope(self.name):
            # BUGFIX: iterate over the *decoder* depth. The previous code
            # used transformer_enc_depth, which only worked when encoder and
            # decoder depths were equal; decode_at_train and the inference
            # code index decoder_stack with transformer_dec_depth layers.
            for layer_id in range(1, self.config.transformer_dec_depth + 1):
                layer_name = 'layer_{:d}'.format(layer_id)
                # Check if constructed layer is final
                if layer_id == self.config.transformer_dec_depth:
                    self.is_final_layer = True
                # Specify ffn dimensions sequence
                ffn_dims = [self.config.transformer_ffn_hidden_size, self.config.state_size]
                with tf.variable_scope(layer_name):
                    # Build layer blocks (see layers.py)
                    self_attn_block = AttentionBlock(self.config,
                                                     self.float_dtype,
                                                     self_attention=True,
                                                     training=self.training)
                    cross_attn_block = AttentionBlock(self.config,
                                                      self.float_dtype,
                                                      self_attention=False,
                                                      training=self.training,
                                                      from_rnn=self.from_rnn)
                    ffn_block = FFNBlock(self.config,
                                         ffn_dims,
                                         self.float_dtype,
                                         is_final=self.is_final_layer,
                                         training=self.training)
                    # Maintain layer-wise dict entries for easier data-passing (may change later)
                    self.decoder_stack[layer_id] = dict()
                    self.decoder_stack[layer_id]['self_attn'] = self_attn_block
                    self.decoder_stack[layer_id]['cross_attn'] = cross_attn_block
                    self.decoder_stack[layer_id]['ffn'] = ffn_block
    def decode_at_train(self, target_ids, enc_output, cross_attn_mask):
        """ Returns the probability distribution over target-side tokens conditioned on the output of the encoder;
        performs decoding in parallel at training time.

        Args:
            target_ids: batch-major, right-shifted target token ids.
            enc_output: encoder output states.
            cross_attn_mask: additive mask hiding padded source positions.
        Returns:
            Vocabulary logits for every target position.
        """
        def _decode_all(target_embeddings):
            """ Decodes the encoder-generated representations into target-side logits in parallel. """
            # Apply input dropout
            dec_input = \
                tf.layers.dropout(target_embeddings, rate=self.config.transformer_dropout_embeddings, training=self.training)
            # Propagate inputs through the decoder stack
            dec_output = dec_input
            for layer_id in range(1, self.config.transformer_dec_depth + 1):
                dec_output, _ = self.decoder_stack[layer_id]['self_attn'].forward(dec_output, None, self_attn_mask)
                dec_output, _ = \
                    self.decoder_stack[layer_id]['cross_attn'].forward(dec_output, enc_output, cross_attn_mask)
                dec_output = self.decoder_stack[layer_id]['ffn'].forward(dec_output)
            return dec_output
        def _prepare_targets():
            """ Pre-processes target token ids before they're passed on as input to the decoder
            for parallel decoding. """
            # Embed target_ids
            target_embeddings = self._embed(target_ids)
            target_embeddings += positional_signal
            if self.config.transformer_dropout_embeddings > 0:
                target_embeddings = tf.layers.dropout(target_embeddings,
                                                      rate=self.config.transformer_dropout_embeddings, training=self.training)
            return target_embeddings
        def _decoding_function():
            """ Generates logits for target-side tokens. """
            # Embed the model's predictions up to the current time-step; add positional information, mask
            target_embeddings = _prepare_targets()
            # Pass encoder context and decoder embeddings through the decoder
            dec_output = _decode_all(target_embeddings)
            # Project decoder stack outputs and apply the soft-max non-linearity
            full_logits = self.softmax_projection_layer.project(dec_output)
            return full_logits
        with tf.variable_scope(self.name):
            # Transpose encoder information in hybrid models (RNN encoders
            # emit time-major outputs).
            if self.from_rnn:
                enc_output = tf.transpose(enc_output, [1, 0, 2])
                cross_attn_mask = tf.transpose(cross_attn_mask, [3, 1, 2, 0])
            # Lower-triangular mask blocks attention to future positions.
            self_attn_mask = get_right_context_mask(tf.shape(target_ids)[-1])
            positional_signal = get_positional_signal(tf.shape(target_ids)[-1],
                                                      self.config.embedding_size,
                                                      self.float_dtype)
            logits = _decoding_function()
        return logits
| {
"repo_name": "rsennrich/nematus",
"path": "nematus/transformer.py",
"copies": "1",
"size": "18170",
"license": "bsd-3-clause",
"hash": 1345388726622965500,
"line_mean": 47.5828877005,
"line_max": 126,
"alpha_frac": 0.5323059989,
"autogenerated": false,
"ratio": 4.651817716333845,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5684123715233845,
"avg_score": null,
"num_lines": null
} |
"""Adapted from Nematode: https://github.com/demelin/nematode """
import tensorflow as tf
from transformer_layers import \
get_shape_list, \
get_positional_signal
def sample(session, model, x, x_mask, graph=None):
    """Draws one random translation per input sentence from a Transformer model.

    Args:
        session: TensorFlow session.
        model: a Transformer object.
        x: Numpy array with shape (factors, max_seq_len, batch_size).
        x_mask: Numpy array with shape (max_seq_len, batch_size).
        graph: a SampleGraph (pass one in to reuse it across repeated calls).

    Returns:
        A list of NumPy arrays (one for each input sentence in x).
    """
    feeds = {
        model.inputs.x: x,
        model.inputs.x_mask: x_mask,
        model.training: False,
    }
    # Build a fresh sampling graph unless the caller supplied one for reuse
    sampling_graph = SampleGraph(model) if graph is None else graph
    target_batch, scores = session.run(sampling_graph.outputs, feed_dict=feeds)
    # Sanity-check that we got exactly one output per input sentence
    batch_size = x.shape[-1]
    assert len(target_batch) == batch_size
    assert len(scores) == batch_size
    return target_batch
def beam_search(session, models, x, x_mask, beam_size,
                normalization_alpha=0.0, graph=None):
    """Runs beam search with one Transformer translation model (TODO ensemble).

    TODO Ensemble
    If using an ensemble (i.e. more than one model), then at each timestep
    the top k tokens are selected according to the sum of the models'
    probabilities (where k is the beam size).

    Args:
        session: TensorFlow session.
        models: a list of Transformer objects.
        x: input Tensor with shape (factors, max_seq_len, batch_size).
        x_mask: mask Tensor for x with shape (max_seq_len, batch_size).
        beam_size: beam width.
        normalization_alpha: length normalization hyperparameter.
        graph: a BeamSearchGraph (pass one in to reuse it across calls).

    Returns:
        A list of lists of (translation, score) pairs, one inner list per
        input sentence, each holding beam_size entries sorted best-first.
    """
    assert len(models) == 1  # ensembles not supported yet
    feeds = {}
    for m in models:
        feeds[m.inputs.x] = x
        feeds[m.inputs.x_mask] = x_mask
        feeds[m.training] = False
    search_graph = BeamSearchGraph(models, beam_size, normalization_alpha) if graph is None else graph
    target_batch, scores = session.run(search_graph.outputs, feed_dict=feeds)
    # One beam of outputs and one beam of scores per input sentence
    assert len(target_batch) == x.shape[-1]
    assert len(scores) == x.shape[-1]
    # Pair each hypothesis with its score and order every beam best-first
    return [
        sorted(zip(beam_outputs, beam_scores), key=lambda pair: pair[1], reverse=True)
        for beam_outputs, beam_scores in zip(target_batch, scores)
    ]
"""Builds a graph fragment for sampling over a TransformerModel."""
class SampleGraph(object):
    """Holds the (ids, scores) ops for sampling from a TransformerModel."""

    def __init__(self, model):
        sampled_ids, sampled_scores = construct_sampling_ops(model)
        self._ids = sampled_ids
        self._scores = sampled_scores

    @property
    def outputs(self):
        """The (ids, scores) op pair to pass to Session.run()."""
        return (self._ids, self._scores)
"""Builds a graph fragment for beam search over a TransformerModel."""
class BeamSearchGraph(object):
    """Holds the beam-search ops (and search settings) for a TransformerModel."""

    def __init__(self, models, beam_size, normalization_alpha):
        self._beam_size = beam_size
        self._normalization_alpha = normalization_alpha
        self._outputs = construct_beam_search_ops(models, beam_size,
                                                 normalization_alpha)

    @property
    def outputs(self):
        """The (sequences, scores) op pair to pass to Session.run()."""
        return self._outputs

    @property
    def beam_size(self):
        """Beam width this graph was built with."""
        return self._beam_size

    @property
    def normalization_alpha(self):
        """Length-normalization hyperparameter this graph was built with."""
        return self._normalization_alpha
def construct_sampling_ops(model):
    """Builds a graph fragment for sampling over a TransformerModel.

    Args:
        model: a TransformerModel.

    Returns:
        A tuple (ids, scores), where ids is a Tensor with shape (batch_size,
        max_seq_len) containing one sampled translation for each input sentence
        in model.inputs.x and scores is a Tensor with shape (batch_size).
    """
    # Sampling is greedy decoding with stochastic token selection enabled
    return decode_greedy(model, do_sample=True)
def construct_beam_search_ops(models, beam_size, normalization_alpha):
    """Builds a graph fragment for beam search over a TransformerModel.

    Args:
        models: a list containing a single TransformerModel (ensembles are
            not supported yet).
        beam_size: beam width.
        normalization_alpha: length normalization hyperparameter.

    Returns:
        A tuple (ids, scores), where ids is a Tensor with shape (batch_size, k,
        max_seq_len) containing k translations for each input sentence in
        model.inputs.x and scores is a Tensor with shape (batch_size, k).
    """
    assert len(models) == 1
    return decode_greedy(models[0],
                         beam_size=beam_size,
                         normalization_alpha=normalization_alpha)
def decode_greedy(model, do_sample=False, beam_size=0,
                  normalization_alpha=None):
    """ Builds the encode-then-decode graph for inference over a single model.

    Despite the name, this dispatches to beam search when beam_size > 0 (see
    decode_at_test); otherwise it performs greedy decoding, optionally with
    sampling when do_sample is True.

    Args:
        model: a Transformer model object exposing source_ids/source_mask, enc and dec.
        do_sample: if True, sample the next token instead of arg-maxing (greedy path only).
        beam_size: beam width; 0 selects the greedy/sampling path.
        normalization_alpha: length-normalization hyperparameter for beam search.

    Returns:
        A (sequences, scores) pair of ops produced by decode_at_test.
    """
    # Determine size of current batch
    batch_size, _ = get_shape_list(model.source_ids)
    # Encode source sequences
    with tf.name_scope('{:s}_encode'.format(model.name)):
        enc_output, cross_attn_mask = model.enc.encode(model.source_ids,
                                                       model.source_mask)
    # Decode into target sequences
    with tf.name_scope('{:s}_decode'.format(model.name)):
        dec_output, scores = decode_at_test(model.dec, enc_output,
            cross_attn_mask, batch_size, beam_size, do_sample, normalization_alpha)
    return dec_output, scores
def decode_at_test(decoder, enc_output, cross_attn_mask, batch_size, beam_size, do_sample, normalization_alpha):
    """ Returns the probability distribution over target-side tokens conditioned on the output of the encoder;
    performs decoding via auto-regression at test time.

    Args:
        decoder: the model's decoder object (provides config, embedding, decoder stack and projection layer).
        enc_output: encoder representations attended to by cross-attention.
        cross_attn_mask: mask restricting cross-attention to valid source positions.
        batch_size: number of source sentences in the batch.
        beam_size: beam width; 0 selects greedy/sampling decoding.
        do_sample: if True, sample tokens instead of arg-maxing (greedy path only).
        normalization_alpha: length-normalization hyperparameter (beam search only).

    Returns:
        A (output_sequences, scores) pair from either _beam_search or greedy_search.
    """

    def _decode_step(target_embeddings, memories):
        """ Decode the encoder-generated representations into target-side logits with auto-regression. """
        # Propagate inputs through the encoder stack
        dec_output = target_embeddings
        # NOTE: No self-attention mask is applied at decoding, as future information is unavailable
        for layer_id in range(1, decoder.config.transformer_dec_depth + 1):
            # Each layer's self-attention reads and updates its cached memories so earlier
            # time-steps do not have to be recomputed.
            dec_output, memories['layer_{:d}'.format(layer_id)] = \
                decoder.decoder_stack[layer_id]['self_attn'].forward(
                    dec_output, None, None, memories['layer_{:d}'.format(layer_id)])
            dec_output, _ = \
                decoder.decoder_stack[layer_id]['cross_attn'].forward(dec_output, enc_output, cross_attn_mask)
            dec_output = decoder.decoder_stack[layer_id]['ffn'].forward(dec_output)
        # Return prediction at the final time-step to be consistent with the inference pipeline
        dec_output = dec_output[:, -1, :]
        return dec_output, memories

    def _pre_process_targets(step_target_ids, current_time_step):
        """ Pre-processes target token ids before they're passed on as input to the decoder
        for auto-regressive decoding. """
        # Embed target_ids
        target_embeddings = decoder._embed(step_target_ids)
        # Add only the positional signal slice for the current time-step
        signal_slice = positional_signal[:, current_time_step - 1: current_time_step, :]
        target_embeddings += signal_slice
        if decoder.config.transformer_dropout_embeddings > 0:
            target_embeddings = tf.layers.dropout(target_embeddings,
                                                  rate=decoder.config.transformer_dropout_embeddings, training=decoder.training)
        return target_embeddings

    def _decoding_function(step_target_ids, current_time_step, memories):
        """ Generates logits for the target-side token predicted for the next-time step with auto-regression. """
        # Embed the model's predictions up to the current time-step; add positional information, mask
        target_embeddings = _pre_process_targets(step_target_ids, current_time_step)
        # Pass encoder context and decoder embeddings through the decoder
        dec_output, memories = _decode_step(target_embeddings, memories)
        # Project decoder stack outputs and apply the soft-max non-linearity
        step_logits = decoder.softmax_projection_layer.project(dec_output)
        return step_logits, memories

    with tf.variable_scope(decoder.name):
        # Transpose encoder information in hybrid models
        if decoder.from_rnn:
            enc_output = tf.transpose(enc_output, [1, 0, 2])
            cross_attn_mask = tf.transpose(cross_attn_mask, [3, 1, 2, 0])
        # Pre-compute the positional signal for the longest permissible translation
        positional_signal = get_positional_signal(decoder.config.translation_maxlen,
                                                  decoder.config.embedding_size,
                                                  decoder.float_dtype)
        if beam_size > 0:
            # Initialize target IDs with <GO>
            initial_ids = tf.cast(tf.fill([batch_size], 1), dtype=decoder.int_dtype)
            initial_memories = decoder._get_initial_memories(batch_size, beam_size=beam_size)
            output_sequences, scores = _beam_search(_decoding_function,
                                                    initial_ids,
                                                    initial_memories,
                                                    decoder.int_dtype,
                                                    decoder.float_dtype,
                                                    decoder.config.translation_maxlen,
                                                    batch_size,
                                                    beam_size,
                                                    decoder.embedding_layer.get_vocab_size(),
                                                    0,
                                                    normalization_alpha)
        else:
            # Initialize target IDs with <GO>
            initial_ids = tf.cast(tf.fill([batch_size, 1], 1), dtype=decoder.int_dtype)
            initial_memories = decoder._get_initial_memories(batch_size, beam_size=1)
            output_sequences, scores = greedy_search(_decoding_function,
                                                     initial_ids,
                                                     initial_memories,
                                                     decoder.int_dtype,
                                                     decoder.float_dtype,
                                                     decoder.config.translation_maxlen,
                                                     batch_size,
                                                     0,
                                                     do_sample,
                                                     time_major=False)
    return output_sequences, scores
""" Inference functions for the transformer model. The generative process follows the 'Look, Generate, Update' paradigm,
as opposed to the 'Look, Update, Generate' paradigm employed by the deep-RNN. """
# TODO: Add coverage penalty from Tu, Zhaopeng, et al.
# TODO: "Modeling coverage for neural machine translation." arXiv preprint arXiv:1601.04811 (2016).
# Note: Some inference mechanisms are adopted from the tensor2tensor library, with modifications
# ============================================== Helper functions ==================================================== #
def batch_to_beam(batch_tensor, beam_size):
    """ Replicates each batch entry beam_size times along a new axis 1, so that the
    batch tensor matches the layout of the model's beam. """
    replicas = [batch_tensor for _ in range(beam_size)]
    return tf.stack(replicas, axis=1)
def compute_batch_indices(batch_size, beam_size):
    """ Generates a [batch_size, beam_size] matrix of batch indices for the 'merged' beam tensor;
    every entry in row b equals b, i.e. the batch each merged-beam slot belongs to. """
    flat_batch_ids = tf.range(batch_size * beam_size) // beam_size
    return tf.reshape(flat_batch_ids, [batch_size, beam_size])
def get_memory_invariants(memories):
    """ Calculates the shape invariants for the model memories (i.e. states of the RNN or layer-wise
    attentions of the transformer), as required by tf.while_loop's shape_invariants argument.

    Every dimension is declared unknown (None), since the time axis of the attention
    memories grows with each decoding step.

    Args:
        memories: nested dict mapping layer IDs to dicts of memory tensors.

    Returns:
        A nested dict of the same structure mapping each memory to a fully-unknown TensorShape.

    Raises:
        ValueError: if memories is not a dictionary.
    """
    # isinstance (rather than an exact type comparison) also accepts dict subclasses
    if not isinstance(memories, dict):
        raise ValueError('Memory type not supported, must be a dictionary.')
    return {
        layer_id: {
            key: tf.TensorShape([None] * len(get_shape_list(layer_memories[key])))
            for key in layer_memories.keys()
        }
        for layer_id, layer_memories in memories.items()
    }
# Seems to work alright
def gather_memories(memory_dict, gather_coordinates):
    """ Gathers layer-wise memory tensors corresponding to top sequences from the provided memory dictionary
    during beam search.

    Args:
        memory_dict: nested dict mapping layer IDs to dicts of memory tensors; the leading axis of each
            memory tensor is assumed to be the merged (beam * batch) dimension -- TODO confirm
        gather_coordinates: [batch_size, beam_size, 2] (batch, beam) coordinate pairs of the selected
            hypotheses, as produced in gather_top_sequences.

    Returns:
        A nested dict of the same structure containing the gathered memory tensors.
    """
    # Initialize dicts
    gathered_memories = dict()
    # Get coordinate shapes
    coords_dims = get_shape_list(gather_coordinates)
    # Gather
    for layer_key in memory_dict.keys():
        layer_dict = memory_dict[layer_key]
        gathered_memories[layer_key] = dict()
        for attn_key in layer_dict.keys():
            attn_tensor = layer_dict[attn_key]
            attn_dims = get_shape_list(attn_tensor)
            # Not sure if this is faster than the 'memory-less' version
            # Un-merge the (beam * batch) leading axis, move batch to the front so that
            # gather_nd can index with (batch, beam) coordinates ...
            flat_tensor = \
                tf.transpose(tf.reshape(attn_tensor, [-1, coords_dims[0]] + attn_dims[1:]), [1, 0, 2, 3])
            # ... then restore the merged (beam * batch) layout expected by the decoder.
            gathered_values = tf.reshape(tf.transpose(tf.gather_nd(flat_tensor, gather_coordinates), [1, 0, 2, 3]),
                                         [tf.multiply(coords_dims[1], coords_dims[0])] + attn_dims[1:])
            gathered_memories[layer_key][attn_key] = gathered_values
    return gathered_memories
def gather_top_sequences(all_sequences,
                         all_scores,
                         all_scores_to_gather,
                         all_eos_flags,
                         all_memories,
                         beam_size,
                         batch_size,
                         prefix):
    """ Selects |beam size| sequences from a |beam size ** 2| sequence set; the selected sequences are used to update
    sets of unfinished and completed decoding hypotheses.

    Args:
        all_sequences: candidate sequences to select from.
        all_scores: scores used for the top-k ranking.
        all_scores_to_gather: scores gathered at the selected positions (may differ from all_scores,
            e.g. the non-normalized log-probs when ranking on normalized scores).
        all_eos_flags: boolean flags marking sequences terminating in <EOS>.
        all_memories: layer-wise attention memories, or None when no memories need updating.
        beam_size: number of sequences to keep per batch entry.
        batch_size: number of source sentences in the batch.
        prefix: name prefix for the gather ops ('alive' or 'finished').

    Returns:
        A (sequences, scores, eos_flags, memories) tuple of gathered values;
        memories is None whenever all_memories is None.
    """
    # Obtain indices of the top-k scores within the scores tensor
    _, top_indices = tf.nn.top_k(all_scores, k=beam_size)
    # Create a lookup-indices tensor for gathering the sequences associated with the top scores
    batch_index_matrix = compute_batch_indices(batch_size, beam_size)
    gather_coordinates = tf.stack([batch_index_matrix, top_indices], axis=2)  # coordinates in final dimension
    # Collect top outputs
    gathered_sequences = tf.gather_nd(all_sequences, gather_coordinates, name='{:s}_sequences'.format(prefix))
    gathered_scores = tf.gather_nd(all_scores_to_gather, gather_coordinates, name='{:s}_scores'.format(prefix))
    gathered_eos_flags = tf.gather_nd(all_eos_flags, gather_coordinates, name='{:s}_eos_flags'.format(prefix))
    gathered_memories = None
    if all_memories is not None:
        gathered_memories = gather_memories(all_memories, gather_coordinates)
        # gathered_memories = all_memories
    return gathered_sequences, gathered_scores, gathered_eos_flags, gathered_memories
# ============================================= Decoding functions =================================================== #
def greedy_search(decoding_function,
                  initial_ids,
                  initial_memories,
                  int_dtype,
                  float_dtype,
                  max_prediction_length,
                  batch_size,
                  eos_id,
                  do_sample,
                  time_major):
    """ Greedily decodes the target sequence conditioned on the output of the encoder and the current output prefix.

    Args:
        decoding_function: callable mapping (next_ids, current_time_step, memories) to (step_logits, memories).
        initial_ids: tensor of <GO> token IDs used as the first decoder input.
        initial_memories: layer-wise dictionary of attention memories.
        int_dtype: integer dtype used for token IDs.
        float_dtype: float dtype used for scores.
        max_prediction_length: maximum number of decoding steps.
        batch_size: number of sentences decoded in parallel.
        eos_id: ID of the end-of-sentence token.
        do_sample: if True, sample the next token from the predicted distribution instead of arg-maxing.
        time_major: if True, time is axis 0; otherwise batch is axis 0.

    Returns:
        A (decoded_ids, log_scores) pair: the generated token IDs and the summed
        token log-probabilities per batch entry.
    """
    # Declare time-dimension
    time_dim = int(not time_major)  # i.e. 0 if time_major, 1 if batch_major

    # Define the 'body' for the tf.while_loop() call
    def _decoding_step(current_time_step, all_finished, next_ids, decoded_ids, decoded_score, memories):
        """ Defines a single step of greedy decoding. """
        # Propagate through decoder
        step_logits, memories = decoding_function(next_ids, current_time_step, memories)
        # Calculate log probabilities for token prediction at current time-step
        step_scores = tf.nn.log_softmax(step_logits)
        # Determine next token to be generated, next_ids has shape [batch_size]
        if do_sample:
            next_ids = tf.squeeze(tf.multinomial(step_scores, num_samples=1, output_dtype=int_dtype), axis=1)
        else:
            # Greedy decoding
            next_ids = tf.argmax(step_scores, -1, output_type=int_dtype)
        # Collect scores associated with the selected tokens
        score_coordinates = tf.stack([tf.range(batch_size, dtype=int_dtype), next_ids], axis=1)
        decoded_score += tf.gather_nd(step_scores, score_coordinates)
        # Concatenate newly decoded token ID with the previously decoded ones
        decoded_ids = tf.concat([decoded_ids, tf.expand_dims(next_ids, 1)], 1)
        # Extend next_id's dimensions to be compatible with input dimensionality for the subsequent step
        next_ids = tf.expand_dims(next_ids, time_dim)
        # Check if generation has concluded with <EOS>.
        # BUG FIX: the previous check compared reduce_prod(decoded_ids - eos_id) against eos_id,
        # which only behaves correctly when eos_id == 0 (as at the call sites in this file) and can
        # silently overflow for long sequences; testing for the presence of eos_id directly is
        # correct for any eos_id and is equivalent for eos_id == 0.
        all_finished |= tf.reduce_any(tf.equal(decoded_ids, eos_id), axis=time_dim)
        return current_time_step + 1, all_finished, next_ids, decoded_ids, decoded_score, memories

    # Define the termination condition for the tf.while_loop() call
    def _continue_decoding(_current_time_step, _all_finished, *_):
        """ Returns 'False' if all of the sequences in the generated sequence batch exceeded the maximum specified
        length or terminated with <EOS>, upon which the while loop is exited. """
        continuation_check = \
            tf.logical_and(tf.less(_current_time_step, max_prediction_length),
                           tf.logical_not(tf.reduce_all(_all_finished)))
        return continuation_check

    # Initialize decoding-relevant variables and containers
    current_time_step = tf.constant(1)
    all_finished = tf.fill([batch_size], False)  # None of the sequences is marked as finished
    next_ids = initial_ids
    decoded_ids = tf.zeros([batch_size, 0], dtype=int_dtype)  # Sequence buffer is empty
    decoded_score = tf.zeros([batch_size], dtype=float_dtype)
    memories = initial_memories

    # Execute the auto-regressive decoding step via while loop
    _, _, _, decoded_ids, log_scores, memories = \
        tf.while_loop(cond=_continue_decoding,
                      body=_decoding_step,
                      loop_vars=[current_time_step, all_finished, next_ids, decoded_ids, decoded_score, memories],
                      shape_invariants=[tf.TensorShape([]),
                                        tf.TensorShape([None]),
                                        tf.TensorShape([None, None]),
                                        tf.TensorShape([None, None]),
                                        tf.TensorShape([None]),
                                        get_memory_invariants(memories)],
                      parallel_iterations=10,
                      swap_memory=False,
                      back_prop=False)

    # Should return logits also, for training
    return decoded_ids, log_scores
def _beam_search(decoding_function,
                 initial_ids,
                 initial_memories,
                 int_dtype,
                 float_dtype,
                 translation_maxlen,
                 batch_size,
                 beam_size,
                 vocab_size,
                 eos_id,
                 normalization_alpha):
    """ Decodes the target sequence by maintaining a beam of candidate hypotheses, thus allowing for better exploration
    of the hypothesis space; optionally applies scaled length normalization; based on the T2T implementation.

    alive = set of n unfinished hypotheses presently within the beam; n == beam_size
    finished = set of n finished hypotheses, each terminating in <EOS>; n == beam_size
    """

    def _extend_hypotheses(current_time_step, alive_sequences, alive_log_probs, alive_memories):
        """ Generates top-k extensions of the alive beam candidates from the previous time-step, which are subsequently
        used to update the alive and finished sets at the current time-step; top-k = beam_size ** 2 """
        # Get logits for the current prediction step
        next_ids = alive_sequences[:, :, -1]  # [batch_size, beam_size]
        next_ids = tf.transpose(next_ids, [1, 0])  # [beam_size, batch_size]; transpose to match model
        next_ids = tf.reshape(next_ids, [-1, 1])  # [beam_size * batch_size, 1]

        step_logits, alive_memories = decoding_function(next_ids, current_time_step, alive_memories)
        step_logits = tf.reshape(step_logits, [beam_size, batch_size, -1])  # [beam_size, batch_size, num_words]
        step_logits = tf.transpose(step_logits, [1, 0, 2])  # [batch_size, beam_size, num_words]; transpose back

        # Calculate the scores for all possible extensions of alive hypotheses
        candidate_log_probs = tf.nn.log_softmax(step_logits, axis=-1)
        curr_log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)

        # Apply length normalization
        length_penalty = 1.
        if normalization_alpha > 0.:
            length_penalty = ((5. + tf.to_float(current_time_step)) ** normalization_alpha) / \
                             ((5. + 1.) ** normalization_alpha)
        curr_scores = curr_log_probs / length_penalty

        # Select top-k highest scores
        flat_curr_scores = tf.reshape(curr_scores, [batch_size, -1])
        top_scores, top_ids = tf.nn.top_k(flat_curr_scores, k=beam_size ** 2)

        # Recover non-normalized scores for tracking
        top_log_probs = top_scores * length_penalty

        # Determine the beam from which the top-scoring items originate and their identity (i.e. token-ID)
        top_beam_indices = top_ids // vocab_size
        top_ids %= vocab_size

        # Determine the location of top candidates
        batch_index_matrix = compute_batch_indices(batch_size, beam_size ** 2)  # [batch_size, beam_size * factor]
        top_coordinates = tf.stack([batch_index_matrix, top_beam_indices], axis=2)

        # Extract top decoded sequences
        top_sequences = tf.gather_nd(alive_sequences, top_coordinates)  # [batch_size, beam_size * factor, sent_len]
        top_sequences = tf.concat([top_sequences, tf.expand_dims(top_ids, axis=2)], axis=2)

        # Extract top memories
        top_memories = gather_memories(alive_memories, top_coordinates)
        # top_memories = alive_memories

        # Check how many of the top sequences have terminated
        top_eos_flags = tf.equal(top_ids, eos_id)  # [batch_size, beam_size * factor]

        # Diversify beams at the outset of the generation: at step 1 every beam holds the same
        # <GO> prefix, so keep only one extension per beam slot to avoid duplicate hypotheses.
        init_top_sequences = tf.reshape(
            tf.reshape(top_sequences, [batch_size, beam_size, beam_size, -1])[:, :, 1, :], [batch_size, beam_size, -1])
        init_top_log_probs = \
            tf.reshape(tf.reshape(top_log_probs, [batch_size, beam_size, beam_size])[:, :, 1], [batch_size, beam_size])
        init_top_scores = \
            tf.reshape(tf.reshape(top_scores, [batch_size, beam_size, beam_size])[:, :, 1], [batch_size, beam_size])
        init_top_eos_flags = \
            tf.reshape(tf.reshape(top_eos_flags, [batch_size, beam_size, beam_size])[:, :, 1], [batch_size, beam_size])

        top_sequences, top_log_probs, top_scores, top_eos_flags = \
            tf.cond(tf.equal(current_time_step, 1),
                    lambda: [init_top_sequences, init_top_log_probs, init_top_scores, init_top_eos_flags],
                    lambda: [top_sequences, top_log_probs, top_scores, top_eos_flags])

        return top_sequences, top_log_probs, top_scores, top_eos_flags, top_memories

    def _update_alive(top_sequences, top_scores, top_log_probs, top_eos_flags, top_memories):
        """ Assembles an updated set of unfinished beam candidates from the set of top-k translation hypotheses
        generated at the current time-step; top-k for the incoming sequences is beam_size ** 2 """
        # Exclude completed sequences from the alive beam by setting their scores to a large negative value
        top_scores += tf.to_float(top_eos_flags) * (-1. * 1e7)
        # Update the alive beam
        updated_alive_sequences, updated_alive_log_probs, updated_alive_eos_flags, updated_alive_memories = \
            gather_top_sequences(top_sequences,
                                 top_scores,
                                 top_log_probs,
                                 top_eos_flags,
                                 top_memories,
                                 beam_size,
                                 batch_size,
                                 'alive')
        return updated_alive_sequences, updated_alive_log_probs, updated_alive_eos_flags, updated_alive_memories

    def _update_finished(finished_sequences, finished_scores, finished_eos_flags, top_sequences, top_scores,
                         top_eos_flags):
        """ Updates the list of completed translation hypotheses (i.e. ones terminating in <EOS>) on the basis of the
        top-k hypotheses generated at the current time-step; top-k for the incoming sequences is beam_size ** 2 """
        # Match the length of the 'finished sequences' tensor with the length of the 'finished scores' tensor
        zero_padding = tf.zeros([batch_size, beam_size, 1], dtype=int_dtype)
        finished_sequences = tf.concat([finished_sequences, zero_padding], axis=2)
        # Exclude incomplete sequences from the finished beam by setting their scores to a large negative value
        top_scores += (1. - tf.to_float(top_eos_flags)) * (-1. * 1e7)
        # Combine sequences finished at previous time steps with the top sequences from current time step, as well as
        # their scores and eos-flags, for the selection of a new, most likely, set of finished sequences
        top_finished_sequences = tf.concat([finished_sequences, top_sequences], axis=1)
        top_finished_scores = tf.concat([finished_scores, top_scores], axis=1)
        top_finished_eos_flags = tf.concat([finished_eos_flags, top_eos_flags], axis=1)
        # Update the finished beam
        updated_finished_sequences, updated_finished_scores, updated_finished_eos_flags, _ = \
            gather_top_sequences(top_finished_sequences,
                                 top_finished_scores,
                                 top_finished_scores,
                                 top_finished_eos_flags,
                                 None,
                                 beam_size,
                                 batch_size,
                                 'finished')
        return updated_finished_sequences, updated_finished_scores, updated_finished_eos_flags

    def _decoding_step(current_time_step,
                       alive_sequences,
                       alive_log_probs,
                       finished_sequences,
                       finished_scores,
                       finished_eos_flags,
                       alive_memories):
        """ Defines a single step of beam-search decoding. """
        # 1. Get the top sequences/ scores/ flags for the current time step
        top_sequences, top_log_probs, top_scores, top_eos_flags, top_memories = \
            _extend_hypotheses(current_time_step,
                               alive_sequences,
                               alive_log_probs,
                               alive_memories)
        # 2. Update the alive beam
        alive_sequences, alive_log_probs, alive_eos_flags, alive_memories = \
            _update_alive(top_sequences,
                          top_scores,
                          top_log_probs,
                          top_eos_flags,
                          top_memories)
        # 3. Update the finished beam
        finished_sequences, finished_scores, finished_eos_flags = \
            _update_finished(finished_sequences,
                             finished_scores,
                             finished_eos_flags,
                             top_sequences,
                             top_scores,
                             top_eos_flags)
        return current_time_step + 1, alive_sequences, alive_log_probs, finished_sequences, finished_scores, \
               finished_eos_flags, alive_memories

    def _continue_decoding(curr_time_step,
                           alive_sequences,
                           alive_log_probs,
                           finished_sequences,
                           finished_scores,
                           finished_eos_flags,
                           alive_memories):
        """ Returns 'False' if all of the sequences in the extended hypotheses exceeded the maximum specified
        length or if none of the extended hypotheses are more likely than the lowest scoring finished hypothesis. """
        # Check if the maximum prediction length has been reached
        length_criterion = tf.less(curr_time_step, translation_maxlen)

        # Otherwise, check if the most likely alive hypothesis is less likely than the least probable completed sequence
        # Calculate the best possible score of the most probable sequence currently alive
        max_length_penalty = 1.
        if normalization_alpha > 0.:
            max_length_penalty = ((5. + tf.to_float(translation_maxlen)) ** normalization_alpha) / \
                                 ((5. + 1.) ** normalization_alpha)

        highest_alive_score = alive_log_probs[:, 0] / max_length_penalty
        # Calculate the score of the least likely sequence currently finished
        lowest_finished_score = tf.reduce_min(finished_scores * tf.cast(finished_eos_flags, float_dtype), axis=1)
        # Account for the case in which none of the sequences in 'finished' have terminated so far;
        # In that case, each of the unfinished sequences is assigned a high negative probability, so that the
        # termination condition is not met
        mask_unfinished = (1. - tf.to_float(tf.reduce_any(finished_eos_flags, 1))) * (-1. * 1e7)
        lowest_finished_score += mask_unfinished

        # Check whether the current highest alive score is lower than the current lowest finished score
        likelihood_criterion = tf.logical_not(tf.reduce_all(tf.greater(lowest_finished_score, highest_alive_score)))

        # Decide whether to continue the decoding process
        do_continue = tf.logical_and(length_criterion, likelihood_criterion)
        return do_continue

    # Initialize alive sequence and score trackers and expand to beam size
    alive_log_probs = tf.zeros([batch_size, beam_size])

    # Initialize decoded sequences
    alive_sequences = tf.expand_dims(batch_to_beam(initial_ids, beam_size), 2)

    # Initialize finished sequence, score, and flag trackers
    finished_sequences = tf.expand_dims(batch_to_beam(initial_ids, beam_size), 2)
    finished_scores = tf.ones([batch_size, beam_size]) * (-1. * 1e7)  # initialize to a low value
    finished_eos_flags = tf.zeros([batch_size, beam_size], dtype=tf.bool)

    # Initialize memories
    alive_memories = initial_memories

    # Execute the auto-regressive decoding step via while loop
    _, alive_sequences, alive_log_probs, finished_sequences, finished_scores, finished_eos_flags, _ = \
        tf.while_loop(
            _continue_decoding,
            _decoding_step,
            [tf.constant(1), alive_sequences, alive_log_probs, finished_sequences, finished_scores, finished_eos_flags,
             alive_memories],
            shape_invariants=[tf.TensorShape([]),
                              tf.TensorShape([None, None, None]),
                              alive_log_probs.get_shape(),
                              tf.TensorShape([None, None, None]),
                              finished_scores.get_shape(),
                              finished_eos_flags.get_shape(),
                              get_memory_invariants(alive_memories)],
            parallel_iterations=10,
            swap_memory=False,
            back_prop=False)

    alive_sequences.set_shape((None, beam_size, None))
    finished_sequences.set_shape((None, beam_size, None))

    # Account for the case in which a particular sequence never terminates in <EOS>;
    # in that case, copy the contents of the alive beam for that item into the finished beam (sequence + score)
    # tf.reduce_any(finished_eos_flags, 1) is False if there exists no completed translation hypothesis for a source
    # sentence in either of the beams, i.e. no replacement takes place if there is at least one finished translation
    finished_sequences = tf.where(tf.reduce_any(finished_eos_flags, 1), finished_sequences, alive_sequences)
    # Attention: alive_scores are not length normalized!
    finished_scores = tf.where(tf.reduce_any(finished_eos_flags, 1), finished_scores, alive_log_probs)

    # Truncate initial <GO> in finished sequences
    finished_sequences = finished_sequences[:, :, 1:]

    return finished_sequences, finished_scores
| {
"repo_name": "rsennrich/nematus",
"path": "nematus/transformer_inference.py",
"copies": "1",
"size": "33603",
"license": "bsd-3-clause",
"hash": -5566696101750246000,
"line_mean": 49.8366111952,
"line_max": 128,
"alpha_frac": 0.6047376722,
"autogenerated": false,
"ratio": 4.217243975903615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0028301062554938784,
"num_lines": 661
} |
"""Adapted from Nematode: https://github.com/demelin/nematode """
import tensorflow as tf
from transformer_layers import \
ProcessingLayer, \
FeedForwardNetwork
from transformer_attention_modules import MultiHeadAttentionLayer
# from attention_modules import SingleHeadAttentionLayer, FineGrainedAttentionLayer
class AttentionBlock(object):
    """ A single attention block ('sub-layer' in the transformer paper): a multi-head attention
    layer sandwiched between a pre-processing layer and a post-processing layer. """

    def __init__(self,
                 config,
                 float_dtype,
                 self_attention,
                 training,
                 from_rnn=False,
                 tie_attention=False):
        # Set attributes
        self.self_attention = self_attention
        # Pick the name used for the attention sub-layer's variable scopes
        if tie_attention:
            attn_name = 'tied_attn'
        else:
            attn_name = 'self_attn' if self_attention else 'cross_attn'
        # Memories coming from an RNN encoder are twice as wide -- presumably concatenated
        # bi-directional states; TODO confirm against the encoder implementation
        memory_size = config.state_size * 2 if from_rnn else config.state_size

        # Build layers: pre-processing (layer norm), attention, post-processing (residual + dropout)
        self.pre_attn = ProcessingLayer(config.state_size,
                                        use_layer_norm=True,
                                        dropout_rate=0.,
                                        training=training,
                                        name='pre_{:s}_sublayer'.format(attn_name))
        self.attn = MultiHeadAttentionLayer(memory_size,
                                            config.state_size,
                                            config.state_size,
                                            config.state_size,
                                            config.state_size,
                                            config.transformer_num_heads,
                                            float_dtype,
                                            dropout_attn=config.transformer_dropout_attn,
                                            training=training,
                                            name='{:s}_sublayer'.format(attn_name))
        self.post_attn = ProcessingLayer(config.state_size,
                                         use_layer_norm=False,
                                         dropout_rate=config.transformer_dropout_residual,
                                         training=training,
                                         name='post_{:s}_sublayer'.format(attn_name))

    def forward(self, inputs, memory_context, attn_mask, layer_memories=None):
        """ Propagates input data through the block. """
        if not self.self_attention:
            # Cross-attention cannot be computed without encoder memories
            assert (memory_context is not None), \
                'Encoder memories have to be provided for encoder-decoder attention computation.'
        normed_inputs = self.pre_attn.forward(inputs)
        attn_outputs, layer_memories = self.attn.forward(normed_inputs, memory_context, attn_mask, layer_memories)
        # Residual connection back to the un-normalized block inputs
        block_out = self.post_attn.forward(attn_outputs, residual_inputs=inputs)
        return block_out, layer_memories
class FFNBlock(object):
    """ A single feed-forward network block ('sub-layer' in the transformer paper): a feed-forward
    network sandwiched between a pre-processing layer and a post-processing layer. """

    def __init__(self,
                 config,
                 ffn_dims,
                 float_dtype,
                 is_final,
                 training):
        # Set attributes
        self.is_final = is_final

        # Build layers: pre-processing (layer norm), FFN, post-processing (residual + dropout)
        self.pre_ffn = ProcessingLayer(config.state_size,
                                       use_layer_norm=True,
                                       dropout_rate=0.,
                                       training=training,
                                       name='pre_ffn_sublayer')
        self.ffn = FeedForwardNetwork(ffn_dims,
                                      float_dtype,
                                      use_bias=True,
                                      activation=tf.nn.relu,
                                      use_layer_norm=False,
                                      dropout_rate=config.transformer_dropout_relu,
                                      training=training,
                                      name='ffn_sublayer')
        self.post_ffn = ProcessingLayer(config.state_size,
                                        use_layer_norm=False,
                                        dropout_rate=config.transformer_dropout_residual,
                                        training=training,
                                        name='post_ffn_sublayer')
        # The final block of the stack additionally normalizes its output
        if is_final:
            self.pre_final = ProcessingLayer(config.state_size,
                                             use_layer_norm=True,
                                             dropout_rate=0.,
                                             training=training,
                                             name='final_transform')

    def forward(self, inputs):
        """ Propagates input data through the block. """
        normed_inputs = self.pre_ffn.forward(inputs)
        transformed = self.ffn.forward(normed_inputs)
        # Residual connection back to the un-normalized block inputs
        block_out = self.post_ffn.forward(transformed, residual_inputs=inputs)
        if not self.is_final:
            return block_out
        return self.pre_final.forward(block_out)
| {
"repo_name": "rsennrich/nematus",
"path": "nematus/transformer_blocks.py",
"copies": "1",
"size": "5479",
"license": "bsd-3-clause",
"hash": -8484212468743595000,
"line_mean": 44.2809917355,
"line_max": 119,
"alpha_frac": 0.4822047819,
"autogenerated": false,
"ratio": 5.008226691042047,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5990431472942047,
"avg_score": null,
"num_lines": null
} |
"""Adapted from Nematode: https://github.com/demelin/nematode """
# TODO: Add an attention visualization component - very important (~easy)
""" Layer implementations. """
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.init_ops import glorot_uniform_initializer
def matmul_nd(nd_tensor, matrix):
    """ Performs matrix multiplication for n-dimensional inputs. """
    # Collapse all leading dimensions into one, multiply in 2D, then restore them
    in_dims = get_shape_list(nd_tensor)
    out_cols = get_shape_list(matrix)[-1]
    lead_dims = in_dims[:-1]
    flattened = tf.reshape(nd_tensor, [tf.reduce_prod(lead_dims), in_dims[-1]])
    product = tf.matmul(flattened, matrix)
    return tf.reshape(product, lead_dims + [out_cols])
def get_shape_list(inputs):
    """ Returns a list of input dimensions, statically where possible; adopted from the tensor2tensor library. """
    inputs = tf.convert_to_tensor(inputs)
    # With a fully unknown rank, only the dynamic shape tensor can be returned
    if inputs.get_shape().dims is None:
        return tf.shape(inputs)
    # Substitute a dynamic dimension wherever the static one is unspecified
    dynamic_shape = tf.shape(inputs)
    return [static_dim if static_dim is not None else dynamic_shape[axis]
            for axis, static_dim in enumerate(inputs.get_shape().as_list())]
def get_right_context_mask(time_steps):
    """ Generates the mask preventing the decoder from attending to unseen positions. """
    # Lower-triangular matrix: position i may attend to positions <= i only
    visible = tf.matrix_band_part(tf.ones([time_steps, time_steps]), -1, 0)
    # Expand to 4D so the mask broadcasts against the attention weights
    visible = tf.expand_dims(tf.expand_dims(visible, 0), 0)
    # Illegal connections get a large negative value (effectively -inf after the
    # softmax); visible connections get exactly 0 so no NaNs are introduced
    return (visible - 1.0) * 1e9
def get_positional_signal(time_steps, depth, float_dtype, min_timescale=1, max_timescale=10000):
    """ Generates a series of sinusoid functions capable of expressing the relative and absolute position
    of a token within a longer sequence.

    Returns:
        A [1, time_steps, depth] tensor of positional encodings to be added to embeddings.
    """
    # Convert to floats
    min_timescale = tf.cast(min_timescale, float_dtype)
    max_timescale = tf.cast(max_timescale, float_dtype)
    # Obtain timing signal via sinusoids
    num_timescales = tf.cast(depth // 2, float_dtype)
    log_timescale_increment = tf.log(max_timescale / min_timescale) / (num_timescales - tf.cast(1.0, float_dtype))
    # Introduce an offset between individual timescales to obtain different frequencies
    incremented_timescales = \
        min_timescale * tf.exp(tf.range(num_timescales, dtype=float_dtype) * -log_timescale_increment)
    # Assign the designated number of time-scales per token position
    positions = tf.cast(tf.range(time_steps), float_dtype)
    scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(incremented_timescales, 0)
    positional_signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
    # Pad the signal tensor with a zero column for odd depths; tf.pad returns a
    # new tensor, so the result must be re-assigned (the previous code discarded
    # it, which broke the reshape below whenever depth was odd)
    pad_size = depth % 2
    if pad_size != 0:
        positional_signal = tf.pad(positional_signal, [[0, 0], [0, pad_size]])
    # Reshape the signal to make it compatible with the target tensor
    positional_signal = tf.reshape(positional_signal, [1, time_steps, depth])
    return positional_signal
class EmbeddingLayer(object):
    """ Looks up embeddings for the specified token sequence in the learned embedding table; allows for easy weight
    scaling and tying. """
    def __init__(self, vocabulary_size, embedding_size, hidden_size, float_dtype, name):
        """ Creates the embedding table plus its transpose, which ties input embeddings to the output projection. """
        # Set arguments
        self.vocabulary_size = vocabulary_size
        self.hidden_size = hidden_size
        self.float_dtype = float_dtype
        self.name = name
        # Create embedding matrix and its transposes
        with tf.variable_scope(self.name):
            self.embedding_table = tf.get_variable(name='embedding_table',
                                                   shape=[vocabulary_size, embedding_size],
                                                   dtype=float_dtype,
                                                   initializer=glorot_uniform_initializer(),
                                                   trainable=True)
            # Transposing the table keeps the pre-softmax projection weight-tied
            # to the embeddings (both gradients flow into embedding_table)
            self.projection_matrix = tf.transpose(self.embedding_table, name='vocab_projection_matrix')
    def embed(self, one_hot_inputs):
        """ Embeds one-hot-vectors corresponding to input tokens. """
        # NOTE(review): despite the name, embedding_lookup consumes integer token
        # ids (indices), not one-hot vectors — confirm against callers
        embeddings = tf.nn.embedding_lookup(self.embedding_table, one_hot_inputs)
        # Apply transformer-specific scaling
        embeddings *= tf.sqrt(tf.cast(self.hidden_size, self.float_dtype))
        return embeddings
    def project(self, dec_out):
        """ Projects the transformer decoder's output into the vocabulary space. """
        projections = matmul_nd(dec_out, self.projection_matrix)
        return projections
    def get_embedding_table(self):
        """ Recovers the learned embedding table. """
        return self.embedding_table
    def get_projection_matrix(self):
        """ Recovers the pre-softmax projection matrix which is the inverse of the embedding table. """
        return self.projection_matrix
    def get_vocab_size(self):
        """ Recovers the vocabulary size. """
        return self.vocabulary_size
class LayerNormLayer(object):
    """ Performs layer normalization by computing the mean and variance used for normalization from all of the
    summed inputs to neurons in a layer. """
    def __init__(self, dims_out, name=None, eps=1e-5):
        scope_name = 'layer_norm' if name is None else '{:s}_layer_norm'.format(name)
        with tf.variable_scope(scope_name, values=[dims_out]):
            # Learned shift, initialized to zero (identity transform at start)
            self.offset = tf.get_variable(name='offset',
                                          shape=[dims_out],
                                          dtype=tf.float32,
                                          initializer=tf.zeros_initializer())
            # Learned gain, initialized to one
            self.scale = tf.get_variable(name='scale',
                                         shape=[dims_out],
                                         dtype=tf.float32,
                                         initializer=tf.ones_initializer())
            self.eps = tf.constant(eps)
    def forward(self, inputs):
        """ Standardizes inputs over the final axis, then applies the learned gain and shift. """
        mean, variance = tf.nn.moments(inputs, axes=-1, keep_dims=True)
        standardized = tf.math.divide(tf.subtract(inputs, mean),
                                      tf.sqrt(tf.add(variance, self.eps)))
        return tf.add(tf.multiply(self.scale, standardized), self.offset)
class ProcessingLayer(object):
    """ Optionally applies residual connections, layer normalization, or dropout. """
    def __init__(self, out_size, use_layer_norm, dropout_rate, training, name):
        self.use_layer_norm = use_layer_norm
        self.dropout_rate = dropout_rate
        self.training = training
        self.name = name
        # Create normalization parameters eagerly so forward() can re-enter the
        # variable scope with reuse=True
        with tf.variable_scope(self.name):
            if use_layer_norm:
                self.layer_norm = LayerNormLayer(out_size)
    def forward(self, inputs, residual_inputs=None):
        """ Applies dropout, the residual connection, and normalization, in that order. """
        with tf.variable_scope(self.name, values=[inputs, residual_inputs], reuse=True):
            outputs = inputs
            # Dropout is only active while training
            if self.dropout_rate > 0.0:
                outputs = tf.layers.dropout(inputs, rate=self.dropout_rate, training=self.training)
            if residual_inputs is not None:
                outputs = outputs + residual_inputs
            if self.use_layer_norm:
                outputs = self.layer_norm.forward(outputs)
            return outputs
class FeedForwardLayer(object):
    """ A single fully-connected feed-forward layer using standard dropout. """
    def __init__(self,
                 in_size,
                 out_size,
                 float_dtype,
                 dropout_rate,
                 activation,
                 use_bias,
                 use_layer_norm,
                 training,
                 name):
        """ Creates the dense layer's weight/bias variables and optional layer normalization. """
        # Set attributes
        self.in_size = in_size
        self.out_size = out_size
        self.dropout_rate = dropout_rate
        self.activation = activation
        self.use_bias = use_bias
        self.training = training
        self.name = name
        with tf.variable_scope(self.name):
            # Set up layer normalization
            if use_layer_norm:
                self.layer_norm_layer = LayerNormLayer(out_size)
            else:
                self.layer_norm_layer = None
            # Define parameters
            weights_shape = [in_size, out_size] if out_size is not None else [in_size]
            self.weights = tf.get_variable(name='dense_layer_weights',
                                           shape=weights_shape,
                                           dtype=float_dtype,
                                           initializer=glorot_uniform_initializer(),
                                           trainable=True)
            if use_bias:
                biases_shape = [out_size] if out_size is not None else [in_size]
                self.biases = tf.get_variable(name='dense_layer_biases',
                                              shape=biases_shape,
                                              dtype=float_dtype,
                                              initializer=tf.zeros_initializer(),
                                              trainable=True)
    def forward(self, inputs):
        """ Applies dropout, the dense transform, the activation, and optional normalization. """
        with tf.variable_scope(self.name, values=[inputs]):
            # Optionally apply dropout
            if self.dropout_rate > 0.0:
                inputs = tf.layers.dropout(inputs, rate=self.dropout_rate, training=self.training)
            # Feed through a dense layer
            outputs = matmul_nd(inputs, self.weights)
            if self.use_bias:
                outputs += self.biases
            if self.activation is not None:
                outputs = self.activation(outputs)
            # Optionally apply layer normalization
            if self.layer_norm_layer is not None:
                # BUG FIX: LayerNormLayer instances are not callable; their
                # normalization is exposed via the forward() method
                outputs = self.layer_norm_layer.forward(outputs)
            return outputs
class FeedForwardNetwork(object):
    """ A fully connected feed-forward network that is applied to each position separately and identically. """
    def __init__(self,
                 layer_dims,
                 float_dtype,
                 use_bias,
                 activation,
                 use_layer_norm,
                 dropout_rate,
                 training,
                 name=None):
        """ Builds the network as a stack of FeedForwardLayer objects.

        Args:
            layer_dims: list of per-layer output sizes; the first layer's input
                size is taken from layer_dims[-1] (see _initialize_layers)
        """
        # Set attributes
        self.layer_dims = layer_dims
        self.float_dtype = float_dtype
        self.use_bias = use_bias
        self.activation = activation
        self.use_layer_norm = use_layer_norm
        self.dropout_rate = dropout_rate
        self.training = training
        self.name = name
        # Container for network layers
        self.layers = list()
        self._initialize_layers()
    def _initialize_layers(self):
        """ Builds the network from fully-connected layers. """
        num_layers = len(self.layer_dims)
        for layer_id in range(num_layers):
            # Assure that no non-linearity or dropout is applied at the final layer
            if layer_id == num_layers - 1:
                layer_activation = None
                dropout_rate = 0.0
            else:
                layer_activation = self.activation
                dropout_rate = self.dropout_rate
            # Add layer
            if layer_id == 0:
                input_dims = self.layer_dims[-1]  # input and output dimensions of the sub-layer are identical
            else:
                input_dims = self.layer_dims[layer_id - 1]
            self.layers.append(FeedForwardLayer(input_dims,
                                                self.layer_dims[layer_id],
                                                self.float_dtype,
                                                dropout_rate=dropout_rate,
                                                activation=layer_activation,
                                                use_bias=self.use_bias,
                                                use_layer_norm=self.use_layer_norm,
                                                training=self.training,
                                                name='ff_layer_{:d}'.format(layer_id + 1)))
    def forward(self, inputs):
        """ Propagates input data through the specified layers. """
        with tf.variable_scope(self.name, values=[inputs]):
            # Layers are applied sequentially; each consumes the previous output
            for layer in self.layers:
                inputs = layer.forward(inputs)
            return inputs
class PReLU(object):
    """ Implements the adaptive Parametric Rectified Linear Unit activation function. """
    def __init__(self,
                 in_size,
                 initial_slope=1.0,
                 name=None):
        with tf.variable_scope(name, default_name='PReLu'):
            # One learnable slope per input feature, applied to negative activations
            initial_value = initial_slope * np.ones((in_size,)).astype('float32')
            self.slope = tf.Variable(initial_value, name='slope')
    def forward(self, inputs):
        """ Identity for positive inputs; negative inputs are scaled by the learned slope. """
        positive = tf.nn.relu(inputs)
        negative = inputs - positive
        return positive + self.slope * negative
class MaskedCrossEntropy(object):
    """ Implements the cross-entropy loss with optionally applied label smoothing for better model generalization. """
    def __init__(self, vocab_size, label_smoothing_discount, int_dtype, float_dtype, time_major, name=None):
        """ Stores the loss configuration; no TensorFlow ops are created here. """
        # Set attributes
        self.vocab_size = vocab_size
        self.label_smoothing_discount = label_smoothing_discount
        self.int_dtype = int_dtype
        self.float_dtype = float_dtype
        self.time_dim = int(not time_major)  # i.e. 0 is time_major, 1 if batch_major
        self.name = name
    def _get_smoothing_parameters(self):
        """ Calculates the confidence values used for label smoothing application. """
        # Assign low confidence, i.e. the label smoothing discount value, to all non-true labels
        one_out_vocab = tf.cast(self.vocab_size - 1, self.float_dtype)
        # For cross-entropy, each row of the labels matrix must be a valid probability distribution
        low_confidence = self.label_smoothing_discount / one_out_vocab
        high_confidence = 1.0 - self.label_smoothing_discount
        # Normalizing constant for better readability, which is the best cross-entropy value with soft targets
        # Has no impact on training
        normalizing_factor = -(1.0 * high_confidence * tf.log(high_confidence)
                               + one_out_vocab * low_confidence * tf.log(low_confidence + 1e-20))
        return high_confidence, low_confidence, normalizing_factor
    def forward(self, logits, targets, target_mask, training):
        """ Computes the masked cross-entropy loss.

        Returns:
            masked_loss: per-token loss with padding positions zeroed by target_mask
            sentence_loss: mean token loss per sentence
            batch_loss: mean sentence loss over the batch
        """
        with tf.name_scope(self.name, values=[logits, targets, target_mask]):
            # Get smoothing parameters (no smoothing/ normalization at test time)
            # NOTE: `training` is used inside tf.logical_and, so it is expected
            # to be a boolean tensor, not a Python bool
            high_confidence, low_confidence, normalizing_factor = \
                tf.cond(tf.logical_and(training, tf.greater(self.label_smoothing_discount, 0.0)),
                        self._get_smoothing_parameters,
                        lambda: (1.0, 0.0, 0.0))
            # If necessary, pad the label and the label-mask to match the length of decoder output
            # Not sure if that's a sensible thing to do
            targets_shape = tf.shape(targets)
            logits_shape = tf.shape(logits)
            targets_length = targets_shape[self.time_dim]
            logits_length = logits_shape[self.time_dim]
            def _get_pad_shape(shape_to_pad, shape_to_match):
                """ Calculates the shape of the padding to be applied to the logits or targets. """
                time_steps_to_pad = shape_to_match[self.time_dim] - shape_to_pad[self.time_dim]
                if self.time_dim == 0:
                    pad_shape = [time_steps_to_pad, shape_to_pad[1]]
                else:
                    pad_shape = [shape_to_pad[0], time_steps_to_pad]
                return pad_shape
            def _pad_targets(targets, target_mask, logits):
                """ Pads the targets to match the size of the model-generated logits. """
                pad_shape = _get_pad_shape(targets_shape, logits_shape)
                targets = tf.concat([targets, tf.zeros(pad_shape, dtype=self.int_dtype)], axis=self.time_dim)
                target_mask = tf.concat([target_mask, tf.zeros(pad_shape, dtype=self.float_dtype)], axis=self.time_dim)
                return targets, target_mask, logits
            def _pad_logits(targets, target_mask, logits):
                """ Pads the logits to match the size of the ground-truth targets. """
                pad_shape = _get_pad_shape(logits_shape, targets_shape)
                logits = tf.concat([logits, tf.zeros(pad_shape + [logits_shape[-1]], dtype=self.float_dtype)],
                                   axis=self.time_dim)
                return targets, target_mask, logits
            # For teacher-forcing with RNN models
            # Pad whichever of targets/logits is shorter along the time axis
            targets, target_mask, logits = tf.cond(tf.equal(targets_length, logits_length),
                                                   lambda: (targets, target_mask, logits),
                                                   lambda: tf.cond(tf.less(targets_length, logits_length),
                                                                   lambda: _pad_targets(targets, target_mask, logits),
                                                                   lambda: _pad_logits(targets, target_mask, logits)))
            # Project and optionally smooth target token ids
            projected_targets = tf.one_hot(targets,
                                           depth=self.vocab_size,
                                           on_value=high_confidence,
                                           off_value=low_confidence,
                                           dtype=self.float_dtype)
            # Compute token-level loss
            flat_logits = tf.reshape(logits, [-1, self.vocab_size])
            flat_targets = tf.reshape(projected_targets, [-1, self.vocab_size])
            flat_loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=flat_logits, labels=flat_targets)
            flat_normalized_loss = flat_loss - normalizing_factor
            # Compute sentence- and batch-level losses (i.e. mean token-loss per sentence/ batch)
            normalized_loss = tf.reshape(flat_normalized_loss, tf.shape(targets))
            masked_loss = normalized_loss * target_mask
            sentence_lengths = tf.reduce_sum(target_mask, axis=self.time_dim, keepdims=False)
            sentence_loss = tf.math.divide(tf.reduce_sum(masked_loss, axis=self.time_dim, keepdims=False), sentence_lengths)
            batch_loss = tf.reduce_mean(sentence_loss, keepdims=False)
        return masked_loss, sentence_loss, batch_loss
| {
"repo_name": "rsennrich/nematus",
"path": "nematus/transformer_layers.py",
"copies": "1",
"size": "19406",
"license": "bsd-3-clause",
"hash": 3109951363511687700,
"line_mean": 45.9878934625,
"line_max": 124,
"alpha_frac": 0.5767803772,
"autogenerated": false,
"ratio": 4.308614564831261,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5385394942031261,
"avg_score": null,
"num_lines": null
} |
"""Adapted from Nematode: https://github.com/demelin/nematode """
# TODO: Add an attention visualization component - very important (~easy)
""" Layer implementations. """
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.init_ops import glorot_uniform_initializer
# ModuleNotFoundError is new in 3.6; older versions will throw SystemError
if sys.version_info < (3, 6):
ModuleNotFoundError = SystemError
try:
from . import tf_utils
except (ModuleNotFoundError, ImportError) as e:
import tf_utils
def matmul_nd(nd_tensor, matrix):
    """ Performs matrix multiplication for n-dimensional inputs. """
    # Fold all leading dimensions into a single one, multiply in 2D, then unfold
    tensor_dims = tf_utils.get_shape_list(nd_tensor)
    out_cols = tf_utils.get_shape_list(matrix)[-1]
    lead_dims = tensor_dims[:-1]
    tensor_2d = tf.reshape(nd_tensor, [tf.reduce_prod(input_tensor=lead_dims), tensor_dims[-1]])
    product = tf.matmul(tensor_2d, matrix)
    return tf.reshape(product, lead_dims + [out_cols])
def get_right_context_mask(time_steps):
    """ Generates the mask preventing the decoder from attending to unseen positions. """
    # Lower-triangular matrix: position i may attend to positions <= i only
    visible = tf.linalg.band_part(tf.ones([time_steps, time_steps]), -1, 0)
    # Expand to 4D so the mask broadcasts against the attention weights
    visible = tf.expand_dims(tf.expand_dims(visible, 0), 0)
    # Illegal connections get a large negative value (effectively -inf after the
    # softmax); visible connections get exactly 0 so no NaNs are introduced
    return (visible - 1.0) * 1e9
def get_positional_signal(time_steps, depth, float_dtype, min_timescale=1, max_timescale=10000):
    """ Generates a series of sinusoid functions capable of expressing the relative and absolute position
    of a token within a longer sequence.

    Returns:
        A [1, time_steps, depth] tensor of positional encodings to be added to embeddings.
    """
    # Convert to floats
    min_timescale = tf.cast(min_timescale, float_dtype)
    max_timescale = tf.cast(max_timescale, float_dtype)
    # Obtain timing signal via sinusoids
    num_timescales = tf.cast(depth // 2, float_dtype)
    log_timescale_increment = tf.math.log(max_timescale / min_timescale) / (num_timescales - tf.cast(1.0, float_dtype))
    # Introduce an offset between individual timescales to obtain different frequencies
    incremented_timescales = \
        min_timescale * tf.exp(tf.range(num_timescales, dtype=float_dtype) * -log_timescale_increment)
    # Assign the designated number of time-scales per token position
    positions = tf.cast(tf.range(time_steps), float_dtype)
    scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(incremented_timescales, 0)
    positional_signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
    # Pad the signal tensor with a zero column for odd depths; tf.pad returns a
    # new tensor, so the result must be re-assigned (the previous code discarded
    # it, which broke the reshape below whenever depth was odd)
    pad_size = depth % 2
    if pad_size != 0:
        positional_signal = tf.pad(tensor=positional_signal, paddings=[[0, 0], [0, pad_size]])
    # Reshape the signal to make it compatible with the target tensor
    positional_signal = tf.reshape(positional_signal, [1, time_steps, depth])
    return positional_signal
class EmbeddingLayer(object):
    """ Looks up embeddings for the specified token sequence in the learned embedding table; allows for easy weight
    scaling and tying. """
    def __init__(self, vocabulary_size, embedding_size, hidden_size, float_dtype, name):
        """ Creates the embedding table plus its transpose, which ties input embeddings to the output projection. """
        # Set arguments
        self.vocabulary_size = vocabulary_size
        self.hidden_size = hidden_size
        self.float_dtype = float_dtype
        self.name = name
        # Create embedding matrix and its transposes
        with tf.compat.v1.variable_scope(self.name):
            self.embedding_table = tf.compat.v1.get_variable(name='embedding_table',
                                                             shape=[vocabulary_size, embedding_size],
                                                             dtype=float_dtype,
                                                             initializer=glorot_uniform_initializer(),
                                                             trainable=True)
            # Transposing the table keeps the pre-softmax projection weight-tied
            # to the embeddings (both gradients flow into embedding_table)
            self.projection_matrix = tf.transpose(a=self.embedding_table, name='vocab_projection_matrix')
    def embed(self, one_hot_inputs):
        """ Embeds one-hot-vectors corresponding to input tokens. """
        # NOTE(review): despite the name, embedding_lookup consumes integer token
        # ids (indices), not one-hot vectors — confirm against callers
        embeddings = tf.nn.embedding_lookup(params=self.embedding_table, ids=one_hot_inputs)
        # Apply transformer-specific scaling
        embeddings *= tf.sqrt(tf.cast(self.hidden_size, self.float_dtype))
        return embeddings
    def project(self, dec_out):
        """ Projects the transformer decoder's output into the vocabulary space. """
        projections = matmul_nd(dec_out, self.projection_matrix)
        return projections
    def get_embedding_table(self):
        """ Recovers the learned embedding table. """
        return self.embedding_table
    def get_projection_matrix(self):
        """ Recovers the pre-softmax projection matrix which is the inverse of the embedding table. """
        return self.projection_matrix
    def get_vocab_size(self):
        """ Recovers the vocabulary size. """
        return self.vocabulary_size
class LayerNormLayer(object):
    """ Performs layer normalization by computing the mean and variance used for normalization from all of the
    summed inputs to neurons in a layer. """
    def __init__(self, dims_out, name=None, eps=1e-5):
        """ Creates the learned per-feature gain ('scale') and shift ('offset') parameters. """
        if name is None:
            name = 'layer_norm'
        else:
            name = '{:s}_layer_norm'.format(name)
        with tf.compat.v1.variable_scope(name, values=[dims_out]):
            # Learned shift, initialized to zero (identity transform at start)
            self.offset = tf.compat.v1.get_variable(name='offset',
                                                    shape=[dims_out],
                                                    dtype=tf.float32,
                                                    initializer=tf.zeros_initializer())
            # Learned gain, initialized to one
            self.scale = tf.compat.v1.get_variable(name='scale',
                                                   shape=[dims_out],
                                                   dtype=tf.float32,
                                                   initializer=tf.compat.v1.ones_initializer())
            # eps guards against division by zero for constant inputs
            self.eps = tf.constant(eps)
    def forward(self, inputs):
        """ Standardizes inputs over the final axis, then applies the learned gain and shift. """
        layer_mean, layer_var = tf.nn.moments(x=inputs, axes=-1, keepdims=True)
        normalized = tf.add(
            tf.multiply(self.scale, tf.math.divide(tf.subtract(inputs, layer_mean),
                                                   tf.sqrt(tf.add(layer_var, self.eps)))),
            self.offset)
        return normalized
class RMSNormLayer(object):
    """ Performs root mean square layer normalization by computing root mean square of a layer and normalizing by this, thus re-scaling the layer.
    In contrast to layer normalization, no mean re-centering is performed, making this computationally more efficient."""
    def __init__(self, dims_out, name=None, eps=1e-5):
        scope_name = 'rms_norm' if name is None else '{:s}_rms_norm'.format(name)
        with tf.compat.v1.variable_scope(scope_name, values=[dims_out]):
            # Learned gain only; RMSNorm has no offset since it performs no re-centering
            self.scale = tf.compat.v1.get_variable(name='scale',
                                                   shape=[dims_out],
                                                   dtype=tf.float32,
                                                   initializer=tf.compat.v1.ones_initializer())
            # eps guards against division by zero for all-zero inputs
            self.eps = tf.constant(eps)
    def forward(self, inputs):
        """ Rescales inputs by the reciprocal of their root mean square over the last axis. """
        mean_square = tf.reduce_mean(inputs ** 2, axis=-1, keepdims=True)
        return self.scale * inputs * tf.math.rsqrt(mean_square + self.eps)
class ProcessingLayer(object):
    """ Optionally applies residual connections, layer normalization, or dropout.

    Unlike the TF1 variant, `use_layer_norm` here is either falsy (no
    normalization) or a normalization layer *class* (e.g. LayerNormLayer or
    RMSNormLayer) that gets instantiated with the layer's output size.
    """
    def __init__(self, out_size, use_layer_norm, dropout_rate, training, name):
        # Set attributes
        self.use_layer_norm = use_layer_norm
        self.training = training
        self.name = name
        with tf.compat.v1.variable_scope(self.name):
            # Initialize layer normalization, if specified
            if use_layer_norm is not False and use_layer_norm is not None:
                self.layer_norm = use_layer_norm(out_size)
            if dropout_rate > 0:
                self.dropout = tf.keras.layers.Dropout(rate=dropout_rate)
            else:
                # No dropout layer is created when the rate is zero
                self.dropout = None
    def forward(self, inputs, residual_inputs=None):
        """ Applies dropout, the residual connection, and normalization, in that order. """
        with tf.compat.v1.variable_scope(self.name, values=[inputs, residual_inputs], reuse=True):
            outputs = inputs
            # Apply dropout (only active while training)
            if self.dropout is not None:
                outputs = self.dropout(inputs, training=self.training)
            # Apply residual connections
            if residual_inputs is not None:
                outputs = outputs + residual_inputs
            # Apply layer normalization
            if self.use_layer_norm:
                outputs = self.layer_norm.forward(outputs)
            return outputs
class FeedForwardLayer(object):
    """ A single fully-connected feed-forward layer using standard dropout.

    `use_layer_norm` is either falsy or a normalization layer class (e.g.
    LayerNormLayer or RMSNormLayer) instantiated with the output size.
    """
    def __init__(self,
                 in_size,
                 out_size,
                 float_dtype,
                 dropout_rate,
                 activation,
                 use_bias,
                 use_layer_norm,
                 training,
                 name):
        """ Creates the dense layer's weight/bias variables, optional normalization, and dropout. """
        # Set attributes
        self.in_size = in_size
        self.out_size = out_size
        self.dropout_rate = dropout_rate
        self.activation = activation
        self.use_bias = use_bias
        self.training = training
        self.name = name
        with tf.compat.v1.variable_scope(self.name):
            # Set up layer normalization
            if use_layer_norm is not False and use_layer_norm is not None:
                self.layer_norm_layer = use_layer_norm(out_size)
            else:
                self.layer_norm_layer = None
            if dropout_rate > 0:
                self.dropout = tf.keras.layers.Dropout(rate=dropout_rate)
            else:
                self.dropout = None
            # Define parameters
            weights_shape = [in_size, out_size] if out_size is not None else [in_size]
            self.weights = tf.compat.v1.get_variable(name='dense_layer_weights',
                                                     shape=weights_shape,
                                                     dtype=float_dtype,
                                                     initializer=glorot_uniform_initializer(),
                                                     trainable=True)
            if use_bias:
                biases_shape = [out_size] if out_size is not None else [in_size]
                self.biases = tf.compat.v1.get_variable(name='dense_layer_biases',
                                                        shape=biases_shape,
                                                        dtype=float_dtype,
                                                        initializer=tf.zeros_initializer(),
                                                        trainable=True)
    def forward(self, inputs):
        """ Applies dropout, the dense transform, the activation, and optional normalization. """
        with tf.compat.v1.variable_scope(self.name, values=[inputs]):
            # Optionally apply dropout
            if self.dropout is not None:
                inputs = self.dropout(inputs, training=self.training)
            # Feed through a dense layer
            outputs = matmul_nd(inputs, self.weights)
            if self.use_bias:
                outputs += self.biases
            if self.activation is not None:
                outputs = self.activation(outputs)
            # Optionally apply layer normalization
            if self.layer_norm_layer is not None:
                # BUG FIX: LayerNormLayer/RMSNormLayer instances are not callable;
                # their normalization is exposed via the forward() method
                outputs = self.layer_norm_layer.forward(outputs)
            return outputs
class FeedForwardNetwork(object):
    """ A fully connected feed-forward network that is applied to each position separately and identically. """
    def __init__(self,
                 layer_dims,
                 float_dtype,
                 use_bias,
                 activation,
                 use_layer_norm,
                 dropout_rate,
                 training,
                 name=None):
        """ Builds the network as a stack of FeedForwardLayer objects.

        Args:
            layer_dims: list of per-layer output sizes; the first layer's input
                size is taken from layer_dims[-1] (see _initialize_layers)
        """
        # Set attributes
        self.layer_dims = layer_dims
        self.float_dtype = float_dtype
        self.use_bias = use_bias
        self.activation = activation
        self.use_layer_norm = use_layer_norm
        self.dropout_rate = dropout_rate
        self.training = training
        self.name = name
        # Container for network layers
        self.layers = list()
        self._initialize_layers()
    def _initialize_layers(self):
        """ Builds the network from fully-connected layers. """
        num_layers = len(self.layer_dims)
        for layer_id in range(num_layers):
            # Assure that no non-linearity or dropout is applied at the final layer
            if layer_id == num_layers - 1:
                layer_activation = None
                dropout_rate = 0.0
            else:
                layer_activation = self.activation
                dropout_rate = self.dropout_rate
            # Add layer
            if layer_id == 0:
                input_dims = self.layer_dims[-1]  # input and output dimensions of the sub-layer are identical
            else:
                input_dims = self.layer_dims[layer_id - 1]
            self.layers.append(FeedForwardLayer(input_dims,
                                                self.layer_dims[layer_id],
                                                self.float_dtype,
                                                dropout_rate=dropout_rate,
                                                activation=layer_activation,
                                                use_bias=self.use_bias,
                                                use_layer_norm=self.use_layer_norm,
                                                training=self.training,
                                                name='ff_layer_{:d}'.format(layer_id + 1)))
    def forward(self, inputs):
        """ Propagates input data through the specified layers. """
        with tf.compat.v1.variable_scope(self.name, values=[inputs]):
            # Layers are applied sequentially; each consumes the previous output
            for layer in self.layers:
                inputs = layer.forward(inputs)
            return inputs
class PReLU(object):
    """ Implements the adaptive Parametric Rectified Linear Unit activation function. """
    def __init__(self,
                 in_size,
                 initial_slope=1.0,
                 name=None):
        with tf.compat.v1.variable_scope(name, default_name='PReLu'):
            # One learnable slope per input feature, applied to negative activations
            initial_value = initial_slope * np.ones((in_size,)).astype('float32')
            self.slope = tf.Variable(initial_value, name='slope')
    def forward(self, inputs):
        """ Identity for positive inputs; negative inputs are scaled by the learned slope. """
        positive = tf.nn.relu(inputs)
        negative = inputs - positive
        return positive + self.slope * negative
class MaskedCrossEntropy(object):
    """ Implements the cross-entropy loss with optionally applied label smoothing for better model generalization. """
    def __init__(self, vocab_size, label_smoothing_discount, int_dtype, float_dtype, time_major, name=None):
        """ Stores the loss configuration; no TensorFlow ops are created here. """
        # Set attributes
        self.vocab_size = vocab_size
        self.label_smoothing_discount = label_smoothing_discount
        self.int_dtype = int_dtype
        self.float_dtype = float_dtype
        self.time_dim = int(not time_major)  # i.e. 0 is time_major, 1 if batch_major
        self.name = name
    def _get_smoothing_parameters(self):
        """ Calculates the confidence values used for label smoothing application. """
        # Assign low confidence, i.e. the label smoothing discount value, to all non-true labels
        one_out_vocab = tf.cast(self.vocab_size - 1, self.float_dtype)
        # For cross-entropy, each row of the labels matrix must be a valid probability distribution
        low_confidence = self.label_smoothing_discount / one_out_vocab
        high_confidence = 1.0 - self.label_smoothing_discount
        # Normalizing constant for better readability, which is the best cross-entropy value with soft targets
        # Has no impact on training
        normalizing_factor = -(1.0 * high_confidence * tf.math.log(high_confidence)
                               + one_out_vocab * low_confidence * tf.math.log(low_confidence + 1e-20))
        return high_confidence, low_confidence, normalizing_factor
    def forward(self, logits, targets, target_mask, training):
        """ Computes the masked cross-entropy loss.

        Returns:
            masked_loss: per-token loss with padding positions zeroed by target_mask
            sentence_loss: mean token loss per sentence
            batch_loss: mean sentence loss over the batch
        """
        with tf.compat.v1.name_scope(self.name, values=[logits, targets, target_mask]):
            # Get smoothing parameters (no smoothing/ normalization at test time)
            # NOTE: `training` is used inside tf.logical_and, so it is expected
            # to be a boolean tensor, not a Python bool
            high_confidence, low_confidence, normalizing_factor = \
                tf.cond(pred=tf.logical_and(training, tf.greater(self.label_smoothing_discount, 0.0)),
                        true_fn=self._get_smoothing_parameters,
                        false_fn=lambda: (1.0, 0.0, 0.0))
            # If necessary, pad the label and the label-mask to match the length of decoder output
            # Not sure if that's a sensible thing to do
            targets_shape = tf.shape(input=targets)
            logits_shape = tf.shape(input=logits)
            targets_length = targets_shape[self.time_dim]
            logits_length = logits_shape[self.time_dim]
            def _get_pad_shape(shape_to_pad, shape_to_match):
                """ Calculates the shape of the padding to be applied to the logits or targets. """
                time_steps_to_pad = shape_to_match[self.time_dim] - shape_to_pad[self.time_dim]
                if self.time_dim == 0:
                    pad_shape = [time_steps_to_pad, shape_to_pad[1]]
                else:
                    pad_shape = [shape_to_pad[0], time_steps_to_pad]
                return pad_shape
            def _pad_targets(targets, target_mask, logits):
                """ Pads the targets to match the size of the model-generated logits. """
                pad_shape = _get_pad_shape(targets_shape, logits_shape)
                targets = tf.concat([targets, tf.zeros(pad_shape, dtype=self.int_dtype)], axis=self.time_dim)
                target_mask = tf.concat([target_mask, tf.zeros(pad_shape, dtype=self.float_dtype)], axis=self.time_dim)
                return targets, target_mask, logits
            def _pad_logits(targets, target_mask, logits):
                """ Pads the logits to match the size of the ground-truth targets. """
                pad_shape = _get_pad_shape(logits_shape, targets_shape)
                logits = tf.concat([logits, tf.zeros(pad_shape + [logits_shape[-1]], dtype=self.float_dtype)],
                                   axis=self.time_dim)
                return targets, target_mask, logits
            # For teacher-forcing with RNN models
            # Pad whichever of targets/logits is shorter along the time axis
            targets, target_mask, logits = tf.cond(pred=tf.equal(targets_length, logits_length),
                                                   true_fn=lambda: (targets, target_mask, logits),
                                                   false_fn=lambda: tf.cond(pred=tf.less(targets_length, logits_length),
                                                                            true_fn=lambda: _pad_targets(targets, target_mask, logits),
                                                                            false_fn=lambda: _pad_logits(targets, target_mask, logits)))
            # Project and optionally smooth target token ids
            projected_targets = tf.one_hot(targets,
                                           depth=self.vocab_size,
                                           on_value=high_confidence,
                                           off_value=low_confidence,
                                           dtype=self.float_dtype)
            # Compute token-level loss
            flat_logits = tf.reshape(logits, [-1, self.vocab_size])
            flat_targets = tf.reshape(projected_targets, [-1, self.vocab_size])
            # stop_gradient prevents gradients flowing into the (smoothed) labels
            flat_loss = tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits, labels=tf.stop_gradient(flat_targets))
            flat_normalized_loss = flat_loss - normalizing_factor
            # Compute sentence- and batch-level losses (i.e. mean token-loss per sentence/ batch)
            normalized_loss = tf.reshape(flat_normalized_loss, tf.shape(input=targets))
            masked_loss = normalized_loss * target_mask
            sentence_lengths = tf.reduce_sum(input_tensor=target_mask, axis=self.time_dim, keepdims=False)
            sentence_loss = tf.math.divide(tf.reduce_sum(input_tensor=masked_loss, axis=self.time_dim, keepdims=False), sentence_lengths)
            batch_loss = tf.reduce_mean(input_tensor=sentence_loss, keepdims=False)
        return masked_loss, sentence_loss, batch_loss
| {
"repo_name": "EdinburghNLP/nematus",
"path": "nematus/transformer_layers.py",
"copies": "1",
"size": "20686",
"license": "bsd-3-clause",
"hash": -7928485361675858000,
"line_mean": 46.4449541284,
"line_max": 146,
"alpha_frac": 0.5801024848,
"autogenerated": false,
"ratio": 4.253752827472754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027155867270864057,
"num_lines": 436
} |
from random import random
from math import log, ceil
class Node(object):
    """A single skiplist node.

    ``next[k]`` is the successor at level k; ``width[k]`` is the number of
    level-0 links the level-k link spans (enables rank lookups).
    """
    __slots__ = 'value', 'next', 'width'

    def __init__(self, value, next, width):
        self.value = value
        self.next = next
        self.width = width
class End(object):
    """Sentinel object that always compares greater than another object.

    The original relied on Python 2's ``__cmp__`` protocol, which Python 3
    ignores -- every ordering comparison against the sentinel raised
    TypeError there. Rich comparison methods are added so the sentinel
    works on both Python 2 and 3; ``__cmp__`` is kept for Python 2.
    """
    def __cmp__(self, other):
        # Python 2 fallback: positive means "greater".
        return 1
    def __gt__(self, other):
        return True
    def __ge__(self, other):
        return True
    def __lt__(self, other):
        return False
    def __le__(self, other):
        return False
    def __eq__(self, other):
        # Equal only to itself, consistent with "always greater than others".
        return self is other
    def __ne__(self, other):
        return self is not other
    # Defining __eq__ would otherwise reset __hash__ to None on Python 3.
    __hash__ = object.__hash__
# Shared terminator: every level's chain ends at NIL, whose End() value
# compares greater than any key, so search loops stop naturally.
NIL = Node(End(), [], [])          # Singleton terminator node
class IndexableSkiplist:
'Sorted collection supporting O(lg n) insertion, removal, and lookup by rank.'
def __init__(self, expected_size=100):
self.size = 0
self.maxlevels = int(1 + log(expected_size, 2))
self.head = Node('HEAD', [NIL]*self.maxlevels, [1]*self.maxlevels)
def __len__(self):
return self.size
def __getitem__(self, i):
node = self.head
i += 1
for level in reversed(range(self.maxlevels)):
while node.width[level] <= i:
i -= node.width[level]
node = node.next[level]
return node.value
    def insert(self, value):
        """Insert `value`, keeping the list sorted (duplicates allowed,
        placed after existing equal values because of the <= scan)."""
        # find first node on each level where node.next[levels].value > value
        chain = [None] * self.maxlevels
        # steps_at_level[k]: level-0 distance travelled while scanning level k
        steps_at_level = [0] * self.maxlevels
        node = self.head
        for level in reversed(range(self.maxlevels)):
            while node.next[level].value <= value:
                steps_at_level[level] += node.width[level]
                node = node.next[level]
            chain[level] = node
        # insert a link to the newnode at each level
        # node height d is geometric: P(height >= k) halves per level
        d = min(self.maxlevels, 1 - int(log(random(), 2.0)))
        newnode = Node(value, [None]*d, [None]*d)
        steps = 0
        for level in range(d):
            prevnode = chain[level]
            newnode.next[level] = prevnode.next[level]
            prevnode.next[level] = newnode
            # split the predecessor's old span between it and the new node
            newnode.width[level] = prevnode.width[level] - steps
            prevnode.width[level] = steps + 1
            steps += steps_at_level[level]
        # levels above the new node now span one more element
        for level in range(d, self.maxlevels):
            chain[level].width[level] += 1
        self.size += 1
    def remove(self, value):
        """Remove one occurrence of `value`; raise KeyError when absent."""
        # find first node on each level where node.next[levels].value >= value
        chain = [None] * self.maxlevels
        node = self.head
        for level in reversed(range(self.maxlevels)):
            while node.next[level].value < value:
                node = node.next[level]
            chain[level] = node
        if value != chain[0].next[0].value:
            raise KeyError('Not Found')
        # remove one link at each level
        # d is the height of the node being removed
        d = len(chain[0].next[0].next)
        for level in range(d):
            prevnode = chain[level]
            # absorb the removed node's span into its predecessor
            prevnode.width[level] += prevnode.next[level].width[level] - 1
            prevnode.next[level] = prevnode.next[level].next[level]
        # levels above the removed node now span one fewer element
        for level in range(d, self.maxlevels):
            chain[level].width[level] -= 1
        self.size -= 1
    def __iter__(self):
        'Iterate over values in sorted order'
        # Walk the base (level-0) chain, which links every node.
        node = self.head.next[0]
        while node is not NIL:
            yield node.value
node = node.next[0] | {
"repo_name": "isdal/raspberrypi-fan-controller",
"path": "running_median/__init__.py",
"copies": "1",
"size": "3310",
"license": "apache-2.0",
"hash": 8865347572014921000,
"line_mean": 34.9891304348,
"line_max": 82,
"alpha_frac": 0.5734138973,
"autogenerated": false,
"ratio": 3.7401129943502824,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9801332816035349,
"avg_score": 0.0024388151229869097,
"num_lines": 92
} |
# adapted from:
# https://gist.github.com/cliffano/9868180
# https://github.com/petems/ansible-json.git
# https://github.com/jlafon/ansible-profile
# https://github.com/kalosoid/ansible-sumo-logs
import json
import logging
import logging.handlers
import uuid
import platform
import time
from datetime import datetime, timedelta
log = logging.getLogger("ansible")
#fh = logging.FileHandler('sample.log')
#log.addHandler(fh)
def json_log(res, uuid, play, role, task, state):
    """Annotate an Ansible result dict in place and emit it as one JSON line.

    Parameters
    ----------
    res : dict
        Task result; mutated in place with context fields. Non-dict values
        are ignored entirely. Results carrying 'verbose_override' are logged
        unannotated (matching the original control flow).
    uuid, play, role, state :
        Run context recorded alongside the result.
    task :
        Task name; skipped when None.
    """
    host = platform.node()
    if isinstance(res, dict):  # was: type(res) == type(dict())
        if 'verbose_override' not in res:
            res.update({"host": host,
                        "uuid": uuid,
                        "play": play,
                        "role": role,
                        "state": state})
            if task is not None:  # was: task != None
                res.update({"task": task})
        # Same logger object the module-level `log` points at.
        logging.getLogger("ansible").info(json.dumps(res, sort_keys=True))
class CallbackModule(object):
    """Ansible callback plugin that logs task results as JSON lines and
    records wall-clock timing per role.

    NOTE: ``self.playbook`` is injected by Ansible before the playbook
    callbacks fire; it is not set in __init__.
    """

    # Class-level fallbacks kept for backward compatibility; __init__
    # records per-instance values.
    start_time = datetime.now()
    uuid = None

    def __init__(self):
        self.node = platform.node()
        # role name -> start timestamp while the role runs, then duration
        self.stats = {}
        self.current = None   # role currently being timed
        self.role = None
        self.task = None
        self.play = None
        self.uuid = str(uuid.uuid4())
        # BUG FIX: the original assigned a throwaway local `start_time`
        # here; the run start must live on the instance so that
        # playbook_on_stats measures this run, not module import time.
        self.start_time = datetime.now()

    def days_hours_minutes_seconds(self, timedelta):
        """Split a timedelta into (days, hours, minutes, seconds).

        BUG FIX: remainder seconds were computed as
        ``seconds - minutes * 60``, which still contained the whole hours;
        ``seconds % 60`` is the correct remainder.
        """
        minutes = (timedelta.seconds // 60) % 60
        r_seconds = timedelta.seconds % 60
        return timedelta.days, timedelta.seconds // 3600, minutes, r_seconds

    def on_any(self, *args, **kwargs):
        # ``self.playbook`` is provided by Ansible at runtime.
        self.play = self.playbook.filename
        task = getattr(self, 'task', None)

    def runner_on_failed(self, host, res, ignore_errors=False):
        json_log(res, self.uuid, self.play, self.role, self.task, 'failed')

    def runner_on_ok(self, host, res):
        json_log(res, self.uuid, self.play, self.role, self.task, 'ok')

    def runner_on_error(self, host, msg, res):
        res.update({"error-msg": msg})
        json_log(res, self.uuid, self.play, self.role, self.task, 'error')

    def runner_on_skipped(self, host, item=None):
        pass

    def runner_on_unreachable(self, host, res):
        json_log(res, self.uuid, self.play, self.role, self.task, 'unreachable')

    def runner_on_no_hosts(self):
        pass

    def runner_on_async_poll(self, host, res, jid, clock):
        json_log(res, self.uuid, self.play, self.role, self.task, 'async_poll')

    def runner_on_async_ok(self, host, res, jid):
        json_log(res, self.uuid, self.play, self.role, self.task, 'async_ok')

    def runner_on_async_failed(self, host, res, jid):
        json_log(res, self.uuid, self.play, self.role, self.task, 'async_failed')

    def playbook_on_start(self):
        pass

    def playbook_on_notify(self, host, handler):
        pass

    def playbook_on_no_hosts_matched(self):
        pass

    def playbook_on_no_hosts_remaining(self):
        pass

    def playbook_on_task_start(self, name, is_conditional):
        """Track the current role/task and per-role timings.

        Role tasks are named ``"role | task"``; anything else counts as a
        playbook-level task (timing bucket "NULL", which is never timed).
        """
        my_list = name.split("|")
        # Check to see if we are processing a role
        if len(my_list) == 2:
            self.role = str.strip(my_list[0])
            self.task = str.strip(my_list[1])
            # Leaving a previously-timed role: convert its start stamp into
            # a duration. BUG FIX: also require a stats entry -- the "NULL"
            # bucket never gets a start stamp and the original raised
            # KeyError when switching from playbook-level tasks to a role.
            if (self.current not in (None, self.role)
                    and self.current in self.stats):
                self.stats[self.current] = time.time() - self.stats[self.current]
            # Entering a new role: start its timer
            if self.current is None or self.current != self.role:
                self.current = self.role
                self.stats[self.current] = time.time()
        # We are now processing playbook level tasks
        else:
            self.task = str.strip(my_list[0])
            # BUG FIX: guard against self.current being None (first task of
            # the run), which used to raise KeyError on self.stats[None].
            if (self.current not in (None, "NULL")
                    and self.current in self.stats):
                self.stats[self.current] = time.time() - self.stats[self.current]
            self.current = "NULL"

    def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
        pass

    def playbook_on_setup(self):
        pass

    def playbook_on_import_for_host(self, host, imported_file):
        pass

    def playbook_on_not_import_for_host(self, host, missing_file):
        pass

    def playbook_on_play_start(self, name):
        pass

    def playbook_on_stats(self, stats):
        """Emit a final summary record with per-host stats and timings."""
        self.play = self.playbook.filename
        res = dict([(h, stats.summarize(h)) for h in stats.processed])
        end_time = datetime.now()
        # local renamed from `timedelta`, which shadowed the imported class
        elapsed = end_time - self.start_time
        duration = elapsed.total_seconds()
        res.update({"start": str(self.start_time)})
        res.update({"end": str(end_time)})
        res.update({"play_duration": duration})
        if self.current is not None and self.current != "NULL":
            # Record the timing of the very last task
            self.stats[self.current] = time.time() - self.stats[self.current]
        res.update({"role_duration": self.stats})
        json_log(res, self.uuid, self.play, self.role, None, 'Play Completed')
| {
"repo_name": "gadouryd/ansible-to-sumo",
"path": "plugins/callback/sumo_logs.py",
"copies": "3",
"size": "5346",
"license": "mit",
"hash": -7385371099894007000,
"line_mean": 30.8214285714,
"line_max": 144,
"alpha_frac": 0.5993265993,
"autogenerated": false,
"ratio": 3.5616255829447034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005087559133691735,
"num_lines": 168
} |
# Adapted from
# https://gist.github.com/jtriley/1108174
# pylint: disable=bare-except,unpacking-non-sequence
import os
import shlex
import struct
import platform
import subprocess
def get_terminal_size():
    """Return the console size as a (width, height) tuple.

    Works on Linux, OS X, Windows and cygwin; falls back to the classic
    80x25 when nothing can be detected. Originally based on
    http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
    """
    os_name = platform.system()
    size = None
    if os_name == 'Windows':
        size = _get_terminal_size_windows()
        if size is None:
            # needed for window's python in cygwin's xterm
            size = _get_terminal_size_tput()
    if os_name in ['Linux', 'Darwin'] or os_name.startswith('CYGWIN'):
        size = _get_terminal_size_linux()
    if size is None or size == (0, 0):
        size = (80, 25)  # assume "standard" terminal
    return size
def _get_terminal_size_windows():
    # pylint: disable=unused-variable,redefined-outer-name,too-many-locals
    """Query the Windows console size via the Win32 API.

    Returns (width, height) on success, or None (implicit) when the ctypes
    calls fail or this is not a Windows console.
    """
    try:
        from ctypes import windll, create_string_buffer
        # stdin handle is -10
        # stdout handle is -11
        # stderr handle is -12
        h = windll.kernel32.GetStdHandle(-12)
        csbi = create_string_buffer(22)
        res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
        if res:
            (bufx, bufy, curx, cury, wattr,
             left, top, right, bottom,
             maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
            # Visible window rectangle, not the (possibly larger) buffer.
            sizex = right - left + 1
            sizey = bottom - top + 1
            return sizex, sizey
    except:
        # Deliberate best-effort (see module pylint disable); caller falls
        # back to other probes.
        pass
def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
cols = int(subprocess.check_call(shlex.split('tput cols')))
rows = int(subprocess.check_call(shlex.split('tput lines')))
return (cols, rows)
except:
pass
def _get_terminal_size_linux():
    """Probe the terminal size on Linux/OS X/Cygwin.

    Tries TIOCGWINSZ on stdin/stdout/stderr, then on the controlling
    terminal, then falls back to the LINES/COLUMNS environment variables.
    Returns (width, height) or None.
    """
    def ioctl_GWINSZ(fd):
        # Returns (rows, cols) or None; bare except is deliberate
        # (see module-level pylint disable).
        try:
            import fcntl
            import termios
            cr = struct.unpack('hh',
                               fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
            return cr
        except:
            pass
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        try:
            # Ask the controlling terminal directly.
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except:
            pass
    if not cr:
        try:
            # Environment values are strings; converted to int below.
            cr = (os.environ['LINES'], os.environ['COLUMNS'])
        except:
            return None
    # cr is (rows, cols); return (cols, rows).
    return int(cr[1]), int(cr[0])
if __name__ == "__main__":
    # Manual smoke test: report the detected terminal size.
    # NOTE: Python 2 print statement -- this module targets Python 2.
    sizex, sizey = get_terminal_size()
    print 'width =', sizex, 'height =', sizey
| {
"repo_name": "chase-qi/workload-automation",
"path": "wlauto/utils/terminalsize.py",
"copies": "4",
"size": "2862",
"license": "apache-2.0",
"hash": -1028641405077686400,
"line_mean": 29.7741935484,
"line_max": 104,
"alpha_frac": 0.5828092243,
"autogenerated": false,
"ratio": 3.5116564417177916,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014663338453661036,
"num_lines": 93
} |
# Adapted from
# https://github.com/codekansas/keras-language-modeling/blob/master/attention_lstm.py
# Licensed under MIT
from __future__ import absolute_import
import keras
from keras.layers import LSTM, activations
class AttentionLSTM(LSTM):
    """LSTM whose hidden state is gated at each step by attention computed
    against a fixed attention vector.

    NOTE(review): written against the Keras 1.x recurrent API
    (`inner_init`, `_keras_shape`, `step`/`get_constants`) -- it will not
    work unmodified on Keras 2+.
    """
    def __init__(self, output_dim, attention_vec, attn_activation='tanh',
                 attn_inner_activation='tanh', single_attn=False,
                 n_attention_dim=None, **kwargs):
        # attention_vec: tensor the recurrence attends over at every step
        self.attention_vec = attention_vec
        self.attn_activation = activations.get(attn_activation)
        self.attn_inner_activation = activations.get(attn_inner_activation)
        # single_attn: one scalar attention weight per step instead of a
        # per-dimension gate
        self.single_attention_param = single_attn
        self.n_attention_dim = output_dim if n_attention_dim is None else n_attention_dim
        super(AttentionLSTM, self).__init__(output_dim, **kwargs)
    def build(self, input_shape):
        """Create the attention weights on top of the LSTM's own weights."""
        super(AttentionLSTM, self).build(input_shape)
        if hasattr(self.attention_vec, '_keras_shape'):
            attention_dim = self.attention_vec._keras_shape[1]
        else:
            raise Exception('Layer could not be build: No information about expected input shape.')
        # U_a/b_a transform the hidden state; U_m/b_m transform the
        # attention vector (applied once, in get_constants).
        self.U_a = self.inner_init((self.output_dim, self.output_dim), name='{}_U_a'.format(self.name))
        self.b_a = keras.backend.zeros((self.output_dim,), name='{}_b_a'.format(self.name))
        self.U_m = self.inner_init((attention_dim, self.output_dim), name='{}_U_m'.format(self.name))
        self.b_m = keras.backend.zeros((self.output_dim,), name='{}_b_m'.format(self.name))
        # U_s/b_s produce the attention gate: a scalar or a full vector.
        if self.single_attention_param:
            self.U_s = self.inner_init((self.output_dim, 1), name='{}_U_s'.format(self.name))
            self.b_s = keras.backend.zeros((1,), name='{}_b_s'.format(self.name))
        else:
            self.U_s = self.inner_init((self.output_dim, self.output_dim), name='{}_U_s'.format(self.name))
            self.b_s = keras.backend.zeros((self.output_dim,), name='{}_b_s'.format(self.name))
        self.trainable_weights += [self.U_a, self.U_m, self.U_s, self.b_a, self.b_m, self.b_s]
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
    def step(self, x, states):
        """One recurrence step: run the LSTM step, then gate h by attention."""
        h, [h, c] = super(AttentionLSTM, self).step(x, states)
        # states[4] holds the precomputed attention constant from
        # get_constants (appended after the LSTM's own constants).
        attention = states[4]
        m = self.attn_inner_activation(keras.backend.dot(h, self.U_a) * attention + self.b_a)
        # Intuitively it makes more sense to use a sigmoid (was getting some NaN problems
        # which I think might have been caused by the exponential function -> gradients blow up)
        s = self.attn_activation(keras.backend.dot(m, self.U_s) + self.b_s)
        if self.single_attention_param:
            # Broadcast the scalar gate across all output dimensions.
            h = h * keras.backend.repeat_elements(s, self.output_dim, axis=1)
        else:
            h = h * s
        return h, [h, c]
    def get_constants(self, x):
        """Append the projected attention vector to the LSTM constants."""
        constants = super(AttentionLSTM, self).get_constants(x)
        constants.append(keras.backend.dot(self.attention_vec, self.U_m) + self.b_m)
        return constants
| {
"repo_name": "UKPLab/semeval2017-scienceie",
"path": "code/attention_lstm.py",
"copies": "1",
"size": "3078",
"license": "apache-2.0",
"hash": 967699502250229200,
"line_mean": 44.2647058824,
"line_max": 107,
"alpha_frac": 0.6328784925,
"autogenerated": false,
"ratio": 3.320388349514563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44532668420145627,
"avg_score": null,
"num_lines": null
} |
# adapted from
# https://github.com/leporo/tornado-redis/blob/master/demos/websockets
from json import loads, dumps
from itertools import chain
import toredis
from tornado.web import authenticated
from tornado.websocket import WebSocketHandler
from tornado.gen import engine, Task
from future.utils import viewvalues
from moi import r_client
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_db.analysis import Analysis
from qiita_core.util import execute_as_transaction
class MessageHandler(WebSocketHandler):
    """Forward messages published on a per-user Redis channel to the
    connected websocket client."""
    def __init__(self, *args, **kwargs):
        super(MessageHandler, self).__init__(*args, **kwargs)
        # The redis server
        self.r_client = r_client
        # The toredis server that allows event-based message handling
        self.toredis = toredis.Client()
        self.toredis.connect()
        self.channel = None
        self.channel_messages = None
    def get_current_user(self):
        """Return the user name from the secure cookie (quotes stripped)."""
        user = self.get_secure_cookie("user")
        if user is None:
            raise ValueError("No user associated with the websocket!")
        else:
            return user.strip('" ')
    @authenticated
    def on_message(self, msg):
        # When the websocket receives a message from the javascript client,
        # parse into JSON
        msginfo = loads(msg)
        # Determine which Redis communication channel the server needs to
        # listen on
        self.channel = msginfo.get('user', None)
        if self.channel is not None:
            self.channel_messages = '%s:messages' % self.channel
            self.listen()
    def listen(self):
        """Subscribe to the user's channel and flush any queued messages."""
        # Attach a callback on the channel to listen too. This callback is
        # executed when anything is placed onto the channel.
        self.toredis.subscribe(self.channel, callback=self.callback)
        # Potential race-condition where a separate process may have placed
        # messages into the queue before we've been able to attach listen.
        oldmessages = self.r_client.lrange(self.channel_messages, 0, -1)
        if oldmessages is not None:
            for message in oldmessages:
                self.write_message(message)
    def callback(self, msg):
        message_type, channel, payload = msg
        # if a compute process wrote to the Redis channel that we are
        # listening too, and if it is actually a message, send the payload to
        # the javascript client via the websocket
        if channel == self.channel and message_type == 'message':
            self.write_message(payload)
    @engine
    def on_close(self):
        yield Task(self.toredis.unsubscribe, self.channel)
        self.r_client.delete('%s:messages' % self.channel)
        # BUG FIX: this object has no `redis` attribute; the original
        # `self.redis.disconnect()` raised AttributeError on every close.
        self.toredis.disconnect()
class SelectedSocketHandler(WebSocketHandler, BaseHandler):
    """Websocket for removing samples on default analysis display page"""
    @authenticated
    @execute_as_transaction
    def on_message(self, msg):
        """Remove samples/processed data from the user's default analysis
        according to the JSON command in `msg`, then echo the message back."""
        payload = loads(msg)
        analysis = Analysis(self.current_user.default_analysis)
        if 'remove_sample' in payload:
            info = payload['remove_sample']
            analysis.remove_samples([info['proc_data']], info['samples'])
        elif 'remove_pd' in payload:
            analysis.remove_samples([payload['remove_pd']['proc_data']])
        elif 'clear' in payload:
            analysis.remove_samples(payload['clear']['pids'])
        # Echo the command so the client can update its display.
        self.write_message(msg)
class SelectSamplesHandler(WebSocketHandler, BaseHandler):
    """Websocket for selecting and deselecting samples on list studies page"""
    @authenticated
    @execute_as_transaction
    def on_message(self, msg):
        """Add the selected samples to the user's default analysis.

        Parameters
        ----------
        msg : JSON str
            Maps processed-data ids to sample lists, in the form
            {proc_data_id': [s1, s2, ...], ...]}
        """
        payload = loads(msg)
        Analysis(self.current_user.default_analysis).add_samples(payload['sel'])
        # Report back the total number of distinct samples now selected.
        unique_samples = set()
        for samples in viewvalues(payload['sel']):
            unique_samples.update(samples)
        self.write_message(dumps({'sel': len(unique_samples)}))
| {
"repo_name": "adamrp/qiita",
"path": "qiita_pet/handlers/websocket_handlers.py",
"copies": "1",
"size": "4398",
"license": "bsd-3-clause",
"hash": 3708839880658285600,
"line_mean": 35.0491803279,
"line_max": 78,
"alpha_frac": 0.6457480673,
"autogenerated": false,
"ratio": 4.311764705882353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 122
} |
# adapted from
# https://github.com/leporo/tornado-redis/blob/master/demos/websockets
from json import loads
import toredis
from tornado.web import authenticated
from tornado.websocket import WebSocketHandler
from tornado.gen import engine, Task
from moi import r_client
class MessageHandler(WebSocketHandler):
    """Forward messages published on a per-user Redis channel to the
    connected websocket client."""
    def __init__(self, *args, **kwargs):
        super(MessageHandler, self).__init__(*args, **kwargs)
        # The redis server
        self.r_client = r_client
        # The toredis server that allows event-based message handling
        self.toredis = toredis.Client()
        self.toredis.connect()
        self.channel = None
        self.channel_messages = None
    def get_current_user(self):
        """Return the user name from the secure cookie (quotes stripped)."""
        user = self.get_secure_cookie("user")
        if user is None:
            raise ValueError("No user associated with the websocket!")
        else:
            return user.strip('" ')
    @authenticated
    def on_message(self, msg):
        # When the websocket receives a message from the javascript client,
        # parse into JSON
        msginfo = loads(msg)
        # Determine which Redis communication channel the server needs to
        # listen on
        self.channel = msginfo.get('user', None)
        if self.channel is not None:
            self.channel_messages = '%s:messages' % self.channel
            self.listen()
    def listen(self):
        """Subscribe to the user's channel and flush any queued messages."""
        # Attach a callback on the channel to listen too. This callback is
        # executed when anything is placed onto the channel.
        self.toredis.subscribe(self.channel, callback=self.callback)
        # Potential race-condition where a separate process may have placed
        # messages into the queue before we've been able to attach listen.
        oldmessages = self.r_client.lrange(self.channel_messages, 0, -1)
        if oldmessages is not None:
            for message in oldmessages:
                self.write_message(message)
    def callback(self, msg):
        message_type, channel, payload = msg
        # if a compute process wrote to the Redis channel that we are
        # listening too, and if it is actually a message, send the payload to
        # the javascript client via the websocket
        if channel == self.channel and message_type == 'message':
            self.write_message(payload)
    @engine
    def on_close(self):
        yield Task(self.toredis.unsubscribe, self.channel)
        self.r_client.delete('%s:messages' % self.channel)
        # BUG FIX: this object has no `redis` attribute; the original
        # `self.redis.disconnect()` raised AttributeError on every close.
        self.toredis.disconnect()
| {
"repo_name": "wasade/qiita",
"path": "qiita_pet/handlers/websocket_handlers.py",
"copies": "1",
"size": "2506",
"license": "bsd-3-clause",
"hash": -5054827661224869000,
"line_mean": 33.8055555556,
"line_max": 77,
"alpha_frac": 0.6508379888,
"autogenerated": false,
"ratio": 4.335640138408304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5486478127208304,
"avg_score": null,
"num_lines": null
} |
# Adapted from
# https://github.com/pytorch/pytorch/blob/master/torch/nn/utils/weight_norm.py
# and https://github.com/salesforce/awd-lstm-lm/blob/master/weight_drop.py
import logging
import torch
from torch.nn import Parameter
from functools import wraps
def _norm(p, dim):
"""Computes the norm over all dimensions except dim"""
if dim is None:
return p.norm()
elif dim == 0:
output_size = (p.size(0),) + (1,) * (p.dim() - 1)
return p.contiguous().view(p.size(0), -1).norm(dim=1).view(*output_size)
elif dim == p.dim() - 1:
output_size = (1,) * (p.dim() - 1) + (p.size(-1),)
return p.contiguous().view(-1, p.size(-1)).norm(dim=0).view(*output_size)
else:
return _norm(p.transpose(0, dim), 0).transpose(0, dim)
def _dummy(*args, **kwargs):
# We need to replace flatten_parameters with a nothing function
return
class WeightNorm(torch.nn.Module):
    """Forward-pre-hook that reparameterizes module weights as
    ``w = g * v / ||v||`` (weight normalization)."""

    def __init__(self, weights, dim):
        super(WeightNorm, self).__init__()
        self.weights = weights  # names of the reparameterized parameters
        self.dim = dim          # dimension kept by the norm (None = all)

    def compute_weight(self, module, name):
        """Rebuild weight `name` from its `_g` (gain) and `_v` (direction)."""
        g = getattr(module, name + '_g')
        v = getattr(module, name + '_v')
        return v * (g / _norm(v, self.dim))

    @staticmethod
    def apply(module, weights, dim):
        """Replace each named weight with `_g`/`_v` parameter pairs and
        install a pre-forward hook that recomputes the weight."""
        # Terrible temporary solution to an issue regarding compacting weights
        # re: CUDNN RNN
        if issubclass(type(module), torch.nn.RNNBase):
            module.flatten_parameters = _dummy
        if weights is None:  # do for all weight params
            weights = [w for w in module._parameters.keys() if 'weight' in w]
        fn = WeightNorm(weights, dim)
        for name in weights:
            if hasattr(module, name):
                logging.debug(
                    'Applying weight norm to {} - {}'.format(str(module), name))
                weight = getattr(module, name)
                # remove w from parameter list
                del module._parameters[name]
                # add g and v as new parameters and express w as g/||v|| * v
                module.register_parameter(
                    name + '_g', Parameter(_norm(weight, dim).data))
                module.register_parameter(name + '_v', Parameter(weight.data))
                setattr(module, name, fn.compute_weight(module, name))
        # recompute weight before every forward()
        module.register_forward_pre_hook(fn)
        return fn

    def remove(self, module):
        """Undo the reparameterization, restoring plain parameters."""
        for name in self.weights:
            # BUG FIX: compute_weight requires the parameter name; the
            # original called it without `name`, raising TypeError.
            weight = self.compute_weight(module, name)
            delattr(module, name)
            del module._parameters[name + '_g']
            del module._parameters[name + '_v']
            module.register_parameter(name, Parameter(weight.data))

    def __call__(self, module, inputs):
        # Pre-forward hook: refresh every normalized weight.
        for name in self.weights:
            setattr(module, name, self.compute_weight(module, name))
def weight_norm(module, weights=None, dim=0):
    """Apply weight normalization to `module` in place and return it.

    weights: parameter names to reparameterize (None = every parameter
    whose name contains "weight").
    dim: dimension preserved by the norm; None normalizes over all dims.
    """
    WeightNorm.apply(module, weights, dim)
    return module
if __name__ == '__main__':
    # Manual smoke test -- requires a CUDA device.
    import torch.nn as nn
    # NOTE(review): torch.autograd.Variable is deprecated in modern PyTorch.
    from torch.autograd import Variable
    m = weight_norm(nn.LSTM(16, 32, 2)).cuda()
    x = Variable(torch.rand(5, 1, 16).cuda())
    y = m(x)
    print(y)
| {
"repo_name": "eladhoffer/seq2seq.pytorch",
"path": "seq2seq/models/modules/weight_norm.py",
"copies": "1",
"size": "3220",
"license": "mit",
"hash": 685610828713549400,
"line_mean": 33.623655914,
"line_max": 81,
"alpha_frac": 0.5875776398,
"autogenerated": false,
"ratio": 3.7139561707035758,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9799550666418849,
"avg_score": 0.00039662881694527813,
"num_lines": 93
} |
# adapted from
# http://smallshire.org.uk/sufficientlysmall/2010/04/11/\
# a-hindley-milner-type-inference-implementation-in-python/
import gast
from copy import deepcopy
from numpy import floating, integer, complexfloating
from pythran.tables import MODULES, attributes
import pythran.typing as typing
from pythran.syntax import PythranSyntaxError
from pythran.utils import isnum
class PythranTypeError(PythranSyntaxError):
    """Typing-specific error, distinguished from general syntax errors."""
class InferenceError(Exception):
    """Raised when the type inference algorithm cannot infer types."""
# Map gast operator node classes to their Python surface syntax, used when
# rendering types and errors back to the user.
symbol_of = {
    gast.And: 'and',
    gast.Or: 'or',
    gast.Add: '+',
    gast.Sub: '-',
    gast.Mult: '*',
    gast.Div: '/',
    gast.Mod: '%',
    gast.Pow: '**',
    gast.LShift: '<<',
    gast.RShift: '>>',
    gast.BitOr: '|',
    gast.BitXor: '^',
    gast.BitAnd: '&',
    gast.FloorDiv: '//',
    gast.Invert: '~',
    gast.MatMult: '@',
    gast.Not: '!',
    gast.UAdd: '+',
    gast.USub: '-',
}
# The builtin NoneType is not importable by that name; recover it here.
NoneType_ = type(None)
# =======================================================#
# Types and type constructors
class TypeVariable(object):
    """A type variable standing for an arbitrary type.

    Each variable has a unique identity; display names (T0, T1, ...) are
    handed out lazily, in the order variables are first printed.
    """

    _cached_names = {}

    def __init__(self):
        self.instance = None
        self.name = None

    def __str__(self):
        # Once unified with a concrete type, print that instead.
        if self.instance:
            return str(self.instance)
        index = TypeVariable._cached_names.setdefault(
            self, len(TypeVariable._cached_names))
        return 'T{}'.format(index)
class TypeOperator(object):
    """An n-ary type constructor which builds a new type from old ones."""

    def __init__(self, name, types):
        self.name = name
        self.types = types

    def __str__(self):
        # Nullary operators print as their bare name.
        if not self.types:
            return self.name
        if self.name == 'fun':
            args, ret = self.types[:-1], self.types[-1]
            return 'Callable[[{0}], {1}]'.format(
                ', '.join(map(str, args)), ret)
        if self.name == 'option':
            return 'Option[{0}]'.format(self.types[0])
        return "{0}[{1}]".format(self.name.capitalize(),
                                 ', '.join(map(str, self.types)))
class Collection(TypeOperator):
    """Unified container type: [holder traits, key type, value type,
    iterated type].

    The holder traits (a `Traits` operator) encode which concrete container
    this is (list/set/dict/str/file/tuple/array/gen), whether it has a
    length, and whether it supports slicing.
    """
    def __init__(self, holder_type, key_type, value_type, iter_type):
        super(Collection, self).__init__("collection",
                                         [holder_type, key_type, value_type,
                                          iter_type])
    def __str__(self):
        """Pretty-print with typing-style names (List[...], Dict[...], ...)."""
        t0 = prune(self.types[0])
        # Holder entirely unknown: print a generic iterable/collection.
        if isinstance(t0, TypeVariable):
            if isinstance(prune(self.types[1]), TypeVariable):
                return 'Iterable[{}]'.format(self.types[3])
            else:
                return 'Collection[{}, {}]'.format(self.types[1],
                                                   self.types[2])
        if isinstance(t0, TypeOperator) and t0.name == 'traits':
            if all(isinstance(prune(t), TypeVariable) for t in t0.types):
                return 'Collection[{}, {}]'.format(self.types[1],
                                                   self.types[2])
            elif all(isinstance(prune(t), TypeVariable)
                     for t in t0.types[:1] + t0.types[2:]):
                # Only the length trait is resolved.
                t01 = prune(t0.types[1])
                if isinstance(t01, TypeOperator) and t01.name == LenTrait.name:
                    return 'Sized'
        t00 = prune(t0.types[0])
        if isinstance(t00, TypeOperator):
            # Concrete holder trait: dispatch on the container kind.
            type_trait = t00.name
            if type_trait == 'list':
                return 'List[{}]'.format(self.types[2])
            if type_trait == 'set':
                return 'Set[{}]'.format(self.types[2])
            if type_trait == 'dict':
                return 'Dict[{}, {}]'.format(self.types[1], self.types[2])
            if type_trait == 'str':
                return 'str'
            if type_trait == 'file':
                return 'IO[str]'
            if type_trait == 'tuple':
                return 'Tuple[{}]'.format(', '.join(map(str, self.types[1:])))
            if type_trait == 'array':
                t01 = prune(t0.types[1])
                hasnolen = (isinstance(t01, TypeOperator) and
                            t01.name == NoLenTrait.name)
                # A 0-d "array" without a length prints as its scalar type.
                if hasnolen:
                    return str(self.types[2])
                def rec(n):
                    # Count nested array dimensions; returns (element, ndim).
                    pn = prune(n)
                    if isinstance(pn, Collection):
                        traits = prune(pn.types[0])
                        # a scalar or array?
                        if isinstance(traits, TypeVariable):
                            return pn.types[3], 0
                        len_trait = prune(traits.types[1])
                        # an array?
                        haslen = (isinstance(len_trait, TypeOperator) and
                                  len_trait.name == LenTrait.name)
                        if haslen:
                            t, n = rec(pn.types[3])
                            return t, n + 1
                        # a scalar or array?
                        else:
                            return pn.types[2], 0
                    else:
                        return pn, 0
                t, n = rec(self)
                if isinstance(t, TypeVariable):
                    # Element still unknown: dimension count is a lower bound.
                    return 'Array[{} d+, {}]'.format(n, t)
                else:
                    return 'Array[{}d, {}]'.format(n, t)
            if type_trait == 'gen':
                return 'Generator[{}]'.format(self.types[2])
        return super(Collection, self).__str__()
def TupleTrait(of_types):
    """Holder trait for a tuple whose element types are `of_types`."""
    return TypeOperator('tuple', of_types)
# Holder traits: which concrete container a Collection stands for.
ListTrait = TypeOperator('list', [])
SetTrait = TypeOperator('set', [])
DictTrait = TypeOperator('dict', [])
StrTrait = TypeOperator('str', [])
FileTrait = TypeOperator('file', [])
ArrayTrait = TypeOperator('array', [])
GenerableTrait = TypeOperator('gen', [])
# Capability traits: whether the container has a length / supports slicing.
LenTrait = TypeOperator("len", [])
NoLenTrait = TypeOperator("no_len", [])
SliceTrait = TypeOperator("slice", [])
NoSliceTrait = TypeOperator("no_slice", [])
def File():
    """Type of an open text file handle; iterating/indexing yields str."""
    return Collection(Traits([FileTrait, NoLenTrait, NoSliceTrait]),
                      InvalidKey, Str(), Str())
def List(of_type):
    """Type of a list of `of_type`, indexed by integers."""
    return Collection(Traits([ListTrait, LenTrait, SliceTrait]),
                      Integer(), of_type, of_type)
def Set(of_type):
    """Type of a set of `of_type` (not indexable)."""
    return Collection(Traits([SetTrait, LenTrait, NoSliceTrait]),
                      InvalidKey, of_type, of_type)
def Dict(key_type, value_type):
    """Type of a dict; iterating a dict yields its keys."""
    return Collection(Traits([DictTrait, LenTrait, NoSliceTrait]),
                      key_type, value_type, key_type)
def Str(rec=6):
    """Type of a string. Indexing a str yields a str, so the element type
    recurses up to `rec` levels before bottoming out in a type variable."""
    Next = Str(rec - 1) if rec else TypeVariable()
    return Collection(Traits([StrTrait, LenTrait, SliceTrait]),
                      Integer(),
                      Next,
                      Next)
def Array(of_type, dim):
    """Type of a `dim`-dimensional array with elements of `of_type`."""
    return Collection(Traits([ArrayTrait, LenTrait, SliceTrait]),
                      AnyType,
                      AnyType,
                      Array(of_type, dim - 1) if dim > 1 else of_type)
def Iterable(of_type, dim):
    """Like Array, but with an unconstrained holder trait."""
    return Collection(Traits([TypeVariable(), LenTrait, SliceTrait]),
                      AnyType,
                      AnyType,
                      Iterable(of_type, dim - 1) if dim > 1 else of_type)
def Generator(of_type):
    """Type of a generator yielding `of_type` (no len, no slicing)."""
    return Collection(Traits([GenerableTrait, NoLenTrait, NoSliceTrait]),
                      InvalidKey, of_type, of_type)
def Tuple(of_types):
    """Type of a tuple whose element types are `of_types` (carried in the
    holder trait)."""
    return Collection(Traits([TupleTrait(of_types), LenTrait, SliceTrait]),
                      Integer(), TypeVariable(), TypeVariable())
class Scalar(TypeOperator):
    """Numeric scalar type with one trait slot per dtype family:
    [complex, float, int, bool]. A concrete trait in a slot pins the
    dtype; type variables leave it open."""

    def __init__(self, types=None):
        # Accept either a prebuilt 4-slot trait list or a dtype name.
        if not isinstance(types, list):
            dtype = types
            slots = [TypeVariable(), TypeVariable(),
                     TypeVariable(), TypeVariable()]
            if dtype == 'complex':
                slots[0] = ComplexTrait
            elif dtype == 'float':
                slots[1] = FloatTrait
            elif dtype == 'int':
                slots[2] = IntegerTrait
            elif dtype == 'bool':
                slots[3] = BoolTrait
            else:
                # Unknown dtype names are programming errors.
                assert dtype is None
            types = slots
        super(Scalar, self).__init__('scalar', types)

    def __str__(self):
        # First resolved slot wins; fully unresolved prints as 'Scalar'.
        for slot, label in zip(self.types,
                               ('complex', 'float', 'int', 'bool')):
            if isinstance(prune(slot), TypeOperator):
                return label
        return 'Scalar'
def Complex():
    """Scalar complex type (modelled as a 0-d, non-sized collection)."""
    return Collection(Traits([ArrayTrait, NoLenTrait, NoSliceTrait]),
                      InvalidKey, Scalar('complex'), InvalidKey)
def Float():
    """Scalar floating-point type."""
    return Collection(Traits([ArrayTrait, NoLenTrait, NoSliceTrait]),
                      InvalidKey, Scalar('float'), InvalidKey)
def Integer():
    """Scalar integer type."""
    return Collection(Traits([ArrayTrait, NoLenTrait, NoSliceTrait]),
                      InvalidKey, Scalar('int'), InvalidKey)
def Bool():
    """Scalar boolean type."""
    return Collection(Traits([ArrayTrait, NoLenTrait, NoSliceTrait]),
                      InvalidKey, Scalar('bool'), InvalidKey)
def DType():
    """Scalar of a still-unknown dtype."""
    return Collection(Traits([ArrayTrait, NoLenTrait, NoSliceTrait]),
                      InvalidKey, Scalar(), InvalidKey)
def Function(from_types, to_type):
    """A binary type constructor which builds function types"""
    return TypeOperator('fun', list(from_types) + [to_type])
def OptionType(of_type):
    """Optional value: `of_type` or None."""
    return TypeOperator("option", [of_type])
def Traits(of_types):
    """Bundle the [holder, len, slice] trait operators of a Collection."""
    return TypeOperator("traits", of_types)
ExceptionType = TypeOperator("exception", [])
# Basic types are constructed with a null type constructor
IntegerTrait = TypeOperator("int", [])  # any integer
FloatTrait = TypeOperator("float", [])  # any float
ComplexTrait = TypeOperator("complex", [])
BoolTrait = TypeOperator("bool", [])
InvalidKey = TypeOperator("invalid-key", [])  # for non-indexable collection
NoneType = TypeOperator("none", [])
AnyType = TypeOperator("any", [])
InvalidType = TypeOperator("invalid-type", [])
Slice = TypeOperator("slice", [])  # slice
def is_none(t):
    """Check whether `t` resolves to the `none` type."""
    resolved = prune(t)
    return isinstance(resolved, TypeOperator) and resolved.name == "none"
def is_option_type(t):
    """Check whether `t` resolves to an option (maybe-None) type."""
    resolved = prune(t)
    return isinstance(resolved, TypeOperator) and resolved.name == "option"
def maybe_array_type(t):
    """Return True when `t` could still resolve to an array type.

    Unresolved type variables count as "maybe an array"; only a collection
    whose holder trait is concretely non-array returns False.
    """
    pt = prune(t)
    if isinstance(pt, TypeVariable):
        return True  # maybe an array :-/
    if isinstance(pt, TypeOperator) and pt.name == "collection":
        st = prune(pt.types[0])
        if isinstance(st, TypeOperator) and st.name == "traits":
            tt = prune(st.types[0])
            if isinstance(tt, TypeVariable):
                return True  # maybe
            return isinstance(tt, TypeOperator) and tt.name == "array"
    return False
def is_test_is_none(node):
    """Detect a ``<name> is None`` (or ``<name> == None``) comparison."""
    if not isinstance(node, gast.Compare):
        return False
    if len(node.ops) != 1:
        return False
    if type(node.ops[0]) not in (gast.Is, gast.Eq):
        return False
    comparator = node.comparators[0]
    # ``None`` is expected here as an attribute node with attr == 'None'
    # (presumably the result of an earlier normalization pass -- confirm).
    if not isinstance(comparator, gast.Attribute):
        return False
    if comparator.attr != 'None':
        return False
    return isinstance(node.left, gast.Name)
def is_tuple_type(t):
    """Return True if the pruned type is a collection with a tuple trait."""
    resolved = prune(t)
    if not (isinstance(resolved, TypeOperator) and
            resolved.name == "collection"):
        return False
    traits = prune(resolved.types[0])
    if not (isinstance(traits, TypeOperator) and traits.name == "traits"):
        return False
    first_trait = prune(traits.types[0])
    return isinstance(first_trait, TypeOperator) and first_trait.name == "tuple"
def is_getattr(node):
    """Return True for a call whose callee is an attribute named getattr."""
    if not isinstance(node, gast.Call):
        return False
    callee = node.func
    return isinstance(callee, gast.Attribute) and callee.attr == 'getattr'
class MultiType(object):
    """A set of alternative types (overloads) for one entity.

    Printed one overload per line, sorted, for readable error messages.
    """
    def __init__(self, types):
        self.name = 'multitype'
        self.types = types
    def __str__(self):
        return '\n'.join(sorted(map(str, self.types)))
def tr(t):
    """Translate a pythran ``typing``-style signature into a tog type.

    Accepts a raw signature, an object carrying a ``signature`` attribute,
    or a dict (module table), which is returned unchanged.
    """
    def rec_tr(t, env):
        # env maps typing.TypeVar instances to tog TypeVariables so that
        # repeated occurrences of the same TypeVar share one variable.
        # NOTE: the branch order matters (e.g. bool is tested before the
        # int issubclass check, since bool subclasses int).
        if isinstance(t, typing.TypeVar):
            if t in env:
                return env[t]
            else:
                env[t] = TypeVariable()
                return env[t]
        elif t is typing.Any:
            return TypeVariable()
        elif isinstance(t, NoneType_):
            return NoneType
        elif t is bool:
            return Bool()
        elif issubclass(t, slice):
            return Slice
        elif issubclass(t, (complex, complexfloating)):
            return Complex()
        elif issubclass(t, (float, floating)):
            return Float()
        elif issubclass(t, (int, integer)):
            return Integer()
        elif issubclass(t, NoneType_):
            return NoneType
        elif t is str:
            return Str()
        elif isinstance(t, typing.Generator):
            return Generator(rec_tr(t.__args__[0], env))
        elif isinstance(t, typing.List):
            return List(rec_tr(t.__args__[0], env))
        elif isinstance(t, typing.Optional):
            return OptionType(rec_tr(t.__args__[0], env))
        elif isinstance(t, typing.Set):
            return Set(rec_tr(t.__args__[0], env))
        elif isinstance(t, typing.Dict):
            return Dict(rec_tr(t.__args__[0], env), rec_tr(t.__args__[1], env))
        elif isinstance(t, typing.Tuple):
            return Tuple([rec_tr(tp, env) for tp in t.__args__])
        elif isinstance(t, typing.NDArray):
            # remaining __args__ entries encode the dimensions
            return Array(rec_tr(t.__args__[0], env), len(t.__args__[1:]))
        elif isinstance(t, typing.Pointer):
            # a pointer is modelled as a 1-d array
            return Array(rec_tr(t.__args__[0], env), 1)
        elif isinstance(t, typing.Union):
            return MultiType([rec_tr(ut, env) for ut in t.__args__])
        elif t is typing.File:
            return File()
        elif isinstance(t, typing.Iterable):
            return Collection(TypeVariable(), TypeVariable(), TypeVariable(),
                              rec_tr(t.__args__[0], env))
        elif t is typing.Sized:
            # only the LenTrait is constrained; everything else stays free
            return Collection(
                Traits([TypeVariable(), LenTrait, TypeVariable()]),
                TypeVariable(), TypeVariable(), TypeVariable()
            )
        elif isinstance(t, typing.Fun):
            return Function([rec_tr(at, env) for at in t.__args__[:-1]],
                            rec_tr(t.__args__[-1], env))
        else:
            raise NotImplementedError(t)
    if isinstance(t, dict):
        return t
    elif hasattr(t, 'signature'):
        return rec_tr(t.signature, {})
    else:
        return rec_tr(t, {})
####
def analyse_body(body, env, non_generic):
    """Type-check a statement list, pre-declaring local function names."""
    # First pass: bind every function name to a fresh variable so that
    # (mutually) recursive definitions can reference each other.
    for stmt in body:
        if isinstance(stmt, gast.FunctionDef):
            env[stmt.name] = TypeVariable()
    # Second pass: infer each statement in order.
    for stmt in body:
        analyse(stmt, env, non_generic)
class HasYield(gast.NodeVisitor):
    # Visitor that records whether a function body contains a ``yield``,
    # without descending into nested function definitions.
    def __init__(self):
        super(HasYield, self).__init__()
        self.has_yield = False
    def visit_FunctionDef(self, node):
        # Do not recurse: a yield inside a nested function does not make
        # the outer function a generator.
        pass
    def visit_Yield(self, node):
        self.has_yield = True
def analyse(node, env, non_generic=None):
    """Computes the type of the expression given by node.
    The type of the node is computed in the context of the supplied type
    environment env. Data types can be introduced into the language simply
    by having a predefined set of identifiers in the initial environment;
    this way there is no need to change the syntax or, more importantly,
    the type-checking program when extending the language.
    Args:
        node: The root of the abstract syntax tree.
        env: The type environment is a mapping of expression identifier names
             to type assignments.
        non_generic: A set of non-generic variables, or None
    Returns:
        The computed type of the expression (statement nodes return the
        updated environment instead).
    Raises:
        InferenceError: The type of the expression could not be inferred,
        PythranTypeError: InferenceError with user friendly message + location
    """
    if non_generic is None:
        non_generic = set()
    # expr
    if isinstance(node, gast.Name):
        # a store context introduces a fresh, non-generic type variable
        if isinstance(node.ctx, (gast.Store)):
            new_type = TypeVariable()
            non_generic.add(new_type)
            env[node.id] = new_type
        return get_type(node.id, env, non_generic)
    elif isinstance(node, gast.Constant):
        if isinstance(node.value, str):
            return Str()
        elif isinstance(node.value, int):
            # NOTE(review): bool is a subclass of int, so True/False are
            # typed as int here -- confirm this is intended.
            return Integer()
        elif isinstance(node.value, float):
            return Float()
        elif isinstance(node.value, complex):
            return Complex()
        elif node.value is None:
            return NoneType
        else:
            raise NotImplementedError
    elif isinstance(node, gast.Compare):
        # chained comparison: each operator must accept (prev, comparator)
        # and all produce the same result type
        left_type = analyse(node.left, env, non_generic)
        comparators_type = [analyse(comparator, env, non_generic)
                            for comparator in node.comparators]
        ops_type = [analyse(op, env, non_generic)
                    for op in node.ops]
        prev_type = left_type
        result_type = TypeVariable()
        for op_type, comparator_type in zip(ops_type, comparators_type):
            try:
                unify(Function([prev_type, comparator_type], result_type),
                      op_type)
                prev_type = comparator_type
            except InferenceError:
                raise PythranTypeError(
                    "Invalid comparison, between `{}` and `{}`".format(
                        prev_type,
                        comparator_type
                    ),
                    node)
        return result_type
    elif isinstance(node, gast.Call):
        if is_getattr(node):
            # getattr(obj, 'attr') special case: resolve the attribute's
            # signature statically from the attributes table
            self_type = analyse(node.args[0], env, non_generic)
            attr_name = node.args[1].value
            _, attr_signature = attributes[attr_name]
            attr_type = tr(attr_signature)
            result_type = TypeVariable()
            try:
                unify(Function([self_type], result_type), attr_type)
            except InferenceError:
                if isinstance(prune(attr_type), MultiType):
                    msg = 'no attribute found, tried:\n{}'.format(attr_type)
                else:
                    msg = 'tried {}'.format(attr_type)
                raise PythranTypeError(
                    "Invalid attribute for getattr call with self"
                    "of type `{}`, {}".format(self_type, msg), node)
        else:
            fun_type = analyse(node.func, env, non_generic)
            arg_types = [analyse(arg, env, non_generic) for arg in node.args]
            result_type = TypeVariable()
            try:
                unify(Function(arg_types, result_type), fun_type)
            except InferenceError:
                # recover original type
                # (unify may have partially instantiated fun_type)
                fun_type = analyse(node.func, env, non_generic)
                if isinstance(prune(fun_type), MultiType):
                    msg = 'no overload found, tried:\n{}'.format(fun_type)
                else:
                    msg = 'tried {}'.format(fun_type)
                raise PythranTypeError(
                    "Invalid argument type for function call to "
                    "`Callable[[{}], ...]`, {}"
                    .format(', '.join('{}'.format(at) for at in arg_types),
                            msg),
                    node)
        return result_type
    elif isinstance(node, gast.IfExp):
        test_type = analyse(node.test, env, non_generic)
        unify(Function([test_type], Bool()),
              tr(MODULES['builtins']['bool']))
        # narrow `x is None` tests: x is None in the body env, and the
        # option type gets unwrapped in the orelse env
        if is_test_is_none(node.test):
            none_id = node.test.left.id
            body_env = env.copy()
            body_env[none_id] = NoneType
        else:
            none_id = None
            body_env = env
        body_type = analyse(node.body, body_env, non_generic)
        if none_id:
            orelse_env = env.copy()
            if is_option_type(env[none_id]):
                orelse_env[none_id] = prune(env[none_id]).types[0]
            else:
                orelse_env[none_id] = TypeVariable()
        else:
            orelse_env = env
        orelse_type = analyse(node.orelse, orelse_env, non_generic)
        try:
            return merge_unify(body_type, orelse_type)
        except InferenceError:
            raise PythranTypeError(
                "Incompatible types from different branches:"
                "`{}` and `{}`".format(
                    body_type,
                    orelse_type
                ),
                node
            )
    elif isinstance(node, gast.UnaryOp):
        operand_type = analyse(node.operand, env, non_generic)
        op_type = analyse(node.op, env, non_generic)
        result_type = TypeVariable()
        try:
            unify(Function([operand_type], result_type), op_type)
            return result_type
        except InferenceError:
            raise PythranTypeError(
                "Invalid operand for `{}`: `{}`".format(
                    symbol_of[type(node.op)],
                    operand_type
                ),
                node
            )
    elif isinstance(node, gast.BinOp):
        left_type = analyse(node.left, env, non_generic)
        op_type = analyse(node.op, env, non_generic)
        right_type = analyse(node.right, env, non_generic)
        result_type = TypeVariable()
        try:
            unify(Function([left_type, right_type], result_type), op_type)
        except InferenceError:
            raise PythranTypeError(
                "Invalid operand for `{}`: `{}` and `{}`".format(
                    symbol_of[type(node.op)],
                    left_type,
                    right_type),
                node
            )
        return result_type
    # operator nodes: each maps to the signature of a library function
    elif isinstance(node, gast.Pow):
        return tr(MODULES['numpy']['power'])
    elif isinstance(node, gast.Sub):
        return tr(MODULES['operator']['sub'])
    elif isinstance(node, (gast.USub, gast.UAdd)):
        return tr(MODULES['operator']['pos'])
    elif isinstance(node, (gast.Eq, gast.NotEq, gast.Lt, gast.LtE, gast.Gt,
                           gast.GtE, gast.Is, gast.IsNot)):
        return tr(MODULES['operator']['eq'])
    elif isinstance(node, (gast.In, gast.NotIn)):
        # `x in c` calls contains(c, x): swap the argument order
        contains_sig = tr(MODULES['operator']['contains'])
        contains_sig.types[:-1] = reversed(contains_sig.types[:-1])
        return contains_sig
    elif isinstance(node, gast.Add):
        return tr(MODULES['operator']['add'])
    elif isinstance(node, gast.Mult):
        return tr(MODULES['operator']['mul'])
    elif isinstance(node, gast.MatMult):
        return tr(MODULES['operator']['matmul'])
    elif isinstance(node, (gast.Div, gast.FloorDiv)):
        return tr(MODULES['operator']['floordiv'])
    elif isinstance(node, gast.Mod):
        return tr(MODULES['operator']['mod'])
    elif isinstance(node, (gast.LShift, gast.RShift)):
        # rshift reuses the lshift signature (same shape)
        return tr(MODULES['operator']['lshift'])
    elif isinstance(node, (gast.BitXor, gast.BitAnd, gast.BitOr)):
        # NOTE(review): bitwise ops also reuse the lshift signature --
        # presumably because the signatures coincide; confirm.
        return tr(MODULES['operator']['lshift'])
    elif isinstance(node, gast.List):
        # all elements must unify to one element type
        new_type = TypeVariable()
        for elt in node.elts:
            elt_type = analyse(elt, env, non_generic)
            try:
                unify(new_type, elt_type)
            except InferenceError:
                raise PythranTypeError(
                    "Incompatible list element type `{}` and `{}`".format(
                        new_type, elt_type),
                    node
                )
        return List(new_type)
    elif isinstance(node, gast.Set):
        new_type = TypeVariable()
        for elt in node.elts:
            elt_type = analyse(elt, env, non_generic)
            try:
                unify(new_type, elt_type)
            except InferenceError:
                raise PythranTypeError(
                    "Incompatible set element type `{}` and `{}`".format(
                        new_type, elt_type),
                    node
                )
        return Set(new_type)
    elif isinstance(node, gast.Dict):
        new_key_type = TypeVariable()
        for key in node.keys:
            key_type = analyse(key, env, non_generic)
            try:
                unify(new_key_type, key_type)
            except InferenceError:
                raise PythranTypeError(
                    "Incompatible dict key type `{}` and `{}`".format(
                        new_key_type, key_type),
                    node
                )
        new_value_type = TypeVariable()
        for value in node.values:
            value_type = analyse(value, env, non_generic)
            try:
                unify(new_value_type, value_type)
            except InferenceError:
                raise PythranTypeError(
                    "Incompatible dict value type `{}` and `{}`".format(
                        new_value_type, value_type),
                    node
                )
        return Dict(new_key_type, new_value_type)
    elif isinstance(node, gast.Tuple):
        return Tuple([analyse(elt, env, non_generic) for elt in node.elts])
    elif isinstance(node, gast.Slice):
        # each bound must be int or None; the PythranTypeError raised here
        # carries no node -- the Subscript branch re-raises with location
        def unify_int_or_none(t, name):
            try:
                unify(t, Integer())
            except InferenceError:
                try:
                    unify(t, NoneType)
                except InferenceError:
                    raise PythranTypeError(
                        "Invalid slice {} type `{}`, expecting int or None"
                        .format(name, t)
                    )
        if node.lower:
            lower_type = analyse(node.lower, env, non_generic)
            unify_int_or_none(lower_type, 'lower bound')
        else:
            lower_type = Integer()
        if node.upper:
            upper_type = analyse(node.upper, env, non_generic)
            unify_int_or_none(upper_type, 'upper bound')
        else:
            upper_type = Integer()
        if node.step:
            step_type = analyse(node.step, env, non_generic)
            unify_int_or_none(step_type, 'step')
        else:
            step_type = Integer()
        return Slice
    elif isinstance(node, gast.Subscript):
        new_type = TypeVariable()
        value_type = prune(analyse(node.value, env, non_generic))
        try:
            slice_type = prune(analyse(node.slice, env, non_generic))
        except PythranTypeError as e:
            # attach the location that the Slice branch could not provide
            raise PythranTypeError(e.msg, node)
        if isinstance(node.slice, gast.Tuple):
            # multi-dimensional indexing: only check the dimension count
            nbslice = len(node.slice.elts)
            dtype = TypeVariable()
            try:
                unify(Array(dtype, nbslice), clone(value_type))
            except InferenceError:
                raise PythranTypeError(
                    "Dimension mismatch when slicing `{}`".format(value_type),
                    node)
            return TypeVariable()  # FIXME
        else:
            # handle tuples in a special way
            num = isnum(node.slice)
            if num and is_tuple_type(value_type):
                # constant index into a tuple: project the element type
                try:
                    unify(prune(prune(value_type.types[0]).types[0])
                          .types[node.slice.value],
                          new_type)
                    return new_type
                except IndexError:
                    raise PythranTypeError(
                        "Invalid tuple indexing, "
                        "out-of-bound index `{}` for type `{}`".format(
                            node.slice.value,
                            value_type),
                        node)
            try:
                unify(tr(MODULES['operator']['getitem']),
                      Function([value_type, slice_type], new_type))
            except InferenceError:
                raise PythranTypeError(
                    "Invalid subscripting of `{}` by `{}`".format(
                        value_type,
                        slice_type),
                    node)
                # NOTE(review): unreachable -- follows a raise
                return new_type
            return new_type
        return new_type
    elif isinstance(node, gast.Attribute):
        from pythran.utils import attr_to_path
        obj, path = attr_to_path(node)
        if obj.signature is typing.Any:
            return TypeVariable()
        else:
            return tr(obj)
    # stmt
    elif isinstance(node, gast.Import):
        for alias in node.names:
            if alias.name not in MODULES:
                raise NotImplementedError("unknown module: %s " % alias.name)
            if alias.asname is None:
                target = alias.name
            else:
                target = alias.asname
            env[target] = tr(MODULES[alias.name])
        return env
    elif isinstance(node, gast.ImportFrom):
        if node.module not in MODULES:
            raise NotImplementedError("unknown module: %s" % node.module)
        for alias in node.names:
            if alias.name not in MODULES[node.module]:
                raise NotImplementedError(
                    "unknown function: %s in %s" % (alias.name, node.module))
            if alias.asname is None:
                target = alias.name
            else:
                target = alias.asname
            env[target] = tr(MODULES[node.module][alias.name])
        return env
    elif isinstance(node, gast.FunctionDef):
        # generate one overload per number of omitted defaulted arguments
        ftypes = []
        for i in range(1 + len(node.args.defaults)):
            new_env = env.copy()
            new_non_generic = non_generic.copy()
            # reset return special variables
            new_env.pop('@ret', None)
            new_env.pop('@gen', None)
            hy = HasYield()
            for stmt in node.body:
                hy.visit(stmt)
            new_env['@gen'] = hy.has_yield
            arg_types = []
            istop = len(node.args.args) - i
            for arg in node.args.args[:istop]:
                arg_type = TypeVariable()
                new_env[arg.id] = arg_type
                new_non_generic.add(arg_type)
                arg_types.append(arg_type)
            # omitted arguments get the type of their default expression
            for arg, expr in zip(node.args.args[istop:],
                                 node.args.defaults[-i:]):
                arg_type = analyse(expr, new_env, new_non_generic)
                new_env[arg.id] = arg_type
            analyse_body(node.body, new_env, new_non_generic)
            result_type = new_env.get('@ret', NoneType)
            if new_env['@gen']:
                result_type = Generator(result_type)
            ftype = Function(arg_types, result_type)
            ftypes.append(ftype)
        if len(ftypes) == 1:
            ftype = ftypes[0]
            env[node.name] = ftype
        else:
            env[node.name] = MultiType(ftypes)
        return env
    elif isinstance(node, gast.Module):
        analyse_body(node.body, env, non_generic)
        return env
    elif isinstance(node, (gast.Pass, gast.Break, gast.Continue)):
        return env
    elif isinstance(node, gast.Expr):
        analyse(node.value, env, non_generic)
        return env
    elif isinstance(node, gast.Delete):
        for target in node.targets:
            if isinstance(target, gast.Name):
                if target.id in env:
                    del env[target.id]
                else:
                    raise PythranTypeError(
                        "Invalid del: unbound identifier `{}`".format(
                            target.id),
                        node)
            else:
                analyse(target, env, non_generic)
        return env
    elif isinstance(node, gast.Print):
        if node.dest is not None:
            analyse(node.dest, env, non_generic)
        for value in node.values:
            analyse(value, env, non_generic)
        return env
    elif isinstance(node, gast.Assign):
        defn_type = analyse(node.value, env, non_generic)
        for target in node.targets:
            target_type = analyse(target, env, non_generic)
            try:
                unify(target_type, defn_type)
            except InferenceError:
                raise PythranTypeError(
                    "Invalid assignment from type `{}` to type `{}`".format(
                        target_type,
                        defn_type),
                    node)
        return env
    elif isinstance(node, gast.AugAssign):
        # FIMXE: not optimal: evaluates type of node.value twice
        # `x += y` is type-checked as the equivalent `x = x + y`
        fake_target = deepcopy(node.target)
        fake_target.ctx = gast.Load()
        fake_op = gast.BinOp(fake_target, node.op, node.value)
        gast.copy_location(fake_op, node)
        res_type = analyse(fake_op, env, non_generic)
        target_type = analyse(node.target, env, non_generic)
        try:
            unify(target_type, res_type)
        except InferenceError:
            raise PythranTypeError(
                "Invalid update operand for `{}`: `{}` and `{}`".format(
                    symbol_of[type(node.op)],
                    res_type,
                    target_type
                ),
                node
            )
        return env
    elif isinstance(node, gast.Raise):
        return env  # TODO
    elif isinstance(node, gast.Return):
        # in a generator, a bare return does not contribute to '@ret'
        if env['@gen']:
            return env
        if node.value is None:
            ret_type = NoneType
        else:
            ret_type = analyse(node.value, env, non_generic)
        if '@ret' in env:
            try:
                ret_type = merge_unify(env['@ret'], ret_type)
            except InferenceError:
                raise PythranTypeError(
                    "function may returns with incompatible types "
                    "`{}` and `{}`".format(env['@ret'], ret_type),
                    node
                )
        env['@ret'] = ret_type
        return env
    elif isinstance(node, gast.Yield):
        assert env['@gen']
        assert node.value is not None
        # NOTE(review): this branch is dead given the assert above
        if node.value is None:
            ret_type = NoneType
        else:
            ret_type = analyse(node.value, env, non_generic)
        if '@ret' in env:
            try:
                ret_type = merge_unify(env['@ret'], ret_type)
            except InferenceError:
                raise PythranTypeError(
                    "function may yields incompatible types "
                    "`{}` and `{}`".format(env['@ret'], ret_type),
                    node
                )
        env['@ret'] = ret_type
        return env
    elif isinstance(node, gast.For):
        # the loop target is the element type of the iterated collection
        iter_type = analyse(node.iter, env, non_generic)
        target_type = analyse(node.target, env, non_generic)
        unify(Collection(TypeVariable(), TypeVariable(), TypeVariable(),
                         target_type),
              iter_type)
        analyse_body(node.body, env, non_generic)
        analyse_body(node.orelse, env, non_generic)
        return env
    elif isinstance(node, gast.If):
        test_type = analyse(node.test, env, non_generic)
        unify(Function([test_type], Bool()),
              tr(MODULES['builtins']['bool']))
        body_env = env.copy()
        body_non_generic = non_generic.copy()
        # narrow `x is None` tests as in the IfExp branch
        if is_test_is_none(node.test):
            none_id = node.test.left.id
            body_env[none_id] = NoneType
        else:
            none_id = None
        analyse_body(node.body, body_env, body_non_generic)
        orelse_env = env.copy()
        orelse_non_generic = non_generic.copy()
        if none_id:
            if is_option_type(env[none_id]):
                orelse_env[none_id] = prune(env[none_id]).types[0]
            else:
                orelse_env[none_id] = TypeVariable()
        analyse_body(node.orelse, orelse_env, orelse_non_generic)
        # merge names newly bound in either branch back into env
        for var in body_env:
            if var not in env:
                if var in orelse_env:
                    try:
                        new_type = merge_unify(body_env[var], orelse_env[var])
                    except InferenceError:
                        raise PythranTypeError(
                            "Incompatible types from different branches for "
                            "`{}`: `{}` and `{}`".format(
                                var,
                                body_env[var],
                                orelse_env[var]
                            ),
                            node
                        )
                else:
                    new_type = body_env[var]
                env[var] = new_type
        for var in orelse_env:
            if var not in env:
                # may not be unified by the prev loop if a del occured
                if var in body_env:
                    new_type = merge_unify(orelse_env[var], body_env[var])
                else:
                    new_type = orelse_env[var]
                env[var] = new_type
        if none_id:
            try:
                new_type = merge_unify(body_env[none_id], orelse_env[none_id])
            except InferenceError:
                msg = ("Inconsistent types while merging values of `{}` from "
                       "conditional branches: `{}` and `{}`")
                err = msg.format(none_id,
                                 body_env[none_id],
                                 orelse_env[none_id])
                raise PythranTypeError(err, node)
            env[none_id] = new_type
        return env
    elif isinstance(node, gast.While):
        test_type = analyse(node.test, env, non_generic)
        unify(Function([test_type], Bool()),
              tr(MODULES['builtins']['bool']))
        analyse_body(node.body, env, non_generic)
        analyse_body(node.orelse, env, non_generic)
        return env
    elif isinstance(node, gast.Try):
        analyse_body(node.body, env, non_generic)
        for handler in node.handlers:
            analyse(handler, env, non_generic)
        analyse_body(node.orelse, env, non_generic)
        analyse_body(node.finalbody, env, non_generic)
        return env
    elif isinstance(node, gast.ExceptHandler):
        if(node.name):
            new_type = ExceptionType
            non_generic.add(new_type)
            if node.name.id in env:
                unify(env[node.name.id], new_type)
            else:
                env[node.name.id] = new_type
        analyse_body(node.body, env, non_generic)
        return env
    elif isinstance(node, gast.Assert):
        if node.msg:
            analyse(node.msg, env, non_generic)
        analyse(node.test, env, non_generic)
        return env
    elif isinstance(node, gast.UnaryOp):
        # NOTE(review): unreachable -- the earlier gast.UnaryOp branch in
        # the expr section shadows this one
        operand_type = analyse(node.operand, env, non_generic)
        return_type = TypeVariable()
        op_type = analyse(node.op, env, non_generic)
        unify(Function([operand_type], return_type), op_type)
        return return_type
    elif isinstance(node, gast.Invert):
        return MultiType([Function([Bool()], Integer()),
                          Function([Integer()], Integer())])
    elif isinstance(node, gast.Not):
        return tr(MODULES['builtins']['bool'])
    elif isinstance(node, gast.BoolOp):
        # each value must be bool-convertible; the op folds left-to-right
        op_type = analyse(node.op, env, non_generic)
        value_types = [analyse(value, env, non_generic)
                       for value in node.values]
        for value_type in value_types:
            unify(Function([value_type], Bool()),
                  tr(MODULES['builtins']['bool']))
        return_type = TypeVariable()
        prev_type = value_types[0]
        for value_type in value_types[1:]:
            unify(Function([prev_type, value_type], return_type), op_type)
            prev_type = value_type
        return return_type
    elif isinstance(node, (gast.And, gast.Or)):
        # prefer the overload where both operands share one type
        x_type = TypeVariable()
        return MultiType([
            Function([x_type, x_type], x_type),
            Function([TypeVariable(), TypeVariable()], TypeVariable()),
        ])
    raise RuntimeError("Unhandled syntax node {0}".format(type(node)))
def get_type(name, env, non_generic):
    """Get the type of identifier name from the type environment env.
    Args:
        name: The identifier name
        env: The type environment mapping from identifier names to types
        non_generic: A set of non-generic TypeVariables
    Note:
        An undefined symbol does not raise: a warning is printed and a
        fresh type variable is returned instead.
    """
    if name in env:
        if isinstance(env[name], MultiType):
            # clone overload sets wholesale so each use resolves freely
            return clone(env[name])
        return fresh(env[name], non_generic)
    else:
        print("W: Undefined symbol {0}".format(name))
        return TypeVariable()
def fresh(t, non_generic):
    """Makes a copy of a type expression.
    The type t is copied. The generic variables are duplicated and the
    non_generic variables are shared.
    Args:
        t: A type to be copied.
        non_generic: A set of non-generic TypeVariables
    """
    mappings = {}  # A mapping of TypeVariables to TypeVariables
    def freshrec(tp):
        p = prune(tp)
        if isinstance(p, TypeVariable):
            if is_generic(p, non_generic):
                # each generic variable maps to exactly one fresh variable
                if p not in mappings:
                    mappings[p] = TypeVariable()
                return mappings[p]
            else:
                return p
        elif isinstance(p, dict):
            return p  # module
        # NOTE: Collection and Scalar are tested before the generic
        # TypeOperator case -- presumably subclasses, so order matters.
        elif isinstance(p, Collection):
            return Collection(*[freshrec(x) for x in p.types])
        elif isinstance(p, Scalar):
            return Scalar([freshrec(x) for x in p.types])
        elif isinstance(p, TypeOperator):
            return TypeOperator(p.name, [freshrec(x) for x in p.types])
        elif isinstance(p, MultiType):
            return MultiType([freshrec(x) for x in p.types])
        else:
            assert False, "missing freshrec case {}".format(type(p))
    return freshrec(t)
def clone(t):
    """Duplicate a type, renaming every type variable (none shared)."""
    if not isinstance(t, MultiType):
        # an empty non-generic set makes every variable generic, so all
        # variables are duplicated
        return fresh(t, {})
    return MultiType([clone(overload) for overload in t.types])
def unify(t1, t2):
    """Unify the two types t1 and t2.
    Makes the types t1 and t2 the same.
    Args:
        t1: The first type to be made equivalent
        t2: The second type to be equivalent
    Returns:
        None
    Raises:
        InferenceError: Raised if the types cannot be unified.
    """
    a = prune(t1)
    b = prune(t2)
    if isinstance(a, TypeVariable):
        if a != b:
            # binding a to b would build an infinite type if a occurs in b
            if occurs_in_type(a, b):
                raise InferenceError("recursive unification")
            a.instance = b
    elif isinstance(b, TypeVariable):
        unify(b, a)
    elif isinstance(a, TypeOperator) and a.name == 'any':
        # 'any' unifies with everything, without constraining anything
        return
    elif isinstance(b, TypeOperator) and b.name == 'any':
        return
    elif isinstance(a, TypeOperator) and isinstance(b, TypeOperator):
        if len(a.types) != len(b.types):
            raise InferenceError("Type length differ")
        else:
            if a.name != b.name:
                raise InferenceError("Type name differ")
        # unify operands pairwise; failures propagate as InferenceError
        # (the previous try/except here only re-raised, so it was removed)
        for p, q in zip(a.types, b.types):
            unify(p, q)
    elif isinstance(a, MultiType) and isinstance(b, MultiType):
        if len(a.types) != len(b.types):
            # fixed typo: message previously read "Type lenght differ"
            raise InferenceError("Type length differ")
        for p, q in zip(a.types, b.types):
            unify(p, q)
    elif isinstance(b, MultiType):
        return unify(b, a)
    elif isinstance(a, MultiType):
        # overload resolution: keep the overloads of `a` that unify with b,
        # probing on clones so failed attempts leave no bindings behind
        types = []
        for t in a.types:
            try:
                t_clone = fresh(t, {})
                b_clone = fresh(b, {})
                unify(t_clone, b_clone)
                types.append(t)
            except InferenceError:
                pass
        if types:
            if len(types) == 1:
                unify(clone(types[0]), b)
            else:
                # too many overloads are found,
                # so extract as many information as we can,
                # and leave the remaining over-approximated
                def try_unify(t, ts):
                    # propagate only the structure shared by all remaining
                    # overloads into t
                    if isinstance(t, TypeVariable):
                        return
                    if any(isinstance(tp, TypeVariable) for tp in ts):
                        return
                    if any(len(tp.types) != len(t.types) for tp in ts):
                        return
                    for i, tt in enumerate(t.types):
                        its = [prune(tp.types[i]) for tp in ts]
                        if any(isinstance(it, TypeVariable) for it in its):
                            continue
                        it0 = its[0]
                        it0ntypes = len(it0.types)
                        if all(((it.name == it0.name) and
                                (len(it.types) == it0ntypes))
                               for it in its):
                            # all overloads agree on this operator: pin its
                            # head and recurse into the operands
                            ntypes = [TypeVariable() for _ in range(it0ntypes)]
                            new_tt = TypeOperator(it0.name, ntypes)
                            new_tt.__class__ = it0.__class__
                            unify(tt, new_tt)
                            try_unify(prune(tt), [prune(it) for it in its])
                try_unify(b, types)
        else:
            raise InferenceError("No overload")
    else:
        raise RuntimeError("Not unified {} and {}".format(type(a), type(b)))
def merge_unify(t1, t2):
    """Unify two types, treating ``none`` specially.

    Merging ``none`` with a non-option type yields an option type;
    otherwise falls back to plain unification. Returns the merged type.
    """
    p1 = prune(t1)
    p2 = prune(t2)
    if is_none(p1) and is_none(p2):
        return p1
    if is_none(p1):
        if is_option_type(p2):
            return p2
        else:
            return OptionType(p2)
    if is_none(p2):
        # swap so the symmetric case above handles it
        return merge_unify(p2, p1)
    if is_option_type(p1) and is_option_type(p2):
        unify(p1.types[0], p2.types[0])
        return p1
    if is_option_type(p1):
        unify(p1.types[0], p2)
        return p1
    if is_option_type(p2):
        return merge_unify(p2, p1)
    unify(p1, p2)
    return p1
def prune(t):
    """Return the currently defining instance of t.

    Skips chains of instantiated type variables and, as a side effect,
    collapses them (path compression) so later lookups are cheap. Always
    returns either an uninstantiated TypeVariable or a TypeOperator.
    """
    if isinstance(t, TypeVariable) and t.instance is not None:
        t.instance = prune(t.instance)
        return t.instance
    return t
def is_generic(v, non_generic):
    """Return True when variable ``v`` is generic.

    ``v`` must be pre-pruned. A variable is generic exactly when it does
    not occur in any of the non-generic variables (variables in that set
    may themselves be instantiated to type terms, whose contained
    variables are then also non-generic).
    """
    occurs = occurs_in(v, non_generic)
    return not occurs
def occurs_in_type(v, type2):
    """Return True when type variable ``v`` (pre-pruned) occurs anywhere
    inside the type expression ``type2``."""
    target = prune(type2)
    if target == v:
        return True
    if isinstance(target, TypeOperator):
        return occurs_in(v, target.types)
    return False
def occurs_in(t, types):
    """Return True when type variable ``t`` occurs in any of ``types``."""
    for candidate in types:
        if occurs_in_type(t, candidate):
            return True
    return False
def typecheck(node):
    """Entry point: infer types for a whole module, starting from an
    environment pre-loaded with the builtins table."""
    types = analyse(node, {'builtins': MODULES['builtins']})
    return types
| {
"repo_name": "serge-sans-paille/pythran",
"path": "pythran/types/tog.py",
"copies": "1",
"size": "48907",
"license": "bsd-3-clause",
"hash": 4419343780957692400,
"line_mean": 33.3448033708,
"line_max": 80,
"alpha_frac": 0.5302103993,
"autogenerated": false,
"ratio": 4.117444014143795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5147654413443795,
"avg_score": null,
"num_lines": null
} |
# Adapted from
# https://matplotlib.org/examples/user_interfaces/embedding_in_tk.html
# This gives the standard matplotlib interface, which for many applications would be
# great. But I just want a static plot...
# - Could just not use the `toolbar`
import matplotlib
print("Default backend appears to be:", matplotlib.get_backend())
# Force the Tk backend before importing anything that picks a backend.
matplotlib.use('TkAgg')
from numpy import arange, sin, pi
import numpy as np
# NOTE(review): NavigationToolbar2TkAgg was deprecated/removed in newer
# matplotlib releases -- confirm against the pinned version.
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# implement the default mpl key bindings
#from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import sys
# Python 2/3 compatible Tk import.
if sys.version_info[0] < 3:
    import Tkinter as Tk
else:
    import tkinter as Tk
root = Tk.Tk()
root.wm_title("Embedding in TK")
# Let the single grid cell grow with the window.
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
f = Figure(figsize=(5, 4), dpi=100)
a = f.add_subplot(111)
t = arange(0.0, 3.0, 0.01)
s = sin(2*pi*t)
a.plot(t, s)
# Loads of points...
x = np.random.random(size=100000)
y = np.random.random(size=100000)
a.scatter(x, y, marker="+", alpha=0.1, color="black")
# a tk.DrawingArea
canvas = FigureCanvasTkAgg(f, master=root)
# NOTE(review): FigureCanvasTkAgg.show() is deprecated in favour of
# draw() in newer matplotlib -- confirm against the pinned version.
canvas.show()
f.set_size_inches(10, 10)
f.savefig("test.png", dpi=100,bbox_inches="tight")
#canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.get_tk_widget().grid(sticky=Tk.NSEW)
#toolbar = NavigationToolbar2TkAgg(canvas, root)
#toolbar.update()
#canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
#def on_key_event(event):
#    print('you pressed %s' % event.key)
#    key_press_handler(event, canvas, toolbar)
#canvas.mpl_connect('key_press_event', on_key_event)
def _quit():
    # Quit callback for the button: stop the Tk event loop.
    root.quit()  # stops mainloop
    #root.destroy()  # this is necessary on Windows to prevent
    # Fatal Python Error: PyEval_RestoreThread: NULL tstate
button = Tk.Button(master=root, text='Quit', command=_quit)
#button.pack(side=Tk.BOTTOM)
button.grid()
Tk.mainloop()
# If you put root.destroy() here, it will cause an error if
# the window is closed with the window manager. | {
"repo_name": "QuantCrimAtLeeds/PredictCode",
"path": "open_cp/snippets/matplotlib_in_tk.py",
"copies": "1",
"size": "2090",
"license": "artistic-2.0",
"hash": 5935038219444695000,
"line_mean": 27.2567567568,
"line_max": 88,
"alpha_frac": 0.719138756,
"autogenerated": false,
"ratio": 3.028985507246377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42481242632463767,
"avg_score": null,
"num_lines": null
} |
# Adapted from
# http://stackoverflow.com/questions/12301071/multidimensional-confidence-intervals
from matplotlib.patches import Ellipse
from matplotlib.pylab import *
import numpy as np
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
    """
    Plots an `nstd` sigma error ellipse based on the specified covariance
    matrix (`cov`). Additional keyword arguments are passed on to the
    ellipse patch artist.
    Parameters
    ----------
    cov : The 2x2 covariance matrix to base the ellipse on
    pos : The location of the center of the ellipse. Expects a 2-element
        sequence of [x0, y0].
    nstd : The radius of the ellipse in numbers of standard deviations.
        Defaults to 2 standard deviations.
    ax : The axis that the ellipse will be plotted on. Defaults to the
        current axis.
    Additional keyword arguments are passed on to the ellipse patch.
    Returns
    -------
    A matplotlib ellipse artist
    """
    def eigsorted(cov):
        # Eigen-decomposition, sorted by decreasing eigenvalue so the
        # major axis comes first.
        vals, vecs = np.linalg.eigh(cov)
        order = vals.argsort()[::-1]
        return vals[order], vecs[:,order]
    if ax is None:
        ax = plt.gca()
    vals, vecs = eigsorted(cov)
    # Orientation of the principal axis, in degrees.
    theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
    # Width and height are "full" widths, not radius
    width, height = 2 * nstd * np.sqrt(vals)
    ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
    ax.add_artist(ellip)
    return ellip
def plot_feature_covariances(features, **kwargs):
    """Draw one covariance ellipse per feature in *features*."""
    for feature in features:
        plot_cov_ellipse(feature.cov, feature.mean, **kwargs)
def plot_feature_means(features, **kwargs):
    """Plot the 2-d means of *features* as one series.

    Returns whatever ``plot`` returns (a list of line artists).
    """
    # np.vstack expects a sequence of arrays; passing a bare generator is
    # deprecated and an error in recent numpy, so build a list first.
    means = np.vstack([feature.mean for feature in features])
    return plot(means[:, 0], means[:, 1], **kwargs)
# Convenience function to plot a value with variances. Shades the n sigma
# region.
def plot_vars(x, y, y_vars, n=3.0, **kwargs):
y_sigma = sqrt(y_vars)
fill_between(x, y - n*y_sigma, y + n*y_sigma, **kwargs)
| {
"repo_name": "rjw57/starman",
"path": "doc/plotutils.py",
"copies": "1",
"size": "1979",
"license": "mit",
"hash": -5045370043697836000,
"line_mean": 32.5423728814,
"line_max": 83,
"alpha_frac": 0.6452753916,
"autogenerated": false,
"ratio": 3.572202166064982,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4717477557664982,
"avg_score": null,
"num_lines": null
} |
"""Adapted from:
@longcw faster_rcnn_pytorch: https://github.com/longcw/faster_rcnn_pytorch
@rbgirshick py-faster-rcnn https://github.com/rbgirshick/py-faster-rcnn
Licensed under The MIT License [see LICENSE for details]
"""
from __future__ import print_function
from utils.pytorch_parameters import VOC_CLASSES as labelmap
from utils.boxes import create_prior_boxes
from utils.boxes import to_point_form
from utils.inference import detect
from utils.pytorch_datasets import VOCDetection
from utils.pytorch_datasets import AnnotationTransform
from utils.pytorch_datasets import BaseTransform
from models import SSD300
import sys
import os
import time
import argparse
import numpy as np
import pickle
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
def str2bool(v):
    """Interpret common truthy strings ('yes', 'true', 't', '1') as True."""
    truthy = {"yes", "true", "t", "1"}
    return v.lower() in truthy
# Module-level configuration: prior boxes, CLI arguments, and VOC paths.
voc_root = '../datasets/VOCdevkit'
# Prior (anchor) boxes converted to point form (x1, y1, x2, y2).
prior_boxes = to_point_form(create_prior_boxes())
parser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')
parser.add_argument('--trained_model',
                    default='weights/ssd300_mAP_77.43_v2.pth',
                    type=str, help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
                    help='File path to save results')
parser.add_argument('--confidence_threshold', default=0.01, type=float,
                    help='Detection confidence threshold')
parser.add_argument('--top_k', default=5, type=int,
                    help='Further restrict the number of predictions to parse')
parser.add_argument('--cuda', default=True, type=str2bool,
                    help='Use cuda to train model')
parser.add_argument('--voc_root', default=voc_root,
                    help='Location of VOC root directory')
args = parser.parse_args()
if not os.path.exists(args.save_folder):
    os.mkdir(args.save_folder)
# Path templates for the VOC2007 devkit layout; '%s' / '{:s}' are filled
# with image ids or the image-set name respectively.
devkit_path = '../datasets/VOCdevkit/VOC2007/'
annopath = os.path.join(args.voc_root, 'VOC2007', 'Annotations', '%s.xml')
imgpath = os.path.join(args.voc_root, 'VOC2007', 'JPEGImages', '%s.jpg')
imgsetpath = os.path.join(args.voc_root, 'VOC2007',
                          'ImageSets', 'Main', '{:s}.txt')
YEAR = '2007'
# BGR channel means subtracted from images before inference.
dataset_mean = (104, 117, 123)
set_type = 'test'
class Timer(object):
    """Accumulates wall-clock timings over repeated tic()/toc() calls."""

    def __init__(self):
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.

    def tic(self):
        """Start (or restart) the timer.

        time.time() is used rather than time.clock() because time.clock()
        does not normalize for multithreading.
        """
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop timing and return the running average (default) or the
        most recent interval when *average* is False."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff
def parse_rec(filename):
    """Parse a PASCAL VOC annotation XML file into a list of object dicts."""
    objects = []
    for obj in ET.parse(filename).findall('object'):
        bndbox = obj.find('bndbox')
        objects.append({
            'name': obj.find('name').text,
            'pose': obj.find('pose').text,
            'truncated': int(obj.find('truncated').text),
            'difficult': int(obj.find('difficult').text),
            # VOC boxes are 1-based; shift to 0-based pixel coordinates.
            'bbox': [int(bndbox.find('xmin').text) - 1,
                     int(bndbox.find('ymin').text) - 1,
                     int(bndbox.find('xmax').text) - 1,
                     int(bndbox.find('ymax').text) - 1],
        })
    return objects
def get_output_dir(name, phase):
    """Return the directory where experimental artifacts are placed.

    The path is <name>/<phase>; it is created if it does not exist yet.
    """
    out_dir = os.path.join(name, phase)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    return out_dir
def get_voc_results_file_template(image_set, cls):
    """Return the per-class results file path under devkit_path/results,
    creating the results directory if needed.

    Example: VOCdevkit/VOC2007/results/det_test_aeroplane.txt
    """
    results_dir = os.path.join(devkit_path, 'results')
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)
    return os.path.join(results_dir, 'det_%s_%s.txt' % (image_set, cls))
def write_voc_results_file(all_boxes, dataset):
    """Write per-class VOC results files from the detection table.

    all_boxes[cls][image] is either an empty list or an (N, 5) array of
    (x1, y1, x2, y2, score) rows.  dataset.ids items are indexable; from
    usage below, index[1] is the image id (TODO confirm against VOCDetection).
    """
    for cls_ind, cls in enumerate(labelmap):
        print('Writing {:s} VOC results file'.format(cls))
        filename = get_voc_results_file_template(set_type, cls)
        with open(filename, 'wt') as f:
            for im_ind, index in enumerate(dataset.ids):
                dets = all_boxes[cls_ind+1][im_ind]
                # `dets` may be a plain [] or a numpy array; comparing an
                # array to [] with == is a deprecated ambiguous comparison
                # in NumPy, so test emptiness with len() instead.
                if len(dets) == 0:
                    continue
                # the VOCdevkit expects 1-based indices
                for k in range(dets.shape[0]):
                    f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                            format(index[1], dets[k, -1],
                                   dets[k, 0] + 1, dets[k, 1] + 1,
                                   dets[k, 2] + 1, dets[k, 3] + 1))
def do_python_eval(output_dir='output', use_07=True):
    """Run the per-class PASCAL VOC evaluation, pickle each class's
    precision/recall curve into *output_dir*, and print per-class AP
    and the mean AP."""
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    # The PASCAL VOC metric changed in 2010.
    use_07_metric = use_07
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    aps = []
    for cls in labelmap:
        filename = get_voc_results_file_template(set_type, cls)
        rec, prec, ap = voc_eval(
            filename, annopath, imgsetpath.format(set_type), cls, cachedir,
            ovthresh=0.5, use_07_metric=use_07_metric)
        aps.append(ap)
        print('AP for {} = {:.4f}'.format(cls, ap))
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
            pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print('Mean AP = {:.4f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
        print('{:.3f}'.format(ap))
    print('{:.3f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('--------------------------------------------------------------')
def voc_ap(rec, prec, use_07_metric=True):
    """Compute VOC average precision from recall/precision arrays.

    With use_07_metric (the default here) the VOC07 11-point interpolation
    is used; otherwise the exact area under the precision/recall curve.
    """
    if use_07_metric:
        # 11-point metric: average the max precision at recall thresholds
        # 0.0, 0.1, ..., 1.0.
        ap = 0.
        for threshold in np.arange(0., 1.1, 0.1):
            above = rec >= threshold
            p = np.max(prec[above]) if np.sum(above) > 0 else 0
            ap += p / 11.
        return ap
    # Exact-area AP: append sentinels so the curve spans recall [0, 1].
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Make precision monotonically non-increasing (precision envelope).
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Area = sum of (delta recall) * precision at each recall step.
    step = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[step + 1] - mrec[step]) * mpre[step + 1])
def voc_eval(detpath,
             annopath,
             imagesetfile,
             classname,
             cachedir,
             ovthresh=0.5,
             use_07_metric=True):
    """PASCAL VOC detection evaluation for one class.

    Parameters
    ----------
    detpath : str
        Template such that detpath.format(classname) names a file with one
        'image_id score x1 y1 x2 y2' line per detection.
    annopath : str
        Template for annotation XML paths, filled via annopath % image_id.
    imagesetfile : str
        Text file with one image id per line.
    classname : str
        Class to evaluate.
    cachedir : str
        Directory used to cache parsed annotations (annots.pkl).
    ovthresh : float
        IoU threshold above which a detection may count as a true positive.
    use_07_metric : bool
        Use the VOC07 11-point AP instead of the exact-area AP.

    Returns
    -------
    (rec, prec, ap), or (-1., -1., -1.) when the detections file is empty.
    """
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    cachefile = os.path.join(cachedir, 'annots.pkl')
    # read list of images
    with open(imagesetfile, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]
    if not os.path.isfile(cachefile):
        # parse the ground-truth annotations and cache them
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(annopath % (imagename))
            if i % 100 == 0:
                print('Reading annotation for {:d}/{:d}'.format(
                    i + 1, len(imagenames)))
        # save
        print('Saving cached annotations to {:s}'.format(cachefile))
        with open(cachefile, 'wb') as f:
            pickle.dump(recs, f)
    else:
        # load the cached annotations
        with open(cachefile, 'rb') as f:
            recs = pickle.load(f)
    # extract gt objects for this class
    class_recs = {}
    npos = 0
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in R])
        # np.bool was a deprecated alias for the builtin bool and was
        # removed in NumPy 1.24; use bool directly.
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
        det = [False] * len(R)
        # 'difficult' objects do not count towards the number of positives
        npos = npos + sum(~difficult)
        class_recs[imagename] = {'bbox': bbox,
                                 'difficult': difficult,
                                 'det': det}
    # read dets
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()
    if any(lines):
        splitlines = [x.strip().split(' ') for x in lines]
        image_ids = [x[0] for x in splitlines]
        confidence = np.array([float(x[1]) for x in splitlines])
        BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
        # sort detections by descending confidence
        sorted_ind = np.argsort(-confidence)
        BB = BB[sorted_ind, :]
        image_ids = [image_ids[x] for x in sorted_ind]
        # go down dets and mark TPs and FPs
        nd = len(image_ids)
        tp = np.zeros(nd)
        fp = np.zeros(nd)
        for d in range(nd):
            R = class_recs[image_ids[d]]
            bb = BB[d, :].astype(float)
            ovmax = -np.inf
            BBGT = R['bbox'].astype(float)
            if BBGT.size > 0:
                # compute IoU of this detection with every gt box
                # intersection
                ixmin = np.maximum(BBGT[:, 0], bb[0])
                iymin = np.maximum(BBGT[:, 1], bb[1])
                ixmax = np.minimum(BBGT[:, 2], bb[2])
                iymax = np.minimum(BBGT[:, 3], bb[3])
                iw = np.maximum(ixmax - ixmin, 0.)
                ih = np.maximum(iymax - iymin, 0.)
                inters = iw * ih
                # union = area(det) + area(gt) - intersection
                uni = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +
                       (BBGT[:, 2] - BBGT[:, 0]) *
                       (BBGT[:, 3] - BBGT[:, 1]) - inters)
                overlaps = inters / uni
                ovmax = np.max(overlaps)
                jmax = np.argmax(overlaps)
            if ovmax > ovthresh:
                if not R['difficult'][jmax]:
                    # each gt box may be matched at most once; later
                    # (lower-confidence) matches are false positives
                    if not R['det'][jmax]:
                        tp[d] = 1.
                        R['det'][jmax] = 1
                    else:
                        fp[d] = 1.
            else:
                fp[d] = 1.
        # compute precision recall
        fp = np.cumsum(fp)
        tp = np.cumsum(tp)
        rec = tp / float(npos)
        # avoid divide by zero in case the first detection matches a difficult
        # ground truth
        prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
        ap = voc_ap(rec, prec, use_07_metric)
    else:
        rec = -1.
        prec = -1.
        ap = -1.
    return rec, prec, ap
def test_net(save_folder, net, cuda, dataset, transform, top_k,
             im_size=300, thresh=0.05):
    """Test a Fast R-CNN network on an image database.

    Runs the (Keras) SSD model over every image in *dataset*, converts the
    raw predictions into per-class detections via detect(), and hands the
    full detection table to evaluate_detections().  Detections are also
    pickled to <output_dir>/detections.pkl.

    NOTE(review): `save_folder`, `cuda`, `transform`, `top_k`, `im_size`
    and `thresh` are accepted but never read in this body — presumably
    kept for signature compatibility with the original PyTorch script.
    """
    num_images = len(dataset)
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(num_images)]
                 for _ in range(len(labelmap)+1)]
    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}
    output_dir = get_output_dir('ssd300_120000', set_type)
    det_file = os.path.join(output_dir, 'detections.pkl')
    # detect = Detect(21, 0, 200, 0.01, .45)
    for i in range(num_images):
        # im is the preprocessed image; h, w the original image size used
        # to rescale the normalized box coordinates back to pixels.
        im, gt, h, w = dataset.pull_item(i)
        keras_image = np.squeeze(im)
        # keras_image = substract_mean(x)
        keras_image_input = np.expand_dims(keras_image, axis=0)
        keras_output = net.predict(keras_image_input)
        detections = detect(keras_output, prior_boxes)
        # detections = detect.forward(keras_output, prior_boxes)
        _t['im_detect'].tic()
        # detections = net(x).data
        detect_time = _t['im_detect'].toc(average=False)
        # skip j = 0, because it's the background class
        detection_size = 21
        # for j in range(1, detections.size(1)):
        for j in range(1, detection_size):
            # dets rows are (score, x1, y1, x2, y2) in normalized coords;
            # keep rows whose score exceeds the hard-coded 0.01 cutoff.
            dets = detections[0, j, :]
            mask = np.squeeze(dets[:, 0] > 0.01)
            dets = dets[mask]
            if len(dets) == 0:
                continue
            boxes = dets[:, 1:]
            # scale normalized boxes back to original-image pixels
            boxes[:, 0] *= w
            boxes[:, 2] *= w
            boxes[:, 1] *= h
            boxes[:, 3] *= h
            # scores = dets[:, 0].cpu().numpy()
            scores = dets[:, 0]
            # reorder to (x1, y1, x2, y2, score) as expected downstream
            cls_dets = np.hstack((boxes, scores[:, np.newaxis])).astype(
                np.float32, copy=False)
            all_boxes[j][i] = cls_dets
        print('im_detect: {:d}/{:d} {:.3f}s'.format(i + 1,
                                                    num_images, detect_time))
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
    print('Evaluating detections')
    evaluate_detections(all_boxes, output_dir, dataset)
def evaluate_detections(box_list, output_dir, dataset):
    """Write the per-class VOC results files, then run the Python VOC
    evaluation and print AP/mAP. *box_list* has the all_boxes layout
    produced by test_net()."""
    write_voc_results_file(box_list, dataset)
    do_python_eval(output_dir)
if __name__ == '__main__':
    # Keras SSD300 weights to evaluate; the commented alternatives record
    # earlier checkpoints (trailing numbers are observed mAPs).
    weights_path = '../trained_models/SSD300_weights_new.hdf5'
    # weights_path = '../trained_models/weights.07-3.59.hdf5'
    # weights_path = '../trained_models/weights.03-3.37.hdf5'
    # weights_path = '../trained_models/weights.150-3.57.hdf5'
    # weights_path = '../trained_models/weights.02-3.29.hdf5'
    # weights_path = '../trained_models/weights.12-4.20.hdf5'
    # weights_path = '../trained_models/weights.26-5.21.hdf5'
    # weights_path = '../trained_models/weights.02-3.44.hdf5'
    # weights_path = '../trained_models/weights.22-5.01.hdf5'
    # weights_path = '../trained_models/weights.79-6.66.hdf5'
    # weights_path = '../trained_models/weights.02-1.63.hdf5'
    # weights_path = '../trained_models/weights.22-3.85.hdf5' # 69
    # weights_path = '../trained_models/weights.50-3.92.hdf5' # 69 ?
    # weights_path = '../trained_models/weights.97-3.98.hdf5' # 70
    # weights_path = '../trained_models/weights.52-7.71.hdf5'
    # weights_path = '../trained_models/weights.04-3.79.hdf5' # 71
    # weights_path = '../trained_models/weights.17-4.40.hdf5' # 71
    # weights_path = '../trained_models/weights.64-5.21.hdf5'
    # weights_path = '../trained_models/weights.116-5.49.hdf5'
    # weights_path = '../trained_models/weights.28-3.42.hdf5'
    # weights_path = '../trained_models/weights.19-3.37.hdf5' # 74
    net = SSD300(weights_path=weights_path)
    print('Finished loading model!')
    # Per-channel means in RGB order (dataset_mean above is the BGR form).
    R_MEAN = 123
    G_MEAN = 117
    B_MEAN = 104
    dataset = VOCDetection(args.voc_root, [('2007', set_type)],
                           BaseTransform(300, (R_MEAN, G_MEAN, B_MEAN)),
                           AnnotationTransform())
    test_net(args.save_folder, net, args.cuda, dataset,
             BaseTransform(300, dataset_mean), args.top_k, 300,
             thresh=args.confidence_threshold)
| {
"repo_name": "oarriaga/single_shot_multibox_detector",
"path": "src/evaluate.py",
"copies": "1",
"size": "15854",
"license": "mit",
"hash": -3218134673464111000,
"line_mean": 35.6143187067,
"line_max": 79,
"alpha_frac": 0.5490727892,
"autogenerated": false,
"ratio": 3.3789428815004263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44280156707004265,
"avg_score": null,
"num_lines": null
} |
import bcrypt
from website import settings
def generate_password_hash(password, rounds=None):
    """Hash *password* with bcrypt and return the hash.

    *rounds* sets the log_rounds parameter of `bcrypt.gensalt()`, which
    determines the salt's complexity; it defaults to
    settings.BCRYPT_LOG_ROUNDS when not given.
    """
    if rounds is None:
        rounds = settings.BCRYPT_LOG_ROUNDS
    if not password:
        raise ValueError('Password must be non-empty.')
    return bcrypt.hashpw(password.encode(), bcrypt.gensalt(rounds))
def constant_time_compare(val1, val2):
    """Compare two equal-length strings, taking time independent of how
    many characters match (guards against timing attacks).

    Returns True iff the strings are equal.
    """
    if len(val1) != len(val2):
        return False
    mismatch = 0
    for a, b in zip(val1, val2):
        mismatch |= ord(a) ^ ord(b)
    return mismatch == 0
def check_password_hash(pw_hash, password):
    """Return True iff *password*, re-hashed with the salt embedded in
    *pw_hash*, reproduces *pw_hash*."""
    candidate = bcrypt.hashpw(password.encode(), pw_hash.encode())
    return constant_time_compare(candidate, pw_hash)
| {
"repo_name": "adlius/osf.io",
"path": "framework/bcrypt/__init__.py",
"copies": "6",
"size": "1516",
"license": "apache-2.0",
"hash": -2756777924942150700,
"line_mean": 23.0634920635,
"line_max": 77,
"alpha_frac": 0.6431398417,
"autogenerated": false,
"ratio": 3.7339901477832513,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7377129989483251,
"avg_score": null,
"num_lines": null
} |
import bcrypt
from website import settings
def generate_password_hash(password, rounds=None):
    '''Hash *password* with bcrypt and return the hash.

    *rounds* sets the log_rounds parameter of `bcrypt.gensalt()`
    (salt complexity); defaults to settings.BCRYPT_LOG_ROUNDS.
    '''
    if rounds is None:
        rounds = settings.BCRYPT_LOG_ROUNDS
    if not password:
        raise ValueError('Password must be non-empty.')
    return bcrypt.hashpw(
        unicode(password).encode('utf-8'),
        bcrypt.gensalt(rounds),
    )
def constant_time_compare(val1, val2):
    '''Timing-attack-resistant string equality: runtime does not depend on
    how many characters match. True iff the strings are equal.'''
    if len(val1) != len(val2):
        return False
    acc = 0
    for left, right in zip(val1, val2):
        acc |= ord(left) ^ ord(right)
    return acc == 0
def check_password_hash(pw_hash, password):
    '''Return True iff *password* matches the stored bcrypt *pw_hash*
    (re-hash with the embedded salt and compare in constant time).'''
    candidate = bcrypt.hashpw(
        unicode(password).encode('utf-8'),
        unicode(pw_hash).encode('utf-8'),
    )
    return constant_time_compare(candidate, pw_hash)
| {
"repo_name": "doublebits/osf.io",
"path": "framework/bcrypt/__init__.py",
"copies": "62",
"size": "1564",
"license": "apache-2.0",
"hash": -8834487231165424000,
"line_mean": 23.8253968254,
"line_max": 77,
"alpha_frac": 0.6445012788,
"autogenerated": false,
"ratio": 3.688679245283019,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
import bcrypt
from website import settings
def generate_password_hash(password, rounds=None):
    """Generate a bcrypt hash for *password*.

    *rounds* is the log_rounds argument to `bcrypt.gensalt()` controlling
    salt complexity; falls back to settings.BCRYPT_LOG_ROUNDS.
    """
    if not password:
        # note: checked after the rounds default is irrelevant to behavior,
        # but the empty check must still raise before hashing
        raise ValueError('Password must be non-empty.') if rounds is None and False else None
    if rounds is None:
        rounds = settings.BCRYPT_LOG_ROUNDS
    if not password:
        raise ValueError('Password must be non-empty.')
    return bcrypt.hashpw(
        unicode(password).encode('utf-8'),
        bcrypt.gensalt(rounds),
    )
def constant_time_compare(val1, val2):
    """Equality check whose duration is independent of the number of
    matching characters (mitigates timing attacks)."""
    if len(val1) != len(val2):
        return False
    diff_bits = 0
    for ch1, ch2 in zip(val1, val2):
        diff_bits |= ord(ch1) ^ ord(ch2)
    return diff_bits == 0
def check_password_hash(pw_hash, password):
    """Check *password* against the stored bcrypt hash *pw_hash*;
    True when they match."""
    rehashed = bcrypt.hashpw(
        unicode(password).encode('utf-8'),
        unicode(pw_hash).encode('utf-8'),
    )
    return constant_time_compare(rehashed, pw_hash)
| {
"repo_name": "HalcyonChimera/osf.io",
"path": "framework/bcrypt/__init__.py",
"copies": "8",
"size": "1564",
"license": "apache-2.0",
"hash": -1382400554379888400,
"line_mean": 23.8253968254,
"line_max": 77,
"alpha_frac": 0.6445012788,
"autogenerated": false,
"ratio": 3.688679245283019,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003112356053532524,
"num_lines": 63
} |
import bcrypt
# Default bcrypt log_rounds used when the caller passes rounds=None.
# Kept in a one-element list — presumably so other code can rebind the
# default in place; confirm against callers.
_log_rounds = [12]
def generate_password_hash(password, rounds=None):
    '''Return a bcrypt hash of *password*.

    *rounds* is `bcrypt.gensalt()`'s log_rounds (salt complexity);
    defaults to the module-level _log_rounds[0].
    '''
    if rounds is None:
        rounds = _log_rounds[0]
    if not password:
        raise ValueError('Password must be non-empty.')
    return bcrypt.hashpw(
        unicode(password).encode('utf-8'),
        bcrypt.gensalt(rounds),
    )
def constant_time_compare(val1, val2):
    '''Constant-time string equality: the time taken does not reveal how
    many leading characters match. True iff equal.'''
    if len(val1) != len(val2):
        return False
    delta = 0
    for c1, c2 in zip(val1, val2):
        delta |= ord(c1) ^ ord(c2)
    return delta == 0
def check_password_hash(pw_hash, password):
    '''True iff *password* hashes (with the salt embedded in *pw_hash*)
    back to *pw_hash*.'''
    rehash = bcrypt.hashpw(
        unicode(password).encode('utf-8'),
        unicode(pw_hash).encode('utf-8'),
    )
    return constant_time_compare(rehash, pw_hash)
| {
"repo_name": "GaryKriebel/osf.io",
"path": "framework/bcrypt/__init__.py",
"copies": "11",
"size": "1544",
"license": "apache-2.0",
"hash": -8977148429624079000,
"line_mean": 22.7538461538,
"line_max": 77,
"alpha_frac": 0.6353626943,
"autogenerated": false,
"ratio": 3.624413145539906,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 65
} |
# --------------------------------------------------------
# Fully Convolutional Instance-aware Semantic Segmentation
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Haochen Zhang
# --------------------------------------------------------
import numpy as np
import utils.image as image
import matplotlib.pyplot as plt
import random
import cv2
def show_masks(im, detections, masks, class_names, binary_thresh=0.4, scale=1.0, show = True):
    """
    Visualize all detections and their instance masks on one image.

    :param im: BGR image (converted to RGB here for matplotlib)
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param masks: per-class list of soft masks, aligned with `detections`
    :param class_names: list of names in imdb (background is skipped)
    :param binary_thresh: mask values >= this are painted
    :param scale: visualize the scaled image (boxes are multiplied by it)
    :param show: call plt.show() when True
    :return: the annotated image, converted back to BGR
    """
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)  # for matplotlib
    plt.cla()
    plt.axis("off")
    plt.imshow(im)
    for ix, name in enumerate([c for c in class_names if c.lower() != "__background__"]): # ignore bg class
        # if name == '__background__':
        #     continue
        dets = detections[ix]
        msks = masks[ix]
        for det, msk in zip(dets, msks):
            color = (random.random(), random.random(), random.random())  # generate a random color
            bbox = det[:4] * scale
            cod = bbox.astype(int)
            # Only draw when the box has a non-empty interior.
            if im[cod[1]:cod[3], cod[0]:cod[2], 0].size > 0:
                # cv2.resize takes (width, height); the .T.shape of the
                # cropped region supplies exactly that.
                msk = cv2.resize(msk, im[cod[1]:cod[3]+1, cod[0]:cod[2]+1, 0].T.shape)
                # Binarize the soft mask and expand to 3 channels.
                bimsk = msk >= binary_thresh
                bimsk = bimsk.astype(int)
                bimsk = np.repeat(bimsk[:, :, np.newaxis], 3, axis=2)
                # mskd: the original pixels under the mask;
                # clmsk: a solid color patch covering the same pixels.
                mskd = im[cod[1]:cod[3]+1, cod[0]:cod[2]+1, :] * bimsk
                clmsk = np.ones(bimsk.shape) * bimsk
                clmsk[:, :, 0] = clmsk[:, :, 0] * color[0] * 256
                clmsk[:, :, 1] = clmsk[:, :, 1] * color[1] * 256
                clmsk[:, :, 2] = clmsk[:, :, 2] * color[2] * 256
                # Alpha-blend the color patch over the masked pixels (0.8 weight).
                im[cod[1]:cod[3]+1, cod[0]:cod[2]+1, :] = im[cod[1]:cod[3]+1, cod[0]:cod[2]+1, :] + 0.8 * clmsk - 0.8 * mskd
            score = det[-1]
            # Label with class name and score at the top-center of the box.
            plt.gca().text((bbox[2]+bbox[0])/2, bbox[1],
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.9), fontsize=8, color='white')
    plt.imshow(im)
    if show:
        plt.show()
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    return im
| {
"repo_name": "vincentlooi/FCIS",
"path": "lib/utils/show_masks.py",
"copies": "1",
"size": "2454",
"license": "apache-2.0",
"hash": 3907408163121617000,
"line_mean": 40.593220339,
"line_max": 124,
"alpha_frac": 0.5150774246,
"autogenerated": false,
"ratio": 3.1181702668360862,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4133247691436086,
"avg_score": null,
"num_lines": null
} |
# Adapted from
###########################################################################
# pbkdf2 - PKCS#5 v2.0 Password-Based Key Derivation #
# #
# Copyright (C) 2007-2011 Dwayne C. Litzenberger <dlitz@dlitz.net> #
# #
# Permission is hereby granted, free of charge, to any person obtaining #
# a copy of this software and associated documentation files (the #
# "Software"), to deal in the Software without restriction, including #
# without limitation the rights to use, copy, modify, merge, publish, #
# distribute, sublicense, and/or sell copies of the Software, and to #
# permit persons to whom the Software is furnished to do so, subject to #
# the following conditions: #
# #
# The above copyright notice and this permission notice shall be #
# included in all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, #
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF #
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE #
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION #
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION #
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #
# #
# Country of origin: Canada #
# #
###########################################################################
import hmac
from struct import pack
from random import randint
from hashlib import sha1
from hashlib import sha256
from hashlib import sha512
from base64 import b64encode as _b64encode
from binascii import b2a_hex as _b2a_hex
# Public API of this module.
__all__ = ['PBKDF2', 'crypt', 'encrypt', 'verify']

# Largest value a 32-bit PBKDF2 block index may take (the 'L' suffix in
# the name is a leftover from the Python 2 long literal).
_0xffffffffL = 0xffffffff

# A dict of supported hash functions, to get from a string
# (as stored as part of `crypt`'s output) to a digestmodule
algorithms = {
    'sha1': sha1,
    'sha256': sha256,
    'sha512': sha512,
}
def isunicode(s):
    """True when *s* is a text (unicode) string."""
    return isinstance(s, str)
def isbytes(s):
    """True when *s* is a bytes object."""
    return isinstance(s, bytes)
def isinteger(n):
    """True when *n* is an int (bool is a subclass of int and counts)."""
    return isinstance(n, int)
def callable(obj):
    """Portable callable(): True when *obj* exposes __call__.

    Deliberately shadows the builtin — this shim dates from Python 3.0/3.1,
    which lacked the builtin callable().
    """
    return hasattr(obj, '__call__')
def binxor(a, b):
    """Byte-wise XOR of two equal-length byte strings."""
    return bytes(x ^ y for (x, y) in zip(a, b))
def b64encode(data, chars="+/"):
    """Base64-encode *data* using *chars* as the two alternative
    characters (replacing '+' and '/').

    Returns text when *chars* is text, bytes when it is bytes.
    """
    # Inlined the module's isunicode() helper: it is a plain isinstance check.
    if isinstance(chars, str):
        return _b64encode(data, chars.encode('utf-8')).decode('utf-8')
    return _b64encode(data, chars)  # pragma nocover
def b2a_hex(s):
    """Hex-encode the bytes *s*, returning an ASCII str."""
    return _b2a_hex(s).decode('us-ascii')
class PBKDF2:
    """PBKDF2.py : PKCS#5 v2.0 Password-Based Key Derivation

    This implementation takes a passphrase and a salt (and optionally an
    iteration count, a digest module, and a MAC module and a secret_key
    for additional salting) and provides a file-like object from which
    an arbitrarily-sized key can be read.

    If the passphrase and/or salt are unicode objects, they are encoded as
    UTF-8 before they are processed.

    The idea behind PBKDF2 is to derive a cryptographic key from a
    passphrase and a salt.

    PBKDF2 may also be used as a strong salted password hash.  The
    'crypt' function is provided for that purpose.

    Remember: Keys generated using PBKDF2 are only as strong as the
    passphrases they are derived from.
    """
    def __init__(self, passphrase, salt, iterations=24000,
                 digestmodule=sha256, macmodule=hmac, secret_key=None):
        self.__macmodule = macmodule
        self.__digestmodule = digestmodule
        # Normalize a text secret key to bytes so it can be concatenated
        # with the HMAC key in _pseudorandom.
        if isinstance(secret_key, str):
            secret_key = secret_key.encode('latin-1')
        self.__secret_key = secret_key
        self._setup(passphrase, salt, iterations, self._pseudorandom)

    def _pseudorandom(self, key, msg):
        """Pseudorandom function. e.g. HMAC-SHA256"""
        # We need to generate a derived key from our base key.  We can do this
        # by passing the secret key and our base key through a pseudo-random
        # function and SHA1 works nicely.
        if self.__secret_key:
            key = sha1(self.__secret_key + key).digest()
        return self.__macmodule.new(key=key, msg=msg,
                                    digestmod=self.__digestmodule).digest()

    def read(self, bytes):
        """Read the specified number of key bytes.

        (The parameter shadows the builtin `bytes` — kept for interface
        compatibility.)  Unconsumed output is buffered so successive reads
        continue the key stream.
        """
        if self.closed:
            raise ValueError("file-like object is closed")

        size = len(self.__buf)
        blocks = [self.__buf]
        i = self.__blockNum
        # Generate whole PBKDF2 blocks until enough bytes are available.
        while size < bytes:
            i += 1
            if i > _0xffffffffL or i < 1:
                # We could return "" here, but
                raise OverflowError("derived key too long")  # pragma nocover
            block = self.__f(i)
            blocks.append(block)
            size += len(block)
        buf = b"".join(blocks)
        retval = buf[:bytes]
        # Keep the surplus for the next read; remember the block index.
        self.__buf = buf[bytes:]
        self.__blockNum = i
        return retval

    def __f(self, i):
        # i must fit within 32 bits
        assert 1 <= i <= _0xffffffffL
        # Standard PBKDF2 block: U1 = PRF(P, S || INT(i)),
        # Uj = PRF(P, U(j-1)), result = U1 xor U2 xor ... xor Uc.
        U = self.__prf(self.__passphrase, self.__salt + pack("!L", i))
        result = U
        for j in range(2, 1+self.__iterations):
            U = self.__prf(self.__passphrase, U)
            result = binxor(result, U)
        return result

    def hexread(self, octets):
        """Read the specified number of octets. Return them as hexadecimal.

        Note that len(obj.hexread(n)) == 2*n.
        """
        return b2a_hex(self.read(octets))

    def _setup(self, passphrase, salt, iterations, prf):
        # Sanity checks:

        # passphrase and salt must be str or unicode (in the latter
        # case, we convert to UTF-8)
        if isunicode(passphrase):
            passphrase = passphrase.encode("UTF-8")
        elif not isbytes(passphrase):
            raise TypeError("passphrase must be str or unicode")
        if isunicode(salt):
            salt = salt.encode("UTF-8")
        elif not isbytes(salt):
            raise TypeError("salt must be str or unicode")

        # iterations must be an integer >= 1
        if not isinteger(iterations):
            raise TypeError("iterations must be an integer")
        if iterations < 1:
            raise ValueError("iterations must be at least 1")

        # prf must be callable
        if not callable(prf):
            raise TypeError("prf must be callable")

        self.__passphrase = passphrase
        self.__salt = salt
        self.__iterations = iterations
        self.__prf = prf
        # Key-stream state: last generated block index and leftover bytes.
        self.__blockNum = 0
        self.__buf = b""
        self.closed = False

    def close(self):
        """Close the stream."""
        if not self.closed:
            del self.__passphrase
            del self.__salt
            del self.__iterations
            del self.__prf
            del self.__blockNum
            del self.__buf
            self.closed = True
def crypt(word, salt=None, iterations=24000, digestmodule=sha256,
          secret_key=None):
    """PBKDF2-based unix crypt(3) replacement.

    Produces (and parses) strings of the form
    ``$p5k2$<digest>$<iterations-hex>$<salt>$<b64-hash>``.

    The number of iterations specified in the salt overrides the 'iterations'
    parameter.

    The effective hash length is dependent on the used `digestmodule`.
    """
    # Generate a (pseudo-)random salt if the user hasn't provided one.
    if salt is None:
        salt = _makesalt()

    # salt must be a string or the us-ascii subset of unicode
    if isunicode(salt):
        salt = salt.encode('us-ascii').decode('us-ascii')
    elif isbytes(salt):
        salt = salt.decode('us-ascii')
    else:
        raise TypeError("salt must be a string")

    # word must be a string or unicode
    # (in the latter case, we convert to UTF-8)
    if isunicode(word):
        word = word.encode("UTF-8")
    elif not isbytes(word):
        raise TypeError("word must be a string or unicode")

    # Try to extract the real salt and iteration count from the salt
    # (i.e. the caller passed a previously produced crypt string).
    if salt.startswith("$p5k2$"):
        (digest_name, iterations, salt) = salt.split("$")[2:5]
        converted = int(iterations, 16)
        if iterations != "%x" % converted:  # lowercase hex, minimum digits
            raise ValueError("Invalid salt")  # pragma nocover
        iterations = converted
        if not (iterations >= 1):
            raise ValueError("Invalid salt")
        if digest_name in algorithms:
            digestmodule = algorithms[digest_name]
        else:
            raise ValueError("Digest algorithm=%s not supported!"
                             % digest_name)

    # Instantiate a `digestmodule`, so we can inspect
    # it's `name` and `digest_size`
    digest = digestmodule()

    # Make sure the salt matches the allowed character set
    allow = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./"
    for ch in salt:
        if ch not in allow:
            raise ValueError("Illegal character %r in salt" % (ch,))

    # Prefix carries everything needed to re-derive the hash at verify time.
    salt = "$p5k2$%s$%x$%s" % (digest.name.lower(), iterations, salt)
    rawhash = PBKDF2(word, salt, iterations, digestmodule,
                     secret_key=secret_key).read(digest.digest_size)
    return salt + "$" + b64encode(rawhash, "./")
# Add crypt as a static method of the PBKDF2 class.
# This makes it easier to do "from PBKDF2 import PBKDF2" and still use
# crypt without a separate import.
PBKDF2.crypt = staticmethod(crypt)
def _makesalt(altchars="./"):
    """Return a short pseudorandom salt for crypt() — six random bytes
    (three native unsigned shorts) base64-encoded with *altchars*.

    Uses `random`, so it is not suitable for generating cryptographic
    secrets.
    """
    raw = b"".join(pack("@H", randint(0, 0xffff)) for _ in range(3))
    return b64encode(raw, altchars)
def encrypt(word, secret_key=None, salt_length=None, *args, **kwargs):
    """Compatibility wrapper for lux's auth mechanism: return the PBKDF2
    crypt() string UTF-8 encoded.  *salt_length* is accepted but unused.

    Use `crypt` directly for the PBKDF2 cryptic algorithm.
    """
    hashed = crypt(word, secret_key=secret_key, *args, **kwargs)
    return hashed.encode('utf-8')
def verify(hashpass, raw, key, salt_length=None, *args, **kwargs):
    """Return True when *raw*, re-hashed with the salt embedded in
    *hashpass*, reproduces the stored hash.  *salt_length* is unused.
    """
    if isinstance(hashpass, bytes):
        hashpass = hashpass.decode('utf-8')
    if isinstance(raw, bytes):
        raw = raw.decode('utf-8')
    return crypt(raw, hashpass, secret_key=key) == hashpass
| {
"repo_name": "quantmind/lux",
"path": "lux/utils/crypt/pbkdf2.py",
"copies": "1",
"size": "10785",
"license": "bsd-3-clause",
"hash": 6673127586773677000,
"line_mean": 34.8305647841,
"line_max": 78,
"alpha_frac": 0.5844228095,
"autogenerated": false,
"ratio": 4.1624855268236205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.524690833632362,
"avg_score": null,
"num_lines": null
} |
"""Adapted from
sphinx.transforms.post_transforms.ReferencesResolver.resolve_anyref
If 'py' is one of the domains and `py:class` is defined,
the Python domain will be processed before the 'std' domain.
License for Sphinx
==================
Copyright (c) 2007-2019 by the Sphinx team (see AUTHORS file).
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from contextlib import suppress
from docutils import nodes
from sphinx.transforms.post_transforms import ReferencesResolver
class CustomReferencesResolver(ReferencesResolver):
    """ReferencesResolver with a custom lookup order for "any" references.

    Resolution order: the ``py`` domain (only when the node carries a
    ``py:class`` attribute), then ``std`` ``:term:``, then the rest of the
    ``std`` domain, then every other registered domain.  The first hit wins.
    """
    def resolve_anyref(self, refdoc, node, contnode):
        """Resolve reference generated by the "any" role."""
        stddomain = self.env.get_domain('std')
        target = node['reftarget']
        # process 'py' domain first for python classes
        if "py:class" in node:
            with suppress(KeyError):
                py_domain = self.env.domains['py']
                py_ref = py_domain.resolve_any_xref(
                    self.env, refdoc, self.app.builder, target, node, contnode)
                if py_ref:
                    return self.create_node(py_ref[0])
        # resolve :term:
        term_ref = stddomain.resolve_xref(self.env, refdoc, self.app.builder,
                                          'term', target, node, contnode)
        if term_ref:
            # replace literal nodes with inline nodes
            if not isinstance(term_ref[0], nodes.inline):
                inline_node = nodes.inline(rawsource=term_ref[0].rawsource,
                                           classes=term_ref[0].get('classes'))
                if term_ref[0]:
                    inline_node.append(term_ref[0][0])
                term_ref[0] = inline_node
            return self.create_node(("std:term", term_ref))
        # next, do the standard domain
        std_ref = stddomain.resolve_any_xref(
            self.env, refdoc, self.app.builder, target, node, contnode)
        if std_ref:
            return self.create_node(std_ref[0])
        for domain in self.env.domains.values():
            try:
                ref = domain.resolve_any_xref(
                    self.env, refdoc, self.app.builder, target, node, contnode)
                if ref:
                    return self.create_node(ref[0])
            except NotImplementedError:
                # the domain doesn't yet support the new interface
                # we have to manually collect possible references (SLOW)
                for role in domain.roles:
                    res = domain.resolve_xref(self.env, refdoc,
                                              self.app.builder, role, target,
                                              node, contnode)
                    if res and isinstance(res[0], nodes.Element):
                        result = ('%s:%s' % (domain.name, role), res)
                        return self.create_node(result)
        # no results considered to be <code>
        contnode['classes'] = []
        return contnode
    def create_node(self, result):
        """Return the node from ``result`` (a ``(role, node)`` pair),
        retagged with CSS classes derived from the resolved role.
        """
        res_role, newnode = result
        # Override "any" class with the actual role type to get the styling
        # approximately correct.
        res_domain = res_role.split(':')[0]
        if (len(newnode) > 0 and isinstance(newnode[0], nodes.Element)
                and newnode[0].get('classes')):
            newnode[0]['classes'].append(res_domain)
            newnode[0]['classes'].append(res_role.replace(':', '-'))
        return newnode
def setup(app):
    """Replace sphinx's stock ReferencesResolver with our custom subclass.

    Raises RuntimeError if the stock resolver is not registered.
    """
    getter = getattr(app.registry, "get_post_transforms", None)
    if callable(getter):
        post_transforms = getter()
    else:
        # Sphinx 1.6.* keeps the transform list directly on the app.
        post_transforms = app.post_transforms
    for index, transform in enumerate(post_transforms):
        if transform == ReferencesResolver:
            post_transforms[index] = CustomReferencesResolver
            break
    else:
        raise RuntimeError("ReferencesResolver not found")
| {
"repo_name": "mbeyeler/pulse2percept",
"path": "doc/_ext/custom_references_resolver.py",
"copies": "2",
"size": "5233",
"license": "bsd-3-clause",
"hash": 2802063057664619000,
"line_mean": 41.5447154472,
"line_max": 79,
"alpha_frac": 0.6273648003,
"autogenerated": false,
"ratio": 4.4460492778249785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6073414078124978,
"avg_score": null,
"num_lines": null
} |
# Adapted from
# Transifex, https://github.com/transifex/transifex/blob/master/transifex/resources/formats/strings.py
# localizable https://github.com/chrisballinger/python-localizable/blob/master/localizable.py
# -*- coding: utf-8 -*-
# GPLv2
"""
Apple strings file handler/compiler
"""
from __future__ import print_function
from __future__ import absolute_import
import codecs, re, chardet
import plistlib, re
"""
Handler for Apple STRINGS translation files.
Apple strings files *must* be encoded in cls.ENCODING encoding.
"""
format_encoding = 'UTF-16'
def __unescape_key(s):
    """Remove escaped line continuations (backslash-newline) from a key."""
    unescaped = s.replace('\\\n', '')
    return unescaped
def __unescape(s):
    """Expand the escape sequences used in .strings values.

    Drops backslash-newline continuations, then unescapes quotes and
    literal ``\\n`` / ``\\r`` sequences.
    """
    result = s.replace('\\\n', '')
    result = result.replace('\\"', '"')
    result = result.replace(r'\n', '\n')
    return result.replace(r'\r', '\r')
def __get_content(filename=None, content=None):
    """Return the text of a .strings resource.

    Either *content* (raw data already in memory) or *filename* may be
    given; *content* takes precedence.  Returns None when neither source
    yields anything.
    """
    if content is not None:
        # Sniff the encoding: Apple .strings files are conventionally
        # UTF-16; anything else is treated as UTF-8.
        if chardet.detect(content)['encoding'].startswith(format_encoding):
            encoding = format_encoding
        else:
            encoding = 'UTF-8'
        if isinstance(content, str):
            # BUG FIX: the decoded text used to be discarded (bare
            # `content.decode(encoding)`), which made this branch fall
            # through and ignore the given content entirely.
            return content.decode(encoding)
        return content
    if filename is None:
        return None
    return __get_content_from_file(filename, format_encoding)
def __get_content_from_file(filename, encoding):
    """Read *filename*, auto-detecting between UTF-16 and UTF-8.

    The *encoding* argument is only an initial guess; the real encoding is
    detected from the file's raw bytes.  Returns the decoded text, or None
    when reading fails (the error is printed rather than raised).
    """
    f = open(filename, 'r')
    try:
        content = f.read()
        # Re-detect the encoding from the actual bytes on disk.
        if chardet.detect(content)['encoding'].startswith(format_encoding):
            # f = f.decode(format_encoding)
            encoding = format_encoding
        else:
            # f = f.decode(default_encoding)
            encoding = 'utf-8'
        f.close()
        f = codecs.open(filename, 'r', encoding=encoding)
        return f.read()
    except IOError as e:
        # BUG FIX: `e.message` does not exist on Python 3 exceptions (and is
        # deprecated on 2); format the exception object itself instead.
        print("Error opening file %s with encoding %s: %s" % \
            (filename, format_encoding, e))
    except Exception as e:
        print("Unhandled exception: %s" % e)
    finally:
        f.close()
def parse_strings(content="", filename=None):
    """Parse an apple .strings file and create a stringset with
    all entries in the file.
    Each entry is a dict with 'key', 'value', 'comment' and 'error' keys;
    spans that are neither comments nor whitespace between entries are
    reported as an extra entry whose 'error' holds the offending text.
    See
    http://developer.apple.com/library/mac/#documentation/MacOSX/Conceptual/BPInternational/Articles/StringsFiles.html
    for details.
    """
    if filename is not None:
        content = __get_content(filename=filename)
    if not content:
        return None
    stringset = []
    f = content
    # Strip a leading BOM if the decoder left one in.
    if f.startswith(u'\ufeff'):
        f = f.lstrip(u'\ufeff')
    # regex for finding all comments in a file
    cp = r'(?:/\*(?P<comment>(?:[^*]|(?:\*+[^*/]))*\**)\*/)'
    # One entry: optional /* comment */, then "key" (or bare property) = "value";
    p = re.compile(
        r"(?:%s[ \t]*[\n]|[\r\n]|[\r]){0,1}(?P<line>((\"(?P<key>[^\"\\]*(?:\\.[^\"\\]*)*)\")|(?P<property>\w+))\s*=\s*\"(?P<value>[^\"\\]*(?:\\.[^\"\\]*)*)\"\s*;)" % cp,
        re.DOTALL | re.U)
    # Standalone comments and whitespace allowed between entries.
    c = re.compile(r'//[^\n]*\n|/\*(?:.|[\r\n])*?\*/', re.U)
    ws = re.compile(r'\s+', re.U)
    end = 0
    start = 0
    for i in p.finditer(f):
        start = i.start('line')
        end_ = i.end()
        key = i.group('key')
        comment = i.group('comment') or None
        if not key:
            key = i.group('property')
        value = i.group('value')
        error_line = None
        # Verify the gap between the previous entry and this one contains
        # only comments/whitespace; otherwise record a syntax error entry.
        while end < start:
            m = c.match(f, end, start) or ws.match(f, end, start)
            if not m or m.start() != end:
                error_line = f[end:start]
                print("Invalid syntax. Please confirm.: %s" % f[end:start])
                stringset.append({'key': None, 'value': None, 'comment': None, 'error': error_line})
                break
            end = m.end()
        end = end_
        key = __unescape_key(key)
        ''' _unescape(value) // don't needed this. becase \n is just \n in .strings '''
        stringset.append({'key': key, 'value': value, 'comment': comment, 'error': None})
    return stringset
| {
"repo_name": "metasmile/transync",
"path": "strsync/strparser.py",
"copies": "2",
"size": "3809",
"license": "mit",
"hash": -2948284440874760700,
"line_mean": 31.2796610169,
"line_max": 169,
"alpha_frac": 0.5607771069,
"autogenerated": false,
"ratio": 3.4595821980018164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5020359304901817,
"avg_score": null,
"num_lines": null
} |
import collections
import contextlib
import logging
import os
import shutil
import subprocess
import sys
import tempfile
from PIL import Image
def _images_are_equal(filename1, filename2):
    """Return True when the two images have identical visible pixels.

    Both images are normalized to RGBA so that mode differences (indexed,
    L/P mode, missing alpha channel) do not matter.  Pixels that are fully
    transparent in both images compare equal regardless of RGB payload.
    """
    bytes1 = Image.open(filename1).convert('RGBA').tobytes()
    bytes2 = Image.open(filename2).convert('RGBA').tobytes()
    if len(bytes1) != len(bytes2):
        return False
    # Subscripting bytes yields str in Python 2 and int in Python 3, so
    # derive the sentinel by subscripting a bytes literal the same way.
    transparent = b'\x00'[0]
    for pos in range(0, len(bytes1), 4):
        pixel1 = bytes1[pos:pos + 4]
        pixel2 = bytes2[pos:pos + 4]
        if pixel1[3] == transparent and pixel2[3] == transparent:
            # Both pixels invisible: RGB values are irrelevant.
            continue
        if pixel1 != pixel2:
            return False
    return True
def _get_temporary_filename(prefix='tmp'):
    """Reserve a temporary file name without keeping the file around.

    The NamedTemporaryFile is closed immediately (which also deletes it,
    since delete defaults to True); only its path is returned for reuse.
    """
    with tempfile.NamedTemporaryFile(prefix=prefix) as handle:
        return handle.name
@contextlib.contextmanager
def _temporary_filenames(total):
    """Yield *total* fresh temporary file names, removing them afterwards."""
    names = [_get_temporary_filename('optimage-') for _ in range(total)]
    yield names
    for name in names:
        try:
            os.remove(name)
        except OSError:
            # Best effort: the file may never have been created (e.g. the
            # compressor failed), so a missing file is fine.
            pass
# NOTE(review): not raised anywhere in this module — presumably used by callers.
class InvalidExtension(Exception):
    """The file extension does not correspond to the file contents."""
# Python 2 has no FileNotFoundError builtin; alias its closest ancestor so
# the `except FileNotFoundError` in _call_binary works on both versions.
if sys.version_info.major == 2:
    FileNotFoundError = OSError
else:
    FileNotFoundError = FileNotFoundError  # re-export the builtin at module scope
# Subclasses the FileNotFoundError alias above, so callers may catch either.
class MissingBinary(FileNotFoundError):
    """The binary does not exist."""
def _call_binary(args):
    """Run *args* as a subprocess and return its combined stdout/stderr.

    Raises MissingBinary when the executable (args[0]) is not installed;
    propagates CalledProcessError on non-zero exit.
    """
    try:
        output = subprocess.check_output(args, stderr=subprocess.STDOUT)
    except FileNotFoundError as error:
        raise MissingBinary(error.errno, 'binary not found', args[0])
    return output
def _pngcrush(input_filename, output_filename):
    """Recompress a PNG with pngcrush (brute-force, metadata stripped)."""
    command = ['pngcrush', '-rem', 'alla', '-reduce', '-brute', '-q',
               input_filename, output_filename]
    _call_binary(command)
def _optipng(input_filename, output_filename):
    """Recompress a PNG with optipng at its maximum optimization level."""
    command = ['optipng', '-out', output_filename, '-o9', '-quiet',
               input_filename]
    _call_binary(command)
def _jpegtran(input_filename, output_filename):
    """Recompress a JPEG with jpegtran (metadata stripped, -perfect mode)."""
    command = ['jpegtran', '-copy', 'none', '-optimize', '-perfect',
               '-outfile', output_filename, input_filename]
    _call_binary(command)
def _jpegoptim(input_filename, output_filename):
    """Recompress a JPEG with jpegoptim.

    jpegoptim compresses in place, so the input is first copied to
    *output_filename* and that copy is compressed.
    """
    shutil.copy(input_filename, output_filename)
    command = ['jpegoptim', '--strip-all', '--quiet', output_filename]
    _call_binary(command)
# Outcome of one compressor run.  `size` comes first so that plain tuple
# comparison (min/max) picks the smallest output.
_CompressorResult = collections.namedtuple('_CompressorResult',
                                           ['size', 'filename', 'compressor'])
def _process(compressor, input_filename, output_filename):
    """Run one compressor over an image and report the outcome.

    Returns:
        _CompressorResult named tuple, with the resulting size, the name of
        the output file and the name of the compressor.
    """
    compressor(input_filename, output_filename)
    size = os.path.getsize(output_filename)
    name = compressor.__name__
    return _CompressorResult(size, output_filename, name)
def _compress_with(input_filename, output_filename, compressors):
    """Helper function to compress an image with several compressors.
    In case the compressors do not improve the filesize or in case the resulting
    image is not equivalent to the source, then the output will be a copy of the
    input.
    """
    with _temporary_filenames(len(compressors)) as temp_filenames:
        results = []
        for compressor, temp_filename in zip(compressors, temp_filenames):
            results.append(_process(compressor, input_filename, temp_filename))
        # `size` is the first namedtuple field, so min() picks the smallest output.
        best_result = min(results)
        os.rename(best_result.filename, output_filename)
        best_compressor = best_result.compressor
        # best_compressor is set to None whenever the output must fall back
        # to a copy of the input (no size win, or pixels changed).
        if best_result.size >= os.path.getsize(input_filename):
            best_compressor = None
        if (best_compressor is not None and
                not _images_are_equal(input_filename, output_filename)):
            logging.info('Compressor "%s" generated an invalid image for "%s"',
                         best_compressor, input_filename)
            best_compressor = None
        if best_compressor is None:
            shutil.copy(input_filename, output_filename)
        logging.info('%s: best compressor for "%s"', best_compressor,
                     input_filename)
def jpeg_compressor(input_filename, output_filename):
    """Losslessly recompress a JPEG.

    Tries jpegtran and jpegoptim and keeps the smaller equivalent result.
    """
    _compress_with(input_filename, output_filename, [_jpegtran, _jpegoptim])
def png_compressor(input_filename, output_filename):
    """Losslessly recompress a PNG.

    Tries pngcrush and optipng and keeps the smaller equivalent result.
    (The original docstring said "JPEG" — this is the PNG path.)
    """
    _compress_with(input_filename, output_filename, [_pngcrush, _optipng])
# Maps a lower-cased file extension to the compressor that handles it.
_EXTENSION_MAPPING = {
    '.jpeg': jpeg_compressor,
    '.jpg': jpeg_compressor,
    '.png': png_compressor,
}
def optimage(filename):
    """Losslessly recompress *filename* in place when possible.

    Picks a compressor from the file extension, runs it against a temporary
    file, and overwrites the original only if the result is smaller.
    Prints a human-readable report; returns None in all cases.
    """
    _, extension = os.path.splitext(filename)
    extension = extension.lower()
    compressor = _EXTENSION_MAPPING.get(extension)
    if compressor is None:
        print(
            'No lossless compressor defined for extension "{}"\n'.format(
                extension))
        return
    with _temporary_filenames(1) as temp_filenames:
        output_filename = temp_filenames[0]
        try:
            compressor(filename, output_filename)
        except MissingBinary as error:
            print(
                'The executable "{}" was not found. '.format(error.filename) +
                'Please install it and re-run this command.\n')
            return
        except subprocess.CalledProcessError as error:
            print(
                'Error when running the command:\n ' +
                '{}\n'.format(' '.join(error.cmd)))
            print('Status: {}\n'.format(error.returncode))
            print('Output:\n')
            print(error.output.decode('utf-8'))
            return
        original_size = os.path.getsize(filename)
        new_size = os.path.getsize(output_filename)
        reduction = original_size - new_size
        # NOTE(review): on Python 2 this is integer division, so the
        # percentage is truncated — confirm if py2 support still matters.
        reduction_percentage = reduction * 100 / original_size
        savings = 'savings: {} bytes = {:.2f}%'.format(
            reduction, reduction_percentage)
        if new_size < original_size:
            # Only overwrite the original when we actually saved bytes.
            shutil.copy(output_filename, filename)
            print('File was losslessly compressed to {} bytes ({})'.format(
                new_size, savings))
        else:
            print('No further compression achieved')
| {
"repo_name": "jpscaletti/moar",
"path": "moar/optimage.py",
"copies": "2",
"size": "7743",
"license": "mit",
"hash": -832543496685370400,
"line_mean": 32.6652173913,
"line_max": 80,
"alpha_frac": 0.6471651815,
"autogenerated": false,
"ratio": 4.070977917981073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5718143099481072,
"avg_score": null,
"num_lines": null
} |
# Adapted from Parag K. Mital, Jan 2016 convolutional_autoencoder.py
import tensorflow as tf
import numpy as np
import numpy.matlib as matlib
import math
from libs.activations import lrelu
from libs.utils import corrupt
DEFAULT_IMAGE_SIZE = 128;
def autoencoder(input_shape=[None, DEFAULT_IMAGE_SIZE*DEFAULT_IMAGE_SIZE*3], # [num_examples, num_bytes]
                n_filters=[3, 10, 20, 40], # number of filters in each conv layer
                filter_sizes=[3, 3, 3, 3]):
    """Build a deep autoencoder w/ tied weights.
    Uses the pre-1.0 TensorFlow graph API (tf.placeholder, tf.pack); the
    defaults are harmless mutable lists that are only read, never mutated.
    Parameters
    ----------
    input_shape : list, optional
        Description
    n_filters : list, optional
        Description
    filter_sizes : list, optional
        Description
    Returns
    -------
    x : Tensor
        Input placeholder to the network
    z : Tensor
        Inner-most latent representation
    y : Tensor
        Output reconstruction of the input
    cost : Tensor
        Overall cost to use for training
    Raises
    ------
    ValueError
        Description
    """
    # input to the network
    x = tf.placeholder(
        tf.float32, input_shape, name='x')
    # ensure 2-d is converted to square tensor.
    if len(x.get_shape()) == 2: # assuming second dim of input_shape is num_bytes of an example
        # convert 1D image into 3D and add fifth dimension for num_filters
        x_dim = np.sqrt(x.get_shape().as_list()[1] / n_filters[0]) # assuming each image is square
        if x_dim != int(x_dim): # not a square image
            raise ValueError('Not a square image')
        x_dim = int(x_dim)
        x_tensor = tf.reshape(
            x, [-1, x_dim, x_dim, n_filters[0]]) # reshape input samples to m * 2D image * 3 channel * 1 layer for input
    elif len(x.get_shape()) == 4: # assuming we already did that
        x_tensor = x
    else:
        raise ValueError('Unsupported input dimensions')
    current_input = x_tensor
    # Build the encoder
    encoder = []
    shapes = []
    for layer_i, n_output in enumerate(n_filters[1:]): # enumerate the number of filters in each hidden layer
        n_input = current_input.get_shape().as_list()[3] # number of filters in current input
        shapes.append(current_input.get_shape().as_list()) # append shape of this layer's input
        W = tf.Variable(
            tf.random_uniform([
                filter_sizes[layer_i],
                filter_sizes[layer_i], # a filter_size x filter_size filter
                n_input, n_output], # mapping n_inps to n_outs
                -1.0 / math.sqrt(n_input),
                1.0 / math.sqrt(n_input))) # create Weight mx W_ij = rand([-1,1])
        b = tf.Variable(tf.zeros([n_output])) # create Bias vector
        encoder.append(W)
        output = lrelu( # apply non-linearity
            tf.add(tf.nn.conv2d(
                current_input, W, strides=[1, 2, 2, 1], padding='SAME'), b)) # add bias to output of conv(inps,W)
        current_input = output
    # store the latent representation
    z = current_input
    encoder.reverse() # going backwards for the decoder
    shapes.reverse()
    print(shapes)
    # Build the decoder using the same weights (tied-weight autoencoder)
    for layer_i, shape in enumerate(shapes):
        W = encoder[layer_i] # using same weights as encoder
        b = tf.Variable(tf.zeros([W.get_shape().as_list()[2]])) # but different biases
        output = lrelu(tf.add(
            tf.nn.conv2d_transpose( # transpose conv is deconv
                current_input, W,
                tf.pack([tf.shape(x)[0], shape[1], shape[2], shape[3]]), # output shape; NOTE: tf.pack became tf.stack in TF >= 1.0
                strides=[1, 2, 2, 1], padding='SAME'), b))
        current_input = output
    # now have the reconstruction through the network
    y = current_input
    # cost function measures pixel-wise difference between output and input
    cost = tf.reduce_sum(tf.square(y - x_tensor))
    # %%
    return {'x': x, 'z': z, 'y': y, 'cost': cost} # output of symbolic operations representing
    # input, intermediate, output, and cost
# %%
def test_mandrill():
    """Test the convolutional autoencoder using the Mandrill Small image.

    Loads mandrill_small.mat from the working directory, trains for one
    epoch, and plots the original next to its reconstruction.
    """
    # %%
    import tensorflow as tf
    import numpy as np
    import scipy.io
    import matplotlib.pyplot as plt
    # Load Mandrill Small data
    mandrill_small = scipy.io.loadmat('mandrill_small.mat')
    mandrill_small = mandrill_small['A']
    mandrill_small = np.array(mandrill_small)
    # Per-pixel mean, replicated across the 3 color channels.
    mean_img = np.tile(np.mean(mandrill_small, axis=2),(1,1,3));
    mandrill_small = np.reshape(mandrill_small, [1,128*128*3])
    mean_img = np.reshape(mean_img, [128*128*3])
    print(mandrill_small.shape)
    ae = autoencoder()
    # %%
    learning_rate = 0.01
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(ae['cost'])
    # We create a session to use the graph
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    # Fit all training data (single image, mean-subtracted)
    n_epochs = 1
    for epoch_i in range(n_epochs):
        batch_xs = mandrill_small
        train = np.array([img - mean_img for img in batch_xs])
        sess.run(optimizer, feed_dict={ae['x']: train})
        print(epoch_i, sess.run(ae['cost'], feed_dict={ae['x']: train}))
    # Plot example reconstructions
    test_xs = mandrill_small
    n_examples = 1
    test_xs_norm = np.array([img - mean_img for img in test_xs])
    recon = sess.run(ae['y'], feed_dict={ae['x']: test_xs_norm})
    print(recon.shape)
    fig, axs = plt.subplots(2, n_examples, figsize=(1, 2))
    axs[0].imshow(
        np.reshape(test_xs[0, :], (128, 128,3)))
    # Add the mean back before displaying the reconstruction.
    axs[1].imshow(
        np.reshape(
            np.reshape(recon[0, ...], (3*128**2,)) + mean_img,
            (128, 128,3)))
    fig.show()
    plt.draw()
    plt.waitforbuttonpress()
if __name__ == '__main__':
    # Run the Mandrill smoke test when executed as a script.
    test_mandrill()
| {
"repo_name": "apoorva-sharma/deep-frame-interpolation",
"path": "conv_auto_threechannel.py",
"copies": "1",
"size": "5777",
"license": "mit",
"hash": 1982798716517226800,
"line_mean": 33.3869047619,
"line_max": 120,
"alpha_frac": 0.6048121863,
"autogenerated": false,
"ratio": 3.5507068223724647,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9592450049939258,
"avg_score": 0.012613791746641397,
"num_lines": 168
} |
""" adapted from phidl.Geometry
"""
import rectpack
import numpy as np
from pp.component import Component
from numpy import ndarray
from typing import Any, Dict, List, Tuple
def _pack_single_bin(
    rect_dict: Dict[int, Tuple[int, int]],
    aspect_ratio: Tuple[int, int],
    max_size: ndarray,
    sort_by_area: bool,
    density: float,
    precision: float,
    verbose: bool,
) -> Tuple[Dict[int, Tuple[int, int, int, int]], Dict[Any, Any]]:
    """ Takes a `rect_dict` argument of the form {id:(w,h)} and tries to
    pack it into a bin as small as possible with aspect ratio `aspect_ratio`
    Will iteratively grow the bin size until everything fits or the bin size
    reaches `max_size`.
    Returns: a dictionary of the packed rectangles in the form {id:(x,y,w,h)},
    and a dictionary of remaining unpacked rects
    """
    # Compute total area and use it for an initial estimate of the bin size
    total_area = 0
    for r in rect_dict.values():
        total_area += r[0] * r[1]
    aspect_ratio = np.asarray(aspect_ratio) / np.linalg.norm(aspect_ratio) # Normalize
    # Setup variables
    box_size = np.asarray(aspect_ratio * np.sqrt(total_area), dtype=np.float64)
    box_size = np.clip(box_size, None, max_size)
    if sort_by_area:
        rp_sort = rectpack.SORT_AREA
    else:
        rp_sort = rectpack.SORT_NONE
    # Repeatedly run the rectangle-packing algorithm with increasingly larger
    # areas until everything fits or we've reached the maximum size
    while True:
        # Create the pack object (a fresh packer is needed each attempt)
        rect_packer = rectpack.newPacker(
            mode=rectpack.PackingMode.Offline,
            pack_algo=rectpack.MaxRectsBlsf,
            sort_algo=rp_sort,
            bin_algo=rectpack.PackingBin.BBF,
            rotation=False,
        )
        # Add each rectangle to the pack, create a single bin, and pack
        for rid, r in rect_dict.items():
            rect_packer.add_rect(width=r[0], height=r[1], rid=rid)
        rect_packer.add_bin(width=box_size[0], height=box_size[1])
        rect_packer.pack()
        # Adjust the box size for next time
        box_size *= density # Increase area to try to fit
        box_size = np.clip(box_size, None, max_size)
        if verbose:
            print(
                "Trying to pack in bin size (%0.2f, %0.2f)"
                % tuple(box_size * precision)
            )
        # Quit the loop if we've packed all the rectangles or reached the max size
        if len(rect_packer.rect_list()) == len(rect_dict):
            if verbose:
                print("Success!")
            break
        elif all(box_size >= max_size):
            if verbose:
                print("Reached max_size, creating an additional bin")
            break
    # Separate packed from unpacked rectangles, make dicts of form {id:(x,y,w,h)}
    packed_rect_dict = {r[-1]: r[:-1] for r in rect_packer[0].rect_list()}
    unpacked_rect_dict = {}
    for k, v in rect_dict.items():
        if k not in packed_rect_dict:
            unpacked_rect_dict[k] = v
    return (packed_rect_dict, unpacked_rect_dict)
def pack(
    D_list: List[Component],
    spacing: float = 10,
    aspect_ratio: Tuple[int, int] = (1, 1),
    max_size: Tuple[Any, Any] = (None, None),
    sort_by_area: bool = True,
    density: float = 1.1,
    precision: float = 1e-2,
    verbose: bool = False,
) -> List[Component]:
    """Pack a list of Components into as few rectangular bins as possible.

    Returns a list of Components, one per bin, each containing references to
    the packed input Components.

    Args:
        D_list: Must be a list or tuple of Components
        spacing: Minimum distance between adjacent shapes
        aspect_ratio: (width, height) ratio of the rectangular bin
        max_size: Limits the size into which the shapes will be packed;
            each entry may be None (unlimited) or a number
        density: Values closer to 1 pack tighter but require more computation
        precision: Grid unit used to convert sizes to integer rectangles
        sort_by_area (Boolean): Pre-sorts the shapes by area
        verbose: False
    """
    if density < 1.01:
        raise ValueError(
            "pack() was given a `density` argument that is"
            + " too small. The density argument must be >= 1.01"
        )
    # Sanitize max_size variable: None means "no limit" on that axis
    max_size = [np.inf if v is None else v for v in max_size]
    max_size = np.asarray(max_size, dtype=np.float64) # In case it's integers
    max_size = max_size / precision
    # Convert Components to integer rectangles in units of `precision`
    rect_dict = {}
    for n, D in enumerate(D_list):
        w, h = (D.size + spacing) / precision
        w, h = int(w), int(h)
        if (w > max_size[0]) or (h > max_size[1]):
            raise ValueError(
                "pack() failed because one of the objects "
                + "in `D_list` is has an x or y dimension larger than `max_size` and "
                + "so cannot be packed"
            )
        rect_dict[n] = (w, h)
    # Keep packing leftover rectangles into additional bins until done
    packed_list = []
    while len(rect_dict) > 0:
        (packed_rect_dict, rect_dict) = _pack_single_bin(
            rect_dict,
            aspect_ratio=aspect_ratio,
            max_size=max_size,
            sort_by_area=sort_by_area,
            density=density,
            precision=precision,
            verbose=verbose,
        )
        packed_list.append(packed_rect_dict)
    # Materialize each bin as a Component containing centered references
    D_packed_list = []
    for rect_dict in packed_list:
        D_packed = Component()
        for n, rect in rect_dict.items():
            x, y, w, h = rect
            xcenter = x + w / 2 + spacing / 2
            ycenter = y + h / 2 + spacing / 2
            d = D_packed.add_ref(D_list[n])
            d.center = (xcenter * precision, ycenter * precision)
        D_packed_list.append(D_packed)
    return D_packed_list
def _demo():
    """Demo: pack 100 random phidl shapes into one bin and show it in klayout."""
    import pp
    import phidl.geometry as pg
    D_list = [pg.ellipse(radii=np.random.rand(2) * n + 2) for n in range(50)]
    D_list += [pg.rectangle(size=np.random.rand(2) * n + 2) for n in range(50)]
    D_packed_list = pack(
        D_list, # Must be a list or tuple of Components
        spacing=1.25, # Minimum distance between adjacent shapes
        aspect_ratio=(2, 1), # (width, height) ratio of the rectangular bin
        max_size=(None, None), # Limits the size into which the shapes will be packed
        density=1.05, # Values closer to 1 pack tighter but require more computation
        sort_by_area=True, # Pre-sorts the shapes by area
        verbose=False,
    )
    D = D_packed_list[0] # Only one bin was created, so we plot that
    pp.show(D) # show it in klayout
if __name__ == "__main__":
    # Script entry point: pack random ellipses and rectangles separately,
    # then pack the two resulting bins together and display the result.
    import pp
    import phidl.geometry as pg
    spacing = 1
    ellipses = pack(
        [pg.ellipse(radii=np.random.rand(2) * n + 2) for n in range(50)],
        spacing=spacing,
    )[0]
    ellipses.name = "ellipses"
    rectangles = pack(
        [pg.rectangle(size=np.random.rand(2) * n + 2) for n in range(50)],
        spacing=spacing,
    )[0]
    rectangles.name = "rectangles"
    p = pack([ellipses, rectangles])
    pp.show(p[0])
| {
"repo_name": "psiq/gdsfactory",
"path": "pp/pack.py",
"copies": "1",
"size": "6854",
"license": "mit",
"hash": -4270698949828082000,
"line_mean": 33.6161616162,
"line_max": 126,
"alpha_frac": 0.5922089291,
"autogenerated": false,
"ratio": 3.5112704918032787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46034794209032787,
"avg_score": null,
"num_lines": null
} |
"""adapted from phidl.routing
temporary solution until we add Sbend routing functionality
"""
from typing import Optional
import gdspy
import numpy as np
from numpy import cos, mod, pi, sin
from numpy.linalg import norm
from pp.cell import cell
from pp.component import Component
from pp.config import TECH
from pp.snap import snap_to_grid
from pp.types import Layer, Port, Route
class RoutingError(ValueError):
    """Raised when two ports cannot be routed (e.g. they do not face each other)."""
@cell
def route_basic(
    port1: Port,
    port2: Port,
    path_type: str = "sine",
    width_type: str = "straight",
    width1: Optional[float] = None,
    width2: Optional[float] = None,
    num_path_pts: int = 99,
    layer: Optional[Layer] = None,
) -> Component:
    """Route two facing ports with a parametric (straight or sine) path.

    Ports must face each other (orientations 180 degrees apart) or
    RoutingError is raised.  Widths default to each port's own width and
    may taper along the path according to `width_type`.  Returns a
    Component with ports 1 and 2 placed on the two ends.
    """
    layer = layer or port1.layer
    # Assuming they're both Ports for now
    point_a = np.array(port1.midpoint)
    if width1 is None:
        width1 = port1.width
    point_b = np.array(port2.midpoint)
    if width2 is None:
        width2 = port2.width
    if round(abs(mod(port1.orientation - port2.orientation, 360)), 3) != 180:
        raise RoutingError(
            "Route() error: Ports do not face each other (orientations must be 180 apart)"
        )
    orientation = port1.orientation
    separation = point_b - point_a # Vector drawn from A to B
    distance = norm(separation) # Magnitude of vector from A to B
    rotation = (
        np.arctan2(separation[1], separation[0]) * 180 / pi
    ) # Rotation of vector from A to B
    # If looking out along the normal of ``a``, the angle you would have to
    # look to see ``b``
    angle = rotation - orientation
    forward_distance = distance * cos(angle * pi / 180)
    lateral_distance = distance * sin(angle * pi / 180)
    # Create a path assuming starting at the origin and setting orientation = 0
    # use the "connect" function later to move the path to the correct location
    xf = forward_distance
    yf = lateral_distance
    # Parametric curve/derivative/width profiles over t in [0, 1]:
    def curve_fun_straight(t):
        return [xf * t, yf * t]
    def curve_deriv_fun_straight(t):
        return [xf + t * 0, t * 0]
    def curve_fun_sine(t):
        return [xf * t, yf * (1 - cos(t * pi)) / 2]
    def curve_deriv_fun_sine(t):
        return [xf + t * 0, yf * (sin(t * pi) * pi) / 2]
    def width_fun_straight(t):
        return (width2 - width1) * t + width1
    def width_fun_sine(t):
        return (width2 - width1) * (1 - cos(t * pi)) / 2 + width1
    if path_type == "straight":
        curve_fun = curve_fun_straight
        curve_deriv_fun = curve_deriv_fun_straight
    if path_type == "sine":
        curve_fun = curve_fun_sine
        curve_deriv_fun = curve_deriv_fun_sine
    # if path_type == 'semicircle':
    #    def semicircle(t):
    #        t = np.array(t)
    #        x,y = np.zeros(t.shape), np.zeros(t.shape)
    #        ii = (0 <= t) & (t <= 0.5)
    #        jj = (0.5 < t) & (t <= 1)
    #        x[ii] = (cos(-pi/2 + t[ii]*pi/2))*xf
    #        y[ii] = (sin(-pi/2 + t[ii]*pi/2)+1)*yf*2
    #        x[jj] = (cos(pi*3/2 - t[jj]*pi)+2)*xf/2
    #        y[jj] = (sin(pi*3/2 - t[jj]*pi)+1)*yf/2
    #        return x,y
    #    curve_fun = semicircle
    #    curve_deriv_fun = None
    if width_type == "straight":
        width_fun = width_fun_straight
    if width_type == "sine":
        width_fun = width_fun_sine
    route_path = gdspy.Path(width=width1, initial_point=(0, 0))
    route_path.parametric(
        curve_fun,
        curve_deriv_fun,
        number_of_evaluations=num_path_pts,
        max_points=199,
        final_width=width_fun,
        final_distance=None,
    )
    route_path_polygons = route_path.polygons
    # Make the route path into a Device with ports, and use "connect" to move it
    # into the proper location
    D = Component()
    D.add_polygon(route_path_polygons, layer=layer)
    p1 = D.add_port(name=1, midpoint=(0, 0), width=width1, orientation=180)
    D.add_port(
        name=2,
        midpoint=[forward_distance, lateral_distance],
        width=width2,
        orientation=0,
    )
    D.info["length"] = route_path.length
    D.rotate(angle=180 + port1.orientation - p1.orientation, center=p1.midpoint)
    D.move(origin=p1, destination=port1)
    return D
@cell
def _arc(
    radius=10, width=0.5, theta=90, start_angle=0, angle_resolution=2.5, layer=(1, 0)
):
    """Creates an arc of arclength ``theta`` starting at angle ``start_angle``

    Ports 1 and 2 are placed on the start and end faces of the arc, with
    orientations flipped when ``theta`` is negative.
    """
    inner_radius = radius - width / 2
    outer_radius = radius + width / 2
    angle1 = (start_angle) * pi / 180
    angle2 = (start_angle + theta) * pi / 180
    t = np.linspace(angle1, angle2, int(np.ceil(abs(theta) / angle_resolution)))
    inner_points_x = (inner_radius * cos(t)).tolist()
    inner_points_y = (inner_radius * sin(t)).tolist()
    outer_points_x = (outer_radius * cos(t)).tolist()
    outer_points_y = (outer_radius * sin(t)).tolist()
    # Walk the inner edge forward and the outer edge backward to form a
    # closed polygon.
    xpts = inner_points_x + outer_points_x[::-1]
    ypts = inner_points_y + outer_points_y[::-1]
    D = Component()
    D.add_polygon(points=(xpts, ypts), layer=layer)
    D.add_port(
        name=1,
        midpoint=(radius * cos(angle1), radius * sin(angle1)),
        width=width,
        orientation=start_angle - 90 + 180 * (theta < 0),
    )
    D.add_port(
        name=2,
        midpoint=(radius * cos(angle2), radius * sin(angle2)),
        width=width,
        orientation=start_angle + theta + 90 - 180 * (theta < 0),
    )
    D.info["length"] = (abs(theta) * pi / 180) * radius
    return D
@cell
def _gradual_bend(
    radius=10,
    width=1.0,
    angular_coverage=15,
    num_steps=10,
    angle_resolution=0.1,
    start_angle=0,
    direction="ccw",
    layer=0,
):
    """
    Creates a 90-degree bent waveguide.
    The bending radius is gradually increased until it reaches the minimum
    value of the radius at the "angular coverage" angle.
    It essentially creates a smooth transition to a bent waveguide mode.
    User can control the number of steps provided.
    Direction is determined by start angle and the cw/ccw switch.
    ############
    With the default 10 "num_steps" and 15 degree coverage, the effective
    radius is about 1.5*radius.
    """
    angular_coverage = np.deg2rad(angular_coverage)
    D = Component()
    # determines the increment in radius through its inverse from 0 to 1/r
    inc_rad = (radius ** -1) / (num_steps)
    angle_step = angular_coverage / num_steps
    # construct a series of sub-arcs with equal angles but gradually
    # decreasing bend radius
    arcs = []
    prevPort = None
    for x in range(num_steps):
        A = _arc(
            radius=1 / ((x + 1) * inc_rad),
            width=width,
            theta=np.rad2deg(angle_step),
            start_angle=x * np.rad2deg(angle_step),
            angle_resolution=angle_resolution,
            layer=layer,
        )
        a = D.add_ref(A)
        arcs.append(a)
        if x > 0:
            # chain each sub-arc onto the previous one's exit port
            a.connect(port=1, destination=prevPort)
        prevPort = a.ports[2]
        D.absorb(a)
    D.add_port(name=1, port=arcs[0].ports[1])
    # now connect a regular bend for the normal curved portion
    B = _arc(
        radius=radius,
        width=width,
        theta=45 - np.rad2deg(angular_coverage),
        start_angle=angular_coverage,
        angle_resolution=angle_resolution,
        layer=layer,
    )
    b = D.add_ref(B)
    b.connect(port=1, destination=prevPort)
    prevPort = b.ports[2]
    D.add_port(name=2, port=prevPort)
    # now create the overall structure
    Total = Component()
    # clone the half-curve into two objects and connect for a 90 deg bend.
    D1 = Total.add_ref(D)
    D2 = Total.add_ref(D)
    D2.mirror(p1=[0, 0], p2=[1, 1])
    D2.connect(port=2, destination=D1.ports[2])
    Total.xmin = 0
    Total.ymin = 0
    # orient to default settings...
    Total.mirror(p1=[0, 0], p2=[1, 1])
    Total.mirror(p1=[0, 0], p2=[1, 0])
    # orient to user-provided settings
    if direction == "cw":
        Total.mirror(p1=[0, 0], p2=[1, 0])
    Total.rotate(angle=start_angle, center=Total.center)
    Total.center = [0, 0]
    Total.add_port(name=1, port=D1.ports[1])
    Total.add_port(name=2, port=D2.ports[1])
    Total.info["length"] = (abs(angular_coverage) * pi / 180) * radius
    Total.absorb(D1)
    Total.absorb(D2)
    return Total
def _route_manhattan180(port1, port2, bendType="circular", layer=0, radius=20):
    """Routes between two ports whose orientations are parallel (0 degrees
    apart) or anti-parallel (180 degrees apart), using two or four bends.

    This is a subroutine of route_manhattan() and should not be used by itself.

    Args:
        port1: starting Port (orientation must be 0, 90, 180 or 270).
        port2: ending Port.
        bendType: "circular" or "gradual".
        layer: layer to draw the route on.
        radius: bend radius.

    Returns:
        Component containing the route geometry, with ports 1 and 2.

    Raises:
        ValueError: if port1 is not axis-aligned, or the two orientations are
            neither equal nor 180 degrees apart.
    """
    Total = Component()
    width = port1.width
    # The coordinate mapping below only covers the four cardinal orientations;
    # any other value used to leave p1/p2 unbound and crash with NameError.
    if port1.orientation not in (0, 90, 180, 270):
        raise ValueError(
            "_route_manhattan180() requires an axis-aligned port1, "
            "got orientation={}".format(port1.orientation)
        )
    # first map into uniform plane with normal x,y coords
    # allows each situation to be put into uniform cases of quadrants for routing.
    # this is because bends change direction and positioning.
    if port1.orientation == 0:
        p2 = [port2.midpoint[0], port2.midpoint[1]]
        p1 = [port1.midpoint[0], port1.midpoint[1]]
    if port1.orientation == 90:
        p2 = [port2.midpoint[1], -port2.midpoint[0]]
        p1 = [port1.midpoint[1], -port1.midpoint[0]]
    if port1.orientation == 180:
        p2 = [-port2.midpoint[0], -port2.midpoint[1]]
        p1 = [-port1.midpoint[0], -port1.midpoint[1]]
    if port1.orientation == 270:
        p2 = [-port2.midpoint[1], port2.midpoint[0]]
        p1 = [-port1.midpoint[1], port1.midpoint[0]]
    # create placeholder ports based on the imaginary coordinates we created
    Total.add_port(name="t1", midpoint=[0, 0], orientation=0, width=width)
    if port1.orientation != port2.orientation:
        Total.add_port(
            name="t2", midpoint=list(np.subtract(p2, p1)), orientation=180, width=width
        )
    else:
        Total.add_port(
            name="t2", midpoint=list(np.subtract(p2, p1)), orientation=0, width=width
        )
    if port1.orientation == port2.orientation:
        # parallel ports: two bends of the same handedness per quadrant
        # first quadrant target
        if (p2[1] > p1[1]) & (p2[0] > p1[0]):
            if bendType == "circular":
                B1 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=0,
                    theta=90,
                )
                B2 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=90,
                    theta=90,
                )
                radiusEff = radius
            if bendType == "gradual":
                B1 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=0,
                    direction="ccw",
                )
                B2 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=90,
                    direction="ccw",
                )
                radiusEff = B1.xsize - width / 2
            b1 = Total.add_ref(B1)
            b2 = Total.add_ref(B2)
            b1.connect(port=b1.ports[1], destination=Total.ports["t1"])
            b1.move([p2[0] - p1[0], 0])
            b2.connect(port=b2.ports[1], destination=b1.ports[2])
            b2.move([0, p2[1] - p1[1] - radiusEff * 2])
            R1 = route_basic(port1=Total.ports["t1"], port2=b1.ports[1], layer=layer)
            r1 = Total.add_ref(R1)
            R2 = route_basic(port1=b1.ports[2], port2=b2.ports[1], layer=layer)
            r2 = Total.add_ref(R2)
            Total.add_port(name=1, port=r1.ports[1])
            Total.add_port(name=2, port=b2.ports[2])
        # second quadrant target
        if (p2[1] > p1[1]) & (p2[0] < p1[0]):
            if bendType == "circular":
                B1 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=0,
                    theta=90,
                )
                B2 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=90,
                    theta=90,
                )
                radiusEff = radius
            if bendType == "gradual":
                B1 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=0,
                    direction="ccw",
                )
                B2 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=90,
                    direction="ccw",
                )
                radiusEff = B1.xsize - width / 2
            b1 = Total.add_ref(B1)
            b2 = Total.add_ref(B2)
            b1.connect(port=b1.ports[1], destination=Total.ports["t1"])
            b2.connect(port=b2.ports[1], destination=b1.ports[2])
            b2.move([0, p2[1] - p1[1] - radiusEff * 2])
            R1 = route_basic(port1=b1.ports[2], port2=b2.ports[1], layer=layer)
            r1 = Total.add_ref(R1)
            R2 = route_basic(port1=b2.ports[2], port2=Total.ports["t2"], layer=layer)
            r2 = Total.add_ref(R2)
            Total.add_port(name=1, port=b1.ports[1])
            Total.add_port(name=2, port=r2.ports[2])
        # third quadrant target
        if (p2[1] < p1[1]) & (p2[0] < p1[0]):
            if bendType == "circular":
                B1 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=0,
                    theta=-90,
                )
                B2 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=-90,
                    theta=-90,
                )
                radiusEff = radius
            if bendType == "gradual":
                B1 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=0,
                    direction="cw",
                )
                B2 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=-90,
                    direction="cw",
                )
                radiusEff = B1.xsize - width / 2
            b1 = Total.add_ref(B1)
            b2 = Total.add_ref(B2)
            b1.connect(port=b1.ports[1], destination=Total.ports["t1"])
            b2.connect(port=b2.ports[1], destination=b1.ports[2])
            b2.move([0, p2[1] - p1[1] + radiusEff * 2])
            R1 = route_basic(port1=b1.ports[2], port2=b2.ports[1], layer=layer)
            r1 = Total.add_ref(R1)
            R2 = route_basic(port1=b2.ports[2], port2=Total.ports["t2"], layer=layer)
            r2 = Total.add_ref(R2)
            Total.add_port(name=1, port=b1.ports[1])
            Total.add_port(name=2, port=r2.ports[2])
        # fourth quadrant target
        if (p2[1] < p1[1]) & (p2[0] > p1[0]):
            if bendType == "circular":
                B1 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=0,
                    theta=-90,
                )
                B2 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=-90,
                    theta=-90,
                )
                radiusEff = radius
            if bendType == "gradual":
                B1 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=0,
                    direction="cw",
                )
                B2 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=-90,
                    direction="cw",
                )
                radiusEff = B1.xsize - width / 2
            b1 = Total.add_ref(B1)
            b2 = Total.add_ref(B2)
            b1.connect(port=b1.ports[1], destination=Total.ports["t1"])
            b1.move([p2[0] - p1[0], 0])
            b2.connect(port=b2.ports[1], destination=b1.ports[2])
            b2.move([0, p2[1] - p1[1] + radiusEff * 2])
            R1 = route_basic(port1=Total.ports["t1"], port2=b1.ports[1], layer=layer)
            r1 = Total.add_ref(R1)
            R2 = route_basic(port1=b1.ports[2], port2=b2.ports[1], layer=layer)
            r2 = Total.add_ref(R2)
            Total.add_port(name=1, port=r1.ports[1])
            Total.add_port(name=2, port=b2.ports[2])
    # other port orientations are not supported:
    elif np.round(np.abs(np.mod(port1.orientation - port2.orientation, 360)), 3) != 180:
        raise ValueError(
            "Route() error: Ports do not face each other (orientations must be 180 apart)"
        )
    # otherwise, they are 180 degrees apart:
    else:
        # first quadrant target
        if (p2[1] > p1[1]) & (p2[0] > p1[0]):
            if bendType == "circular":
                B1 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=0,
                    theta=90,
                )
                B2 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=90,
                    theta=-90,
                )
                radiusEff = radius
            if bendType == "gradual":
                B1 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=0,
                    direction="ccw",
                )
                B2 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=90,
                    direction="cw",
                )
                radiusEff = B1.xsize - width / 2
            b1 = Total.add_ref(B1)
            b2 = Total.add_ref(B2)
            b1.connect(port=b1.ports[1], destination=Total.ports["t1"])
            b1.move([p2[0] - p1[0] - radiusEff * 2, 0])
            b2.connect(port=b2.ports[1], destination=b1.ports[2])
            b2.move([0, p2[1] - p1[1] - radiusEff * 2])
            R1 = route_basic(port1=Total.ports["t1"], port2=b1.ports[1], layer=layer)
            r1 = Total.add_ref(R1)
            R2 = route_basic(port1=b1.ports[2], port2=b2.ports[1], layer=layer)
            r2 = Total.add_ref(R2)
            Total.add_port(name=1, port=r1.ports[1])
            Total.add_port(name=2, port=b2.ports[2])
        # second quadrant target: needs four bends (a U back toward the target)
        if (p2[1] > p1[1]) & (p2[0] < p1[0]):
            if bendType == "circular":
                B1 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=0,
                    theta=90,
                )
                B2 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=90,
                    theta=90,
                )
                B3 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=180,
                    theta=-90,
                )
                B4 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=90,
                    theta=-90,
                )
                radiusEff = radius
            if bendType == "gradual":
                B1 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=0,
                    direction="ccw",
                )
                B2 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=90,
                    direction="ccw",
                )
                B3 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=180,
                    direction="cw",
                )
                B4 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=90,
                    direction="cw",
                )
                radiusEff = B1.xsize - width / 2
            b1 = Total.add_ref(B1)
            b2 = Total.add_ref(B2)
            b3 = Total.add_ref(B3)
            b4 = Total.add_ref(B4)
            b1.connect(port=b1.ports[1], destination=Total.ports["t1"])
            b2.connect(port=b2.ports[1], destination=b1.ports[2])
            b2.move([0, p2[1] - p1[1] - radiusEff * 4])
            R1 = route_basic(port1=b1.ports[2], port2=b2.ports[1], layer=layer)
            r1 = Total.add_ref(R1)
            b3.connect(port=b3.ports[1], destination=b2.ports[2])
            b3.move([p2[0] - p1[0], 0])
            R2 = route_basic(port1=b2.ports[2], port2=b3.ports[1], layer=layer)
            r2 = Total.add_ref(R2)
            b4.connect(port=b4.ports[1], destination=b3.ports[2])
            Total.add_port(name=1, port=r1.ports[1])
            Total.add_port(name=2, port=b4.ports[2])
        # third quadrant target: mirror of the second quadrant (four bends)
        if (p2[1] < p1[1]) & (p2[0] < p1[0]):
            if bendType == "circular":
                B1 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=0,
                    theta=-90,
                )
                B2 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=-90,
                    theta=-90,
                )
                B3 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=-180,
                    theta=90,
                )
                B4 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=-90,
                    theta=90,
                )
                radiusEff = radius
            if bendType == "gradual":
                B1 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=0,
                    direction="cw",
                )
                B2 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=-90,
                    direction="cw",
                )
                B3 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=-180,
                    direction="ccw",
                )
                B4 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=-90,
                    direction="ccw",
                )
                radiusEff = B1.xsize - width / 2
            b1 = Total.add_ref(B1)
            b2 = Total.add_ref(B2)
            b3 = Total.add_ref(B3)
            b4 = Total.add_ref(B4)
            b1.connect(port=b1.ports[1], destination=Total.ports["t1"])
            b2.connect(port=b2.ports[1], destination=b1.ports[2])
            b2.move([0, p2[1] - p1[1] + radiusEff * 4])
            R1 = route_basic(port1=b1.ports[2], port2=b2.ports[1], layer=layer)
            r1 = Total.add_ref(R1)
            b3.connect(port=b3.ports[1], destination=b2.ports[2])
            b3.move([p2[0] - p1[0], 0])
            R2 = route_basic(port1=b2.ports[2], port2=b3.ports[1], layer=layer)
            r2 = Total.add_ref(R2)
            b4.connect(port=b4.ports[1], destination=b3.ports[2])
            Total.add_port(name=1, port=r1.ports[1])
            Total.add_port(name=2, port=b4.ports[2])
        # fourth quadrant target
        if (p2[1] < p1[1]) & (p2[0] > p1[0]):
            if bendType == "circular":
                B1 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=0,
                    theta=-90,
                )
                B2 = _arc(
                    radius=radius,
                    width=width,
                    layer=layer,
                    angle_resolution=1,
                    start_angle=-90,
                    theta=90,
                )
                radiusEff = radius
            if bendType == "gradual":
                B1 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=0,
                    direction="cw",
                )
                B2 = _gradual_bend(
                    radius=radius,
                    width=width,
                    layer=layer,
                    start_angle=-90,
                    direction="ccw",
                )
                radiusEff = B1.xsize - width / 2
            b1 = Total.add_ref(B1)
            b2 = Total.add_ref(B2)
            b1.connect(port=b1.ports[1], destination=Total.ports["t1"])
            b1.move([p2[0] - p1[0] - radiusEff * 2, 0])
            b2.connect(port=b2.ports[1], destination=b1.ports[2])
            b2.move([0, p2[1] - p1[1] + radiusEff * 2])
            R1 = route_basic(port1=Total.ports["t1"], port2=b1.ports[1], layer=layer)
            r1 = Total.add_ref(R1)
            R2 = route_basic(port1=b1.ports[2], port2=b2.ports[1], layer=layer)
            r2 = Total.add_ref(R2)
            Total.add_port(name=1, port=r1.ports[1])
            Total.add_port(name=2, port=b2.ports[2])
    # undo the uniform-plane mapping: rotate/translate back onto port1
    Total.rotate(angle=port1.orientation, center=p1)
    Total.move(origin=Total.ports["t1"], destination=port1)
    return Total
def _route_manhattan90(port1, port2, bendType="circular", layer=0, radius=20):
    """Routes between two perpendicular ports with a single bend.

    This is a subroutine of route_manhattan() and should not be used by
    itself. Only targets to the right of port1 in the mapped frame (first
    and fourth quadrants) are handled; route_manhattan() is responsible for
    calling it only in those cases.

    Args:
        port1: starting Port (orientation must be 0, 90, 180 or 270).
        port2: ending Port (perpendicular to port1).
        bendType: "circular" or "gradual".
        layer: layer to draw the route on.
        radius: bend radius.

    Returns:
        Component containing the route geometry, with ports 1 and 2.

    Raises:
        ValueError: if port1 is not axis-aligned.
    """
    Total = Component()
    width = port1.width
    # The coordinate mapping below only covers the four cardinal orientations;
    # any other value used to leave p1/p2 unbound and crash with NameError.
    if port1.orientation not in (0, 90, 180, 270):
        raise ValueError(
            "_route_manhattan90() requires an axis-aligned port1, "
            "got orientation={}".format(port1.orientation)
        )
    # first map into uniform plane with normal x,y coords
    # allows each situation to be put into uniform cases of quadrants for routing.
    # this is because bends change direction and positioning.
    if port1.orientation == 0:
        p2 = [port2.midpoint[0], port2.midpoint[1]]
        p1 = [port1.midpoint[0], port1.midpoint[1]]
    if port1.orientation == 90:
        p2 = [port2.midpoint[1], -port2.midpoint[0]]
        p1 = [port1.midpoint[1], -port1.midpoint[0]]
    if port1.orientation == 180:
        p2 = [-port2.midpoint[0], -port2.midpoint[1]]
        p1 = [-port1.midpoint[0], -port1.midpoint[1]]
    if port1.orientation == 270:
        p2 = [-port2.midpoint[1], port2.midpoint[0]]
        p1 = [-port1.midpoint[1], port1.midpoint[0]]
    # create placeholder ports based on the imaginary coordinates we created
    Total.add_port(name="t1", midpoint=[0, 0], orientation=0, width=width)
    # first quadrant target, route upward
    if (p2[1] > p1[1]) & (p2[0] > p1[0]):
        Total.add_port(
            name="t2", midpoint=list(np.subtract(p2, p1)), orientation=-90, width=width
        )
        if bendType == "circular":
            B1 = _arc(
                radius=radius,
                width=width,
                layer=layer,
                angle_resolution=1,
                start_angle=0,
                theta=90,
            )
            radiusEff = radius
        if bendType == "gradual":
            B1 = _gradual_bend(
                radius=radius, width=width, layer=layer, start_angle=0, direction="ccw"
            )
            radiusEff = B1.xsize - width / 2
        b1 = Total.add_ref(B1)
        b1.connect(port=b1.ports[1], destination=Total.ports["t1"])
        b1.move([p2[0] - p1[0] - radiusEff, 0])
        R1 = route_basic(port1=Total.ports["t1"], port2=b1.ports[1], layer=layer)
        R2 = route_basic(port1=b1.ports[2], port2=Total.ports["t2"], layer=layer)
        r1 = Total.add_ref(R1)
        r2 = Total.add_ref(R2)
        Total.add_port(name=1, port=r1.ports[1])
        Total.add_port(name=2, port=r2.ports[2])
    # fourth quadrant target, route downward
    if (p2[1] < p1[1]) & (p2[0] > p1[0]):
        Total.add_port(
            name="t2", midpoint=list(np.subtract(p2, p1)), orientation=90, width=width
        )
        if bendType == "circular":
            B1 = _arc(
                radius=radius,
                width=width,
                layer=layer,
                angle_resolution=1,
                start_angle=0,
                theta=-90,
            )
            radiusEff = radius
        if bendType == "gradual":
            B1 = _gradual_bend(
                radius=radius, width=width, layer=layer, start_angle=0, direction="cw"
            )
            radiusEff = B1.xsize - width / 2
        b1 = Total.add_ref(B1)
        b1.connect(port=b1.ports[1], destination=Total.ports["t1"])
        b1.move([p2[0] - p1[0] - radiusEff, 0])
        R1 = route_basic(port1=Total.ports["t1"], port2=b1.ports[1], layer=layer)
        R2 = route_basic(port1=b1.ports[2], port2=Total.ports["t2"], layer=layer)
        r1 = Total.add_ref(R1)
        r2 = Total.add_ref(R2)
        Total.add_port(name=1, port=r1.ports[1])
        Total.add_port(name=2, port=r2.ports[2])
    # undo the uniform-plane mapping: rotate/translate back onto port1
    Total.rotate(angle=port1.orientation, center=p1)
    Total.move(origin=Total.ports["t1"], destination=port1)
    return Total
def route_manhattan(
    port1: Port,
    port2: Port,
    bendType: str = "gradual",
    layer: Optional[Layer] = None,
    radius: float = TECH.waveguide.strip.radius,
):
    """Returns a Route along cardinal directions between two ports placed
    diagonally from each other.

    Args:
        port1: starting Port (orientation must be 0, 90, 180 or 270).
        port2: ending Port; must differ from port1 in both x and y.
        bendType: "gradual" or "circular".
        layer: layer for the route; defaults to port1.layer.
        radius: bend radius.

    Returns:
        Route with references, ports (port1, port2) and total length.

    Raises:
        ValueError: on an unknown bendType or a non-axis-aligned port1.
        RoutingError: if the bends do not fit between the ports, or the
            ports share an x or y coordinate.
    """
    layer = layer or port1.layer
    valid_bend_types = ["circular", "gradual"]
    if bendType not in valid_bend_types:
        raise ValueError(f"bendType={bendType} not in {valid_bend_types}")
    # gradual bends occupy more space than their nominal radius
    if bendType == "gradual":
        b = _gradual_bend(radius=radius)
        radius_eff = b.xsize
    else:
        radius_eff = radius
    if (
        abs(port1.midpoint[0] - port2.midpoint[0]) < 2 * radius_eff
        or abs(port1.midpoint[1] - port2.midpoint[1]) < 2 * radius_eff
    ):
        raise RoutingError(
            f"bend does not fit (radius = {radius_eff}) you need radius <",
            min(
                [
                    abs(port1.midpoint[0] - port2.midpoint[0]) / 2,
                    abs(port1.midpoint[1] - port2.midpoint[1]) / 2,
                ]
            ),
        )
    Total = Component()
    width = port1.width
    # The coordinate mapping below only covers the four cardinal orientations;
    # any other value used to leave p1/p2 unbound and crash with NameError.
    if port1.orientation not in (0, 90, 180, 270):
        raise ValueError(
            "route_manhattan() requires an axis-aligned port1, "
            f"got orientation={port1.orientation}"
        )
    # first map into uniform plane with normal x,y coords
    # allows each situation to be put into uniform cases of quadrants for routing.
    # this is because bends change direction and positioning.
    if port1.orientation == 0:
        p2 = [port2.midpoint[0], port2.midpoint[1]]
        p1 = [port1.midpoint[0], port1.midpoint[1]]
    if port1.orientation == 90:
        p2 = [port2.midpoint[1], -port2.midpoint[0]]
        p1 = [port1.midpoint[1], -port1.midpoint[0]]
    if port1.orientation == 180:
        p2 = [-port2.midpoint[0], -port2.midpoint[1]]
        p1 = [-port1.midpoint[0], -port1.midpoint[1]]
    if port1.orientation == 270:
        p2 = [-port2.midpoint[1], port2.midpoint[0]]
        p1 = [-port1.midpoint[1], port1.midpoint[0]]
    Total.add_port(name=1, port=port1)
    Total.add_port(name=2, port=port2)
    if p2[1] == p1[1] or p2[0] == p1[0]:
        raise RoutingError("Error - ports must be at different x AND y values.")
    # if it is parallel or anti-parallel, route with 180 option
    if (
        np.round(np.abs(np.mod(port1.orientation - port2.orientation, 360)), 3) == 180
    ) or (np.round(np.abs(np.mod(port1.orientation - port2.orientation, 360)), 3) == 0):
        R1 = _route_manhattan180(
            port1=port1, port2=port2, bendType=bendType, layer=layer, radius=radius
        )
        Total.add_ref(R1)
    else:
        # perpendicular ports: either a single 90-degree route or one extra
        # bend followed by a 180-style route, depending on the quadrant.
        # first quadrant case
        if (p2[1] > p1[1]) & (p2[0] > p1[0]):
            # simple 90 degree single-bend case
            if (
                port2.orientation == port1.orientation - 90
                or port2.orientation == port1.orientation + 270
            ):
                R1 = _route_manhattan90(
                    port1=port1,
                    port2=port2,
                    bendType=bendType,
                    layer=layer,
                    radius=radius,
                )
                Total.add_ref(R1)
            elif (
                port2.orientation == port1.orientation + 90
                or port2.orientation == port1.orientation - 270
            ):
                if bendType == "circular":
                    B1 = _arc(
                        radius=radius,
                        width=width,
                        layer=layer,
                        angle_resolution=1,
                        start_angle=port1.orientation,
                        theta=90,
                    )
                if bendType == "gradual":
                    B1 = _gradual_bend(
                        radius=radius,
                        width=width,
                        layer=layer,
                        start_angle=port1.orientation,
                        direction="ccw",
                    )
                b1 = Total.add_ref(B1)
                b1.connect(port=1, destination=port1)
                R1 = _route_manhattan180(
                    port1=b1.ports[2],
                    port2=port2,
                    bendType=bendType,
                    layer=layer,
                    radius=radius,
                )
                Total.add_ref(R1)
        # second quadrant case
        if (p2[1] > p1[1]) & (p2[0] < p1[0]):
            if (
                np.abs(port1.orientation - port2.orientation) == 90
                or np.abs(port1.orientation - port2.orientation) == 270
            ):
                if bendType == "circular":
                    B1 = _arc(
                        radius=radius,
                        width=width,
                        layer=layer,
                        angle_resolution=1,
                        start_angle=port1.orientation,
                        theta=90,
                    )
                if bendType == "gradual":
                    B1 = _gradual_bend(
                        radius=radius,
                        width=width,
                        layer=layer,
                        start_angle=port1.orientation,
                        direction="ccw",
                    )
                b1 = Total.add_ref(B1)
                b1.connect(port=1, destination=port1)
                R1 = _route_manhattan180(
                    port1=b1.ports[2],
                    port2=port2,
                    bendType=bendType,
                    layer=layer,
                    radius=radius,
                )
                Total.add_ref(R1)
        # third quadrant case
        if (p2[1] < p1[1]) & (p2[0] < p1[0]):
            if (
                np.abs(port1.orientation - port2.orientation) == 90
                or np.abs(port1.orientation - port2.orientation) == 270
            ):
                if bendType == "circular":
                    B1 = _arc(
                        radius=radius,
                        width=width,
                        layer=layer,
                        angle_resolution=1,
                        start_angle=port1.orientation,
                        theta=-90,
                    )
                if bendType == "gradual":
                    B1 = _gradual_bend(
                        radius=radius,
                        width=width,
                        layer=layer,
                        start_angle=port1.orientation,
                        direction="cw",
                    )
                b1 = Total.add_ref(B1)
                b1.connect(port=1, destination=port1)
                R1 = _route_manhattan180(
                    port1=b1.ports[2],
                    port2=port2,
                    bendType=bendType,
                    layer=layer,
                    radius=radius,
                )
                Total.add_ref(R1)
        # fourth quadrant case
        if (p2[1] < p1[1]) & (p2[0] > p1[0]):
            # simple 90 degree single-bend case
            if (
                port2.orientation == port1.orientation + 90
                or port2.orientation == port1.orientation - 270
            ):
                R1 = _route_manhattan90(
                    port1=port1,
                    port2=port2,
                    bendType=bendType,
                    layer=layer,
                    radius=radius,
                )
                Total.add_ref(R1)
            elif (
                port2.orientation == port1.orientation - 90
                or port2.orientation == port1.orientation + 270
            ):
                if bendType == "circular":
                    B1 = _arc(
                        radius=radius,
                        width=width,
                        layer=layer,
                        angle_resolution=1,
                        start_angle=port1.orientation,
                        theta=-90,
                    )
                if bendType == "gradual":
                    B1 = _gradual_bend(
                        radius=radius,
                        width=width,
                        layer=layer,
                        start_angle=port1.orientation,
                        direction="cw",
                    )
                b1 = Total.add_ref(B1)
                b1.connect(port=1, destination=port1)
                R1 = _route_manhattan180(
                    port1=b1.ports[2],
                    port2=port2,
                    bendType=bendType,
                    layer=layer,
                    radius=radius,
                )
                Total.add_ref(R1)
    # flatten one level: collect the sub-component references and sum
    # their lengths to report the total route length
    references = []
    length = 0
    for ref1 in Total.references:
        for ref2 in ref1.parent.references:
            references.append(ref2)
            length += ref2.info["length"]
    ports = (Total.ports[1], Total.ports[2])
    length = snap_to_grid(length)
    return Route(references=references, ports=ports, length=length)
if __name__ == "__main__":
    # Manual smoke test: route a single pair of ports with a gradual bend
    # and show the result. Earlier experiments are kept below, commented out.
    import pp

    c = pp.Component("test_route_manhattan_circular")
    pitch = 9.0
    ys1 = [0, 10, 20]
    N = len(ys1)
    ys2 = [15 + i * pitch for i in range(N)]
    ports1 = [pp.Port(f"L_{i}", (0, ys1[i]), 0.5, 0) for i in range(N)]
    ports2 = [pp.Port(f"R_{i}", (20, ys2[i]), 0.5, 180) for i in range(N)]
    # NOTE: the generated port lists above are immediately overridden by the
    # hand-picked single test case below.
    ports1 = [
        # pp.Port("in1", (10, 5), 0.5, 90),
        # pp.Port("in2", (-10, 20), 0.5, 0),
        # pp.Port("in3", (10, 30), 0.5, 0),
        # pp.Port("in4", (-10, -5), 0.5, 90),
        pp.Port("in5", (0, 0), 0.5, 0),
        # pp.Port("in6", (0, 0), 0.5, 0),
    ]
    ports2 = [
        # pp.Port("in1", (90, -60), 0.5, 180),
        # pp.Port("in2", (-100, 20), 0.5, 0),
        # pp.Port("in3", (100, -25), 0.5, 0),
        # pp.Port("in4", (-150, -65), 0.5, 270),
        pp.Port("in5", (15, 6), 0.5, 180),
        # pp.Port("in6", (0, 12), 0.5, 180),
    ]
    N = len(ports1)
    for i in range(N):
        # route = route_manhattan(ports1[i], ports2[i], radius=3, bendType="circular")
        route = route_manhattan(ports1[i], ports2[i], radius=1, bendType="gradual")
        c.add(route.references)
        # references = route_basic(port1=ports1[i], port2=ports2[i])
        # print(route.length)
    # c = _gradual_bend()
    # c = _arc(theta=20)
    c.show(show_ports=True)
| {
"repo_name": "gdsfactory/gdsfactory",
"path": "pp/routing/routing.py",
"copies": "1",
"size": "41794",
"license": "mit",
"hash": -6407945316558603000,
"line_mean": 34.5693617021,
"line_max": 97,
"alpha_frac": 0.4698521319,
"autogenerated": false,
"ratio": 3.6150852002421936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4584937332142193,
"avg_score": null,
"num_lines": null
} |
#Adapted from PIC
# This is sample code for learning the basics of the algorithm.
# It's not meant to be part of a production anti-spam system!
# Terrible for a large dataset
import sys
import os
import glob
import re
import math
import sqlite3
from decimal import *
DEFAULT_THRESHOLD = 0.7
def get_words(doc):
    """Splits text into words.

    Returns a dict mapping each unique lowercased word (3 to 19 characters
    long) to 1, suitable for use as a feature dict.
    """
    # \W+ rather than \W*: a pattern that can match the empty string splits
    # between every character under modern re semantics. Raw string avoids
    # the double escape.
    splitter = re.compile(r'\W+')
    # Split the words by non-alpha characters
    words = [s.lower() for s in splitter.split(doc)
             if len(s) > 2 and len(s) < 20]
    # Return the unique set of words only
    return dict.fromkeys(words, 1)
class Classifier:
def __init__(self, get_features, filename=None):
self.fc = {} # Counts of feature/category combinations
self.cc = {} # Counts of documents in each category
self.get_features = get_features # feature extraction function
def setup_db(self, dbfile):
"""Sets up the database."""
print "db:", dbfile
# "connect to the file" (or create it)
self.con = sqlite3.connect(dbfile)
# create the tables if they don't exist
self.con.execute(
'create table if not exists fc(feature,category,count)')
self.con.execute('create table if not exists cc(category,count)')
self.con.execute('create table if not exists ct(category,threshold)')
def inc_feature_count(self, f, cat):
"""Increases the count of a feture/category pair"""
count = self.feature_count(f, cat)
if count == 0:
self.con.execute("insert into fc values ('%s','%s',1)" % (f, cat))
else:
self.con.execute(
"update fc set count=%d where feature='%s' and category='%s'"
% (count+1, f, cat))
self.con.commit()
def feature_count(self, f, cat):
"""Returns the number of times a feature has appeared int a \
category.
"""
res = self.con.execute(
'select count from fc where feature="%s" and category="%s"'
%(f, cat)).fetchone()
if res == None:
return 0
else:
return float(res[0])
def inc_category_count(self, cat):
"""Increases the count of a category."""
count = self.category_count(cat)
if count == 0:
self.con.execute("insert into cc values ('%s',1)" % (cat))
else:
self.con.execute("update cc set count=%d where category='%s'"
% (count+1, cat))
self.con.commit()
def category_count(self, cat):
"""Returns the number of items in a category."""
res = self.con.execute('select count from cc where category="%s"'
%(cat)).fetchone()
if res == None:
return 0
else:
return float(res[0])
def categories(self):
"""Returns a list of categories."""
cur = self.con.execute('select category from cc');
return [d[0] for d in cur]
def total_count(self):
"""Returns the total number of items."""
res = self.con.execute('select sum(count) from cc').fetchone();
if res == None:
return 0
return res[0]
def train(self, item, cat):
"""Extracts the features from an item and increases the counts for \
this classification (category) for every feature.
Also increaes the total count for the category.
"""
features = self.get_features(item)
# Increment the count for every feature with this category
for f in features:
self.inc_feature_count(f, cat)
# Increment the count for this category
self.inc_category_count(cat)
def train_from_dir(self, path, cat):
"""Loads examples of a given category from a directory and uses them \
to perform the training.
"""
dirfiles = glob.glob(os.path.join(path, '*'))
total = len(dirfiles)
count = 0
for infile in dirfiles:
f = open(infile, "r")
text = f.read()
self.train(text, cat)
def feature_prob(self, f, cat):
"""Returns the probabiity that a feature is in a particular category.
"""
if self.category_count(cat) == 0:
return 0
# The total number of times this feature appeared in this
# category divided by the total number of items in this category
pfc = self.feature_count(f, cat)
pc = self.category_count(cat)
return float(pfc)/pc
def weighted_prob(self, f, cat, prf, weight=1.0, ap=0.5):
"""Returns the weighted probability that a feature is in a \
particular category. Adds an inital probability value for features \
with a specified weight.
"""
basicprob = prf(f, cat) # Calculate current probability
# Count the number of times this feature has appeared in all cats
totals = sum([self.feature_count(f, c) for c in self.categories()])
# Calculate the weighted average
bp = ((weight*ap)+(totals*basicprob))/(weight+totals)
return bp
class NaiveBayes(Classifier):
    """Naive Bayes document classifier on top of the SQLite-backed counts."""

    def __init__(self, get_features):
        Classifier.__init__(self, get_features)
        # Legacy in-memory cache; thresholds actually live in table ct.
        self.thresholds = {}

    def doc_prob(self, doc, cat):
        """Returns Pr(Document | Category): the product of the weighted
        probabilities of all features in the document.
        """
        features = self.get_features(doc)
        # Multiply the probabilities of all the features together.
        # Decimal avoids float underflow for documents with many features.
        p = Decimal(1)
        for f in features:
            p *= Decimal(str(self.weighted_prob(f, cat, self.feature_prob)))
        return p

    def prob(self, doc, cat):
        """Returns an (unnormalized) Pr(Category | Document)."""
        catprob = self.category_count(cat) / self.total_count()  # Pr(Category)
        docprob = self.doc_prob(doc, cat)  # Pr(Document | Category)
        return docprob * Decimal(str(catprob))  # Pr(Category | Document)

    def set_threshold(self, cat, t):
        """Sets the minimum probability ratio an item must exceed to be
        assigned to a particular category.
        """
        # Upsert: the original UPDATE silently did nothing for a category
        # with no row in ct yet, and the change was never committed.
        row = self.con.execute('select threshold from ct where category=?',
                               (cat,)).fetchone()
        if row is None:
            self.con.execute("insert into ct values (?,?)", (cat, t))
        else:
            self.con.execute("update ct set threshold=? where category=?",
                             (t, cat))
        self.con.commit()

    def get_threshold(self, cat):
        """Returns the threshold value of a category (1.0 if unset)."""
        t = self.con.execute('select threshold from ct where category=?',
                             (cat,)).fetchone()
        if t is None:
            return 1.0
        # Bug fix: previously returned self.thresholds[cat], which is never
        # populated and raised KeyError for any category with a stored row.
        return float(t[0])

    def classify(self, doc, default=None):
        """Classify a document as belonging to a certain category.

        Returns the best category, or `default` when nothing matches or the
        winner does not beat every runner-up by the category's threshold.
        """
        probs = {}
        best = default
        best_prob = Decimal(0)  # renamed from `max`, which shadowed the builtin
        for cat in self.categories():
            probs[cat] = self.prob(doc, cat)
            if probs[cat] > best_prob:
                best_prob = probs[cat]
                best = cat
        if best_prob == 0:
            return default
        # Make sure the probability exceeds threshold*next best
        for cat in probs:
            if cat == best:
                continue
            if probs[cat] * Decimal(str(self.get_threshold(best))) > probs[best]:
                return default
        return best
def print_help():
    """Prints command-line usage for the three sub-commands."""
    print "python ", sys.argv[0], "train [database ][dataset_dir] [category] [default_treshold]"
    print "python ", sys.argv[0], "classify [database] [file]"
    print "python ", sys.argv[0], "threshold [database] [category] [treshold]"
def main():
if len(sys.argv) < 2:
print_help()
sys.exit(0)
filter = NaiveBayes(get_words)
filter.setup_db(sys.argv[2])
if sys.argv[1] == "train":
filter.train_from_dir(sys.argv[3], sys.argv[4])
filter.set_threshold(sys.argv[4], DEFAULT_THRESHOLD)
elif sys.argv[1] == "classify":
f = open(sys.argv[3])
text = f.read()
print filter.classify(text, default='unknown')
elif sys.argv[1] == "threshold":
t = float(sys.argv[4])
filter.set_threshold(sys.argv[3], t)
else:
print_help()
if __name__ == "__main__":
main() | {
"repo_name": "lrei/magical_code",
"path": "spamfilter.py",
"copies": "1",
"size": "8430",
"license": "mit",
"hash": -8115799704611686000,
"line_mean": 34.1291666667,
"line_max": 98,
"alpha_frac": 0.5723606168,
"autogenerated": false,
"ratio": 4.04510556621881,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9995055077696392,
"avg_score": 0.024482221064483677,
"num_lines": 240
} |
# adapted from pimoroni evdev support for the 7 inch capacitive screen
# added support for the resistive 3.5 and maybe others that doesn't depend upon SDL 1.2
import errno
import glob
import io
import os
import queue
import struct
import time
from collections import namedtuple
import logsupport
import config
import select
import debug
TOUCH_X = 0
TOUCH_Y = 1
TouchEvent = namedtuple('TouchEvent', ('timestamp', 'type', 'code', 'value'))
EV_SYN = 0
EV_ABS = 3
ABS_X = 0
ABS_Y = 1
EV_KEY = 1
BTN_TOUCH = 330
ABS_MT_SLOT = 0x2f # 47 MT slot being modified
ABS_MT_POSITION_X = 0x35 # 53 Center X of multi touch position
ABS_MT_POSITION_Y = 0x36 # 54 Center Y of multi touch position
ABS_MT_TRACKING_ID = 0x39 # 57 Unique ID of initiated contact
TS_PRESS = 1
TS_RELEASE = 0
TS_MOVE = 2
class Touch(object):
    """State of one multitouch slot: position, tracking id, and the
    press/release/move events accumulated since the last sync.
    """

    def __init__(self, slot, x, y):
        self.slot = slot
        self._x = x
        self._y = y
        self.last_x = -1  # previous position; -1 until first move
        self.last_y = -1
        self._id = -1  # tracking id; -1 means no active contact
        self.events = []  # pending TS_PRESS / TS_RELEASE / TS_MOVE events
        # Optional user callbacks, invoked by handle_events()
        self.on_move = None
        self.on_press = None
        self.on_release = None
        self.on_idle = None

    @property
    def position(self):
        """Current (x, y) position."""
        return self.x, self.y

    @property
    def last_position(self):
        """Previous (x, y) position."""
        return self.last_x, self.last_y

    @property
    def valid(self):
        """True while the touch has an active tracking id."""
        return self.id > -1

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, value):
        # A transition to -1 queues a release; any other change of id queues
        # a press. Each event type is recorded at most once per sync.
        if value != self._id:
            if value == -1 and TS_RELEASE not in self.events:
                self.events.append(TS_RELEASE)
            elif TS_PRESS not in self.events:
                self.events.append(TS_PRESS)
        self._id = value

    @property
    def x(self):
        return self._x

    @x.setter
    def x(self, value):
        if value != self._x and TS_MOVE not in self.events:
            self.events.append(TS_MOVE)
        self.last_x = self._x
        self._x = value

    @property
    def y(self):
        return self._y

    @y.setter
    def y(self, value):
        if value != self._y and TS_MOVE not in self.events:
            self.events.append(TS_MOVE)
        self.last_y = self._y
        self._y = value

    def handle_events(self):
        """Run outstanding press/release/move events"""
        for event in self.events:
            if event == TS_MOVE and callable(self.on_move):
                self.on_move(event, self)
            if event == TS_PRESS and callable(self.on_press):
                self.on_press(event, self)
            if event == TS_RELEASE and callable(self.on_release):
                self.on_release(event, self)
        self.events = []
class Touches(list):
    """A list of Touch objects with a convenience filter."""

    @property
    def valid(self):
        """Return only the touches that currently have a valid tracking id."""
        active = []
        for touch in self:
            if touch.valid:
                active.append(touch)
        return active
class Touchscreen(object):
	"""Reads Linux evdev events from a touchscreen device and dispatches
	press/release/move events to per-slot Touch objects.

	The device is located by matching /sys/class/input device names against
	entries loaded from the 'touchdefinitions' file(s); each matching entry
	also supplies shift/flip/scale calibration parameters.
	"""
	# Record layout matches the unpack below: two longs (tv_sec, tv_usec),
	# two unsigned shorts (type, code) and a signed int (value).
	EVENT_FORMAT = str('llHHi')
	EVENT_SIZE = struct.calcsize(EVENT_FORMAT)
	def DumpTouchParams(self):
		"""Return the calibration parameter tuple currently in effect."""
		return (self._capscreen, self._shiftx, self._shifty, self._flipx, self._flipy, self._scalex, self._scaley,
				self._swapaxes)
	def __init__(self, configdir, touchmod):
		"""Load touch definitions and open the touchscreen event device.

		configdir: directory holding an optional site-local
			'touchdefinitions' override file.
		touchmod: optional suffix appended to the detected device name
			before it is looked up in the definitions table.
		"""
		self.touchdefs = {}
		self.touchmod = touchmod
		# self.touchbuf = []
		# Built-in definitions: one device per line, '|'-separated, first
		# field is the device name, the remaining fields are calibration values.
		with open('touchdefinitions') as f:
			defs = f.read().splitlines()
		for l in defs:
			touchitem = l.split('|')
			self.touchdefs[touchitem[0]] = touchitem[1:]
		# noinspection PyBroadException
		# Site-local overrides are optional: any failure (missing file,
		# malformed line) silently leaves the built-in definitions in place.
		try:
			with open(configdir + '/touchdefinitions') as f:
				defs = f.read().splitlines()
			for l in defs:
				touchitem = l.split('|')
				self.touchdefs[touchitem[0]] = touchitem[1:]
		except:
			pass
		self._use_multitouch = True
		self.controller = "unknown"
		self._shiftx = 0
		self._shifty = 0
		self._flipx = 0 # 0 for ok else size of x from which to subtract touch value
		self._flipy = 0 # 0 for ok else size of y from which to subtract touch value
		self._scalex = 1.0
		self._scaley = 1.0
		self._capscreen = True  # False selects the resistive path (EV_KEY + pointercal)
		self.a = None  # affine calibration read from /etc/pointercal (resistive only)
		self._running = False
		self._thread = None
		self._f_poll = select.poll()
		self._f_device = io.open(self._touch_device(), 'rb', self.EVENT_SIZE)
		self._f_poll.register(self._f_device, select.POLLIN)
		self.position = Touch(0, 0, 0)
		self.touches = Touches([Touch(x, 0, 0) for x in range(10)])
		self._event_queue = queue.Queue()
		self._touch_slot = 0
		self.findidle = None  # last Touch visited in poll(); target for on_idle()
		self.lasttouch = time.time() # time of last touch
		self.sentidle = False
	def _run(self):
		"""Poll loop body; runs until stop() clears the running flag."""
		self._running = True
		while self._running:
			self.poll()
			# time.sleep(0.01)
	def run(self):
		"""Run the poll loop in the calling thread (blocks)."""
		self._run()
	def stop(self):
		"""Stop a threaded poll loop, if one was started (no-op otherwise)."""
		if self._thread is None:
			return
		self._running = False
		self._thread.join()
		self._thread = None
	@property
	def _current_touch(self):
		# The Touch for the slot most recently selected via ABS_MT_SLOT.
		return self.touches[self._touch_slot]
	def close(self):
		"""Close the underlying event device."""
		self._f_device.close()
	def __enter__(self):
		return self
	def __exit__(self, exc_type, exc_value, exc_tb):
		self.close()
	def __iter__(self):
		# NOTE(review): returns None, so iter(self) raises TypeError and
		# read() below cannot work as written — looks like dead code.
		pass
	def _lazy_read(self):
		"""Yield raw fixed-size event records while the device has data ready."""
		while self._wait_for_events():
			event = self._f_device.read(self.EVENT_SIZE)
			if not event:
				break
			yield event
	def _get_pending_events(self):
		"""Drain ready device records into the event queue as TouchEvents."""
		for event in self._lazy_read():
			(tv_sec, tv_usec, ttype, code, value) = struct.unpack(self.EVENT_FORMAT, event)
			self._event_queue.put(TouchEvent(tv_sec + (tv_usec / 1000000), ttype, code, value))
			#self.touchbuf.append((time.time(),tv_sec, tv_usec, ttype, code, value))
	def _wait_for_events(self, timeout=2):
		# poll() returns an empty list on timeout (timeout is milliseconds).
		return self._f_poll.poll(timeout)
	def poll(self):
		"""Process queued device events.

		Returns the Touches list when a sync (EV_SYN) event completes a
		report, or an empty list when the queue drains without one.  Also
		fires the idle callback once the multi-tap window has elapsed.
		"""
		self._get_pending_events()
		while not self._event_queue.empty():
			event = self._event_queue.get()
			debug.debugPrint('LLTouch', 'Touch: ' + str(event))
			self._event_queue.task_done()
			if event.type == EV_SYN: # Sync
				# A sync marks a complete report: run callbacks on every touch.
				# findidle ends up pointing at the last touch in the list.
				for tch in self.touches:
					self.findidle = tch
					tch.handle_events()
				self.lasttouch = time.time()
				self.sentidle = False
				# print('Syn')
				# for e in self.touchbuf:
				# print(e)
				# print('----')
				# self.touchbuf=[]
				return self.touches
			if event.type == EV_KEY and not self._capscreen:
				# Resistive screen: BTN_TOUCH carries press/release; position
				# comes from ABS_X/ABS_Y, optionally mapped through the
				# /etc/pointercal affine transform in self.a.
				if event.code == BTN_TOUCH:
					self._touch_slot = 0
					# self._current_touch.id = 1
					if self.a is None:
						self._current_touch.x = self.position.x
						self._current_touch.y = self.position.y
					else:
						self._current_touch.x = (self.a[2] + self.a[0] * self.position.x + self.a[
							1] * self.position.y) / self.a[6]
						self._current_touch.y = (self.a[5] + self.a[3] * self.position.x + self.a[
							4] * self.position.y) / self.a[6]
					if self._flipx != 0:
						self._current_touch.x = self._flipx - self._current_touch.x
					if self._flipy != 0:
						self._current_touch.y = self._flipy - self._current_touch.y
					if event.value == 1:
						self._current_touch.events.append(TS_PRESS)
						self.sentidle = False
						#print('Press')
					else:
						self._current_touch.events.append(TS_RELEASE)
						self.sentidle = False
						#print('Rel')
			if event.type == EV_ABS: # Absolute cursor position
				if event.code == ABS_MT_SLOT:
					# Select which multitouch slot subsequent events refer to.
					self._touch_slot = event.value
				if event.code == ABS_MT_TRACKING_ID:
					self._current_touch.id = event.value
					self.sentidle = False
					#print('ID')
				if event.code == ABS_MT_POSITION_X:
					# Apply shift, optional flip, clamp at 0, then scale.
					tmp = event.value + self._shiftx
					if self._flipx != 0:
						tmp = self._flipx - event.value
					if tmp < 0:
						logsupport.Logs.Log('Negative touch position(x): {}'.format(tmp),
											severity=logsupport.ConsoleWarning)
						tmp = 0
					self._current_touch.x = round(tmp * self._scalex)
				if event.code == ABS_MT_POSITION_Y:
					tmp = event.value + self._shifty
					if self._flipy != 0:
						tmp = self._flipy - event.value
					if tmp < 0:
						logsupport.Logs.Log('Negative touch position(y): {}'.format(tmp),
											severity=logsupport.ConsoleWarning)
						tmp = 0
					self._current_touch.y = round(tmp * self._scaley)
				if event.code == ABS_X:
					self.position.x = event.value
				if event.code == ABS_Y:
					self.position.y = event.value
		# Idle detection: once MultiTapTime (ms) has passed with no touch,
		# deliver on_idle() once (or repeatedly when config.resendidle is set).
		if time.time() - self.lasttouch > config.sysStore.MultiTapTime / 1000 and (
				not self.sentidle or config.resendidle):
			# print(time.time()-self.lasttouch, self.sentidle, config.resendidle)
			self.sentidle = True
			if self.findidle is not None:
				self.findidle.on_idle()
		return []
	def _touch_device(self):
		"""Locate the touchscreen event device under /sys/class/input.

		Matches the device name (plus optional touchmod suffix) against the
		loaded definitions, captures its calibration values, and returns the
		matching /dev/input/eventN path.  Raises RuntimeError when no known
		device is found.
		"""
		global ABS_MT_POSITION_Y, ABS_MT_POSITION_X
		dev = 'unknown'
		# return '/dev/input/touchscreen'
		for evdev in glob.glob("/sys/class/input/event*"):
			try:
				with io.open(os.path.join(evdev, 'device', 'name'), 'r') as f:
					dev = f.read().strip()
				if self.touchmod != '':
					dev = dev + '.' + self.touchmod
				if dev in self.touchdefs:
					self.controller = dev
					vals = self.touchdefs[dev]
					self._shiftx = int(vals[1])
					self._shifty = int(vals[2])
					self._flipx = int(vals[3])
					self._flipy = int(vals[4])
					self._scalex = float(vals[5])
					self._scaley = float(vals[6])
					if len(vals) > 7:
						self._swapaxes = vals[7] in ('True', '1', 'true', 'TRUE')
					else:
						self._swapaxes = False
					if self._swapaxes:
						# Swap the module-level X/Y event codes so poll()
						# sees the axes in the expected orientation.
						tmp = ABS_MT_POSITION_X
						ABS_MT_POSITION_X = ABS_MT_POSITION_Y
						ABS_MT_POSITION_Y = tmp
					self._capscreen = vals[0] in ('True', '1', 'true', 'TRUE')
					if not self._capscreen:
						config.noisytouch = True
						with open('/etc/pointercal', 'r') as pc: # skip empty lines that Adafruit install may leave
							l = '\n'
							while l == '\n':
								l = next(pc)
							self.a = list(int(x) for x in l.split())
					return os.path.join('/dev', 'input', os.path.basename(evdev))
			except IOError as e:
				# Devices without a readable name node are skipped.
				if e.errno != errno.ENOENT:
					raise
		raise RuntimeError('Unable to locate touchscreen device ({})'.format(dev))
	def read(self):
		# NOTE(review): __iter__ returns None, so this raises TypeError;
		# appears to be dead code.
		return next(iter(self))
'''
if __name__ == "__main__":
import signal
pygame.init()
pygame.fastevent.init()
a = [5724, -6, -1330074, 26, 8427, -1034528, 65536]
b = [34, 952, 38, 943]
ts = Touchscreen()
def handle_event(event, tch):
#xx = (a[2] + a[0] * touch.x + a[1] * touch.y) / a[6]
#yy = (a[5] + a[3] * touch.x + a[4] * touch.y) / a[6]
#Xx = (touch.x - b[0]) * 320 / (b[1] - b[0])
#Xy = (touch.y - b[2]) * 480 / (b[3] - b[2])
print(["Release", "Press", "Move"][event],
tch.slot,
tch.x,
tch.y)
return
# noinspection PyUnreachableCode
if event == 1:
e = pygame.event.Event(pygame.MOUSEBUTTONDOWN, {'pos': (tch.x, tch.y)})
pygame.fastevent.post(e)
elif event == 0:
e = pygame.event.Event(pygame.MOUSEBUTTONUP, {'pos': (tch.x, tch.y)})
pygame.fastevent.post(e)
for touch in ts.touches:
touch.on_press = handle_event
touch.on_release = handle_event
touch.on_move = handle_event
# ts.run()
try:
signal.pause()
except KeyboardInterrupt:
print("Stopping thread...")
ts.stop()
exit()
'''
| {
"repo_name": "kevinkahn/softconsole",
"path": "touchhandler.py",
"copies": "1",
"size": "10413",
"license": "apache-2.0",
"hash": 4489486834519162400,
"line_mean": 24.7111111111,
"line_max": 108,
"alpha_frac": 0.6265245366,
"autogenerated": false,
"ratio": 2.7445967316816025,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8701799549645532,
"avg_score": 0.03386434372721379,
"num_lines": 405
} |
# adapted from pydicom source code
from __version__ import __version__
__version_info__ = __version__.split('.')
# some imports
from applicationentity import AE
from SOPclass import \
VerificationSOPClass,\
StorageSOPClass,\
MRImageStorageSOPClass,\
CTImageStorageSOPClass,\
PositronEmissionTomographyImageStorageSOPClass,\
CRImageStorageSOPClass,\
SCImageStorageSOPClass,\
RTImageStorageSOPClass,\
RTDoseStorageSOPClass,\
RTStructureSetStorageSOPClass,\
RTPlanStorageSOPClass,\
EnhancedSRSOPClass,\
XRayRadiationDoseStructuredReportSOPClass,\
DigitalXRayImageStorageForPresentationSOPClass,\
DigitalXRayImageStorageForProcessingSOPClass,\
DigitalMammographyXRayImageStorageForPresentationSOPClass,\
DigitalMammographyXRayImageStorageForProcessingSOPClass,\
DigitalIntraOralXRayImageStorageForPresentationSOPClass,\
DigitalIntraOralXRayImageStorageForProcessingSOPClass,\
XRayAngiographicImageStorageSOPClass,\
EnhancedXAImageStorageSOPClass,\
XRayRadiofluoroscopicImageStorageSOPClass,\
EnhancedXRFImageStorageSOPClass,\
EnhancedCTImageStorageSOPClass,\
NMImageStorageSOPClass,\
PatientRootFindSOPClass,\
PatientRootMoveSOPClass,\
PatientRootGetSOPClass,\
StudyRootFindSOPClass,\
StudyRootMoveSOPClass,\
StudyRootGetSOPClass,\
PatientStudyOnlyFindSOPClass,\
PatientStudyOnlyMoveSOPClass,\
PatientStudyOnlyGetSOPClass,\
ModalityWorklistInformationFindSOPClass
# UID prefix provided by https://www.medicalconnections.co.uk/Free_UID
pynetdicom_uid_prefix = '1.2.826.0.1.3680043.9.3811.'
# Set up logging system for the whole package. In each module, set
# logger=logging.getLogger('pynetdicom') and the same instance will be
# used by all At command line, turn on debugging for all pynetdicom
# functions with: import netdicom netdicom.debug(). Turn off debugging
# with netdicom.debug(False)
import logging
# pynetdicom defines a logger with a NullHandler only.
# Client code have the responsability to configure
# this logger.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# helper functions to configure the logger. This should be
# called by the client code.
def logger_setup():
    """Attach a basic StreamHandler to the package logger.

    Intended to be called once by client code; also silences the chatty
    DULprovider and timer sub-loggers.
    """
    pkg_logger = logging.getLogger(__name__)
    pkg_logger.setLevel(logging.WARNING)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter("%(name)s %(message)s"))
    pkg_logger.addHandler(stream_handler)
    # logging.getLogger('netdicom.FSM').setLevel(logging.CRITICAL)
    # Quiet the noisiest sub-modules.
    for noisy_name in ('netdicom.DULprovider', 'netdicom.timer'):
        logging.getLogger(noisy_name).setLevel(logging.CRITICAL)
def debug(debug_on=True):
    """Turn debugging of DICOM network operations on or off.

    Args:
        debug_on: when True, set the package logger level to DEBUG;
            when False, restore the quieter WARNING level.

    The original implementation ignored ``debug_on`` and always enabled
    DEBUG, so ``debug(False)`` could never turn debugging back off.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG if debug_on else logging.WARNING)
| {
"repo_name": "patmun/pynetdicom",
"path": "netdicom/__init__.py",
"copies": "2",
"size": "2877",
"license": "mit",
"hash": -3806404514835162000,
"line_mean": 35.417721519,
"line_max": 72,
"alpha_frac": 0.7765033021,
"autogenerated": false,
"ratio": 3.6279949558638083,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5404498257963808,
"avg_score": null,
"num_lines": null
} |
from __future__ import print_function
import os
import sys
import pkg_resources
import platform
from setuptools import setup, find_packages, Command
from setuptools.command.install_egg_info import install_egg_info as _install_egg_info
from setuptools.dist import Distribution
class EntryPoints(Command):
    """Print the console_scripts entry points of a distribution, one per line."""

    description = 'get entrypoints for a distribution'

    user_options = [
        ('dist=', None, 'get entrypoints for specified distribution'),
    ]

    def initialize_options(self):
        """Default the target distribution to this project's own name."""
        self.dist = self.distribution.get_name()

    def finalize_options(self):
        """Abstract method that is required to be overwritten."""

    def run(self):
        """Write each console_scripts entry point to stdout."""
        entry_map = pkg_resources.get_entry_map(self.dist)
        console_scripts = entry_map.get('console_scripts') if entry_map else None
        if console_scripts:
            for entry_point in console_scripts.values():
                print(entry_point, file=sys.stdout)
class install_egg_info(_install_egg_info): # noqa
    """Override the setuptools namespace package templates.
    Customizes the "nspkg.pth" files so that they're compatible with
    "--editable" packages.
    See this pip issue for details:
    https://github.com/pypa/pip/issues/3
    Modifications to the original implementation are marked with CHANGED
    """
    # Each template is a tuple of Python source lines that setuptools joins
    # into the generated .pth bootstrap code; %(...)r placeholders are
    # filled in by the base class.
    _nspkg_tmpl = (
        # CHANGED: Add the import of pkgutil needed on the last line.
        "import sys, types, os, pkgutil",
        "p = os.path.join(sys._getframe(1).f_locals['sitedir'], *%(pth)r)",
        "ie = os.path.exists(os.path.join(p, '__init__.py'))",
        "m = not ie and "
        "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
        "mp = (m or []) and m.__dict__.setdefault('__path__', [])",
        "(p not in mp) and mp.append(p)",
        # CHANGED: Fix the resulting __path__ on the namespace packages to
        # properly traverse "--editable" packages too.
        "mp[:] = m and pkgutil.extend_path(mp, %(pkg)r) or mp",
    )
    "lines for the namespace installer"
    _nspkg_tmpl_multi = (
        # CHANGED: Use "__import__" to ensure the parent package has been
        # loaded before attempting to read it from sys.modules.
        # This avoids a possible issue with nested namespace packages where the
        # parent could be skipped due to an existing __init__.py file.
        'm and __import__(%(parent)r) and setattr(sys.modules[%(parent)r], %(child)r, m)',
    )
    "additional line(s) when a parent package is indicated"
class GradleDistribution(Distribution, object):
    """Distribution whose name/version come from PyGradle environment
    variables and whose install requirements are read from pinned.txt."""

    PINNED_TXT = 'pinned.txt'

    # Mapping of platform name -> iterable of package names that must not
    # be declared as requirements on that platform.  Subclasses may override.
    excluded_platform_packages = {}

    def __init__(self, attrs):
        """Initialize options from the PyGradle environment."""
        attrs['name'] = os.getenv('PYGRADLE_PROJECT_NAME')
        attrs['version'] = os.getenv('PYGRADLE_PROJECT_VERSION')
        attrs['install_requires'] = list(self.load_pinned_deps())
        super(GradleDistribution, self).__init__(attrs)

    def get_command_class(self, command):
        """Return a customized command class or the base one."""
        if command == 'install_egg_info':
            return install_egg_info
        elif command == 'entrypoints':
            return EntryPoints
        return super(GradleDistribution, self).get_command_class(command)

    @property
    def excluded_packages(self):
        """Lower-cased set of packages excluded on the current platform."""
        platform_name = platform.system().lower()
        if platform_name in self.excluded_platform_packages:
            return set(pkg.lower() for pkg in
                       self.excluded_platform_packages[platform_name])
        return set()

    def load_pinned_deps(self):
        """Yield dependency names from the pinned.txt file.

        The pinned.txt file contains a list of dependencies that this Python
        project depends on. Although the PyGradle build system will ignore this
        file and never install dependencies declared via this method, it is
        important to declare the dependencies using this method to maintain
        backwards compatibility with non-PyGradle build systems.
        """
        # calculate this only once
        blacklisted = self.excluded_packages
        try:
            with open(self.PINNED_TXT) as fh:
                reqs = fh.readlines()
        except IOError:
            # No pinned.txt means no pinned dependencies.  (BUGFIX: the
            # original used ``raise StopIteration`` here, which PEP 479
            # turns into a RuntimeError inside a generator on Python 3.7+.)
            return
        for req in reqs:
            # BUGFIX: strip whitespace so blank lines ("\n" is truthy!) are
            # skipped and trailing newlines don't pollute the name.
            req = req.strip()
            if not req:
                continue
            # Don't include the version information so that we don't
            # mistakenly introduce a version conflict issue.
            name = req.split('==')[0].strip()
            if name and name.lower() not in blacklisted:
                yield name
# Package metadata.  Note that name, version and install_requires are
# injected by GradleDistribution from PYGRADLE_* environment variables and
# pinned.txt, overriding anything given here.
setup(
    distclass=GradleDistribution,
    package_dir={'': 'src'},
    packages=find_packages('src'),
    package_data={
        # If any package contains *.json, include them:
        '': ['*.json']
    },
    include_package_data=True,
    name='falcon-pygradle',
    # version='1.0.0', # This is not read instead Gradle build version is used
    description='Falcon PyGradle example',
    # entry_points='''
    # [console_scripts]
    # webapi=webapp.webapi:main
    # ''',
    author='ATTX Project',
    author_email='stefan.negru@helsinki.fi',
    url='https://www.helsinki.fi/en/projects/attx-2016',
    long_description="PyGradle example for creating a Falcon REST API.",
    license='Apache Software License',
    platforms='Linux',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Topic :: Scientific/Engineering :: Information Analysis'
    ],
)
| {
"repo_name": "blankdots/pygradle-falcon-api",
"path": "setup.py",
"copies": "1",
"size": "6725",
"license": "apache-2.0",
"hash": 4947655725818165000,
"line_mean": 35.3513513514,
"line_max": 90,
"alpha_frac": 0.6316728625,
"autogenerated": false,
"ratio": 4.197877652933832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00012225361062570366,
"num_lines": 185
} |
# Adapted from py-l1tf here https://github.com/elsonidoq/py-l1tf/
from cvxopt import solvers, matrix
import l1
import numpy as np
solvers.options['show_progress'] = 0
from matrix_utils import *
def l1tf(y, alpha, period=0, eta=1.0, with_l1p=False, beta=0.0):
    """Fit an l1 trend filter to *y* after rescaling it to [0, 1].

    Delegates to l1tf_cvxopt_l1p when with_l1p is true, otherwise to
    l1tf_cvxopt, then maps the solver's dict of components back to the
    original scale (seasonal parts 'p' and 's' get no baseline offset so
    they keep zero mean).  Values are returned as squeezed numpy arrays.
    """
    # Standardize to [0, 1]; a constant series (zero range) divides by 1.
    lo = float(y.min())
    hi = float(y.max())
    span = hi - lo
    scaled = (y - lo) / (span if span != 0 else 1)
    assert isinstance(y, np.ndarray)
    if with_l1p:
        result = l1tf_cvxopt_l1p(matrix(scaled), alpha, period=period, eta=eta)
    else:
        result = l1tf_cvxopt(matrix(scaled), alpha, period=period, eta=eta, beta=beta)
    # Convert back to the unscaled range as numpy arrays.
    for key, val in result.iteritems():
        # Seasonal components should keep zero mean: no baseline added.
        offset = 0 if key in ['p', 's'] else lo
        result[key] = np.asarray(val * span + offset).squeeze()
    return result
def l1tf_cvxopt(y, alpha, period=0, eta=1.0, beta=0.0):
    """Solve the l1 trend filter dual QP with cvxopt.

    y: cvxopt matrix of observations (assumed pre-scaled by the caller).
    alpha: l1 penalty bound on the second-derivative dual variables.
    period: if > 0, also fit a periodic (seasonal) component.
    eta: weight of the seasonal term.
    beta: if > 0, additionally penalize the l1 norm of the first derivative.

    Returns a dict with 'y', 'x' (trend), 'x_with_seasonal' and, when
    period > 0, 'p' (one period of the seasonal) and 's' (its tiling).
    """
    n = y.size[0]
    m = n - 2
    D = get_second_derivative_matrix(n)
    if beta > 0:
        # put a penalty on the l1 norm of the first derivative as well:
        # stack the scaled first-derivative operator under D so a single
        # dual solve handles both penalties.
        F = get_first_derivative_matrix(n)
        D_F = zero_spmatrix(2*m, n)
        D_F[:m, :n] = D
        D_F[m:, :n] = F * beta
        D = D_F
        m *= 2
    P = D * D.T
    if period > 0:
        # Augment the dual quadratic term with the seasonal contribution.
        B = get_B_matrix(n, period)
        T = get_T_matrix(period)
        Q = B*T
        DQ = D * Q
        TT = T.T * T
        TTI = invert(TT)
        P_seasonal = (1.0/eta) * DQ * TTI * DQ.T
        P += P_seasonal
    q = -D * y
    # Box constraint |nu| <= alpha expressed as G*nu <= h.
    G = zero_spmatrix(2*m, m)
    G[:m, :m] = identity_spmatrix(m)
    G[m:, :m] = - identity_spmatrix(m)
    h = matrix(alpha, (2 * m, 1), tc='d')
    res = solvers.qp(P, q, G, h)
    nu = res['x']
    DT_nu = D.T * nu
    output={}
    output['y'] = y
    output['x_with_seasonal'] = y - DT_nu
    output['x'] = y - DT_nu
    if period > 0:
        # Recover the seasonal profile from the dual solution, tile it,
        # and remove it from the trend estimate.
        output['p'] = (1.0/eta) * TTI * Q.T * DT_nu
        output['s'] = Q * output['p']
        output['x'] -= output['s']
        print 'sum seasonal: %s' % sum(output['s'][:period])
    return output
def l1tf_cvxopt_l1p(y, alpha, period=0, eta=1.0):
    """l1 trend filter variant with an l1 bound on the seasonal component.

    Instead of the quadratic seasonal term used in l1tf_cvxopt, the dual QP
    gains extra inequality rows bounding |DQ' * nu| by eta; the seasonal
    component is then separated afterwards via an l1-norm problem (l1.l1).
    """
    n = y.size[0]
    m = n - 2
    D = get_second_derivative_matrix(n)
    P = D * D.T
    q = -D * y
    n_contraints = m
    if period > 1:
        # period-1 extra dual constraints for the seasonal profile.
        n_contraints += (period-1)
    # Box constraint |nu| <= alpha (first 2*m rows of G / h).
    G = zero_spmatrix(2 * n_contraints, m)
    G[:m, :m] = identity_spmatrix(m)
    G[m:2*m, :m] = - identity_spmatrix(m)
    h = matrix(alpha, (2 * n_contraints, 1), tc='d')
    if period > 1:
        B = get_B_matrix(n, period)
        T = get_T_matrix(period)
        Q = B*T
        DQ = D * Q
        # Seasonal rows: |DQ' * nu| <= eta.
        G[2*m:2*m+period-1, :m] = DQ.T
        G[2*m+period-1:, :m] = -DQ.T
        h[2*m:] = eta
    res = solvers.qp(P, q, G, h)
    nu = res['x']
    DT_nu = D.T * nu
    output = {}
    output['y'] = y
    output['x_with_seasonal'] = y - DT_nu
    # NOTE(review): when period <= 1 no 'x' key is produced here, unlike
    # l1tf_cvxopt -- confirm callers handle that.
    if period > 1:
        # separate seasonal from non-seasonal by solving a
        # least-norm problem
        ratio= eta/alpha
        Pmat = zero_spmatrix(m+period, period-1)
        Pmat[:m, :period-1] = DQ
        Pmat[m:(m+period), :period-1] = -ratio * T
        qvec = matrix(0.0, (m+period, 1), tc='d')
        qvec[:m] = D*(y-DT_nu)
        p_solution = l1.l1(matrix(Pmat), qvec)
        QP_solution = Q*p_solution
        output['p'] = p_solution
        output['s'] = QP_solution
        output['x'] = output['x_with_seasonal'] - output['s']
        print 'sum seasonal is: %s' % sum(output['s'][:period])
    return output
| {
"repo_name": "dave31415/myl1tf",
"path": "myl1tf/myl1tf.py",
"copies": "1",
"size": "3664",
"license": "apache-2.0",
"hash": 9146451298056739000,
"line_mean": 25.7445255474,
"line_max": 90,
"alpha_frac": 0.5264737991,
"autogenerated": false,
"ratio": 2.7404637247569186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37669375238569186,
"avg_score": null,
"num_lines": null
} |
#Adapted from pySerial's Query COM Ports (http://pyserial.sourceforge.org) and Pavel Radzivilovsky (http://stackoverflow.com/questions/2937585/how-to-open-a-serial-port-by-friendly-name/2937588#2937588)
import serial
#setup environment using ctypes
import ctypes
from serial.win32 import ULONG_PTR, is_64bit
from ctypes.wintypes import BOOL
from ctypes.wintypes import HWND
from ctypes.wintypes import DWORD
from ctypes.wintypes import WORD
from ctypes.wintypes import LONG
from ctypes.wintypes import ULONG
from ctypes.wintypes import LPCSTR
from ctypes.wintypes import HKEY
from ctypes.wintypes import BYTE
# Win32 type aliases that ctypes.wintypes does not provide directly.
NULL = 0
HDEVINFO = ctypes.c_void_p
PCTSTR = ctypes.c_char_p
CHAR = ctypes.c_char
LPDWORD = PDWORD = ctypes.POINTER(DWORD)
#~ LPBYTE = PBYTE = ctypes.POINTER(BYTE)
LPBYTE = PBYTE = ctypes.c_void_p # XXX avoids error about types
PHKEY = ctypes.POINTER(HKEY)
ACCESS_MASK = DWORD
REGSAM = ACCESS_MASK
def ValidHandle(value, func, arguments):
    """ctypes errcheck helper: raise WinError on a zero/NULL handle,
    otherwise pass the handle through unchanged."""
    if value:
        return value
    raise ctypes.WinError()
def byte_buffer(length):
    """Allocate a zero-initialized ctypes BYTE array of the given length."""
    array_type = BYTE * length
    return array_type()
def string(buff):
    """Decode a NUL-terminated ctypes BYTE buffer into a str."""
    chars = []
    for raw in buff:
        if raw == 0:
            break
        # "& 0xff" converts the signed byte value to unsigned before chr().
        chars.append(chr(raw & 0xff))
    return ''.join(chars)
class GUID(ctypes.Structure):
    """Windows GUID structure with the conventional brace/hyphen string form."""
    _fields_ = [
        ('Data1', DWORD),
        ('Data2', WORD),
        ('Data3', WORD),
        ('Data4', BYTE*8),
    ]

    def __str__(self):
        head = "%08x-%04x-%04x" % (self.Data1, self.Data2, self.Data3)
        clock_seq = ''.join("%02x" % b for b in self.Data4[:2])
        node = ''.join("%02x" % b for b in self.Data4[2:])
        return "{%s-%s-%s}" % (head, clock_seq, node)
class SP_DEVINFO_DATA(ctypes.Structure):
    """SetupAPI SP_DEVINFO_DATA: identifies one device in a device-info set.
    cbSize must be set to sizeof(SP_DEVINFO_DATA) before API calls."""
    _fields_ = [
        ('cbSize', DWORD),
        ('ClassGuid', GUID),
        ('DevInst', DWORD),
        ('Reserved', ULONG_PTR),
    ]
    def __str__(self):
        return "ClassGuid:%s DevInst:%s" % (self.ClassGuid, self.DevInst)
PSP_DEVINFO_DATA = ctypes.POINTER(SP_DEVINFO_DATA)
class SP_DEVICE_INTERFACE_DATA(ctypes.Structure):
    """SetupAPI SP_DEVICE_INTERFACE_DATA: one device interface instance.
    cbSize must be set to the structure size before API calls."""
    _fields_ = [
        ('cbSize', DWORD),
        ('InterfaceClassGuid', GUID),
        ('Flags', DWORD),
        ('Reserved', ULONG_PTR),
    ]
    def __str__(self):
        return "InterfaceClassGuid:%s Flags:%s" % (self.InterfaceClassGuid, self.Flags)
PSP_DEVICE_INTERFACE_DATA = ctypes.POINTER(SP_DEVICE_INTERFACE_DATA)
# Detail data is variable length, so it is treated as an opaque pointer;
# comPorts() declares a concrete layout per call once the size is known.
PSP_DEVICE_INTERFACE_DETAIL_DATA = ctypes.c_void_p
# --- Win32 SetupAPI function bindings (ANSI "A" variants) ---
setupapi = ctypes.windll.LoadLibrary("setupapi")
SetupDiClassGuidsFromName = setupapi.SetupDiClassGuidsFromNameA
SetupDiClassGuidsFromName.argtypes = [PCTSTR, ctypes.POINTER(GUID), DWORD, PDWORD]
SetupDiClassGuidsFromName.restype = BOOL
SetupDiDestroyDeviceInfoList = setupapi.SetupDiDestroyDeviceInfoList
SetupDiDestroyDeviceInfoList.argtypes = [HDEVINFO]
SetupDiDestroyDeviceInfoList.restype = BOOL
SetupDiGetClassDevs = setupapi.SetupDiGetClassDevsA
SetupDiGetClassDevs.argtypes = [ctypes.POINTER(GUID), PCTSTR, HWND, DWORD]
SetupDiGetClassDevs.restype = HDEVINFO
# errcheck converts a NULL/zero handle into a raised WinError.
SetupDiGetClassDevs.errcheck = ValidHandle
SetupDiEnumDeviceInterfaces = setupapi.SetupDiEnumDeviceInterfaces
SetupDiEnumDeviceInterfaces.argtypes = [HDEVINFO, PSP_DEVINFO_DATA, ctypes.POINTER(GUID), DWORD, PSP_DEVICE_INTERFACE_DATA]
SetupDiEnumDeviceInterfaces.restype = BOOL
SetupDiGetDeviceInterfaceDetail = setupapi.SetupDiGetDeviceInterfaceDetailA
SetupDiGetDeviceInterfaceDetail.argtypes = [HDEVINFO, PSP_DEVICE_INTERFACE_DATA, PSP_DEVICE_INTERFACE_DETAIL_DATA, DWORD, PDWORD, PSP_DEVINFO_DATA]
SetupDiGetDeviceInterfaceDetail.restype = BOOL
SetupDiGetDeviceRegistryProperty = setupapi.SetupDiGetDeviceRegistryPropertyA
SetupDiGetDeviceRegistryProperty.argtypes = [HDEVINFO, PSP_DEVINFO_DATA, DWORD, PDWORD, PBYTE, DWORD, PDWORD]
SetupDiGetDeviceRegistryProperty.restype = BOOL
SetupDiOpenDevRegKey = setupapi.SetupDiOpenDevRegKey
SetupDiOpenDevRegKey.argtypes = [HDEVINFO, PSP_DEVINFO_DATA, DWORD, DWORD, DWORD, REGSAM]
SetupDiOpenDevRegKey.restype = HKEY
# --- Advapi32 registry bindings ---
advapi32 = ctypes.windll.LoadLibrary("Advapi32")
RegCloseKey = advapi32.RegCloseKey
RegCloseKey.argtypes = [HKEY]
RegCloseKey.restype = LONG
RegQueryValueEx = advapi32.RegQueryValueExA
RegQueryValueEx.argtypes = [HKEY, LPCSTR, LPDWORD, LPDWORD, LPBYTE, LPDWORD]
RegQueryValueEx.restype = LONG
# Device interface class GUID for COM ports.
GUID_CLASS_COMPORT = GUID(0x86e0d1e0L, 0x8089, 0x11d0,
                          (BYTE*8)(0x9c, 0xe4, 0x08, 0x00, 0x3e, 0x30, 0x1f, 0x73))
# SetupAPI / registry constants used below.
DIGCF_PRESENT = 2
DIGCF_DEVICEINTERFACE = 16
INVALID_HANDLE_VALUE = 0
ERROR_INSUFFICIENT_BUFFER = 122
SPDRP_HARDWAREID = 1
SPDRP_FRIENDLYNAME = 12
ERROR_NO_MORE_ITEMS = 259
DICS_FLAG_GLOBAL = 1
DIREG_DEV = 0x00000001
KEY_READ = 0x20019
REG_SZ = 1
# workaround for compatibility between Python 2.x and 3.x
PortName = serial.to_bytes([80, 111, 114, 116, 78, 97, 109, 101]) # "PortName"
def comPorts():
    """Generator yielding (port_name, friendly_name) for each present COM
    port, enumerated via the SetupAPI device-interface class for COM ports."""
    g_hdi = SetupDiGetClassDevs(ctypes.byref(GUID_CLASS_COMPORT), None, NULL, DIGCF_PRESENT|DIGCF_DEVICEINTERFACE);
    #~ for i in range(256):
    for dwIndex in range(256):
        did = SP_DEVICE_INTERFACE_DATA()
        did.cbSize = ctypes.sizeof(did)
        if not SetupDiEnumDeviceInterfaces(g_hdi, None, ctypes.byref(GUID_CLASS_COMPORT), dwIndex, ctypes.byref(did)):
            # ERROR_NO_MORE_ITEMS terminates enumeration normally.
            if ctypes.GetLastError() != ERROR_NO_MORE_ITEMS:
                raise ctypes.WinError()
            break
        dwNeeded = DWORD()
        # get the size (first call deliberately passes a NULL buffer)
        if not SetupDiGetDeviceInterfaceDetail(g_hdi, ctypes.byref(did), None, 0, ctypes.byref(dwNeeded), None):
            # Ignore ERROR_INSUFFICIENT_BUFFER
            if ctypes.GetLastError() != ERROR_INSUFFICIENT_BUFFER:
                raise ctypes.WinError()
        # allocate buffer
        # The detail struct is declared per-iteration because its
        # DevicePath length depends on the size reported above.
        class SP_DEVICE_INTERFACE_DETAIL_DATA_A(ctypes.Structure):
            _fields_ = [
                ('cbSize', DWORD),
                ('DevicePath', CHAR*(dwNeeded.value - ctypes.sizeof(DWORD))),
            ]
            def __str__(self):
                return "DevicePath:%s" % (self.DevicePath,)
        idd = SP_DEVICE_INTERFACE_DETAIL_DATA_A()
        if is_64bit():
            # cbSize counts only the fixed header; it differs with alignment.
            idd.cbSize = 8
        else:
            idd.cbSize = 5
        devinfo = SP_DEVINFO_DATA()
        devinfo.cbSize = ctypes.sizeof(devinfo)
        if not SetupDiGetDeviceInterfaceDetail(g_hdi, ctypes.byref(did), ctypes.byref(idd), dwNeeded, None, ctypes.byref(devinfo)):
            raise ctypes.WinError()
        # friendly name
        szFriendlyName = byte_buffer(250)
        if not SetupDiGetDeviceRegistryProperty(g_hdi, ctypes.byref(devinfo), SPDRP_FRIENDLYNAME, None, ctypes.byref(szFriendlyName), ctypes.sizeof(szFriendlyName) - 1, None):
            # Ignore ERROR_INSUFFICIENT_BUFFER
            if ctypes.GetLastError() != ERROR_INSUFFICIENT_BUFFER:
                #~ raise IOError("failed to get details for %s (%s)" % (devinfo, szHardwareID.value))
                szFriendlyName = None
            # NOTE(review): if this failure path is taken, port_name_buffer
            # below is never assigned -- the yield would raise NameError.
        else:
            # the real com port name has to read differently...
            hkey = SetupDiOpenDevRegKey(g_hdi, ctypes.byref(devinfo), DICS_FLAG_GLOBAL, 0, DIREG_DEV, KEY_READ)
            port_name_buffer = byte_buffer(250)
            port_name_length = ULONG(ctypes.sizeof(port_name_buffer))
            RegQueryValueEx(hkey, PortName, None, None, ctypes.byref(port_name_buffer), ctypes.byref(port_name_length))
            RegCloseKey(hkey)
        yield string(port_name_buffer), string(szFriendlyName)
    SetupDiDestroyDeviceInfoList(g_hdi)
def ComPortNameFromFriendlyNamePrefix(namePrefix):
    """Return the port name of the first (sorted) COM device whose friendly
    name starts with *namePrefix*, or None when no device matches."""
    for port_name, friendly_name in sorted(comPorts()):
        if friendly_name.startswith(namePrefix):
            return port_name
    return None
"repo_name": "6ba1cbef/badgeup",
"path": "src/CPy27/badgeup/badgeup/COMBadge/SetupDeviceWrapper.py",
"copies": "1",
"size": "7875",
"license": "mit",
"hash": -7553666514904651000,
"line_mean": 36.6842105263,
"line_max": 202,
"alpha_frac": 0.6858412698,
"autogenerated": false,
"ratio": 3.3783783783783785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9516349419420713,
"avg_score": 0.009574045751532827,
"num_lines": 209
} |
#adapted from
#https://www.researchgate.net/publication/228966598_Optimal_Single_Biarc_Fitting_and_its_Applications
#calculates the biarc going through p0 and p1 with tangents t0 and t1
#respective. returns the homogeneous control points of a bezier curve
#defining the circular arcs.
def biarc_h(p0, t0, p1, t1, r):
    """Compute the biarc through p0 and p1 with tangents t0 and t1.

    r is the ratio between the two arc parameters (alpha = r * beta).
    Returns the homogeneous control points of a rational quadratic Bezier
    describing the circular arcs (or, in the degenerate semicircle case,
    a 3-element list spelling that arc).
    """
    V = p1 - p0
    rt0 = la.refl(t0,V)
    x = la.dot(rt0,t1)
    print(x)
    # #if x < -0.9:
    # #r = 1/abs((1+x))
    U = r*t0 + t1
    nc = la.norm2(V)
    b = 2*la.dot(V,U)
    a = 2*r*(1 - la.dot(t0,t1))
    eps = 1/2**16
    if a < eps:
        if abs(b) < eps: # U=V, beta infinite, second circle is a semicircle
            print("abs(b=",b,") < eps")
            J = (p0+p1)/2
            r1 = m.sqrt(nc)/4
            h = [la.hom(r1*t0,0),la.hom(J,1),la.hom(-r1*t1,0)]
            sys.stdout.flush()
            return h
        beta = nc / b
        print("abs(a=",a,") < eps, beta=",beta)
    else:
        # Positive root of the quadratic a*beta^2 + b*beta - nc = 0.
        D = b*b + 4*a*nc
        beta = (-b + m.sqrt(D)) / (2*a)
        #beta = 2*nc/(b + m.sqrt(D))
    alpha = beta * r
    if abs(b) < eps and x < 0: # U=V, beta infinite, second circle is a semicircle
        print("panic!")
        # parametrize circle of joints
        pt0 = la.refl(t0,V)
        pt1 = la.refl(t1,V)
        tt0 = t0+pt0
        tt1 = t1+pt1
        nVcosa2 = la.dot(tt0,V)
        # BUGFIX: was 'Nvcosa2' (NameError); the variable defined just
        # above is nVcosa2.
        d = nc/2*nVcosa2
        JP = p0 + d*t0
        cosa2 = nVcosa2 / m.sqrt(nc)
        J = la.proj(bezier2(0.5,[la.hom(p0,1),la.hom(JP,cosa2),la.hom(p1,1)]))
        Jt = pt0
        ch1 = J-p0
        ch2 = p1-J
        # BUGFIX: 'cos' was an undefined bare name; math is imported as m
        # in this module (see m.sqrt above).
        # NOTE(review): taking the cosine of a dot product looks suspect --
        # la.dot of unit vectors IS the cosine; confirm against the paper
        # before relying on this (already "panic!") branch.
        alpha = la.norm2(ch1)/2/m.cos(la.dot(t0,ch1))
        beta = la.norm2(ch2)/2/m.cos(la.dot(t1,ch2))
    else:
        alpha = r * beta
    ab = alpha + beta
    c1 = p0 + alpha * t0
    c3 = p1 - beta * t1
    print(alpha,beta,a,b,nc)
    c2 = (beta / ab) * c1 + (alpha / ab) * c3
    w1 = la.dot(t0, la.unit(c2 - p0))
    w2 = la.dot(t1, la.unit(p1 - c2))
    sys.stdout.flush()
    return la.hom(c1,w1),la.hom(c2,1),la.hom(c3,w2)
| {
"repo_name": "tarnheld/ted-editor",
"path": "cruft/old-biarc.py",
"copies": "1",
"size": "1929",
"license": "unlicense",
"hash": 8572040058868247000,
"line_mean": 23.72,
"line_max": 101,
"alpha_frac": 0.5272161742,
"autogenerated": false,
"ratio": 2.2456344586728756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.32728506328728757,
"avg_score": null,
"num_lines": null
} |
''' Adapted from:
'''
import sys,getopt,struct,signal
from mod_debuggee_procedure_call import *
from pydbg import *
from pydbg.defines import *
from pydbg.pydbg_core import *
from pydbg_stack_dmp import *
from IPython.Shell import IPShellEmbed
import time
from subprocess import *
from mypdbg_bps import *
from mypdbg_hooks import *
'''
following 4 lines taken from script by pedram for trillian bug
'''
class SigHandler:
    '''
    Callable signal handler that remembers the most recent signal number
    and counts how many signals have arrived since the last reset.
    Register an instance directly with signal.signal().

    class definition taken from http://www.bintest.com/extending_pydbg.html
    '''
    def __init__(self):
        self.signaled = 0   # signals received since last reset
        self.sn = None      # most recent signal number

    # Re-arm without constructing a new object.
    reset = __init__

    def __call__(self, sn, sf):
        '''signal.signal-compatible entry point.'''
        self.sn = sn
        self.signaled += 1
class Tdbg(pydbg,pydbg_core):
'''
class definition and concept taken from http://www.bintest.com/extending_pydbg.html
'''
    def __init__(self):
        """Initialize the pydbg core, install a SIGBREAK handler (used by
        debug_event_iteration to drop into an IPython shell), and create
        the set tracking single-stepped thread ids."""
        pydbg.__init__(self)
        self.sh = sh = SigHandler()
        signal.signal(signal.SIGBREAK,sh)
        self.ss_tids = set()  # thread ids with single stepping enabled
def convert_bytes_to_int(self, buffer, bytes = 4):
'''
convert the bytes to an integer big endian
'''
i = 0
cnt = 0
while cnt < bytes:
j = ord(buffer[cnt]) &0xff
i = j + (i << 8)
cnt += 1
return i
def convert_bytes_to_int_fe(self, buffer, bytes = 4):
'''
convert flip the endianness of the bytes and convert to an
integer little endian
'''
i = 0
cnt = 0
buffer = self.convert_rb_endianess(buffer)
while cnt < bytes:
j = ord(buffer[cnt]) &0xff
i = j + (i << 8)
cnt += 1
return i
def convert_rb_endianess(self, buffer):
'''
convert the endianess on a set of raw bytes
'''
h = [i for i in buffer]
h.reverse()
return "".join(h)
def convert_int_endianess(self, integer):
'''
convert the endianess of an integer value
'''
i_buf = self.flip_endian(integer)
return self.convert_bytes_to_int(i_buf)
def convert_to_rb(self, val, bytes = 4):
'''
integer value to convert into a set of raw bytes
'''
buffer = []
cnt = 0
v = val
while cnt < bytes:
buffer.append(v & 0xff)
v >>= 8
cnt += 1
buffer.reverse()
buffer = map(chr, buffer)
return "".join(buffer)
def convert_int_to_bytes_fe(self, val, bytes = 4):
'''
convert integer to raw bytes and flip the endianness
'''
buffer = []
cnt = 0
v = val
while cnt < bytes:
buffer.append(v & 0xff)
v >>= 8
cnt += 1
buffer = map(chr, buffer)
return "".join(buffer)
    def debug_event_iteration(self):
        """Run one pydbg event iteration; if a SIGBREAK arrived since the
        last call, drop into an embedded IPython shell, then clear the
        pending-signal state."""
        pydbg.debug_event_iteration(self)
        sh = self.sh
        if sh.signaled and sh.sn:
            if sh.sn==signal.SIGBREAK:
                ipshell = IPShellEmbed()
                ipshell()
            sh.reset()
    def enable_ss(self, tid):
        '''
        Enable Single Stepping on a thread.  A tid of None selects the
        first enumerated thread; failures to open or step the thread are
        deliberately ignored (best effort).
        '''
        if tid is None:
            tid = self.enumerate_threads()[0]
        try:
            h_thread = self.open_thread(tid)
            self.single_step(True, h_thread)
            self.ss_tids.add(tid)
        except: pass
    def disable_ss(self, tid):
        '''
        Disable single stepping on a thread previously enabled via
        enable_ss.  Untracked tids are a no-op; open/step failures are
        deliberately ignored (best effort).
        '''
        if not tid in self.ss_tids:
            return
        if tid is None:
            tid = self.enumerate_threads()[0]
        try:
            h_thread = self.open_thread(tid)
            self.single_step(False, h_thread)
            self.ss_tids.remove(tid)
        except: pass
def enable_all_ss(self):
'''
Enable Single Stepping on all threads
'''
tid_list = self.enumerate_threads()
for tid in tid_list:
enable_ss(tid)
def disable_all_ss(self):
'''
Disable Single Stepping on all threads
'''
for tid in self.ss_tids:
disable_ss(self, tid)
    def update_threads_contexts(self):
        '''
        Update the contexts for all the threads running in the process;
        returns the resulting {tid: context} mapping, which is also kept
        on self.contexts.  Threads with tid 0 are skipped as invalid.
        '''
        tlist = self.enumerate_threads()
        self.contexts = {}
        for tid in tlist:
            if tid == 0:
                print "Invalid TID %08x"%tid
                continue
            context = self.get_thread_context(None, thread_id=tid)
            retaddr = self.get_arg(0, context)  # NOTE(review): result unused
            self.contexts[tid] = context
        return self.contexts
    def give_shell(self):
        '''
        Give an IPython shell to the user so they can interact with the
        debugger interactively (blocks until the user exits the shell).
        '''
        print "\n\n****Entering dbg shell"
        ipshell = IPShellEmbed()
        ipshell()
        print "\n\n***Exiting the debug shell\n\n"
# redefine the dump_*context methods with only context here
def dump_context(self, context, stack_depth=5, print_dots = True):
    '''
    Overrode Pedram's Implementation of dump_context

    Render a human-readable register and stack dump for *context*.
    Fixed: the EBP line previously printed context_list["esi"]'s
    dereference description instead of context_list["ebp"]'s.
    '''
    context_list = self.dump_context_list(context, stack_depth, print_dots)
    context_dump = "CONTEXT DUMP\n"
    context_dump += " EIP: %08x %s\n" % (context.Eip, context_list["eip"])
    context_dump += " EAX: %08x (%10d) -> %s\n" % (context.Eax, context.Eax, context_list["eax"])
    context_dump += " EBX: %08x (%10d) -> %s\n" % (context.Ebx, context.Ebx, context_list["ebx"])
    context_dump += " ECX: %08x (%10d) -> %s\n" % (context.Ecx, context.Ecx, context_list["ecx"])
    context_dump += " EDX: %08x (%10d) -> %s\n" % (context.Edx, context.Edx, context_list["edx"])
    context_dump += " EDI: %08x (%10d) -> %s\n" % (context.Edi, context.Edi, context_list["edi"])
    context_dump += " ESI: %08x (%10d) -> %s\n" % (context.Esi, context.Esi, context_list["esi"])
    context_dump += " EBP: %08x (%10d) -> %s\n" % (context.Ebp, context.Ebp, context_list["ebp"])
    context_dump += " ESP: %08x (%10d) -> %s\n" % (context.Esp, context.Esp, context_list["esp"])
    for offset in xrange(0, stack_depth + 1):
        # Cap the stack dump at 0x300 bytes below ESP.
        if offset * 4 >= 0x300:
            break
        context_dump += " +%02x: %08x (%10d) -> %s\n" % \
            ( \
            offset * 4, \
            context_list["esp+%02x"%(offset*4)]["value"], \
            context_list["esp+%02x"%(offset*4)]["value"], \
            context_list["esp+%02x"%(offset*4)]["desc"] \
            )
    return context_dump
def dump_context_list (self, context=None, stack_depth=5, print_dots=True):
    '''
    Overrode Pedram's Implementation of dump_context

    Returns a dict with a smart-dereferenced description per register plus
    "esp+NN" entries for the first stack_depth+1 dwords on the stack.
    '''
    context_list = {}
    context_list["eip"] = self.disasm(context.Eip)
    context_list["eax"] = self.smart_dereference(context.Eax, print_dots)
    context_list["ebx"] = self.smart_dereference(context.Ebx, print_dots)
    context_list["ecx"] = self.smart_dereference(context.Ecx, print_dots)
    context_list["edx"] = self.smart_dereference(context.Edx, print_dots)
    context_list["edi"] = self.smart_dereference(context.Edi, print_dots)
    context_list["esi"] = self.smart_dereference(context.Esi, print_dots)
    context_list["ebp"] = self.smart_dereference(context.Ebp, print_dots)
    context_list["esp"] = self.smart_dereference(context.Esp, print_dots)
    for offset in xrange(0, stack_depth + 1):
        # ESP *should* always be readable, but guard the read anyway and
        # record an error marker instead of aborting the whole dump.
        try:
            esp = self.flip_endian_dword(self.read_process_memory(context.Esp + offset * 4, 4))
            context_list["esp+%02x"%(offset*4)] = {}
            context_list["esp+%02x"%(offset*4)]["value"] = esp
            context_list["esp+%02x"%(offset*4)]["desc"] = self.smart_dereference(esp, print_dots)
        except:
            context_list["esp+%02x"%(offset*4)] = {}
            context_list["esp+%02x"%(offset*4)]["value"] = "?????"
            context_list["esp+%02x"%(offset*4)]["desc"] = "ERROR: Failed Read"
    return context_list
def get_reg_value(context, register):
    ''' Taken from cody pierce mindshaRE post

    Accepts either a register name ("eax") or its numeric index (0-8) and
    returns that register's value from *context*; False when unrecognised.
    '''
    table = (("eax", 0, "Eax"), ("ecx", 1, "Ecx"), ("edx", 2, "Edx"),
             ("ebx", 3, "Ebx"), ("esp", 4, "Esp"), ("ebp", 5, "Ebp"),
             ("esi", 6, "Esi"), ("edi", 7, "Edi"), ("eip", 8, "Eip"))
    for name, index, attr in table:
        if register == name or register == index:
            return getattr(context, attr)
    return False
def kickstart_process(filename):
    '''
    Launch *filename* with piped stdin/stdout and return the Popen handle.
    '''
    # Popen/PIPE come from a subprocess import earlier in this module.
    p = Popen(filename, stdin=PIPE, stdout=PIPE)
    return p
def special_treatment(dbg, context, tid):
    '''
    Enable single stepping on *tid* when EIP sits at one of two
    hard-coded addresses of interest in the target binary.
    '''
    if context.Eip == 0x408AEC or context.Eip == 0x00408d0a:
        print "Setting up x%08x for tracing"%tid
        dbg.enable_ss(tid)
def setup_proc(dbg, malware, wait=0, bp_list=None):
    '''
    Kick start the process specified by malware, attach the debugger to it
    after *wait* seconds, then install the breakpoints in *bp_list*.
    Returns the Popen handle of the launched process.
    '''
    proc = kickstart_process(malware)
    if wait:
        time.sleep(wait)
    dbg.attach(proc.pid)
    if bp_list is not None:
        set_bp_list(dbg, bp_list)
    return proc
def quick_setup(dbg):
    '''
    One-shot startup helper: launch the configured target, let it run for a
    second, attach, and install the module-level breakpoint lists.
    '''
    global interesting_bps, malware_name, stack_dump_bps, my_bps
    bps = stack_dump_bps + my_bps
    return setup_proc(dbg, malware_name, 1, bps)
# Module-level debugger setup: route breakpoint and single-step events
# through handler_breakpoint (defined earlier in this file).
dbg = Tdbg()
dbg.set_callback(EXCEPTION_BREAKPOINT, handler_breakpoint)
dbg.set_callback(EXCEPTION_SINGLE_STEP, handler_breakpoint)
# Default target binary; quick_setup() launches and attaches to it.
malware_name = "saleslist.exe"
if __name__ == "__main__":
    print """Run the following commands to get started:
quick_setup(dbg)
dbg.run()"""
dbg.give_shell() | {
"repo_name": "deeso/python_scrirpts",
"path": "ida/mypdbg_interface.py",
"copies": "1",
"size": "9379",
"license": "apache-2.0",
"hash": -2333927574107269600,
"line_mean": 27.2242990654,
"line_max": 127,
"alpha_frac": 0.6200021324,
"autogenerated": false,
"ratio": 2.9064146265881625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4026416758988162,
"avg_score": null,
"num_lines": null
} |
# Adapted from rsted
import os
from os.path import join as J
from StringIO import StringIO
from docutils.core import publish_string, publish_parts
# see http://docutils.sourceforge.net/docs/user/config.html
# Conservative docutils defaults: no generated-by footer or source link,
# file insertion and raw passthrough disabled, never halt on warnings.
default_rst_opts = {
    'no_generator': True,
    'no_source_link': True,
    'tab_width': 4,
    'file_insertion_enabled': False,
    'raw_enabled': False,
    'stylesheet_path': None,
    'traceback': True,
    'halt_level': 5,
}

# Directory holding the HTML template and per-theme stylesheets.
THEMES = os.path.join(os.path.dirname(__file__), 'themes')
# cache + security
def rst2html(rst, theme=None, opts=None, body_only=False):
    """Render reStructuredText *rst* to HTML.

    theme:     optional theme name; adds themes/<theme>/<theme>.css.
    opts:      optional dict of docutils settings overriding the defaults.
    body_only: return only the HTML body fragment, without template/theme.

    Returns (html, warnings) where warnings is the accumulated docutils
    warning text.  Fixed: *opts* was previously ignored when body_only=True
    because the update happened after the early return.
    """
    rst_opts = default_rst_opts.copy()
    if opts:
        rst_opts.update(opts)
    rst_opts['warning_stream'] = StringIO()

    def _collect_warnings():
        # docutils writes warnings into the stream; rewind and read them out.
        rst_opts['warning_stream'].seek(0)
        return rst_opts['warning_stream'].read()

    if body_only:
        out = publish_parts(rst, writer_name='html',
                            settings_overrides=rst_opts)['html_body']
        return out, _collect_warnings()

    rst_opts['template'] = os.path.join(THEMES, 'template.txt')
    stylesheets = ['basic.css']
    if theme:
        stylesheets.append('%s/%s.css' % (theme, theme))
    rst_opts['stylesheet'] = ','.join([J(THEMES, p) for p in stylesheets])
    out = publish_string(rst, writer_name='html', settings_overrides=rst_opts)
    return out, _collect_warnings()
| {
"repo_name": "AcrDijon/henet",
"path": "henet/rst/rst2html.py",
"copies": "1",
"size": "1414",
"license": "apache-2.0",
"hash": 4134362435020623400,
"line_mean": 27.8571428571,
"line_max": 78,
"alpha_frac": 0.6357850071,
"autogenerated": false,
"ratio": 3.3908872901678655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4526672297267865,
"avg_score": null,
"num_lines": null
} |
# adapted from scikit-learn
"""Check whether we or not we should build the documentation
If the last commit message has a "[doc skip]" marker, do not build
the doc. On the contrary if a "[doc build]" marker is found, build the doc
instead of relying on the subsequent rules.
We always build the documentation for jobs that are not related to a specific
PR (e.g. a merge to master or a maintenance branch).
If this is a PR, check that if there are some files in this PR that are under
the "doc/" or "examples/" folders, otherwise skip.
If the introspection of the current commit fails for any reason, the default
behavior is to build the documentation.
"""
import sys
import os
from subprocess import check_output, CalledProcessError
def exit(msg="", skip=False):
    """Print a BUILD/SKIP verdict with *msg* and terminate with status 0."""
    verdict = "SKIP" if skip else "BUILD"
    print("%s: %s" % (verdict, msg))
    sys.exit(0)
# Introspect the message for the commit that triggered the build
commit = os.environ.get('CIRCLE_SHA1')
if not commit:
    exit("undefined CIRCLE_SHA1 variable")
try:
    commit_msg = check_output("git log --format=%B -n 1".split() + [commit])
    commit_msg = commit_msg.decode('utf-8')
except CalledProcessError:
    # Default to building the docs if git introspection fails.
    exit("failed to introspect commit message for %s" % commit)
if "[doc skip]" in commit_msg:
    exit("[doc skip] marker found", skip=True)
elif "[doc build]" in commit_msg:
    exit("[doc build] marker found")
# Check whether this commit is part of a pull request or not
pr_url = os.environ.get('CI_PULL_REQUEST')
if not pr_url:
    # The documentation should be always built when executed from one of the
    # main branches
    exit("not a pull request")
# Introspect the list of files changed by all the commits in this PR.
# Hardcode the assumption that this is a PR to origin/master of this repo
# as apparently there is no way to reliably get the target of a PR with circle
# ci
git_range = "origin/master...%s" % commit
try:
    check_output("git fetch origin master".split())
    # 'A...B' diffs against the merge base, i.e. only the PR's own changes.
    filenames = check_output("git diff --name-only".split() + [git_range])
except CalledProcessError:
    exit("git introspection failed.")
filenames = filenames.decode('utf-8').split()
for filename in filenames:
    if filename.startswith(u'doc/') or filename.startswith(u'examples/'):
        exit("detected doc impacting file modified by PR in range %s: %s"
             % (git_range, filename))
# This PR does not seem to have any documentation related file changed.
msg = "no doc impacting files detected:\n" + u"\n".join(filenames)
exit(msg, skip=True)
| {
"repo_name": "kcompher/FreeDiscovUI",
"path": "build_tools/circle/check_build_doc.py",
"copies": "1",
"size": "2506",
"license": "bsd-3-clause",
"hash": -4921218578907263000,
"line_mean": 36.4029850746,
"line_max": 77,
"alpha_frac": 0.7158818835,
"autogenerated": false,
"ratio": 3.7125925925925927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4928474476092593,
"avg_score": null,
"num_lines": null
} |
# Adapted from scikit learn
from operator import attrgetter
import inspect
import subprocess
import os
import sys
from functools import partial
# Shell command that prints the abbreviated hash of the current git HEAD.
REVISION_CMD = 'git rev-parse --short HEAD'
def _get_git_revision():
    """Return the short hash of the current git HEAD, or None on failure."""
    try:
        raw = subprocess.check_output(REVISION_CMD.split())
    except (subprocess.CalledProcessError, OSError):
        print('Failed to execute git to get revision')
        return None
    return raw.strip().decode('utf-8')
def _linkcode_resolve(domain, info, package, url_fmt, revision):
"""Determine a link to online source for a class/method/function
This is called by sphinx.ext.linkcode
An example with a long-untouched module that everyone has
>>> _linkcode_resolve('py', {'module': 'tty',
... 'fullname': 'setraw'},
... package='tty',
... url_fmt='http://hg.python.org/cpython/file/'
... '{revision}/Lib/{package}/{path}#L{lineno}',
... revision='xxxx')
'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
"""
if revision is None:
return
if domain not in ('py', 'pyx'):
return
if not info.get('module') or not info.get('fullname'):
return
class_name = info['fullname'].split('.')[0]
if type(class_name) != str:
# Python 2 only
class_name = class_name.encode('utf-8')
module = __import__(info['module'], fromlist=[class_name])
obj = attrgetter(info['fullname'])(module)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return
fn = os.path.relpath(fn,
start=os.path.dirname(__import__(package).__file__))
try:
lineno = inspect.getsourcelines(obj)[1]
except Exception:
lineno = ''
return url_fmt.format(revision=revision, package=package,
path=fn, lineno=lineno)
def make_linkcode_resolve(package, url_fmt):
    """Returns a linkcode_resolve function for the given URL format

    revision is a git commit reference (hash or name)

    package is the name of the root module of the package

    url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
                                   'blob/{revision}/{package}/'
                                   '{path}#L{lineno}')
    """
    # Resolve the revision once, at Sphinx config time, not per lookup.
    return partial(_linkcode_resolve, package=package, url_fmt=url_fmt,
                   revision=_get_git_revision())
| {
"repo_name": "rth/PyKrige",
"path": "doc/sphinxext/github_link.py",
"copies": "5",
"size": "2701",
"license": "bsd-3-clause",
"hash": 2432251983044790300,
"line_mean": 30.4069767442,
"line_max": 78,
"alpha_frac": 0.5808959645,
"autogenerated": false,
"ratio": 4.111111111111111,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7192007075611111,
"avg_score": null,
"num_lines": null
} |
# Adapted from scikit learn
from operator import attrgetter
import inspect
import subprocess
import os
import sys
from functools import partial
# Shell command that prints the abbreviated hash of the current git HEAD.
REVISION_CMD = "git rev-parse --short HEAD"
def _get_git_revision():
    """Short hash of the checked-out git HEAD, or None when git fails."""
    try:
        raw = subprocess.check_output(REVISION_CMD.split())
    except (subprocess.CalledProcessError, OSError):
        print("Failed to execute git to get revision")
        return None
    return raw.strip().decode("utf-8")
def _linkcode_resolve(domain, info, package, url_fmt, revision):
    """Determine a link to online source for a class/method/function

    This is called by sphinx.ext.linkcode
    An example with a long-untouched module that everyone has

    >>> _linkcode_resolve('py', {'module': 'tty',
    ...                          'fullname': 'setraw'},
    ...                   package='tty',
    ...                   url_fmt='http://hg.python.org/cpython/file/'
    ...                           '{revision}/Lib/{package}/{path}#L{lineno}',
    ...                   revision='xxxx')
    'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
    """
    # Nothing to link to without a git revision or a Python domain.
    if revision is None:
        return
    if domain not in ("py", "pyx"):
        return
    if not info.get("module") or not info.get("fullname"):
        return
    class_name = info["fullname"].split(".")[0]
    if type(class_name) != str:
        # Python 2 only
        class_name = class_name.encode("utf-8")
    module = __import__(info["module"], fromlist=[class_name])
    obj = attrgetter(info["fullname"])(module)
    # Locate the source file: ask the object, then fall back to its module.
    try:
        fn = inspect.getsourcefile(obj)
    except Exception:
        fn = None
    if not fn:
        try:
            fn = inspect.getsourcefile(sys.modules[obj.__module__])
        except Exception:
            fn = None
    if not fn:
        return
    # Path relative to the package root so it maps onto the repository layout.
    fn = os.path.relpath(fn, start=os.path.dirname(__import__(package).__file__))
    try:
        lineno = inspect.getsourcelines(obj)[1]
    except Exception:
        lineno = ""
    return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno)
def make_linkcode_resolve(package, url_fmt):
    """Returns a linkcode_resolve function for the given URL format

    revision is a git commit reference (hash or name)

    package is the name of the root module of the package

    url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
                                   'blob/{revision}/{package}/'
                                   '{path}#L{lineno}')
    """
    # Resolve the revision once, at Sphinx config time, not per lookup.
    return partial(
        _linkcode_resolve,
        revision=_get_git_revision(),
        package=package,
        url_fmt=url_fmt,
    )
| {
"repo_name": "bsmurphy/PyKrige",
"path": "docs/source/sphinxext/github_link.py",
"copies": "1",
"size": "2645",
"license": "bsd-3-clause",
"hash": 2920548404126609000,
"line_mean": 30.1176470588,
"line_max": 85,
"alpha_frac": 0.593194707,
"autogenerated": false,
"ratio": 4.019756838905775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5112951545905775,
"avg_score": null,
"num_lines": null
} |
# Adapted from score written by wkentaro
# https://github.com/wkentaro/pytorch-fcn/blob/master/torchfcn/utils.py
import numpy as np
class runningScore(object):
    """Confusion-matrix accumulator for semantic-segmentation metrics."""

    def __init__(self, n_classes):
        # Rows index ground truth, columns index predictions.
        self.n_classes = n_classes
        self.confusion_matrix = np.zeros((n_classes, n_classes))

    def _fast_hist(self, label_true, label_pred, n_class):
        # Only count pixels whose ground-truth label is a valid class id.
        valid = (label_true >= 0) & (label_true < n_class)
        combined = n_class * label_true[valid].astype(int) + label_pred[valid]
        return np.bincount(combined, minlength=n_class ** 2).reshape(n_class, n_class)

    def update(self, label_trues, label_preds):
        """Accumulate a batch of (ground-truth, prediction) label maps."""
        for truth, pred in zip(label_trues, label_preds):
            self.confusion_matrix += self._fast_hist(
                truth.flatten(), pred.flatten(), self.n_classes)

    def get_scores(self):
        """Returns accuracy score evaluation result.
        - overall accuracy
        - mean accuracy
        - mean IU
        - fwavacc
        """
        cm = self.confusion_matrix
        diag = np.diag(cm)
        gt_per_class = cm.sum(axis=1)
        acc = diag.sum() / cm.sum()
        acc_cls = np.nanmean(diag / gt_per_class)
        # Per-class intersection-over-union; NaN for absent classes.
        iu = diag / (gt_per_class + cm.sum(axis=0) - diag)
        mean_iu = np.nanmean(iu)
        freq = gt_per_class / cm.sum()
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        cls_iu = dict(zip(range(self.n_classes), iu))
        summary = {
            "Overall Acc: \t": acc,
            "Mean Acc : \t": acc_cls,
            "FreqW Acc : \t": fwavacc,
            "Mean IoU : \t": mean_iu,
        }
        return (summary, cls_iu)

    def reset(self):
        """Zero the confusion matrix."""
        self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
class averageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        # val holds the most recent sample; sum/count drive the running avg.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
| {
"repo_name": "meetshah1995/pytorch-semseg",
"path": "ptsemseg/metrics.py",
"copies": "1",
"size": "2166",
"license": "mit",
"hash": 5025356749440900000,
"line_mean": 29.9428571429,
"line_max": 96,
"alpha_frac": 0.5387811634,
"autogenerated": false,
"ratio": 3.3068702290076337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43456513924076334,
"avg_score": null,
"num_lines": null
} |
# Adapted from /seamless/stdlib/switch-join/switch-join.py
from seamless.highlevel import Context, Cell
from seamless import stdlib
ctx = Context()
# Make the switch/join macros from the seamless stdlib available here.
ctx.include(stdlib.switch)
ctx.include(stdlib.join)
# One input value and three float cells, one per switch branch.
ctx.a = 10.0
ctx.a1 = Cell("float")
ctx.a2 = Cell("float")
ctx.a3 = Cell("float")
# Per-branch second operands.
ctx.f1 = 2.0
ctx.f2 = 3.0
ctx.f3 = 4.0
def add(a, b):
    """Transformer body for branch 1: sum of the operands."""
    result = a + b
    return result
def sub(a, b):
    """Transformer body for branch 2: difference of the operands."""
    result = a - b
    return result
def mul(a, b):
    """Transformer body for branch 3: product of the operands."""
    result = a * b
    return result
ctx.op1 = add
ctx.op1.a = ctx.a1
ctx.op1.b = ctx.f1
ctx.r1 = ctx.op1
ctx.op2 = sub
ctx.op2.a = ctx.a2
ctx.op2.b = ctx.f2
ctx.r2 = ctx.op2
ctx.op3 = mul
ctx.op3.a = ctx.a3
ctx.op3.b = ctx.f3
ctx.r3 = ctx.op3
adict = {
"path1": ctx.a1,
"path2": ctx.a2,
"path3": ctx.a3,
}
rdict = {
"path1": ctx.r1,
"path2": ctx.r2,
"path3": ctx.r3,
}
ctx.selected = "path1"
ctx.switch = ctx.lib.switch(
celltype="float",
input=ctx.a,
selected=ctx.selected,
outputs=adict,
)
ctx.compute()
ctx.output = Cell("float")
"""
ctx.join = ctx.lib.join(
celltype="float",
inputs=rdict,
selected=ctx.selected,
output=ctx.output,
)
"""
# Alternative syntax
ctx.join = ctx.lib.join()
ctx.join.celltype = "float"
ctx.join.inputs = rdict
ctx.join.selected = ctx.selected
ctx.output = ctx.join.output
# /
ctx.compute()
print(ctx.output.value)
print(ctx.a.value, ctx.a1.value, ctx.a2.value, ctx.a3.value)
print(ctx.a1.status, ctx.a2.status, ctx.a3.status)
print(ctx.r1.value, ctx.r2.value, ctx.r3.value)
print()
ctx.selected = "path2"
print(ctx._needs_translation)
ctx.compute()
print(ctx.output.value)
print(ctx.a.value, ctx.a1.value, ctx.a2.value, ctx.a3.value)
print(ctx.a1.status, ctx.a2.status, ctx.a3.status)
print(ctx.r1.value, ctx.r2.value, ctx.r3.value)
print()
ctx.selected = "path3"
print(ctx._needs_translation)
ctx.compute()
print(ctx.output.value)
print(ctx.a.value, ctx.a1.value, ctx.a2.value, ctx.a3.value)
print(ctx.a1.status, ctx.a2.status, ctx.a3.status)
print(ctx.r1.value, ctx.r2.value, ctx.r3.value)
print()
graph = ctx.get_graph()
ctx.save_graph("switch-join-stdlib.seamless")
ctx.save_zip("switch-join-stdlib.zip") | {
"repo_name": "sjdv1982/seamless",
"path": "tests/highlevel/switch-join-stdlib.py",
"copies": "1",
"size": "2084",
"license": "mit",
"hash": -1973743578431258600,
"line_mean": 19.6435643564,
"line_max": 60,
"alpha_frac": 0.6756238004,
"autogenerated": false,
"ratio": 2.3155555555555556,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3491179355955556,
"avg_score": null,
"num_lines": null
} |
# Adapted from select_parser.py by Paul McGuire
# http://pyparsing.wikispaces.com/file/view/select_parser.py/158651233/select_parser.py
#
# a simple SELECT statement parser, taken from SQLite's SELECT statement
# definition at http://www.sqlite.org/lang_select.html
#
from pyparsing import *
ParserElement.enablePackrat()
def no_suppress_delimited_list(expression, delimiter=','):
    """Like pyparsing's delimitedList but keeps the delimiter tokens."""
    tail = ZeroOrMore(delimiter + expression)
    return expression + tail
def concat(tokens):
    """Parse action: fuse the matched tokens back into a single string."""
    pieces = list(tokens)
    return ''.join(pieces)
def build_json_get_expr(terms):
    """Left-fold *terms* into nested json_get() calls.

    ['a', 'b', 'c'] -> 'json_get(json_get(a, b), c)'.
    Raises ValueError when fewer than two terms are given.
    """
    if len(terms) < 2:
        raise ValueError('Not enough terms')
    expr = terms[0]
    for term in terms[1:]:
        expr = 'json_get({0}, {1})'.format(expr, term)
    return expr
def replace_json_get(tokens):
    """Parse action: rewrite an infix '->' chain as nested json_get() calls."""
    operands = [tok for tok in tokens[0] if tok != '->']
    return build_json_get_expr(operands)
# keywords
# SQL keywords as case-insensitive tokens; note a few names (BY, COLLATE)
# are deliberately bound more than once by these tuple unpackings.
(UNION, ALL, AND, INTERSECT, EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER,
 CROSS, LEFT, OUTER, JOIN, AS, INDEXED, NOT, SELECT, DISTINCT, FROM, WHERE, GROUP, BY,
 HAVING, ORDER, BY, LIMIT, OFFSET, OR) = map(CaselessKeyword, """UNION, ALL, AND, INTERSECT,
EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER, CROSS, LEFT, OUTER, JOIN, AS, INDEXED,
NOT, SELECT, DISTINCT, FROM, WHERE, GROUP, BY, HAVING, ORDER, BY, LIMIT, OFFSET, OR
""".replace(",", "").split())
(CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN, ELSE, END, CASE, WHEN, THEN, EXISTS,
 COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE, CURRENT_TIME, CURRENT_DATE,
 CURRENT_TIMESTAMP) = map(CaselessKeyword, """CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN, ELSE,
END, CASE, WHEN, THEN, EXISTS, COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE,
CURRENT_TIME, CURRENT_DATE, CURRENT_TIMESTAMP""".replace(",", "").split())
# Any keyword; used below to keep identifiers from matching reserved words.
keyword = MatchFirst((UNION, ALL, INTERSECT, EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER,
                      CROSS, LEFT, OUTER, JOIN, AS, INDEXED, NOT, SELECT, DISTINCT, FROM, WHERE,
                      GROUP, BY,
                      HAVING, ORDER, BY, LIMIT, OFFSET, CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN,
                      ELSE, END, CASE, WHEN, THEN, EXISTS,
                      COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE, CURRENT_TIME, CURRENT_DATE,
                      CURRENT_TIMESTAMP))
identifier = ~keyword + Word(alphas, alphanums + "_")
collation_name = identifier.copy()
column_name = identifier.copy()
column_alias = identifier.copy()
table_name = identifier.copy()
table_alias = identifier.copy()
index_name = identifier.copy()
function_name = identifier.copy()
parameter_name = identifier.copy()
database_name = identifier.copy()
# expression
LPAR, RPAR, COMMA = map(Word, "(),")
select_stmt = Forward()
expr = Forward()
integer = Regex(r"[+-]?\d+")
numeric_literal = Regex(r"\d+(\.\d*)?([eE][+-]?\d+)?")
string_literal = QuotedString("'", unquoteResults=False)
blob_literal = Combine(oneOf("x X") + "'" + Word(hexnums) + "'")
literal_value = (numeric_literal | string_literal | blob_literal |
                 NULL | CURRENT_TIME | CURRENT_DATE | CURRENT_TIMESTAMP)
# '?NNN' positional or ':name' / '@name' / '$name' named placeholders.
bind_parameter = (
    Word("?", nums) |
    Combine(oneOf(": @ $") + parameter_name)
)
type_name = oneOf("TEXT REAL INTEGER BLOB NULL")
expr_term = (
    CAST + LPAR + expr + AS + type_name + RPAR |
    EXISTS + LPAR + select_stmt + RPAR |
    function_name + LPAR + Optional(no_suppress_delimited_list(expr) | "*") + RPAR |
    literal_value |
    bind_parameter |
    (database_name + "." + table_name + "." + identifier) |
    (table_name + "." + identifier) |
    identifier
).setParseAction(concat)
UNARY, BINARY, TERNARY = 1, 2, 3
# Operator table in decreasing precedence; the custom '->' JSON accessor
# binds tightest and is rewritten into json_get() calls by its parse action.
expr << operatorPrecedence(expr_term,
                           [
                               ('->', BINARY, opAssoc.LEFT, replace_json_get),
                               (oneOf('- + ~') | NOT, UNARY, opAssoc.LEFT),
                               (ISNULL | NOTNULL | (NOT + NULL), UNARY, opAssoc.LEFT),
                               (IS + NOT, BINARY, opAssoc.LEFT),
                               ('||', BINARY, opAssoc.LEFT),
                               (oneOf('* / %'), BINARY, opAssoc.LEFT),
                               (oneOf('+ -'), BINARY, opAssoc.LEFT),
                               (oneOf('<< >> & |'), BINARY, opAssoc.LEFT),
                               (oneOf('< <= > >='), BINARY, opAssoc.LEFT),
                               (
                                   oneOf('= == != <>') | IS | IN | LIKE | GLOB | MATCH | REGEXP,
                                   BINARY,
                                   opAssoc.LEFT),
                               (AND, BINARY, opAssoc.LEFT),
                               (OR, BINARY, opAssoc.LEFT),
                               ((BETWEEN, AND), TERNARY, opAssoc.LEFT),
                           ])
compound_operator = (UNION + Optional(ALL) | INTERSECT | EXCEPT)
ordering_term = expr + Optional(COLLATE + collation_name) + Optional(ASC | DESC)
join_constraint = Optional(
    ON + expr | USING + LPAR + Group(no_suppress_delimited_list(column_name)) + RPAR)
join_op = COMMA | (Optional(NATURAL) + Optional(INNER | CROSS | LEFT + OUTER | LEFT | OUTER) + JOIN)
# Each matched table reference is collected under the 'table_ids' result name.
table_reference = (
    (database_name("database") + "." + table_name("table") | table_name("table")) +
    Optional(Optional(AS) + table_alias("alias"))
).setResultsName("table_ids", listAllMatches=True)
join_source = Forward()
single_source = (
    table_reference +
    Optional(INDEXED + BY + index_name | NOT + INDEXED) |
    (LPAR + select_stmt + RPAR + Optional(Optional(AS) + table_alias)) |
    (LPAR + join_source + RPAR))
join_source << single_source + ZeroOrMore(join_op + single_source + join_constraint)
result_column = table_name + "." + "*" | (expr + Optional(Optional(AS) + column_alias)) | "*"
select_core = (
    SELECT + Optional(DISTINCT | ALL) + Group(no_suppress_delimited_list(result_column)) +
    Optional(FROM + join_source) +
    Optional(WHERE + expr) +
    Optional(GROUP + BY + Group(no_suppress_delimited_list(ordering_term)) +
             Optional(HAVING + expr)))
select_stmt << (select_core + ZeroOrMore(compound_operator + select_core) +
                Optional(ORDER + BY + Group(no_suppress_delimited_list(ordering_term))) +
                Optional(
                    LIMIT + (integer | integer + OFFSET + integer | integer + COMMA + integer)))
| {
"repo_name": "lebinh/aq",
"path": "aq/select_parser.py",
"copies": "1",
"size": "6365",
"license": "mit",
"hash": 3449002527536661500,
"line_mean": 41.4333333333,
"line_max": 100,
"alpha_frac": 0.5838177533,
"autogenerated": false,
"ratio": 3.5049559471365637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9580306812563695,
"avg_score": 0.001693377574573769,
"num_lines": 150
} |
# Adapted from similar tool in megaman (https://github.com/mmp2/megaman)
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
""" cythonize
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'megaman'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script thinks that the pyx files have changed relative to the C files
by comparing hashes stored in a database file.
Simple script to invoke Cython (and Tempita) on all .pyx (.pyx.in)
files; while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed.
For now, this script should be run by developers when changing Cython files
only, and the resulting C files checked in, so that end-users (and Python-only
developers) do not get the Cython/Tempita dependencies.
Originally written by Dag Sverre Seljebotn, and copied here from:
https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files.
"""
from __future__ import division, print_function, absolute_import
import os
import re
import sys
import hashlib
import subprocess
# Database file recording input/output hashes of already-cythonized files.
HASH_FILE = 'cythonize.dat'
# Root package scanned when no command-line argument is given.
DEFAULT_ROOT = 'funzo'
# WindowsError is not defined on unix systems
try:
    WindowsError
except NameError:
    WindowsError = None
#
# Rules
#
def process_pyx(fromfile, tofile):
    """Run cython on *fromfile*, producing *tofile* (C, or C++ for .cxx).

    Tries the 'cython' executable first, then falls back to invoking the
    Cython compiler through the current Python interpreter.
    """
    try:
        from Cython.Compiler.Version import version as cython_version
        from distutils.version import LooseVersion
        # NOTE(review): message still says 'megaman' although this copy
        # lives in the funzo repo -- likely stale from the adaptation.
        if LooseVersion(cython_version) < LooseVersion('0.22'):
            raise Exception('Building megaman requires Cython >= 0.22')
    except ImportError:
        # Cython missing entirely -- let the subprocess attempt report it.
        pass
    flags = ['--fast-fail']
    if tofile.endswith('.cxx'):
        flags += ['--cplus']
    try:
        try:
            r = subprocess.call(['cython'] + flags + ["-o", tofile, fromfile])
            if r != 0:
                raise Exception('Cython failed')
        except OSError:
            # There are ways of installing Cython that don't result in a cython
            # executable on the path, see gh-2397.
            r = subprocess.call([sys.executable, '-c',
                                 'import sys; from Cython.Compiler.Main import '
                                 'setuptools_main as main; sys.exit(main())'] + flags +
                                ["-o", tofile, fromfile])
            if r != 0:
                raise Exception("Cython either isn't installed or it failed.")
    except OSError:
        raise OSError('Cython needs to be installed')
def process_tempita_pyx(fromfile, tofile):
    """Expand the Tempita template *fromfile* (.pyx.in) into a .pyx file,
    then cythonize the result into *tofile*."""
    try:
        try:
            # Cython bundles Tempita; fall back to a standalone install.
            from Cython import Tempita as tempita
        except ImportError:
            import tempita
    except ImportError:
        raise Exception('Building megaman requires Tempita: '
                        'pip install --user Tempita')
    from_filename = tempita.Template.from_filename
    template = from_filename(fromfile, encoding=sys.getdefaultencoding())
    pyxcontent = template.substitute()
    assert fromfile.endswith('.pyx.in')
    pyxfile = fromfile[:-len('.pyx.in')] + '.pyx'
    with open(pyxfile, "w") as f:
        f.write(pyxcontent)
    process_pyx(pyxfile, tofile)
# Maps a source-file suffix to the processor that regenerates its C output.
rules = {
    # fromext : function
    '.pyx': process_pyx,
    '.pyx.in': process_tempita_pyx
}
#
# Hash db
#
def load_hashes(filename):
    """Read the hash database: {path: (sha1 of input, sha1 of output)}.

    A missing database file yields an empty dict.
    """
    hashes = {}
    if os.path.isfile(filename):
        with open(filename, 'r') as f:
            for line in f:
                path, inhash, outhash = line.split()
                hashes[path] = (inhash, outhash)
    return hashes
def save_hashes(hash_db, filename):
    """Write the hash database, one 'path inhash outhash' line per entry,
    sorted by path for stable diffs."""
    lines = ["%s %s %s\n" % (path, pair[0], pair[1])
             for path, pair in sorted(hash_db.items())]
    with open(filename, 'w') as f:
        f.writelines(lines)
def sha1_of_file(filename):
    """Hex SHA-1 digest of the file's entire contents."""
    with open(filename, "rb") as f:
        data = f.read()
    return hashlib.sha1(data).hexdigest()
#
# Main program
#
def normpath(path):
    """Normalise *path* to forward slashes and strip a leading './'."""
    path = path.replace(os.sep, '/')
    return path[2:] if path.startswith('./') else path
def get_hash(frompath, topath):
    """Return (sha1 of source, sha1 of target or None if target missing)."""
    to_hash = None
    if os.path.exists(topath):
        to_hash = sha1_of_file(topath)
    return (sha1_of_file(frompath), to_hash)
def process(path, fromfile, tofile, processor_function, hash_db):
    """Regenerate *tofile* from *fromfile* (both relative to *path*) when the
    stored hashes show that either file changed; updates *hash_db* in place."""
    fullfrompath = os.path.join(path, fromfile)
    fulltopath = os.path.join(path, tofile)
    current_hash = get_hash(fullfrompath, fulltopath)
    if current_hash == hash_db.get(normpath(fullfrompath), None):
        print('%s has not changed' % fullfrompath)
        return
    orig_cwd = os.getcwd()
    try:
        # Processors run with CWD set to the package directory so relative
        # includes inside the pyx files resolve correctly.
        os.chdir(path)
        print('Processing %s' % fullfrompath)
        processor_function(fromfile, tofile)
    finally:
        os.chdir(orig_cwd)
    # changed target file, recompute hash
    current_hash = get_hash(fullfrompath, fulltopath)
    # store hash in db
    hash_db[normpath(fullfrompath)] = current_hash
def find_process_files(root_dir):
    """Walk *root_dir* and cythonize every .pyx / .pyx.in file that changed
    since the hashes recorded in HASH_FILE; persist the updated hashes."""
    hash_db = load_hashes(HASH_FILE)
    for cur_dir, dirs, files in os.walk(root_dir):
        for filename in files:
            in_file = os.path.join(cur_dir, filename + ".in")
            if filename.endswith('.pyx') and os.path.isfile(in_file):
                # This .pyx is generated from a .pyx.in template; the
                # template rule handles it.
                continue
            for fromext, function in rules.items():
                if filename.endswith(fromext):
                    toext = ".c"
                    with open(os.path.join(cur_dir, filename), 'rb') as f:
                        data = f.read()
                    # A '# distutils: language = c++' pragma switches the
                    # generated target to C++.
                    m = re.search(
                        br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I | re.M)
                    if m:
                        toext = ".cxx"
                    fromfile = filename
                    tofile = filename[:-len(fromext)] + toext
                    process(cur_dir, fromfile, tofile, function, hash_db)
    save_hashes(hash_db, HASH_FILE)
def main():
    """Entry point: cythonize sys.argv[1], defaulting to DEFAULT_ROOT."""
    root_dir = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_ROOT
    find_process_files(root_dir)
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_name": "makokal/funzo",
"path": "tools/cythonize.py",
"copies": "1",
"size": "6391",
"license": "mit",
"hash": 6413831441783020000,
"line_mean": 30.6386138614,
"line_max": 97,
"alpha_frac": 0.6107025505,
"autogenerated": false,
"ratio": 3.713538640325392,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48242411908253924,
"avg_score": null,
"num_lines": null
} |
# Adapted from similar tool in scipy
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
""" cythonize
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'megaman'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script thinks that the pyx files have changed relative to the C files
by comparing hashes stored in a database file.
Simple script to invoke Cython (and Tempita) on all .pyx (.pyx.in)
files; while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed.
For now, this script should be run by developers when changing Cython files
only, and the resulting C files checked in, so that end-users (and Python-only
developers) do not get the Cython/Tempita dependencies.
Originally written by Dag Sverre Seljebotn, and copied here from:
https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files.
"""
from __future__ import division, print_function, absolute_import
import os
import re
import sys
import hashlib
import subprocess
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'megaman'
# WindowsError is not defined on unix systems
try:
    # Probe for the Windows-only builtin; raises NameError elsewhere.
    WindowsError
except NameError:
    # Define the name so references don't NameError on non-Windows platforms
    # (kept for parity with the scipy script this file was adapted from).
    WindowsError = None
#
# Rules
#
def process_pyx(fromfile, tofile):
    """Run Cython on *fromfile*, producing *tofile* (.c or .cxx).

    Tries the ``cython`` executable first; if that is not on PATH, falls
    back to invoking the Cython compiler through the current interpreter.
    Raises when Cython is missing or reports a non-zero exit status.
    """
    try:
        from Cython.Compiler.Version import version as cython_version
        from distutils.version import LooseVersion
        if LooseVersion(cython_version) < LooseVersion('0.22'):
            raise Exception('Building megaman requires Cython >= 0.22')
    except ImportError:
        # Cython not importable in this interpreter; the subprocess calls
        # below will surface the error if it is truly absent.
        pass
    flags = ['--fast-fail']
    if tofile.endswith('.cxx'):
        # C++ target requested (distutils language=c++ marker upstream)
        flags += ['--cplus']
    try:
        try:
            r = subprocess.call(['cython'] + flags + ["-o", tofile, fromfile])
            if r != 0:
                raise Exception('Cython failed')
        except OSError:
            # There are ways of installing Cython that don't result in a cython
            # executable on the path, see gh-2397.
            r = subprocess.call([sys.executable, '-c',
                                 'import sys; from Cython.Compiler.Main import '
                                 'setuptools_main as main; sys.exit(main())'] + flags +
                                ["-o", tofile, fromfile])
            if r != 0:
                raise Exception("Cython either isn't installed or it failed.")
    except OSError:
        raise OSError('Cython needs to be installed')
def process_tempita_pyx(fromfile, tofile):
    """Expand a .pyx.in Tempita template to .pyx, then cythonize it to *tofile*."""
    try:
        try:
            # Cython bundles Tempita; prefer the bundled copy.
            from Cython import Tempita as tempita
        except ImportError:
            import tempita
    except ImportError:
        raise Exception('Building megaman requires Tempita: '
                        'pip install --user Tempita')
    from_filename = tempita.Template.from_filename
    template = from_filename(fromfile, encoding=sys.getdefaultencoding())
    pyxcontent = template.substitute()
    assert fromfile.endswith('.pyx.in')
    # drop only the '.in' part: foo.pyx.in -> foo.pyx
    pyxfile = fromfile[:-len('.pyx.in')] + '.pyx'
    with open(pyxfile, "w") as f:
        f.write(pyxcontent)
    # hand the generated .pyx to the normal cython pipeline
    process_pyx(pyxfile, tofile)
# Maps a source-file extension to the processor that turns it into C/C++.
rules = {
    # fromext : function
    '.pyx': process_pyx,
    '.pyx.in': process_tempita_pyx
}
#
# Hash db
#
def load_hashes(filename):
    """Load the hash database stored in *filename*.

    Returns a dict mapping source path -> (sha1 of input, sha1 of output).
    An empty dict is returned when the database file does not exist yet.
    """
    hashes = {}
    if os.path.isfile(filename):
        with open(filename, 'r') as f:
            for line in f:
                # each record is "<path> <input-sha1> <output-sha1>";
                # use a distinct name so the `filename` parameter isn't shadowed
                path, inhash, outhash = line.split()
                hashes[path] = (inhash, outhash)
    return hashes
def save_hashes(hash_db, filename):
    """Write *hash_db* to *filename*, one "path in-hash out-hash" line per entry."""
    records = ["%s %s %s\n" % (path, pair[0], pair[1])
               for path, pair in sorted(hash_db.items())]
    with open(filename, 'w') as f:
        f.writelines(records)
def sha1_of_file(filename):
    """Return the hex SHA-1 digest of the file's contents."""
    with open(filename, "rb") as f:
        contents = f.read()
    return hashlib.sha1(contents).hexdigest()
#
# Main program
#
def normpath(path):
    """Normalize *path* to forward slashes and strip a leading './'."""
    normalized = path.replace(os.sep, '/')
    return normalized[2:] if normalized.startswith('./') else normalized
def get_hash(frompath, topath):
    """Return (sha1 of *frompath*, sha1 of *topath* or None when it doesn't exist)."""
    if os.path.exists(topath):
        out_hash = sha1_of_file(topath)
    else:
        out_hash = None
    return (sha1_of_file(frompath), out_hash)
def process(path, fromfile, tofile, processor_function, hash_db):
    """Regenerate *tofile* from *fromfile* (both relative to *path*) if needed.

    The work is skipped when the (input, output) hash pair recorded in
    *hash_db* matches the current file contents; otherwise
    *processor_function* is run from inside *path* and the fresh hash pair
    is stored back into *hash_db*.
    """
    fullfrompath = os.path.join(path, fromfile)
    fulltopath = os.path.join(path, tofile)
    current_hash = get_hash(fullfrompath, fulltopath)
    if current_hash == hash_db.get(normpath(fullfrompath), None):
        print('%s has not changed' % fullfrompath)
        return
    orig_cwd = os.getcwd()
    try:
        # processor functions expect relative names, so run from the
        # file's own directory and restore the cwd afterwards
        os.chdir(path)
        print('Processing %s' % fullfrompath)
        processor_function(fromfile, tofile)
    finally:
        os.chdir(orig_cwd)
    # changed target file, recompute hash
    current_hash = get_hash(fullfrompath, fulltopath)
    # store hash in db
    hash_db[normpath(fullfrompath)] = current_hash
def find_process_files(root_dir):
    """Walk *root_dir* and (re)generate C/C++ sources for every .pyx/.pyx.in file."""
    hash_db = load_hashes(HASH_FILE)
    for cur_dir, dirs, files in os.walk(root_dir):
        for filename in files:
            in_file = os.path.join(cur_dir, filename + ".in")
            # a .pyx that also has a .pyx.in template is generated output: skip it
            if filename.endswith('.pyx') and os.path.isfile(in_file):
                continue
            for fromext, function in rules.items():
                if filename.endswith(fromext):
                    toext = ".c"
                    with open(os.path.join(cur_dir, filename), 'rb') as f:
                        data = f.read()
                        # a "# distutils: language = c++" marker selects a C++ target
                        m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M)
                        if m:
                            toext = ".cxx"
                    fromfile = filename
                    tofile = filename[:-len(fromext)] + toext
                    process(cur_dir, fromfile, tofile, function, hash_db)
    save_hashes(hash_db, HASH_FILE)
def main():
    """Entry point: cythonize sources under the directory given as argv[1].

    Falls back to DEFAULT_ROOT when no command-line argument is supplied.
    """
    args = sys.argv[1:]
    root_dir = args[0] if args else DEFAULT_ROOT
    find_process_files(root_dir)
if __name__ == '__main__':
    main()
| {
"repo_name": "jakevdp/Mmani",
"path": "tools/cythonize.py",
"copies": "4",
"size": "6320",
"license": "bsd-2-clause",
"hash": 8289371627126050000,
"line_mean": 32.2631578947,
"line_max": 105,
"alpha_frac": 0.6136075949,
"autogenerated": false,
"ratio": 3.7132784958871916,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6326886090787192,
"avg_score": null,
"num_lines": null
} |
# Adapted from @skuroda's PersistentRegexHighlight and @wbond's resource loader.
import sys
import sublime
#VERSION = int(sublime.version())
mod_prefix = "classes_and_tests"  # package prefix shared by all plugin modules
reload_mods = []  # names of already-imported plugin modules to reload
"""
if VERSION > 3000:
    mod_prefix = "PersistentRegexHighlight." + mod_prefix
    from imp import reload
    for mod in sys.modules:
        if mod[0:24] == 'PersistentRegexHighlight' and sys.modules[mod] is not None:
            reload_mods.append(mod)
else:"""
# Collect every loaded module that belongs to this plugin package.
for mod in sorted(sys.modules):
    testString = 'classes_and_tests'
    if mod[0:len(testString)] == testString and sys.modules[mod] is not None:
        reload_mods.append(mod)
# Modules are reloaded in this order; dependencies come before dependents.
# BUG FIX: three entries were missing trailing commas, which made Python
# concatenate adjacent string literals into one bogus name (e.g.
# '.src.SublimeWindowManipulator.src.UserSettings'), so those six modules
# were never reloaded.
mods_load_order = [
    '.src.Std',
    '.src.UnitTestFunctions',
    '.src.SublimeFunctions',
    '.src.SublimeWindowFunctions',
    '.src.SublimeWindowManipulator',
    '.src.UserSettings',
    '.src.FileComponents',
    '.src.MirroredDirectory',
    '.src.Command',
    #'.src.FileManipulation',
    '.src.FileSystem',
    #'.src.FileCreator',
    '.src.TemplateFileCreator',
    '.src.CommandExecutionThread',
    '.src.MultipleCommandExecutionThread',
    '.src.InputPanel',
    '.src.OutputPanel',
    '.src.LiveUnitTesting',
    '.TextInsert',
    '.ToggleSourcesTests',
    '.RunUnitTests',
    '.CreateMissingFunctions',
    '.ContinuousUnitTesting',
    '.ClassesAndTests',
    '.SyncronizeClassAndTestTabs',
    '.OutputPanelInsert',
    '.OutputPanelClear',
    '.DocumentationFromUnitTests',
]
# Reload the plugin modules in dependency order so edits take effect without
# restarting the editor. NOTE(review): bare `reload` is the Python 2 builtin
# (Sublime Text 2 runtime); a Python 3 host would need importlib.reload —
# see the commented VERSION > 3000 branch above. TODO confirm target runtime.
for suffix in mods_load_order:
    mod = mod_prefix + suffix
    if mod in reload_mods:
        reload(sys.modules[mod])
"repo_name": "anconaesselmann/ClassesAndTests",
"path": "classes_and_tests/src/reloader.py",
"copies": "1",
"size": "1566",
"license": "mit",
"hash": -7883799954134831000,
"line_mean": 25.5593220339,
"line_max": 84,
"alpha_frac": 0.6666666667,
"autogenerated": false,
"ratio": 3.5112107623318387,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46778774290318387,
"avg_score": null,
"num_lines": null
} |
# Adapted from Software Design's serve_my_exe.py program for using
# the SD_app React app.
from http.server import BaseHTTPRequestHandler, HTTPServer
from subprocess import Popen, PIPE
import sys
import logging
exe_name = ""
class S(BaseHTTPRequestHandler):
    """Request handler that pipes each POST body through an external program.

    The program named by the module-level ``exe_name`` is spawned once per
    request; the request body is fed to its stdin and its stdout becomes
    the HTTP response body.
    """
    def _set_headers(self):
        # 200 + permissive CORS so a browser-hosted frontend can call us
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
    def do_POST(self):
        content_length = int(self.headers['Content-Length'])
        incoming_data = self.rfile.read(content_length)
        global exe_name
        # NOTE(review): shell=True runs exe_name through the shell. exe_name is
        # operator-configured here, but it must never come from request input.
        x = Popen(exe_name, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
        (stdout, stderr) = x.communicate(incoming_data)
        outgoing_data = stdout
        if len(stderr) > 0:
            # child stderr is logged for debugging, not returned to the client
            logging.debug(stderr)
        self._set_headers()
        self.wfile.write(outgoing_data)
    def log_message(self, format, *args):
        # silence BaseHTTPRequestHandler's per-request console logging
        return
def run_server(server=HTTPServer, handler=S, port=25100):
    """Bind an HTTP server to all interfaces on *port* and serve forever.

    *server* and *handler* are injectable for testing/customisation.
    """
    instance = server(('', port), handler)
    logging.debug('Starting httpd...')
    instance.serve_forever()
| {
"repo_name": "StoDevX/cs251-toolkit",
"path": "cs251tk/webapp/server.py",
"copies": "1",
"size": "1183",
"license": "mit",
"hash": -8126628704647342000,
"line_mean": 29.3333333333,
"line_max": 77,
"alpha_frac": 0.6551141167,
"autogenerated": false,
"ratio": 3.70846394984326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.986357806654326,
"avg_score": 0,
"num_lines": 39
} |
"""Adapted from sphinx.ext.autosummary.generate
Modified to only consider module members listed in `__all__` and
only class members listed in `autodoc_allowed_special_members`.
Copyright 2007-2016 by the Sphinx team, https://github.com/sphinx-doc/sphinx/blob/master/AUTHORS
License: BSD, see https://github.com/sphinx-doc/sphinx/blob/master/LICENSE for details.
"""
import os
from jinja2 import FileSystemLoader, TemplateNotFound
from jinja2.sandbox import SandboxedEnvironment
from sphinx import package_dir
from sphinx.ext.autosummary import import_by_name, get_documenter
from sphinx.jinja2glue import BuiltinTemplateLoader
from sphinx.util.osutil import ensuredir
from sphinx.util.inspect import safe_getattr
from sphinx.ext.autosummary.generate import find_autosummary_in_files
from sphinx.util import logging
logger = logging.getLogger(__name__)
def get_members(app, obj, typ, include_public=()):
    """Collect names of *obj*'s members whose documenter objtype is *typ*.

    Returns ``(public, items)``: *items* is every member of the requested
    type; *public* is the subset considered public — listed in the object's
    ``__all__`` (or, when there is no ``__all__``, not underscore-prefixed),
    or named in *include_public* — restricted to members with a docstring.
    """
    declared = getattr(obj, '__all__', [])
    skip_all = not declared
    # BUG FIX: build a fresh list. The previous `__all__ += include_public`
    # extended the inspected module's actual __all__ list in place, since
    # getattr returns it by reference.
    allowed = list(declared) + list(include_public)
    items = []
    for name in dir(obj):
        try:
            documenter = get_documenter(app, safe_getattr(obj, name), obj)
        except AttributeError:
            # member can't be resolved; skip it
            continue
        if documenter.objtype == typ:
            items.append(name)
    public = [x for x in items if x in allowed or skip_all and not x.startswith('_')]
    # only members with docstrings are considered public
    public = [x for x in public if safe_getattr(obj, x).__doc__]
    return public, items
def generate_autosummary_docs(sources, app, suffix='.rst', output_dir=None,
                              base_path=None, builder=None, template_dir=None):
    """Write one stub page per autosummary entry found in *sources*.

    Mirrors sphinx.ext.autosummary.generate.generate_autosummary_docs, but
    member selection goes through the ``__all__``-aware get_members() above.
    Recurses into freshly written files so nested autosummaries are covered.
    """
    showed_sources = list(sorted(sources))
    if len(showed_sources) > 20:
        # keep the log line readable for large source lists
        showed_sources = showed_sources[:10] + ['...'] + showed_sources[-10:]
    logger.info('[autosummary] generating autosummary for: %s' % ', '.join(showed_sources))
    if output_dir:
        logger.info('[autosummary] writing to %s' % output_dir)
    if base_path is not None:
        sources = [os.path.join(base_path, filename) for filename in sources]
    # create our own templating environment
    template_dirs = [os.path.join(package_dir, 'ext', 'autosummary', 'templates')]
    if builder is not None:
        # allow the user to override the templates
        template_loader = BuiltinTemplateLoader()
        template_loader.init(builder, dirs=template_dirs)
    else:
        if template_dir:
            template_dirs.insert(0, template_dir)
        template_loader = FileSystemLoader(template_dirs)
    template_env = SandboxedEnvironment(loader=template_loader)
    # read
    items = find_autosummary_in_files(sources)
    # keep track of new files
    new_files = []
    # write
    for name, path, template_name in sorted(set(items), key=str):
        if path is None:
            continue  # The corresponding autosummary:: directive did not have a :toctree: option
        path = output_dir or os.path.abspath(path)
        ensuredir(path)
        try:
            name, obj, parent, mod_name = import_by_name(name)
        except ImportError as e:
            logger.warning('[autosummary] failed to import %r: %s' % (name, e))
            continue
        fn = os.path.join(path, name + suffix)
        # skip it if it exists
        if os.path.isfile(fn):
            continue
        new_files.append(fn)
        with open(fn, 'w') as f:
            doc = get_documenter(app, obj, parent)
            if template_name is not None:
                template = template_env.get_template(template_name)
            else:
                try:
                    # prefer a per-objtype template, fall back to the base one
                    template = template_env.get_template('autosummary/%s.rst' % doc.objtype)
                except TemplateNotFound:
                    template = template_env.get_template('autosummary/base.rst')
            ns = {}
            if doc.objtype == 'module':
                ns['members'] = dir(obj)
                ns['functions'], ns['all_functions'] = get_members(app, obj, 'function')
                ns['classes'], ns['all_classes'] = get_members(app, obj, 'class')
                ns['exceptions'], ns['all_exceptions'] = get_members(app, obj, 'exception')
            elif doc.objtype == 'class':
                ns['members'] = dir(obj)
                include_public = app.config.autodoc_allowed_special_members
                ns['methods'], ns['all_methods'] = get_members(app, obj, 'method', include_public)
                ns['attributes'], ns['all_attributes'] = get_members(app, obj, 'attribute')
            parts = name.split('.')
            if doc.objtype in ('method', 'attribute'):
                # dotted name is module.Class.member: split accordingly
                mod_name = '.'.join(parts[:-2])
                cls_name = parts[-2]
                obj_name = '.'.join(parts[-2:])
                ns['class'] = cls_name
            else:
                mod_name, obj_name = '.'.join(parts[:-1]), parts[-1]
            ns['fullname'] = name
            ns['module'] = mod_name
            ns['objname'] = obj_name
            ns['name'] = parts[-1]
            ns['objtype'] = doc.objtype
            ns['underline'] = len(name) * '='
            rendered = template.render(**ns)
            f.write(rendered)
    # descend recursively to new files
    if new_files:
        generate_autosummary_docs(new_files, app, suffix=suffix, output_dir=output_dir,
                                  base_path=base_path, builder=builder, template_dir=template_dir)
def process_generate_options(app):
    """Sphinx 'builder-inited' hook: generate stub pages for configured files."""
    sources = app.config.generate_from_files
    if not sources:
        return
    ext = '.rst'
    # ensure every name carries the .rst extension exactly once
    normalized = [name if name.endswith(ext) else name + ext for name in sources]
    generate_autosummary_docs(normalized, app, suffix=ext,
                              builder=app.builder, base_path=app.srcdir)
def setup(app):
    """Sphinx extension entry point.

    Declares the ``generate_from_files`` config value and schedules stub
    generation for when the builder has been initialised.
    """
    app.add_config_value('generate_from_files', [], 'env')
    app.connect('builder-inited', process_generate_options)
    return {'version': "0.1"}
| {
"repo_name": "dean0x7d/pybinding",
"path": "docs/_ext/generate.py",
"copies": "1",
"size": "5931",
"license": "bsd-2-clause",
"hash": 8182314794212353000,
"line_mean": 37.264516129,
"line_max": 99,
"alpha_frac": 0.6071488788,
"autogenerated": false,
"ratio": 3.8714099216710185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49785588004710185,
"avg_score": null,
"num_lines": null
} |
'''Adapted from Swinner, p: 88; class based version'''
import tkinter as tk
class DeuxDessins(tk.Tk):
    """Tk window with a canvas and two buttons, each drawing one figure.

    (Adapted from Swinner, p. 88; class-based version.)
    """

    def __init__(self):
        super().__init__()
        self.creation()
        self.positionnement()

    def creation(self):
        """Create the widgets: a canvas plus one button per figure."""
        self.canevas = tk.Canvas(self, width=200, height=200, bg='ivory')
        self.bouton1 = tk.Button(self, text="cible", command=self.figure_1)
        self.bouton2 = tk.Button(self, text="visage", command=self.figure_2)

    def positionnement(self):
        """Lay the widgets out in the window."""
        self.canevas.pack()
        self.bouton1.pack(side=tk.LEFT, padx=3, pady=3)
        self.bouton2.pack(side=tk.RIGHT, padx=3, pady=3)

    def dessiner_cercle(self, x, y, r, couleur='black'):
        """Draw a circle centred on (x, y) with radius r."""
        bbox = (x - r, y - r, x + r, y + r)
        self.canevas.create_oval(*bbox, outline=couleur)

    def effacer_tout(self):
        """Clear everything currently drawn on the canvas."""
        self.canevas.delete(tk.ALL)

    def figure_1(self):
        """Draw a target: two crosshair axes plus concentric circles."""
        self.effacer_tout()
        self.canevas.create_line(100, 0, 100, 200, fill='blue')
        self.canevas.create_line(0, 100, 200, 100, fill='blue')
        for rayon in range(15, 100, 15):
            self.dessiner_cercle(100, 100, rayon)

    def figure_2(self):
        """Draw a cartoon face built entirely out of circles."""
        self.effacer_tout()
        for spec in ([100, 100, 80, 'red'],      # face outline
                     [70, 70, 15, 'blue'],       # eyes
                     [130, 70, 15, 'blue'],
                     [70, 70, 5, 'black'],       # pupils
                     [130, 70, 5, 'black'],
                     [44, 115, 20, 'red'],       # cheeks
                     [156, 115, 20, 'red'],
                     [100, 95, 15, 'purple'],    # nose
                     [100, 145, 30, 'purple']):  # mouth
            self.dessiner_cercle(*spec)


if __name__ == '__main__':
    fenetre = DeuxDessins()
    fenetre.mainloop()
| {
"repo_name": "aroberge/exemples_fr",
"path": "gui/deux_dessins_v2_tk.py",
"copies": "1",
"size": "2019",
"license": "cc0-1.0",
"hash": -8903217997508124000,
"line_mean": 29.5454545455,
"line_max": 76,
"alpha_frac": 0.5401785714,
"autogenerated": false,
"ratio": 3,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40401785714,
"avg_score": null,
"num_lines": null
} |
'''Adapted from Swinner, p: 88'''
import tkinter as tk
def dessiner_cercle(x, y, r, couleur='black'):
    '''Draw a circle of centre (x, y) and radius r in colour *couleur*.'''
    # relies on the module-level `canevas` created at the bottom of the file
    canevas.create_oval(x-r, y-r, x+r, y+r, outline=couleur)
def figure_1():
    '''Draw a target: crosshair axes plus concentric circles.'''
    canevas.delete(tk.ALL)  # wipe any existing drawing
    canevas.create_line(100, 0, 100, 200, fill='blue')
    canevas.create_line(0, 100, 200, 100, fill='blue')
    # radii 15, 30, ..., 90 — same sequence as the original while loop
    for rayon in range(15, 100, 15):
        dessiner_cercle(100, 100, rayon)
def figure_2():
    '''Draw a cartoon face built entirely out of circles.'''
    canevas.delete(tk.ALL)
    face_parts = (
        (100, 100, 80, 'red'),     # face outline
        (70, 70, 15, 'blue'),      # eyes
        (130, 70, 15, 'blue'),
        (70, 70, 5, 'black'),      # pupils
        (130, 70, 5, 'black'),
        (44, 115, 20, 'red'),      # cheeks
        (156, 115, 20, 'red'),
        (100, 95, 15, 'purple'),   # nose
        (100, 145, 30, 'purple'),  # mouth
    )
    for part in face_parts:
        dessiner_cercle(*part)
# Build the window: one canvas plus a button per figure, then enter the
# Tk event loop (blocks until the window is closed).
fenetre = tk.Tk()
canevas = tk.Canvas(fenetre, width=200, height=200, bg='ivory')
canevas.pack()
bouton1 = tk.Button(fenetre, text="cible", command=figure_1)
bouton1.pack(side=tk.LEFT, padx=3, pady=3)
bouton2 = tk.Button(fenetre, text="visage", command=figure_2)
bouton2.pack(side=tk.RIGHT, padx=3, pady=3)
fenetre.mainloop()
| {
"repo_name": "aroberge/exemples_fr",
"path": "gui/deux_dessins_tk.py",
"copies": "1",
"size": "1403",
"license": "cc0-1.0",
"hash": -2407639466751425500,
"line_mean": 28.829787234,
"line_max": 71,
"alpha_frac": 0.5798858773,
"autogenerated": false,
"ratio": 2.5080500894454385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3587935966745438,
"avg_score": null,
"num_lines": null
} |
# Adapted from test_file.py by Daniel Stutzbach
#from __future__ import unicode_literals
import sys
import os
import unittest
from array import array
from weakref import proxy
from test.test_support import (TESTFN, findfile, check_warnings, run_unittest,
make_bad_fd)
from UserList import UserList
import _fileio
class AutoFileTests(unittest.TestCase):
    """Tests of _fileio._FileIO run against a file created fresh per test.

    setUp opens TESTFN for writing; tearDown closes and removes it.
    """
    # file tests for which a test file is automatically set up
    def setUp(self):
        self.f = _fileio._FileIO(TESTFN, 'w')
    def tearDown(self):
        if self.f:
            self.f.close()
        os.remove(TESTFN)
    def testWeakRefs(self):
        # verify weak references
        p = proxy(self.f)
        p.write(bytes(range(10)))
        self.assertEquals(self.f.tell(), p.tell())
        self.f.close()
        self.f = None
        # once the referent is gone, the proxy must raise ReferenceError
        self.assertRaises(ReferenceError, getattr, p, 'tell')
    def testSeekTell(self):
        self.f.write(bytes(bytearray(range(20))))
        self.assertEquals(self.f.tell(), 20)
        self.f.seek(0)
        self.assertEquals(self.f.tell(), 0)
        self.f.seek(10)
        self.assertEquals(self.f.tell(), 10)
        # whence=1: relative to current position
        self.f.seek(5, 1)
        self.assertEquals(self.f.tell(), 15)
        self.f.seek(-5, 1)
        self.assertEquals(self.f.tell(), 10)
        # whence=2: relative to end of file (size 20)
        self.f.seek(-5, 2)
        self.assertEquals(self.f.tell(), 15)
    def testAttributes(self):
        # verify expected attributes exist
        f = self.f
        self.assertEquals(f.mode, "wb")
        self.assertEquals(f.closed, False)
        # verify the attributes are readonly
        for attr in 'mode', 'closed':
            self.assertRaises((AttributeError, TypeError),
                              setattr, f, attr, 'oops')
    def testReadinto(self):
        # verify readinto
        self.f.write(bytes(bytearray([1, 2])))
        self.f.close()
        a = array('b', b'x'*10)
        self.f = _fileio._FileIO(TESTFN, 'r')
        n = self.f.readinto(a)
        self.assertEquals(array('b', [1, 2]), a[:n])
    def testRepr(self):
        self.assertEquals(repr(self.f),
                          "_fileio._FileIO(%d, %s)" % (self.f.fileno(),
                                                       repr(self.f.mode)))
    def testErrors(self):
        f = self.f
        self.assert_(not f.isatty())
        self.assert_(not f.closed)
        #self.assertEquals(f.name, TESTFN)
        self.assertRaises(ValueError, f.read, 10) # Open for reading
        f.close()
        self.assert_(f.closed)
        f = _fileio._FileIO(TESTFN, 'r')
        self.assertRaises(TypeError, f.readinto, "")
        self.assert_(not f.closed)
        f.close()
        self.assert_(f.closed)
    def testMethods(self):
        methods = ['fileno', 'isatty', 'read', 'readinto',
                   'seek', 'tell', 'truncate', 'write', 'seekable',
                   'readable', 'writable']
        if sys.platform.startswith('atheos'):
            methods.remove('truncate')
        self.f.close()
        self.assert_(self.f.closed)
        for methodname in methods:
            method = getattr(self.f, methodname)
            # should raise on closed file
            self.assertRaises(ValueError, method)
    def testOpendir(self):
        # Issue 3703: opening a directory should fill the errno
        # Windows always returns "[Errno 13]: Permission denied
        # Unix calls dircheck() and returns "[Errno 21]: Is a directory"
        try:
            _fileio._FileIO('.', 'r')
        except IOError as e:
            self.assertNotEqual(e.errno, 0)
            self.assertEqual(e.filename, ".")
        else:
            self.fail("Should have raised IOError")
class OtherFileTests(unittest.TestCase):
    """_fileio._FileIO tests that manage their own files (no shared fixture)."""
    def testAbles(self):
        # readable()/writable()/seekable() must reflect the open mode
        try:
            f = _fileio._FileIO(TESTFN, "w")
            self.assertEquals(f.readable(), False)
            self.assertEquals(f.writable(), True)
            self.assertEquals(f.seekable(), True)
            f.close()
            f = _fileio._FileIO(TESTFN, "r")
            self.assertEquals(f.readable(), True)
            self.assertEquals(f.writable(), False)
            self.assertEquals(f.seekable(), True)
            f.close()
            f = _fileio._FileIO(TESTFN, "a+")
            self.assertEquals(f.readable(), True)
            self.assertEquals(f.writable(), True)
            self.assertEquals(f.seekable(), True)
            self.assertEquals(f.isatty(), False)
            f.close()
            if sys.platform != "win32":
                try:
                    f = _fileio._FileIO("/dev/tty", "a")
                except EnvironmentError:
                    # When run in a cron job there just aren't any
                    # ttys, so skip the test. This also handles other
                    # OS'es that don't support /dev/tty.
                    pass
                else:
                    f = _fileio._FileIO("/dev/tty", "a")
                    self.assertEquals(f.readable(), False)
                    self.assertEquals(f.writable(), True)
                    if sys.platform != "darwin" and \
                       not sys.platform.startswith('freebsd') and \
                       not sys.platform.startswith('sunos'):
                        # Somehow /dev/tty appears seekable on some BSDs
                        self.assertEquals(f.seekable(), False)
                    self.assertEquals(f.isatty(), True)
                    f.close()
        finally:
            os.unlink(TESTFN)
    def testModeStrings(self):
        # check invalid mode strings
        for mode in ("", "aU", "wU+", "rw", "rt"):
            try:
                f = _fileio._FileIO(TESTFN, mode)
            except ValueError:
                pass
            else:
                f.close()
                self.fail('%r is an invalid file mode' % mode)
    def testUnicodeOpen(self):
        # verify repr works for unicode too
        f = _fileio._FileIO(str(TESTFN), "w")
        f.close()
        os.unlink(TESTFN)
    def testInvalidFd(self):
        # negative fds are rejected up front; bogus-but-positive fds fail at OS level
        self.assertRaises(ValueError, _fileio._FileIO, -10)
        self.assertRaises(OSError, _fileio._FileIO, make_bad_fd())
    def testBadModeArgument(self):
        # verify that we get a sensible error message for bad mode argument
        bad_mode = "qwerty"
        try:
            f = _fileio._FileIO(TESTFN, bad_mode)
        except ValueError as msg:
            if msg.args[0] != 0:
                s = str(msg)
                # the message should mention the mode, not the filename
                if s.find(TESTFN) != -1 or s.find(bad_mode) == -1:
                    self.fail("bad error message for invalid mode: %s" % s)
            # if msg.args[0] == 0, we're probably on Windows where there may be
            # no obvious way to discover why open() failed.
        else:
            f.close()
            self.fail("no error for invalid mode: %s" % bad_mode)
    def testTruncateOnWindows(self):
        def bug801631():
            # SF bug <http://www.python.org/sf/801631>
            # "file.truncate fault on windows"
            f = _fileio._FileIO(TESTFN, 'w')
            f.write(bytes(bytearray(range(11))))
            f.close()
            f = _fileio._FileIO(TESTFN,'r+')
            data = f.read(5)
            if data != bytes(bytearray(range(5))):
                self.fail("Read on file opened for update failed %r" % data)
            if f.tell() != 5:
                self.fail("File pos after read wrong %d" % f.tell())
            f.truncate()
            if f.tell() != 5:
                self.fail("File pos after ftruncate wrong %d" % f.tell())
            f.close()
            size = os.path.getsize(TESTFN)
            if size != 5:
                self.fail("File size after ftruncate wrong %d" % size)
        try:
            bug801631()
        finally:
            os.unlink(TESTFN)
    def testAppend(self):
        # append mode must add to the end, not overwrite
        try:
            f = open(TESTFN, 'wb')
            f.write(b'spam')
            f.close()
            f = open(TESTFN, 'ab')
            f.write(b'eggs')
            f.close()
            f = open(TESTFN, 'rb')
            d = f.read()
            f.close()
            self.assertEqual(d, b'spameggs')
        finally:
            try:
                os.unlink(TESTFN)
            except:
                pass
    def testInvalidInit(self):
        self.assertRaises(TypeError, _fileio._FileIO, "1", 0, 0)
    def testWarnings(self):
        # constructor failures must not emit spurious warnings
        with check_warnings() as w:
            self.assertEqual(w.warnings, [])
            self.assertRaises(TypeError, _fileio._FileIO, [])
            self.assertEqual(w.warnings, [])
            self.assertRaises(ValueError, _fileio._FileIO, "/some/invalid/name", "rt")
            self.assertEqual(w.warnings, [])
def test_main():
    """Run both test classes, then unconditionally clean up TESTFN.

    Historically, these tests have been sloppy about removing TESTFN,
    so it is deleted here no matter how the run ends.
    """
    try:
        run_unittest(AutoFileTests, OtherFileTests)
    finally:
        if os.path.exists(TESTFN):
            os.unlink(TESTFN)
if __name__ == '__main__':
    test_main()
| {
"repo_name": "kangkot/arangodb",
"path": "3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_fileio.py",
"copies": "48",
"size": "9024",
"license": "apache-2.0",
"hash": -181123358207079000,
"line_mean": 32.5464684015,
"line_max": 86,
"alpha_frac": 0.5244902482,
"autogenerated": false,
"ratio": 4.012449977767897,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# Adapted from test_file.py by Daniel Stutzbach
from __future__ import unicode_literals
import sys
import os
import errno
import unittest
from array import array
from weakref import proxy
from functools import wraps
from UserList import UserList
from test.test_support import TESTFN, check_warnings, run_unittest, make_bad_fd
from test.test_support import py3k_bytes as bytes, cpython_only, check_py3k_warnings
from test.script_helper import run_python
from _io import FileIO as _FileIO
class AutoFileTests(unittest.TestCase):
    """Tests of _io.FileIO run against a file created fresh per test.

    setUp opens TESTFN for writing; tearDown closes and removes it.
    The ClosedFD/ClosedFDRaises decorators defined below re-run methods
    after the underlying file descriptor has been closed out from under
    the FileIO object.
    """
    # file tests for which a test file is automatically set up
    def setUp(self):
        self.f = _FileIO(TESTFN, 'w')
    def tearDown(self):
        if self.f:
            self.f.close()
        os.remove(TESTFN)
    def testWeakRefs(self):
        # verify weak references
        p = proxy(self.f)
        p.write(bytes(range(10)))
        self.assertEqual(self.f.tell(), p.tell())
        self.f.close()
        self.f = None
        # once the referent is gone, the proxy must raise ReferenceError
        self.assertRaises(ReferenceError, getattr, p, 'tell')
    def testSeekTell(self):
        self.f.write(bytes(range(20)))
        self.assertEqual(self.f.tell(), 20)
        self.f.seek(0)
        self.assertEqual(self.f.tell(), 0)
        self.f.seek(10)
        self.assertEqual(self.f.tell(), 10)
        # whence=1: relative to current position
        self.f.seek(5, 1)
        self.assertEqual(self.f.tell(), 15)
        self.f.seek(-5, 1)
        self.assertEqual(self.f.tell(), 10)
        # whence=2: relative to end of file (size 20)
        self.f.seek(-5, 2)
        self.assertEqual(self.f.tell(), 15)
    def testAttributes(self):
        # verify expected attributes exist
        f = self.f
        self.assertEqual(f.mode, "wb")
        self.assertEqual(f.closed, False)
        # verify the attributes are readonly
        for attr in 'mode', 'closed':
            self.assertRaises((AttributeError, TypeError),
                              setattr, f, attr, 'oops')
    def testReadinto(self):
        # verify readinto
        self.f.write(b"\x01\x02")
        self.f.close()
        a = array(b'b', b'x'*10)
        self.f = _FileIO(TESTFN, 'r')
        n = self.f.readinto(a)
        self.assertEqual(array(b'b', [1, 2]), a[:n])
    def testWritelinesList(self):
        l = [b'123', b'456']
        self.f.writelines(l)
        self.f.close()
        self.f = _FileIO(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'123456')
    def testWritelinesUserList(self):
        # writelines must also accept list-like sequence objects
        l = UserList([b'123', b'456'])
        self.f.writelines(l)
        self.f.close()
        self.f = _FileIO(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'123456')
    def testWritelinesError(self):
        self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
        self.assertRaises(TypeError, self.f.writelines, None)
    def test_none_args(self):
        # read/readline/readlines treat None like "no size limit"
        self.f.write(b"hi\nbye\nabc")
        self.f.close()
        self.f = _FileIO(TESTFN, 'r')
        self.assertEqual(self.f.read(None), b"hi\nbye\nabc")
        self.f.seek(0)
        self.assertEqual(self.f.readline(None), b"hi\n")
        self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"])
    def testWriteUnicode(self):
        with check_py3k_warnings():
            self.f.write(u'')
    def testRepr(self):
        self.assertEqual(repr(self.f), "<_io.FileIO name=%r mode='%s'>"
                                       % (self.f.name, self.f.mode))
        del self.f.name
        # without a name attribute, repr falls back to the fd
        self.assertEqual(repr(self.f), "<_io.FileIO fd=%r mode='%s'>"
                                       % (self.f.fileno(), self.f.mode))
        self.f.close()
        self.assertEqual(repr(self.f), "<_io.FileIO [closed]>")
    def testErrors(self):
        f = self.f
        self.assertFalse(f.isatty())
        self.assertFalse(f.closed)
        #self.assertEqual(f.name, TESTFN)
        self.assertRaises(ValueError, f.read, 10) # Open for reading
        f.close()
        self.assertTrue(f.closed)
        f = _FileIO(TESTFN, 'r')
        self.assertRaises(TypeError, f.readinto, "")
        self.assertFalse(f.closed)
        f.close()
        self.assertTrue(f.closed)
    def testMethods(self):
        methods = ['fileno', 'isatty', 'seekable', 'readable', 'writable',
                   'read', 'readall', 'readline', 'readlines',
                   'tell', 'truncate', 'flush']
        if sys.platform.startswith('atheos'):
            methods.remove('truncate')
        self.f.close()
        self.assertTrue(self.f.closed)
        for methodname in methods:
            method = getattr(self.f, methodname)
            # should raise on closed file
            self.assertRaises(ValueError, method)
        self.assertRaises(ValueError, self.f.readinto) # XXX should be TypeError?
        self.assertRaises(ValueError, self.f.readinto, bytearray(1))
        self.assertRaises(ValueError, self.f.seek)
        self.assertRaises(ValueError, self.f.seek, 0)
        self.assertRaises(ValueError, self.f.write)
        self.assertRaises(ValueError, self.f.write, b'')
        self.assertRaises(TypeError, self.f.writelines)
        self.assertRaises(ValueError, self.f.writelines, b'')
    def testOpendir(self):
        # Issue 3703: opening a directory should fill the errno
        # Windows always returns "[Errno 13]: Permission denied
        # Unix calls dircheck() and returns "[Errno 21]: Is a directory"
        try:
            _FileIO('.', 'r')
        except IOError as e:
            self.assertNotEqual(e.errno, 0)
            self.assertEqual(e.filename, ".")
        else:
            self.fail("Should have raised IOError")
    @unittest.skipIf(os.name == 'nt', "test only works on a POSIX-like system")
    def testOpenDirFD(self):
        fd = os.open('.', os.O_RDONLY)
        with self.assertRaises(IOError) as cm:
            _FileIO(fd, 'r')
        os.close(fd)
        self.assertEqual(cm.exception.errno, errno.EISDIR)
    #A set of functions testing that we get expected behaviour if someone has
    #manually closed the internal file descriptor.  First, a decorator:
    def ClosedFD(func):
        # run the test body after closing the fd; the operation must not raise
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            finally:
                try:
                    self.f.close()
                except IOError:
                    pass
        return wrapper
    def ClosedFDRaises(func):
        # run the test body after closing the fd; expect IOError with EBADF
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            except IOError as e:
                self.assertEqual(e.errno, errno.EBADF)
            else:
                self.fail("Should have raised IOError")
            finally:
                try:
                    self.f.close()
                except IOError:
                    pass
        return wrapper
    @ClosedFDRaises
    def testErrnoOnClose(self, f):
        f.close()
    @ClosedFDRaises
    def testErrnoOnClosedWrite(self, f):
        f.write(b'a')
    @ClosedFDRaises
    def testErrnoOnClosedSeek(self, f):
        f.seek(0)
    @ClosedFDRaises
    def testErrnoOnClosedTell(self, f):
        f.tell()
    @ClosedFDRaises
    def testErrnoOnClosedTruncate(self, f):
        f.truncate(0)
    @ClosedFD
    def testErrnoOnClosedSeekable(self, f):
        f.seekable()
    @ClosedFD
    def testErrnoOnClosedReadable(self, f):
        f.readable()
    @ClosedFD
    def testErrnoOnClosedWritable(self, f):
        f.writable()
    @ClosedFD
    def testErrnoOnClosedFileno(self, f):
        f.fileno()
    @ClosedFD
    def testErrnoOnClosedIsatty(self, f):
        self.assertEqual(f.isatty(), False)
    def ReopenForRead(self):
        # helper: reopen TESTFN for reading and close its fd behind its back
        try:
            self.f.close()
        except IOError:
            pass
        self.f = _FileIO(TESTFN, 'r')
        os.close(self.f.fileno())
        return self.f
    @ClosedFDRaises
    def testErrnoOnClosedRead(self, f):
        f = self.ReopenForRead()
        f.read(1)
    @ClosedFDRaises
    def testErrnoOnClosedReadall(self, f):
        f = self.ReopenForRead()
        f.readall()
    @ClosedFDRaises
    def testErrnoOnClosedReadinto(self, f):
        f = self.ReopenForRead()
        a = array(b'b', b'x'*10)
        f.readinto(a)
class OtherFileTests(unittest.TestCase):
    """FileIO tests that create and clean up their own files (no setUp file)."""

    def testAbles(self):
        # readable()/writable()/seekable() must reflect the open mode.
        try:
            f = _FileIO(TESTFN, "w")
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            f.close()

            f = _FileIO(TESTFN, "r")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            f.close()

            f = _FileIO(TESTFN, "a+")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.assertEqual(f.isatty(), False)
            f.close()
        finally:
            os.unlink(TESTFN)

    @unittest.skipIf(sys.platform == 'win32', 'no ttys on Windows')
    def testAblesOnTTY(self):
        try:
            f = _FileIO("/dev/tty", "a")
        except EnvironmentError:
            # When run in a cron job there just aren't any
            # ttys, so skip the test. This also handles other
            # OS'es that don't support /dev/tty.
            self.skipTest('need /dev/tty')
        else:
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            if sys.platform != "darwin" and \
               'bsd' not in sys.platform and \
               not sys.platform.startswith(('sunos', 'aix')):
                # Somehow /dev/tty appears seekable on some BSDs
                self.assertEqual(f.seekable(), False)
            self.assertEqual(f.isatty(), True)
            f.close()

    def testInvalidModeStrings(self):
        # check invalid mode strings
        for mode in ("", "aU", "wU+", "rw", "rt"):
            try:
                f = _FileIO(TESTFN, mode)
            except ValueError:
                pass
            else:
                f.close()
                self.fail('%r is an invalid file mode' % mode)

    def testModeStrings(self):
        # test that the mode attribute is correct for various mode strings
        # given as init args
        try:
            for modes in [('w', 'wb'), ('wb', 'wb'), ('wb+', 'rb+'),
                          ('w+b', 'rb+'), ('a', 'ab'), ('ab', 'ab'),
                          ('ab+', 'ab+'), ('a+b', 'ab+'), ('r', 'rb'),
                          ('rb', 'rb'), ('rb+', 'rb+'), ('r+b', 'rb+')]:
                # read modes are last so that TESTFN will exist first
                with _FileIO(TESTFN, modes[0]) as f:
                    self.assertEqual(f.mode, modes[1])
        finally:
            if os.path.exists(TESTFN):
                os.unlink(TESTFN)

    def testUnicodeOpen(self):
        # verify repr works for unicode too
        f = _FileIO(str(TESTFN), "w")
        f.close()
        os.unlink(TESTFN)

    def testBytesOpen(self):
        # Opening a bytes filename
        try:
            fn = TESTFN.encode("ascii")
        except UnicodeEncodeError:
            self.skipTest('could not encode %r to ascii' % TESTFN)
        f = _FileIO(fn, "w")
        try:
            f.write(b"abc")
            f.close()
            with open(TESTFN, "rb") as f:
                self.assertEqual(f.read(), b"abc")
        finally:
            os.unlink(TESTFN)

    def testConstructorHandlesNULChars(self):
        # Embedded NUL bytes in a filename must be rejected, str or bytes.
        fn_with_NUL = 'foo\0bar'
        self.assertRaises(TypeError, _FileIO, fn_with_NUL, 'w')
        self.assertRaises(TypeError, _FileIO, fn_with_NUL.encode('ascii'), 'w')

    def testInvalidFd(self):
        # Negative and bogus descriptors must be rejected up front.
        self.assertRaises(ValueError, _FileIO, -10)
        self.assertRaises(OSError, _FileIO, make_bad_fd())
        if sys.platform == 'win32':
            import msvcrt
            self.assertRaises(IOError, msvcrt.get_osfhandle, make_bad_fd())

    @cpython_only
    def testInvalidFd_overflow(self):
        # Issue 15989
        import _testcapi
        self.assertRaises(TypeError, _FileIO, _testcapi.INT_MAX + 1)
        self.assertRaises(TypeError, _FileIO, _testcapi.INT_MIN - 1)

    def testBadModeArgument(self):
        # verify that we get a sensible error message for bad mode argument
        bad_mode = "qwerty"
        try:
            f = _FileIO(TESTFN, bad_mode)
        except ValueError as msg:
            if msg.args[0] != 0:
                s = str(msg)
                if TESTFN in s or bad_mode not in s:
                    self.fail("bad error message for invalid mode: %s" % s)
            # if msg.args[0] == 0, we're probably on Windows where there may be
            # no obvious way to discover why open() failed.
        else:
            f.close()
            self.fail("no error for invalid mode: %s" % bad_mode)

    def testTruncate(self):
        # truncate() must not move the file position (asserted via tell()).
        f = _FileIO(TESTFN, 'w')
        f.write(bytes(bytearray(range(10))))
        self.assertEqual(f.tell(), 10)
        f.truncate(5)
        self.assertEqual(f.tell(), 10)
        self.assertEqual(f.seek(0, os.SEEK_END), 5)
        f.truncate(15)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.seek(0, os.SEEK_END), 15)
        f.close()

    def testTruncateOnWindows(self):
        def bug801631():
            # SF bug <http://www.python.org/sf/801631>
            # "file.truncate fault on windows"
            f = _FileIO(TESTFN, 'w')
            f.write(bytes(range(11)))
            f.close()

            f = _FileIO(TESTFN,'r+')
            data = f.read(5)
            if data != bytes(range(5)):
                self.fail("Read on file opened for update failed %r" % data)
            if f.tell() != 5:
                self.fail("File pos after read wrong %d" % f.tell())

            f.truncate()
            if f.tell() != 5:
                self.fail("File pos after ftruncate wrong %d" % f.tell())

            f.close()
            size = os.path.getsize(TESTFN)
            if size != 5:
                self.fail("File size after ftruncate wrong %d" % size)
        try:
            bug801631()
        finally:
            os.unlink(TESTFN)

    def testAppend(self):
        # Append mode must preserve existing content and add at the end.
        try:
            f = open(TESTFN, 'wb')
            f.write(b'spam')
            f.close()
            f = open(TESTFN, 'ab')
            f.write(b'eggs')
            f.close()
            f = open(TESTFN, 'rb')
            d = f.read()
            f.close()
            self.assertEqual(d, b'spameggs')
        finally:
            try:
                os.unlink(TESTFN)
            except:
                pass

    def testInvalidInit(self):
        # A mode argument of the wrong type must raise TypeError.
        self.assertRaises(TypeError, _FileIO, "1", 0, 0)

    def testWarnings(self):
        # Constructor failures must not emit spurious warnings.
        with check_warnings(quiet=True) as w:
            self.assertEqual(w.warnings, [])
            self.assertRaises(TypeError, _FileIO, [])
            self.assertEqual(w.warnings, [])
            self.assertRaises(ValueError, _FileIO, "/some/invalid/name", "rt")
            self.assertEqual(w.warnings, [])

    def test_surrogates(self):
        # Issue #8438: try to open a filename containing surrogates.
        # It should either fail because the file doesn't exist or the filename
        # can't be represented using the filesystem encoding, but not because
        # of a LookupError for the error handler "surrogateescape".
        filename = u'\udc80.txt'
        try:
            with _FileIO(filename):
                pass
        except (UnicodeEncodeError, IOError):
            pass
        # Spawn a separate Python process with a different "file system
        # default encoding", to exercise this further.
        env = dict(os.environ)
        env[b'LC_CTYPE'] = b'C'
        _, out = run_python('-c', 'import _io; _io.FileIO(%r)' % filename, env=env)
        if ('UnicodeEncodeError' not in out and not
            ( ('IOError: [Errno 2] No such file or directory' in out) or
              ('IOError: [Errno 22] Invalid argument' in out) ) ):
            self.fail('Bad output: %r' % out)

    def testUnclosedFDOnException(self):
        # If __init__ dies partway through (here: while setting .name), the
        # already-opened fd must not be leaked or double-closed.
        class MyException(Exception): pass
        class MyFileIO(_FileIO):
            def __setattr__(self, name, value):
                if name == "name":
                    raise MyException("blocked setting name")
                return super(MyFileIO, self).__setattr__(name, value)
        fd = os.open(__file__, os.O_RDONLY)
        self.assertRaises(MyException, MyFileIO, fd)
        os.close(fd) # should not raise OSError(EBADF)
def test_main():
    """Run the FileIO suites, guaranteeing TESTFN is removed afterwards."""
    # Historically, these tests have been sloppy about removing TESTFN,
    # so it is cleaned up here no matter what.
    suites = (AutoFileTests, OtherFileTests)
    try:
        run_unittest(*suites)
    finally:
        if os.path.exists(TESTFN):
            os.unlink(TESTFN)
# Allow running this test module directly.
if __name__ == '__main__':
    test_main()
| {
"repo_name": "IronLanguages/ironpython2",
"path": "Src/StdLib/Lib/test/test_fileio.py",
"copies": "2",
"size": "17109",
"license": "apache-2.0",
"hash": 7781314735448295000,
"line_mean": 32.2213592233,
"line_max": 84,
"alpha_frac": 0.5481910106,
"autogenerated": false,
"ratio": 3.8525106957892366,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010057585661698398,
"num_lines": 515
} |
# Adapted from test_file.py by Daniel Stutzbach
import sys
import os
import errno
import unittest
from array import array
from weakref import proxy
from functools import wraps
from test.support import TESTFN, check_warnings, run_unittest, make_bad_fd
from test.support import gc_collect
from _io import FileIO as _FileIO
class AutoFileTests(unittest.TestCase):
    """FileIO tests for which a test file is automatically set up.

    setUp opens TESTFN for writing into self.f; tearDown closes it and
    removes TESTFN.
    """

    def setUp(self):
        self.f = _FileIO(TESTFN, 'w')

    def tearDown(self):
        if self.f:
            self.f.close()
        os.remove(TESTFN)

    def testWeakRefs(self):
        # verify weak references
        p = proxy(self.f)
        p.write(bytes(range(10)))
        self.assertEqual(self.f.tell(), p.tell())
        self.f.close()
        self.f = None
        # gc_collect() needed for implementations without reference counting.
        gc_collect()
        self.assertRaises(ReferenceError, getattr, p, 'tell')

    def testSeekTell(self):
        # Exercise all three whence values: 0 (start), 1 (relative), 2 (end).
        self.f.write(bytes(range(20)))
        self.assertEqual(self.f.tell(), 20)
        self.f.seek(0)
        self.assertEqual(self.f.tell(), 0)
        self.f.seek(10)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(5, 1)
        self.assertEqual(self.f.tell(), 15)
        self.f.seek(-5, 1)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(-5, 2)
        self.assertEqual(self.f.tell(), 15)

    def testAttributes(self):
        # verify expected attributes exist
        f = self.f
        self.assertEqual(f.mode, "wb")
        self.assertEqual(f.closed, False)

        # verify the attributes are readonly
        for attr in 'mode', 'closed':
            self.assertRaises((AttributeError, TypeError),
                              setattr, f, attr, 'oops')

    def testReadinto(self):
        # verify readinto
        self.f.write(bytes([1, 2]))
        self.f.close()
        a = array('b', b'x'*10)
        self.f = _FileIO(TESTFN, 'r')
        n = self.f.readinto(a)
        self.assertEqual(array('b', [1, 2]), a[:n])

    def test_none_args(self):
        # read/readline/readlines must treat None as "no limit".
        self.f.write(b"hi\nbye\nabc")
        self.f.close()
        self.f = _FileIO(TESTFN, 'r')
        self.assertEqual(self.f.read(None), b"hi\nbye\nabc")
        self.f.seek(0)
        self.assertEqual(self.f.readline(None), b"hi\n")
        self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"])

    def test_reject(self):
        # Writing text (str) to a binary FileIO must raise TypeError.
        self.assertRaises(TypeError, self.f.write, "Hello!")

    def testRepr(self):
        # repr falls back from name to fd once .name is deleted,
        # and to "[closed]" after close().
        self.assertEqual(repr(self.f), "<_io.FileIO name=%r mode=%r>"
                         % (self.f.name, self.f.mode))
        del self.f.name
        self.assertEqual(repr(self.f), "<_io.FileIO fd=%r mode=%r>"
                         % (self.f.fileno(), self.f.mode))
        self.f.close()
        self.assertEqual(repr(self.f), "<_io.FileIO [closed]>")

    def testErrors(self):
        f = self.f
        self.assertTrue(not f.isatty())
        self.assertTrue(not f.closed)
        #self.assertEqual(f.name, TESTFN)
        self.assertRaises(ValueError, f.read, 10) # Open for reading
        f.close()
        self.assertTrue(f.closed)
        f = _FileIO(TESTFN, 'r')
        self.assertRaises(TypeError, f.readinto, "")
        self.assertTrue(not f.closed)
        f.close()
        self.assertTrue(f.closed)

    def testMethods(self):
        # Every method must raise ValueError once the file is closed.
        methods = ['fileno', 'isatty', 'read',
                   'tell', 'truncate', 'seekable',
                   'readable', 'writable']

        self.f.close()
        self.assertTrue(self.f.closed)

        for methodname in methods:
            method = getattr(self.f, methodname)
            # should raise on closed file
            self.assertRaises(ValueError, method)

        # methods with one argument
        self.assertRaises(ValueError, self.f.readinto, 0)
        self.assertRaises(ValueError, self.f.write, 0)
        self.assertRaises(ValueError, self.f.seek, 0)

    def testOpendir(self):
        # Issue 3703: opening a directory should fill the errno
        # Windows always returns "[Errno 13]: Permission denied
        # Unix calls dircheck() and returns "[Errno 21]: Is a directory"
        try:
            _FileIO('.', 'r')
        except IOError as e:
            self.assertNotEqual(e.errno, 0)
            self.assertEqual(e.filename, ".")
        else:
            self.fail("Should have raised IOError")

    #A set of functions testing that we get expected behaviour if someone has
    #manually closed the internal file descriptor. First, a decorator:
    def ClosedFD(func):
        # Wraps a test method: closes self.f's fd first, then runs the test,
        # then best-effort closes self.f so tearDown doesn't blow up.
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            finally:
                try:
                    self.f.close()
                except IOError:
                    pass
        return wrapper

    def ClosedFDRaises(func):
        # Like ClosedFD, but additionally requires the wrapped test body to
        # raise IOError with errno EBADF.
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            except IOError as e:
                self.assertEqual(e.errno, errno.EBADF)
            else:
                self.fail("Should have raised IOError")
            finally:
                try:
                    self.f.close()
                except IOError:
                    pass
        return wrapper

    @ClosedFDRaises
    def testErrnoOnClose(self, f):
        f.close()

    @ClosedFDRaises
    def testErrnoOnClosedWrite(self, f):
        f.write(b'a')

    @ClosedFDRaises
    def testErrnoOnClosedSeek(self, f):
        f.seek(0)

    @ClosedFDRaises
    def testErrnoOnClosedTell(self, f):
        f.tell()

    @ClosedFDRaises
    def testErrnoOnClosedTruncate(self, f):
        f.truncate(0)

    @ClosedFD
    def testErrnoOnClosedSeekable(self, f):
        f.seekable()

    @ClosedFD
    def testErrnoOnClosedReadable(self, f):
        f.readable()

    @ClosedFD
    def testErrnoOnClosedWritable(self, f):
        f.writable()

    @ClosedFD
    def testErrnoOnClosedFileno(self, f):
        f.fileno()

    @ClosedFD
    def testErrnoOnClosedIsatty(self, f):
        self.assertEqual(f.isatty(), False)

    def ReopenForRead(self):
        # Helper: reopen TESTFN read-only with its fd already closed.
        try:
            self.f.close()
        except IOError:
            pass
        self.f = _FileIO(TESTFN, 'r')
        os.close(self.f.fileno())
        return self.f

    @ClosedFDRaises
    def testErrnoOnClosedRead(self, f):
        f = self.ReopenForRead()
        f.read(1)

    @ClosedFDRaises
    def testErrnoOnClosedReadall(self, f):
        f = self.ReopenForRead()
        f.readall()

    @ClosedFDRaises
    def testErrnoOnClosedReadinto(self, f):
        f = self.ReopenForRead()
        a = array('b', b'x'*10)
        f.readinto(a)
class OtherFileTests(unittest.TestCase):
    """FileIO tests that create and clean up their own files (no setUp file)."""

    def testAbles(self):
        # readable()/writable()/seekable() must reflect the open mode.
        try:
            f = _FileIO(TESTFN, "w")
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            f.close()

            f = _FileIO(TESTFN, "r")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            f.close()

            f = _FileIO(TESTFN, "a+")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.assertEqual(f.isatty(), False)
            f.close()

            if sys.platform != "win32":
                try:
                    f = _FileIO("/dev/tty", "a")
                except EnvironmentError:
                    # When run in a cron job there just aren't any
                    # ttys, so skip the test. This also handles other
                    # OS'es that don't support /dev/tty.
                    pass
                else:
                    self.assertEqual(f.readable(), False)
                    self.assertEqual(f.writable(), True)
                    if sys.platform != "darwin" and \
                       'bsd' not in sys.platform and \
                       not sys.platform.startswith('sunos'):
                        # Somehow /dev/tty appears seekable on some BSDs
                        self.assertEqual(f.seekable(), False)
                    self.assertEqual(f.isatty(), True)
                    f.close()
        finally:
            os.unlink(TESTFN)

    def testModeStrings(self):
        # check invalid mode strings
        for mode in ("", "aU", "wU+", "rw", "rt"):
            try:
                f = _FileIO(TESTFN, mode)
            except ValueError:
                pass
            else:
                f.close()
                self.fail('%r is an invalid file mode' % mode)

    def testUnicodeOpen(self):
        # verify repr works for unicode too
        f = _FileIO(str(TESTFN), "w")
        f.close()
        os.unlink(TESTFN)

    def testBytesOpen(self):
        # Opening a bytes filename
        try:
            fn = TESTFN.encode("ascii")
        except UnicodeEncodeError:
            # Skip test
            return
        f = _FileIO(fn, "w")
        try:
            f.write(b"abc")
            f.close()
            with open(TESTFN, "rb") as f:
                self.assertEqual(f.read(), b"abc")
        finally:
            os.unlink(TESTFN)

    def testConstructorHandlesNULChars(self):
        # Embedded NUL bytes in a filename must be rejected, str or bytes.
        fn_with_NUL = 'foo\0bar'
        self.assertRaises(TypeError, _FileIO, fn_with_NUL, 'w')
        self.assertRaises(TypeError, _FileIO, bytes(fn_with_NUL, 'ascii'), 'w')

    def testInvalidFd(self):
        # Negative and bogus descriptors must be rejected up front.
        self.assertRaises(ValueError, _FileIO, -10)
        self.assertRaises(OSError, _FileIO, make_bad_fd())
        if sys.platform == 'win32':
            import msvcrt
            self.assertRaises(IOError, msvcrt.get_osfhandle, make_bad_fd())

    def testBadModeArgument(self):
        # verify that we get a sensible error message for bad mode argument
        bad_mode = "qwerty"
        try:
            f = _FileIO(TESTFN, bad_mode)
        except ValueError as msg:
            if msg.args[0] != 0:
                s = str(msg)
                if TESTFN in s or bad_mode not in s:
                    self.fail("bad error message for invalid mode: %s" % s)
            # if msg.args[0] == 0, we're probably on Windows where there may be
            # no obvious way to discover why open() failed.
        else:
            f.close()
            self.fail("no error for invalid mode: %s" % bad_mode)

    def testTruncate(self):
        # truncate() must not move the file position (asserted via tell()).
        f = _FileIO(TESTFN, 'w')
        f.write(bytes(bytearray(range(10))))
        self.assertEqual(f.tell(), 10)
        f.truncate(5)
        self.assertEqual(f.tell(), 10)
        self.assertEqual(f.seek(0, os.SEEK_END), 5)
        f.truncate(15)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.seek(0, os.SEEK_END), 15)
        f.close()

    def testTruncateOnWindows(self):
        def bug801631():
            # SF bug <http://www.python.org/sf/801631>
            # "file.truncate fault on windows"
            f = _FileIO(TESTFN, 'w')
            f.write(bytes(range(11)))
            f.close()

            f = _FileIO(TESTFN,'r+')
            data = f.read(5)
            if data != bytes(range(5)):
                self.fail("Read on file opened for update failed %r" % data)
            if f.tell() != 5:
                self.fail("File pos after read wrong %d" % f.tell())

            f.truncate()
            if f.tell() != 5:
                self.fail("File pos after ftruncate wrong %d" % f.tell())

            f.close()
            size = os.path.getsize(TESTFN)
            if size != 5:
                self.fail("File size after ftruncate wrong %d" % size)
        try:
            bug801631()
        finally:
            os.unlink(TESTFN)

    def testAppend(self):
        # Append mode must preserve existing content and add at the end.
        try:
            f = open(TESTFN, 'wb')
            f.write(b'spam')
            f.close()
            f = open(TESTFN, 'ab')
            f.write(b'eggs')
            f.close()
            f = open(TESTFN, 'rb')
            d = f.read()
            f.close()
            self.assertEqual(d, b'spameggs')
        finally:
            try:
                os.unlink(TESTFN)
            except:
                pass

    def testInvalidInit(self):
        # A mode argument of the wrong type must raise TypeError.
        self.assertRaises(TypeError, _FileIO, "1", 0, 0)

    def testWarnings(self):
        # Constructor failures must not emit spurious warnings.
        with check_warnings(quiet=True) as w:
            self.assertEqual(w.warnings, [])
            self.assertRaises(TypeError, _FileIO, [])
            self.assertEqual(w.warnings, [])
            self.assertRaises(ValueError, _FileIO, "/some/invalid/name", "rt")
            self.assertEqual(w.warnings, [])
def test_main():
    """Run the FileIO suites, guaranteeing TESTFN is removed afterwards."""
    # Historically, these tests have been sloppy about removing TESTFN,
    # so it is cleaned up here no matter what.
    suites = (AutoFileTests, OtherFileTests)
    try:
        run_unittest(*suites)
    finally:
        if os.path.exists(TESTFN):
            os.unlink(TESTFN)
# Allow running this test module directly.
if __name__ == '__main__':
    test_main()
| {
"repo_name": "wdv4758h/ZipPy",
"path": "lib-python/3/test/test_fileio.py",
"copies": "1",
"size": "13190",
"license": "bsd-3-clause",
"hash": 4190823416583478000,
"line_mean": 30.1820330969,
"line_max": 79,
"alpha_frac": 0.5328278999,
"autogenerated": false,
"ratio": 3.9209274673008325,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4953755367200833,
"avg_score": null,
"num_lines": null
} |
# Adapted from test_file.py by Daniel Stutzbach
import sys
import os
import io
import errno
import unittest
from array import array
from weakref import proxy
from functools import wraps
from test.support import TESTFN, check_warnings, run_unittest, make_bad_fd, cpython_only
from collections import UserList
import _io # C implementation of io
import _pyio # Python implementation of io
class AutoFileTests:
    """File tests for which a test file is automatically set up.

    Mixin: concrete subclasses must provide ``FileIO`` (the class under
    test) and ``modulename`` (used in the repr checks) — see
    CAutoFileTests / PyAutoFileTests below.
    """

    def setUp(self):
        self.f = self.FileIO(TESTFN, 'w')

    def tearDown(self):
        if self.f:
            self.f.close()
        os.remove(TESTFN)

    def testWeakRefs(self):
        # verify weak references
        p = proxy(self.f)
        p.write(bytes(range(10)))
        self.assertEqual(self.f.tell(), p.tell())
        self.f.close()
        self.f = None
        self.assertRaises(ReferenceError, getattr, p, 'tell')

    def testSeekTell(self):
        # Exercise all three whence values: 0 (start), 1 (relative), 2 (end).
        self.f.write(bytes(range(20)))
        self.assertEqual(self.f.tell(), 20)
        self.f.seek(0)
        self.assertEqual(self.f.tell(), 0)
        self.f.seek(10)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(5, 1)
        self.assertEqual(self.f.tell(), 15)
        self.f.seek(-5, 1)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(-5, 2)
        self.assertEqual(self.f.tell(), 15)

    def testAttributes(self):
        # verify expected attributes exist
        f = self.f
        self.assertEqual(f.mode, "wb")
        self.assertEqual(f.closed, False)

        # verify the attributes are readonly
        for attr in 'mode', 'closed':
            self.assertRaises((AttributeError, TypeError),
                              setattr, f, attr, 'oops')

    def testBlksize(self):
        # test private _blksize attribute
        blksize = io.DEFAULT_BUFFER_SIZE
        # try to get preferred blksize from stat.st_blksize, if available
        if hasattr(os, 'fstat'):
            fst = os.fstat(self.f.fileno())
            blksize = getattr(fst, 'st_blksize', blksize)
        self.assertEqual(self.f._blksize, blksize)

    # verify readinto
    def testReadintoByteArray(self):
        self.f.write(bytes([1, 2, 0, 255]))
        self.f.close()

        ba = bytearray(b'abcdefgh')
        with self.FileIO(TESTFN, 'r') as f:
            n = f.readinto(ba)
        self.assertEqual(ba, b'\x01\x02\x00\xffefgh')
        self.assertEqual(n, 4)

    # NOTE(review): the leading underscore keeps unittest from collecting
    # this test; presumably disabled deliberately in this copy — confirm.
    def _testReadintoMemoryview(self):
        self.f.write(bytes([1, 2, 0, 255]))
        self.f.close()

        m = memoryview(bytearray(b'abcdefgh'))
        with self.FileIO(TESTFN, 'r') as f:
            n = f.readinto(m)
        self.assertEqual(m, b'\x01\x02\x00\xffefgh')
        self.assertEqual(n, 4)

        m = memoryview(bytearray(b'abcdefgh')).cast('H', shape=[2, 2])
        with self.FileIO(TESTFN, 'r') as f:
            n = f.readinto(m)
        self.assertEqual(bytes(m), b'\x01\x02\x00\xffefgh')
        self.assertEqual(n, 4)

    # NOTE(review): underscore-disabled as well — see above.
    def _testReadintoArray(self):
        self.f.write(bytes([1, 2, 0, 255]))
        self.f.close()

        a = array('B', b'abcdefgh')
        with self.FileIO(TESTFN, 'r') as f:
            n = f.readinto(a)
        self.assertEqual(a, array('B', [1, 2, 0, 255, 101, 102, 103, 104]))
        self.assertEqual(n, 4)

        a = array('b', b'abcdefgh')
        with self.FileIO(TESTFN, 'r') as f:
            n = f.readinto(a)
        self.assertEqual(a, array('b', [1, 2, 0, -1, 101, 102, 103, 104]))
        self.assertEqual(n, 4)

        a = array('I', b'abcdefgh')
        with self.FileIO(TESTFN, 'r') as f:
            n = f.readinto(a)
        self.assertEqual(a, array('I', b'\x01\x02\x00\xffefgh'))
        self.assertEqual(n, 4)

    def testWritelinesList(self):
        l = [b'123', b'456']
        self.f.writelines(l)
        self.f.close()
        self.f = self.FileIO(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'123456')

    def testWritelinesUserList(self):
        # writelines must accept any iterable, not just list.
        l = UserList([b'123', b'456'])
        self.f.writelines(l)
        self.f.close()
        self.f = self.FileIO(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'123456')

    def testWritelinesError(self):
        self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
        self.assertRaises(TypeError, self.f.writelines, None)
        self.assertRaises(TypeError, self.f.writelines, "abc")

    def test_none_args(self):
        # read/readline/readlines must treat None as "no limit".
        self.f.write(b"hi\nbye\nabc")
        self.f.close()
        self.f = self.FileIO(TESTFN, 'r')
        self.assertEqual(self.f.read(None), b"hi\nbye\nabc")
        self.f.seek(0)
        self.assertEqual(self.f.readline(None), b"hi\n")
        self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"])

    def test_reject(self):
        # Writing text (str) to a binary FileIO must raise TypeError.
        self.assertRaises(TypeError, self.f.write, "Hello!")

    def testRepr(self):
        # repr falls back from name to fd once .name is deleted,
        # and to "[closed]" after close().
        self.assertEqual(repr(self.f),
                         "<%s.FileIO name=%r mode=%r closefd=True>" %
                         (self.modulename, self.f.name, self.f.mode))
        del self.f.name
        self.assertEqual(repr(self.f),
                         "<%s.FileIO fd=%r mode=%r closefd=True>" %
                         (self.modulename, self.f.fileno(), self.f.mode))
        self.f.close()
        self.assertEqual(repr(self.f),
                         "<%s.FileIO [closed]>" % (self.modulename,))

    def testReprNoCloseFD(self):
        fd = os.open(TESTFN, os.O_RDONLY)
        try:
            with self.FileIO(fd, 'r', closefd=False) as f:
                self.assertEqual(repr(f),
                                 "<%s.FileIO name=%r mode=%r closefd=False>" %
                                 (self.modulename, f.name, f.mode))
        finally:
            os.close(fd)

    def testErrors(self):
        f = self.f
        self.assertFalse(f.isatty())
        self.assertFalse(f.closed)
        #self.assertEqual(f.name, TESTFN)
        self.assertRaises(ValueError, f.read, 10)  # Open for reading
        f.close()
        self.assertTrue(f.closed)
        f = self.FileIO(TESTFN, 'r')
        self.assertRaises(TypeError, f.readinto, "")
        self.assertFalse(f.closed)
        f.close()
        self.assertTrue(f.closed)

    def testMethods(self):
        # Every method must raise ValueError once the file is closed.
        methods = ['fileno', 'isatty', 'seekable', 'readable', 'writable',
                   'read', 'readall', 'readline', 'readlines',
                   'tell', 'truncate', 'flush']

        self.f.close()
        self.assertTrue(self.f.closed)

        for methodname in methods:
            method = getattr(self.f, methodname)
            # should raise on closed file
            self.assertRaises(ValueError, method)

        self.assertRaises(TypeError, self.f.readinto)
        self.assertRaises(ValueError, self.f.readinto, bytearray(1))
        self.assertRaises(TypeError, self.f.seek)
        self.assertRaises(ValueError, self.f.seek, 0)
        self.assertRaises(TypeError, self.f.write)
        self.assertRaises(ValueError, self.f.write, b'')
        self.assertRaises(TypeError, self.f.writelines)
        self.assertRaises(ValueError, self.f.writelines, b'')

    def testOpendir(self):
        # Issue 3703: opening a directory should fill the errno
        # Windows always returns "[Errno 13]: Permission denied
        # Unix uses fstat and returns "[Errno 21]: Is a directory"
        try:
            self.FileIO('.', 'r')
        except OSError as e:
            self.assertNotEqual(e.errno, 0)
            self.assertEqual(e.filename, ".")
        else:
            self.fail("Should have raised OSError")

    @unittest.skipIf(os.name == 'nt', "test only works on a POSIX-like system")
    def testOpenDirFD(self):
        fd = os.open('.', os.O_RDONLY)
        with self.assertRaises(OSError) as cm:
            self.FileIO(fd, 'r')
        os.close(fd)
        self.assertEqual(cm.exception.errno, errno.EISDIR)

    #A set of functions testing that we get expected behaviour if someone has
    #manually closed the internal file descriptor.  First, a decorator:
    def ClosedFD(func):
        # Wraps a test method: closes self.f's fd first, then runs the test,
        # then best-effort closes self.f so tearDown doesn't blow up.
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            finally:
                try:
                    self.f.close()
                except OSError:
                    pass
        return wrapper

    def ClosedFDRaises(func):
        # Like ClosedFD, but additionally requires the wrapped test body to
        # raise OSError with errno EBADF.
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            except OSError as e:
                self.assertEqual(e.errno, errno.EBADF)
            else:
                self.fail("Should have raised OSError")
            finally:
                try:
                    self.f.close()
                except OSError:
                    pass
        return wrapper

    @ClosedFDRaises
    def testErrnoOnClose(self, f):
        f.close()

    @ClosedFDRaises
    def testErrnoOnClosedWrite(self, f):
        f.write(b'a')

    @ClosedFDRaises
    def testErrnoOnClosedSeek(self, f):
        f.seek(0)

    @ClosedFDRaises
    def testErrnoOnClosedTell(self, f):
        f.tell()

    @ClosedFDRaises
    def testErrnoOnClosedTruncate(self, f):
        f.truncate(0)

    @ClosedFD
    def testErrnoOnClosedSeekable(self, f):
        f.seekable()

    @ClosedFD
    def testErrnoOnClosedReadable(self, f):
        f.readable()

    @ClosedFD
    def testErrnoOnClosedWritable(self, f):
        f.writable()

    @ClosedFD
    def testErrnoOnClosedFileno(self, f):
        f.fileno()

    @ClosedFD
    def testErrnoOnClosedIsatty(self, f):
        self.assertEqual(f.isatty(), False)

    def ReopenForRead(self):
        # Helper: reopen TESTFN read-only with its fd already closed.
        try:
            self.f.close()
        except OSError:
            pass
        self.f = self.FileIO(TESTFN, 'r')
        os.close(self.f.fileno())
        return self.f

    @ClosedFDRaises
    def testErrnoOnClosedRead(self, f):
        f = self.ReopenForRead()
        f.read(1)

    @ClosedFDRaises
    def testErrnoOnClosedReadall(self, f):
        f = self.ReopenForRead()
        f.readall()

    @ClosedFDRaises
    def testErrnoOnClosedReadinto(self, f):
        f = self.ReopenForRead()
        a = array('b', b'x'*10)
        f.readinto(a)
class CAutoFileTests(AutoFileTests, unittest.TestCase):
    """Run AutoFileTests against the C implementation (_io.FileIO)."""
    FileIO = _io.FileIO
    modulename = '_io'
class PyAutoFileTests(AutoFileTests, unittest.TestCase):
    """Run AutoFileTests against the pure-Python implementation (_pyio.FileIO)."""
    FileIO = _pyio.FileIO
    modulename = '_pyio'
class OtherFileTests:
    """FileIO tests that manage their own files (no automatic setUp file).

    Mixin: concrete subclasses must provide ``FileIO`` — see
    COtherFileTests / PyOtherFileTests below.
    """

    def testAbles(self):
        # readable()/writable()/seekable() must reflect the open mode.
        try:
            f = self.FileIO(TESTFN, "w")
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            f.close()

            f = self.FileIO(TESTFN, "r")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            f.close()

            f = self.FileIO(TESTFN, "a+")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.assertEqual(f.isatty(), False)
            f.close()

            if sys.platform != "win32":
                try:
                    f = self.FileIO("/dev/tty", "a")
                except OSError:
                    # When run in a cron job there just aren't any
                    # ttys, so skip the test.  This also handles other
                    # OS'es that don't support /dev/tty.
                    pass
                else:
                    self.assertEqual(f.readable(), False)
                    self.assertEqual(f.writable(), True)
                    if sys.platform != "darwin" and \
                       'bsd' not in sys.platform and \
                       not sys.platform.startswith('sunos'):
                        # Somehow /dev/tty appears seekable on some BSDs
                        self.assertEqual(f.seekable(), False)
                    self.assertEqual(f.isatty(), True)
                    f.close()
        finally:
            os.unlink(TESTFN)

    def testInvalidModeStrings(self):
        # check invalid mode strings
        for mode in ("", "aU", "wU+", "rw", "rt"):
            try:
                f = self.FileIO(TESTFN, mode)
            except ValueError:
                pass
            else:
                f.close()
                self.fail('%r is an invalid file mode' % mode)

    def testModeStrings(self):
        # test that the mode attribute is correct for various mode strings
        # given as init args
        try:
            for modes in [('w', 'wb'), ('wb', 'wb'), ('wb+', 'rb+'),
                          ('w+b', 'rb+'), ('a', 'ab'), ('ab', 'ab'),
                          ('ab+', 'ab+'), ('a+b', 'ab+'), ('r', 'rb'),
                          ('rb', 'rb'), ('rb+', 'rb+'), ('r+b', 'rb+')]:
                # read modes are last so that TESTFN will exist first
                with self.FileIO(TESTFN, modes[0]) as f:
                    self.assertEqual(f.mode, modes[1])
        finally:
            if os.path.exists(TESTFN):
                os.unlink(TESTFN)

    def testUnicodeOpen(self):
        # verify repr works for unicode too
        f = self.FileIO(str(TESTFN), "w")
        f.close()
        os.unlink(TESTFN)

    def testBytesOpen(self):
        # Opening a bytes filename
        try:
            fn = TESTFN.encode("ascii")
        except UnicodeEncodeError:
            self.skipTest('could not encode %r to ascii' % TESTFN)
        f = self.FileIO(fn, "w")
        try:
            f.write(b"abc")
            f.close()
            with open(TESTFN, "rb") as f:
                self.assertEqual(f.read(), b"abc")
        finally:
            os.unlink(TESTFN)

    def testConstructorHandlesNULChars(self):
        # Embedded NUL bytes in a filename must be rejected, str or bytes.
        fn_with_NUL = 'foo\0bar'
        self.assertRaises(ValueError, self.FileIO, fn_with_NUL, 'w')
        self.assertRaises(ValueError, self.FileIO, bytes(fn_with_NUL, 'ascii'), 'w')

    def testInvalidFd(self):
        # Negative and bogus descriptors must be rejected up front.
        self.assertRaises(ValueError, self.FileIO, -10)
        self.assertRaises(OSError, self.FileIO, make_bad_fd())
        if sys.platform == 'win32':
            import msvcrt
            self.assertRaises(OSError, msvcrt.get_osfhandle, make_bad_fd())

    def testBadModeArgument(self):
        # verify that we get a sensible error message for bad mode argument
        bad_mode = "qwerty"
        try:
            f = self.FileIO(TESTFN, bad_mode)
        except ValueError as msg:
            if msg.args[0] != 0:
                s = str(msg)
                if TESTFN in s or bad_mode not in s:
                    self.fail("bad error message for invalid mode: %s" % s)
            # if msg.args[0] == 0, we're probably on Windows where there may be
            # no obvious way to discover why open() failed.
        else:
            f.close()
            self.fail("no error for invalid mode: %s" % bad_mode)

    def testTruncate(self):
        # truncate() must not move the file position (asserted via tell()).
        f = self.FileIO(TESTFN, 'w')
        f.write(bytes(bytearray(range(10))))
        self.assertEqual(f.tell(), 10)
        f.truncate(5)
        self.assertEqual(f.tell(), 10)
        self.assertEqual(f.seek(0, io.SEEK_END), 5)
        f.truncate(15)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.seek(0, io.SEEK_END), 15)
        f.close()

    def testTruncateOnWindows(self):
        def bug801631():
            # SF bug <http://www.python.org/sf/801631>
            # "file.truncate fault on windows"
            f = self.FileIO(TESTFN, 'w')
            f.write(bytes(range(11)))
            f.close()

            f = self.FileIO(TESTFN,'r+')
            data = f.read(5)
            if data != bytes(range(5)):
                self.fail("Read on file opened for update failed %r" % data)
            if f.tell() != 5:
                self.fail("File pos after read wrong %d" % f.tell())

            f.truncate()
            if f.tell() != 5:
                self.fail("File pos after ftruncate wrong %d" % f.tell())

            f.close()
            size = os.path.getsize(TESTFN)
            if size != 5:
                self.fail("File size after ftruncate wrong %d" % size)
        try:
            bug801631()
        finally:
            os.unlink(TESTFN)

    def testAppend(self):
        # Append mode must preserve existing content and add at the end.
        try:
            f = open(TESTFN, 'wb')
            f.write(b'spam')
            f.close()
            f = open(TESTFN, 'ab')
            f.write(b'eggs')
            f.close()
            f = open(TESTFN, 'rb')
            d = f.read()
            f.close()
            self.assertEqual(d, b'spameggs')
        finally:
            try:
                os.unlink(TESTFN)
            except:
                pass

    def testInvalidInit(self):
        # A mode argument of the wrong type must raise TypeError.
        self.assertRaises(TypeError, self.FileIO, "1", 0, 0)

    def testWarnings(self):
        # Constructor failures must not emit spurious warnings.
        with check_warnings(quiet=True) as w:
            self.assertEqual(w.warnings, [])
            self.assertRaises(TypeError, self.FileIO, [])
            self.assertEqual(w.warnings, [])
            self.assertRaises(ValueError, self.FileIO, "/some/invalid/name", "rt")
            self.assertEqual(w.warnings, [])

    def testUnclosedFDOnException(self):
        # If __init__ dies partway through (here: while setting .name), the
        # already-opened fd must not be leaked or double-closed.
        class MyException(Exception): pass
        class MyFileIO(self.FileIO):
            def __setattr__(self, name, value):
                if name == "name":
                    raise MyException("blocked setting name")
                return super(MyFileIO, self).__setattr__(name, value)
        fd = os.open(__file__, os.O_RDONLY)
        self.assertRaises(MyException, MyFileIO, fd)
        os.close(fd)  # should not raise OSError(EBADF)
class COtherFileTests(OtherFileTests, unittest.TestCase):
    """Run OtherFileTests against the C implementation (_io.FileIO)."""
    FileIO = _io.FileIO
    modulename = '_io'

    @cpython_only
    def testInvalidFd_overflow(self):
        # Issue 15989
        import _testcapi
        self.assertRaises(TypeError, self.FileIO, _testcapi.INT_MAX + 1)
        self.assertRaises(TypeError, self.FileIO, _testcapi.INT_MIN - 1)
class PyOtherFileTests(OtherFileTests, unittest.TestCase):
    """Run OtherFileTests against the pure-Python implementation (_pyio.FileIO)."""
    FileIO = _pyio.FileIO
    modulename = '_pyio'
def test_main():
    """Run both C and pure-Python FileIO suites; always remove TESTFN."""
    # Historically, these tests have been sloppy about removing TESTFN,
    # so it is cleaned up here no matter what.
    suites = (CAutoFileTests, PyAutoFileTests,
              COtherFileTests, PyOtherFileTests)
    try:
        run_unittest(*suites)
    finally:
        if os.path.exists(TESTFN):
            os.unlink(TESTFN)
# Allow running this test module directly.
if __name__ == '__main__':
    test_main()
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-3.6.0/Lib/test/test_fileio.py",
"copies": "5",
"size": "18759",
"license": "mit",
"hash": 1826191505021484000,
"line_mean": 31.8528896673,
"line_max": 88,
"alpha_frac": 0.544432006,
"autogenerated": false,
"ratio": 3.7973684210526315,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007749904818278237,
"num_lines": 571
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.