text stringlengths 0 1.05M | meta dict |
|---|---|
"""Attempts to figure out what version of things we're using in
playdoh-lib. This is made exceedingly difficult because of the
following things:
1. We install with pip to lib/python/, but nix the .egg-info files so
we have no clue what we installed unless the package also has the
version information tucked away somewhere.
This is dumb--we should stop doing this.
2. Some projects squirrel the version away in a place that's difficult
to pull out.
3. We have a few projects that apparently don't like doing version
releases.
Also interesting and vaguely related is that we're not including the
license this code that we're distributing is distributed under. That's
a huge license fail.
"""
import importlib
import logging
import os
import re
import site
import sys
import yaml
from victor import __version__
def fix_sys_path(cfg):
    """Prepends configured site directories to ``sys.path``.

    Reads the optional "sitedirs" key in *cfg* (a list of paths),
    registers each one via ``site.addsitedir``, then moves every
    newly-added entry to the front of ``sys.path`` so the configured
    directories take precedence over pre-existing entries.
    """
    if 'sitedirs' not in cfg:
        return
    # Snapshot the current path so we can tell which entries addsitedir adds.
    before = list(sys.path)
    for sitedir in cfg['sitedirs']:
        site.addsitedir(sitedir)
    added = [entry for entry in list(sys.path) if entry not in before]
    for entry in added:
        sys.path.remove(entry)
    # Splice the new entries in at the very front, preserving their order.
    sys.path[:0] = added
class NoVersion(Exception):
    """Raised when no version information can be found for a module."""
    pass
def get_version_from_module(module_name):
    """Imports *module_name* and returns the first version attribute found.

    Probes a handful of attribute names that projects commonly use to
    stash their version string. Raises NoVersion (listing the module's
    dir()) when none of them are present.
    """
    mod = importlib.import_module(module_name)
    for attr in ('__version__', 'VERSION', 'majorVersionId', 'ver'):
        try:
            return getattr(mod, attr)
        except AttributeError:
            continue
    raise NoVersion('{0}: {1}'.format(module_name, dir(mod)))
def get_version_from_requirement(line):
    """Parses a requirements-file line into a ``(name, version)`` tuple.

    E.g. ``"foo==1.2"`` -> ``('foo', '1.2')``; a line without a pin
    yields an empty version string. Both parts are stripped of
    surrounding whitespace (the original returned the raw regex groups,
    so ``"foo == 1.2"`` produced a name with a trailing space that could
    never match the blacklist). Lines that do not match the pattern at
    all (e.g. starting with a comparator) fall back to
    ``(line.strip(), '')`` instead of raising AttributeError on None.
    """
    version_re = re.compile(
        '^'
        '([^=><]+)'                       # package name: everything before a comparator
        '(?:\\s*[=><]*\\s*([^=><]*?))?'   # optional comparator and version
        '$')
    match = version_re.match(line)
    if match is None:
        # Unparseable line: best-effort, treat the whole thing as a name.
        return (line.strip(), '')
    name, version = match.groups('')      # default '' covers a non-participating group
    return (name.strip(), version.strip())
def get_version(module_name, verbosity=0):
    """Best-effort version lookup for *module_name*.

    Strategy: first try importing the module itself and then a
    ``<module>.__version__`` submodule, probing each for version
    attributes; failing that, grep a ``setup.py`` that some packages
    bundle next to their source for a ``version=...`` line.

    Raises NoVersion when every strategy fails.
    (Python 2 syntax: uses the print statement.)
    """
    if verbosity:
        print '>>>', module_name
    # Try importing various possible version modules
    for version_module in (module_name,
                           '.'.join([module_name, '__version__']),
                           ):
        try:
            return get_version_from_module(version_module)
        except (NoVersion, ImportError):
            # Logged at exception level but deliberately non-fatal: we fall
            # through to the next strategy.
            logging.exception(version_module)
    # There are a couple of packages that have setup.py **in** the
    # source which is beyond bizarre, but whatevs.
    try:
        mod = importlib.import_module(module_name)
        # NOTE(review): fp is never closed; harmless for a short-lived CLI
        # but a `with` block would be cleaner.
        fp = open(os.path.join(os.path.dirname(mod.__file__), 'setup.py'), 'r')
        for line in fp.readlines():
            line = line.strip()
            if line.startswith('version'):
                line = line.split('=')
                if len(line) > 1:
                    # Strip quotes and a trailing comma from e.g. version="1.2",
                    line = line[1].strip().strip('"\',')
                    return line
    except (ImportError, IOError):
        logging.exception(module_name)
    raise NoVersion('{0}'.format(module_name))
def get_blacklist(cfg):
    """Returns the list of blacklisted items, or an empty list.

    An item is in the blacklist because victor can't divine version
    information for it — either victor's fault or the item simply has
    no version information. Keeping it listed records that it's
    problematic. Reads the "blacklist" key of the config; a missing or
    empty/None value yields [].
    """
    blacklisted = cfg.get('blacklist')
    if blacklisted:
        return blacklisted
    return []
def load_cfg(cfg_fn):
    """Loads the YAML config file at *cfg_fn* and returns its contents.

    Fixes two defects in the original one-liner: the file handle was
    never closed, and bare ``yaml.load`` is unsafe on untrusted input
    and requires an explicit Loader on PyYAML >= 6. ``safe_load`` is the
    right call for a plain configuration file.
    """
    with open(cfg_fn, 'rb') as fp:
        return yaml.safe_load(fp)
def cmdline_handler(scriptname, argv):
    """CLI entry point: reports discovered versions for configured packages.

    Loads victor.yaml, fixes sys.path, then walks the "packagelist"
    config entries. Each entry is either a directory of modules, a
    ``REQ <path>`` pip-requirements file, or a bare module name.
    Prints a report of versions and of blacklisted (unversionable) items.
    (Python 2 syntax: uses the print statement. *argv* is accepted but
    unused in this implementation.)
    """
    print '{0}: {1}'.format(scriptname, __version__)
    logging.basicConfig(level=logging.CRITICAL)
    cfg = load_cfg('victor.yaml')
    logging.debug('Fixing sys path...')
    fix_sys_path(cfg)
    logging.debug('Getting blacklist...')
    blacklist = get_blacklist(cfg)
    logging.debug('Going through packagelist...')
    package_to_version = {}
    for mem in cfg.get('packagelist', []):
        if mem.endswith(os.sep) or os.path.isdir(mem):
            # Directory entry: treat each .py file or package dir inside
            # as a module to probe.
            for mod in os.listdir(mem):
                name, extension = os.path.splitext(mod)
                if extension not in ('.py', ''):
                    logging.debug('skipping {0}: wrong file type'.format(mod))
                    continue
                if mod in blacklist:
                    logging.debug('skipping {0}: in blacklist'.format(mod))
                    continue
                if mod.endswith('.py'):
                    mod = mod[:-3]
                try:
                    version = get_version(mod)
                    package_to_version[mod] = version
                except NoVersion:
                    package_to_version[mod] = 'NO VERSION'
        elif mem.startswith('REQ '):
            # "REQ <path>" entry: parse a pip requirements file.
            mem = mem[4:].strip()
            # NOTE(review): fp is never closed.
            fp = open(mem, 'r')
            for mod in fp.readlines():
                mod = mod.strip()
                if not mod or mod.startswith('#'):
                    continue
                # NOTE(review): the version parsed from the requirement line
                # is immediately overwritten by get_version() below.
                mod, version = get_version_from_requirement(mod)
                if mod in blacklist:
                    logging.debug('skipping {0}: in blacklist'.format(mod))
                    continue
                if mod.endswith('.py'):
                    mod = mod[:-3]
                try:
                    version = get_version(mod)
                    package_to_version[mod] = version
                except NoVersion:
                    package_to_version[mod] = 'NO VERSION'
        else:
            # Bare module-name entry.
            if mem in blacklist:
                logging.debug('skipping {0}: in blacklist'.format(mem))
                continue
            try:
                version = get_version(mem)
                package_to_version[mem] = version
            except NoVersion:
                package_to_version[mem] = 'NO VERSION'
    print ''
    print 'Versions:'
    if package_to_version:
        for key, val in sorted(package_to_version.items()):
            print ' {0}: {1}'.format(key, val)
    else:
        print ' <None>'
    print ''
    print 'These have no discernable version:'
    if blacklist:
        for item in blacklist:
            print ' {0}'.format(item)
    else:
        print ' <None>'
"""
bleach: 1.1.x (c381a)
commonware: 0.4.2 (b5544)
django-appconf: 0.5 (d7ff3)
django-compressor: 1.2a2 (90966)
django-cronjobs: (cfda8)
django-mobility: (644e0)
django-mozilla-product-details: (5a59a)
django-multidb-router: (7e608)
django-nose: 1.0 (83c78)
django-session-csrf: (f00ad)
django-sha2: (3ba2b)
funfactory: (faca9)
jingo: (1dc0e)
jingo-minify: (d2ff3)
nuggets: (ce506)
schematic: (e7499)
test-utils: (3c221)
tower: (6112e)
"""
| {
"repo_name": "willkg/victor",
"path": "victor/cmdline.py",
"copies": "1",
"size": "6760",
"license": "bsd-3-clause",
"hash": -5004679920419993000,
"line_mean": 27.5232067511,
"line_max": 79,
"alpha_frac": 0.5806213018,
"autogenerated": false,
"ratio": 3.86949055523755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.495011185703755,
"avg_score": null,
"num_lines": null
} |
# attempts to locate Space Engineers and git, it will try common paths if build.ini is incorrect
# NOTE: GitExe, SpaceEngineers and the subprocess import are defined earlier
# in this file (outside this section).
if (not os.path.exists(GitExe)):
    # Probe the usual Git install locations, most common first.
    GitExe = r"C:\Program Files (x86)\Git\bin\git.exe"
    if (os.path.exists(GitExe)):
        logging.info("Git in Program Files (x86)")
    else:
        GitExe = r"C:\Program Files\Git\bin\git.exe"
        if (os.path.exists(GitExe)):
            logging.info("Git in Program Files")
        else:
            # GitHub Desktop ships a portable git under %LOCALAPPDATA%\GitHub\.
            GitHubPath = os.getenv('LOCALAPPDATA') + "\\GitHub\\"
            if (os.path.exists(GitHubPath)):
                for f in os.listdir(GitHubPath):
                    if (f.startswith('PortableGit_')):
                        logging.info("Git in " + str(f))
                        GitExe = GitHubPath + str(f) + "\\cmd\\git.exe"
                        break
try:
    if (not os.path.exists(SpaceEngineers)):
        # Probe common Steam library locations for the game directory.
        paths = [r"C:\Program Files (x86)\Steam\steamapps\common\SpaceEngineers",
                 r"C:\Program Files\Steam\steamapps\common\SpaceEngineers",
                 r"C:\Games\Steam\steamapps\common\SpaceEngineers"]
        for SpaceEngineers in paths:
            if (os.path.exists(SpaceEngineers)):
                logging.info("Space Engineers located at " + SpaceEngineers)
                break
except NameError:
    # SpaceEngineers variable was not defined, no need to search for Space Engineers
    pass
if os.path.exists(GitExe):
    # Ask git itself for a commit description.
    proc = subprocess.Popen([GitExe, "describe", "--always", "--dirty", "--tags"], stdout=subprocess.PIPE)
    gitCommit = str(proc.stdout.read())
    # NOTE(review): this slice strips the "b'" prefix and trailing "\n'" that
    # str() puts around a bytes object on Python 3 — confirm it is not run
    # under Python 2, where it would chop real characters.
    gitCommit = gitCommit[2:len(gitCommit)-3]
else:
    # No git executable: read the commit hash straight out of .git/HEAD.
    # Walk up from this script's directory looking for a .git folder
    # (bounded at 100 levels to guarantee termination).
    path = os.path.dirname(os.path.realpath(sys.argv[0]))
    for c in range(0, 100):
        pathToDotGit = path + r"\.git"
        if (os.path.exists(pathToDotGit)):
            logging.info("Git folder located at " + pathToDotGit)
            break
        upOne = os.path.dirname(path)
        if (path == upOne):
            logging.error("Hit root directory without finding .git folder")
            sys.exit()
        path = upOne
    pathToDotGit = pathToDotGit + "\\"
    path = pathToDotGit + 'HEAD'
    file = open(path, 'r')
    text = file.read()
    file.close()
    if (text.startswith('ref: ')):
        # HEAD is a symbolic ref: follow it to the branch file and take the
        # first 7 hex digits as the short commit hash.
        path = pathToDotGit + text[5:len(text) - 1]
        if (os.path.exists(path)):
            file = open (path, 'r')
            gitCommit = file.read()[:7]
            file.close()
        else:
            # NOTE(review): packed refs (.git/packed-refs) are not handled,
            # so a freshly cloned repo may land here.
            logging.error("Does not exist: " + path)
    else:
        # Detached HEAD: the file contains the hash directly.
        gitCommit = text[:7]
logging.info("Commit: " + gitCommit)
"repo_name": "Rynchodon/ARMS",
"path": ".build/find-git.py",
"copies": "2",
"size": "2175",
"license": "cc0-1.0",
"hash": 3104833589695850500,
"line_mean": 32.4769230769,
"line_max": 103,
"alpha_frac": 0.672183908,
"autogenerated": false,
"ratio": 2.8173575129533677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4489541420953368,
"avg_score": null,
"num_lines": null
} |
""" Attempts to simulate a system consisting of a tone being received by
multiple phase-shifted and amplitude scaled sensors with uncorrelated noise
at each
"""
import numpy as np
import logging
import itertools
from directionFinder_backend.signal_generator_correlation import SignalGeneratorCorrelation
class SignalGenerator:
    """Simulates a tone received by multiple phase-shifted and
    amplitude-scaled sensors with uncorrelated noise at each sensor.

    Produces time-domain vectors, quantised spectra and cross-correlation
    updates, mimicking the interface used by the rest of the direction
    finder backend.
    """

    def __init__(self, num_channels=4, tone_freq=0.2345, snr=0.1,
                 phase_shifts=None, amplitude_scales=None,
                 adc_bits=8, samples=2048,
                 fft_bits=18,
                 impulse_length=1000, impulse_snr=1,
                 impulse_offsets=None,
                 logger=None):
        """ Creates a signal generator instance

        Parameters:
        num_channels -- how many independent sensors should be simulated.
        tone_freq -- the frequency of the signal as a fraction of sample
            frequency. 0.5 = nyquist. 1 = alias for 0
        snr -- signal to noise ratio in linear power terms. Must be <= 1.
        phase_shifts -- array of phase shifts to apply to each channel in
            radians. Defaults to zeros, sized to num_channels.
        amplitude_scales -- array of amplitude scalings to apply to each
            channel. Defaults to ones, sized to num_channels.
            #TODO: Is this a voltage or power scale???
        adc_bits -- when quantising time domain signal, how many bits to
            quantise to.
        samples -- when generating vectors, how many samples per channel.
        fft_bits -- when quantising output of FFT, how many bits to
            quantise to.
        impulse_length -- number of noise samples in the simulated impulse.
        impulse_snr -- linear-power SNR of the simulated impulse.
        impulse_offsets -- accepted for interface compatibility but
            currently unused.
        logger -- destination logger; defaults to this module's logger.
        """
        # None sentinels replace the original mutable ndarray defaults
        # (which were shared between instances) and let the defaults
        # follow num_channels instead of being fixed at 4.
        if phase_shifts is None:
            phase_shifts = np.zeros(num_channels)
        if amplitude_scales is None:
            amplitude_scales = np.ones(num_channels)
        if logger is None:
            logger = logging.getLogger(__name__)
        self.logger = logger
        self.num_channels = num_channels
        self.tone_freq = tone_freq
        self.snr = snr
        self.phase_shifts = phase_shifts
        self.amplitude_scales = amplitude_scales
        self.adc_bits = adc_bits
        self.samples = samples
        self.fft_bits = fft_bits
        self.impulse_length = impulse_length
        self.impulse_snr = impulse_snr
        assert(snr <= 1)
        assert(phase_shifts.size == num_channels)
        assert(amplitude_scales.size == num_channels)
        self.noise_stddev = 1.0/3
        # e.g. for 4 channels: [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
        self.cross_combinations = list(itertools.combinations(range(num_channels), 2))
        self.auto_combinations = [(0, 0)]
        self.frequency_correlations = {}
        for comb in self.cross_combinations:
            self.frequency_correlations[comb] = SignalGeneratorCorrelation(
                comb,
                0,
                0.5,
                self.logger.getChild("{a}x{b}".format(a=comb[0], b=comb[1])))

    def fetch_crosses(self):
        """Generates fresh quantised spectra and pushes each cross
        (a times conj(b)) into its SignalGeneratorCorrelation."""
        spectrums = self.generate_quantised_spectrums()
        for a, b in self.cross_combinations:
            cross = spectrums[a] * np.conj(spectrums[b])
            self.frequency_correlations[(a, b)].update(cross)

    def set_impulse_len(self, length):
        """Sets the number of noise samples in the simulated impulse."""
        self.impulse_length = length

    def set_impulse_snr(self, snr):
        """Sets the linear-power SNR of the simulated impulse."""
        self.impulse_snr = snr

    def impulse_arm(self):
        """No-op: the simulator has no hardware trigger to arm."""
        pass

    def impulse_fetch(self):
        """Fills self.time_domain_signals (int8, one row per channel) with
        per-channel noise plus a common impulse burst, book-ended by
        pre_delay samples of silence."""
        pre_delay = 256 * 4
        impulse = np.concatenate((
            np.zeros(pre_delay),
            np.random.normal(loc=0,
                             scale=self.noise_stddev * 127 * self.impulse_snr,
                             size=self.impulse_length),
            np.zeros(pre_delay)
        ))
        # BUG FIX: ndarray.clip returns a new array; the original discarded
        # the result, so the impulse was never actually clipped to int8 range.
        impulse = impulse.clip(-128, 127)
        self.time_domain_signals = np.ndarray((self.num_channels,
                                               len(impulse)),
                                              dtype=np.int8)
        for chan in range(self.num_channels):
            noise = np.random.normal(loc=0,
                                     scale=self.noise_stddev * 127,
                                     size=len(impulse))
            noise = noise.clip(-128, 127)
            self.time_domain_signals[chan] = noise
            # NOTE(review): in-place float64 += into an int8 row relies on
            # unsafe casting; recent NumPy versions reject it — confirm
            # against the NumPy version deployed with this project.
            self.time_domain_signals[chan] += impulse
            # No-op on an int8 array (already integral); kept for parity
            # with the original code.
            self.time_domain_signals[chan].round()

    def generate(self):
        """Returns a (num_channels, samples) float array of tone + noise."""
        signals = np.ndarray((self.num_channels, self.samples))
        for channel in range(self.num_channels):
            signals[channel] = self.generate_tone(channel)
            signals[channel] += self.generate_noise()
        return signals

    def generate_tone(self, channel):
        """Returns the deterministic tone for *channel*, phase shifted and
        amplitude scaled per the channel's configuration."""
        x = np.arange(0, self.samples, 1)
        tone = np.sin((2*np.pi * self.tone_freq * x) + self.phase_shifts[channel])
        # For a sine wave: P = A^2 / 2. This must be divided by N as the FFT
        # acts to multiply the power of a bin by N.
        # We want the power in the FFT bin to be P.
        power = ((self.noise_stddev**2) * self.snr) / self.samples
        # but multiplied by 2 because the power is split into 2 bins:
        # positive freq and negative freq
        power = power * 2
        amplitude = np.sqrt(2*power)
        return (self.amplitude_scales[channel] * amplitude) * tone

    def generate_noise(self):
        """Returns one channel's worth of Gaussian noise."""
        return np.random.normal(0, self.noise_stddev, self.samples)

    def quantise(self, signal):
        """ Quantising process is:
        - we assume the input signal is [-1 ; +1]
        - scale up to the number of bits we have.
        - round.
        - clip
        - scale back down to be [-1 ; +1]

        NOTE: mutates *signal* in place via the scale-up step before
        rebinding; callers passing a shared array should copy first.
        """
        scale_factor = float(1 << (self.adc_bits-1))  # -1 because positive and negative half
        clip_min = -scale_factor
        clip_max = scale_factor
        signal *= scale_factor
        signal = signal.round()
        signal /= scale_factor
        signal = signal.clip(-1, 1)
        return signal

    def generate_quantised(self):
        """Returns generated signals after time-domain quantisation."""
        signals = self.generate()
        signals = self.quantise(signals)
        return signals

    def quantise_spectrum(self, signal):
        """Quantises an FFT output to self.fft_bits bits.

        Explanation: assuming power distributed evenly over the spectrum,
        the amplitude of a bin is sqrt(variance * N). We want to normalise
        this to 1. However, power is not evenly distributed, so we give a
        factor of 3 head room.
        """
        normalise_factor = (np.sqrt(self.samples * (self.noise_stddev**2))) * 3
        signal /= normalise_factor  # all bins' power should now be below 1
        upscale_factor = float(1 << (self.fft_bits-1))
        signal *= upscale_factor
        signal = signal.round()
        signal /= upscale_factor
        # would like to clip here, but clipping complex would zero the
        # imaginary part if the real part is out of range
        #signal = signal.clip(-1, 1)
        return signal

    def generate_quantised_spectrums(self):
        """Returns per-channel rFFT spectra of freshly generated,
        time-domain-quantised signals (spectrum quantisation disabled)."""
        signals = self.generate_quantised()
        spectrums = np.fft.rfft(signals)
        #spectrums = self.quantise_spectrum(spectrums)
        return spectrums
| {
"repo_name": "jgowans/directionFinder_backend",
"path": "directionFinder_backend/signal_generator.py",
"copies": "1",
"size": "6980",
"license": "mit",
"hash": -388002788820253760,
"line_mean": 41.303030303,
"line_max": 138,
"alpha_frac": 0.5815186246,
"autogenerated": false,
"ratio": 3.9346110484780157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5016129673078016,
"avg_score": null,
"num_lines": null
} |
"""Attempts to spellcheck and correct failed cd commands"""
import os
from difflib import get_close_matches
from thefuck.specific.sudo import sudo_support
from thefuck.rules import cd_mkdir
from thefuck.utils import for_app
__author__ = "mmussomele"
# difflib.get_close_matches cutoff: candidates whose similarity ratio falls
# below this are discarded (despite the name, it acts as a minimum).
MAX_ALLOWED_DIFF = 0.6
def _get_sub_dirs(parent):
"""Returns a list of the child directories of the given parent directory"""
return [child for child in os.listdir(parent) if os.path.isdir(os.path.join(parent, child))]
@sudo_support
@for_app('cd')
def match(command):
    """Match function copied from cd_mkdir.py"""
    if not command.script.startswith('cd '):
        return False
    err = command.stderr.lower()
    return 'no such file or directory' in err or 'cd: can\'t cd to' in err
@sudo_support
def get_new_command(command):
    """
    Attempt to rebuild the path string by spellchecking the directories.
    If it fails (i.e. no directories are a close enough match), then it
    defaults to the rules of cd_mkdir.
    Change sensitivity by changing MAX_ALLOWED_DIFF. Default value is 0.6
    """
    segments = command.script_parts[1].split(os.sep)
    if segments[-1] == '':
        segments = segments[:-1]
    current = os.getcwd()
    for segment in segments:
        if segment == ".":
            continue
        if segment == "..":
            current = os.path.split(current)[0]
            continue
        candidates = get_close_matches(segment, _get_sub_dirs(current),
                                       cutoff=MAX_ALLOWED_DIFF)
        if not candidates:
            # No directory was similar enough: fall back to mkdir behaviour.
            return cd_mkdir.get_new_command(command)
        current = os.path.join(current, candidates[0])
    return 'cd "{0}"'.format(current)
# This rule is active unless the user explicitly disables it.
enabled_by_default = True
| {
"repo_name": "levythu/thefuck",
"path": "thefuck/rules/cd_correction.py",
"copies": "1",
"size": "1678",
"license": "mit",
"hash": 4810766269784511000,
"line_mean": 30.0740740741,
"line_max": 96,
"alpha_frac": 0.6442193087,
"autogenerated": false,
"ratio": 3.6086021505376342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4752821459237634,
"avg_score": null,
"num_lines": null
} |
"""Attempts to spellcheck and correct failed cd commands"""
import os
from difflib import get_close_matches
from thefuck.utils import sudo_support
from thefuck.rules import cd_mkdir
__author__ = "mmussomele"
# difflib.get_close_matches cutoff: candidates whose similarity ratio falls
# below this are discarded (despite the name, it acts as a minimum).
MAX_ALLOWED_DIFF = 0.6
def _get_sub_dirs(parent):
"""Returns a list of the child directories of the given parent directory"""
return [child for child in os.listdir(parent) if os.path.isdir(os.path.join(parent, child))]
@sudo_support
def match(command, settings):
    """Match function copied from cd_mkdir.py"""
    if not command.script.startswith('cd '):
        return False
    err = command.stderr.lower()
    return ('no such file or directory' in err
            or 'cd: can\'t cd to' in err)
@sudo_support
def get_new_command(command, settings):
    """
    Attempt to rebuild the path string by spellchecking the directories.
    If it fails (i.e. no directories are a close enough match), then it
    defaults to the rules of cd_mkdir.
    Change sensitivity by changing MAX_ALLOWED_DIFF. Default value is 0.6
    """
    wanted = command.script.split()[1].split(os.sep)
    if wanted[-1] == '':
        wanted = wanted[:-1]
    cwd = os.getcwd()
    for step in wanted:
        if step == ".":
            continue
        if step == "..":
            cwd, _ = os.path.split(cwd)
            continue
        matches = get_close_matches(step, _get_sub_dirs(cwd),
                                    cutoff=MAX_ALLOWED_DIFF)
        if not matches:
            # No directory was similar enough: fall back to mkdir behaviour.
            return cd_mkdir.get_new_command(command, settings)
        cwd = os.path.join(cwd, matches[0])
    return 'cd "{0}"'.format(cwd)
# This rule is active unless the user explicitly disables it.
enabled_by_default = True
| {
"repo_name": "zhangzhishan/thefuck",
"path": "thefuck/rules/cd_correction.py",
"copies": "4",
"size": "1653",
"license": "mit",
"hash": 3175311209669262300,
"line_mean": 30.7884615385,
"line_max": 96,
"alpha_frac": 0.6424682396,
"autogenerated": false,
"ratio": 3.6733333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6315801572933334,
"avg_score": null,
"num_lines": null
} |
"""Attempts to spellcheck and correct failed cd commands"""
import os
import six
from difflib import get_close_matches
from thefuck.specific.sudo import sudo_support
from thefuck.rules import cd_mkdir
from thefuck.utils import for_app
__author__ = "mmussomele"
# difflib.get_close_matches cutoff: candidates whose similarity ratio falls
# below this are discarded (despite the name, it acts as a minimum).
MAX_ALLOWED_DIFF = 0.6
def _get_sub_dirs(parent):
"""Returns a list of the child directories of the given parent directory"""
return [child for child in os.listdir(parent) if os.path.isdir(os.path.join(parent, child))]
@sudo_support
@for_app('cd')
def match(command):
    """Match function copied from cd_mkdir.py"""
    err = command.stderr.lower()
    failed = ('no such file or directory' in err
              or 'cd: can\'t cd to' in err)
    return command.script.startswith('cd ') and failed
@sudo_support
def get_new_command(command):
    """
    Attempt to rebuild the path string by spellchecking the directories.
    If it fails (i.e. no directories are a close enough match), then it
    defaults to the rules of cd_mkdir.
    Change sensitivity by changing MAX_ALLOWED_DIFF. Default value is 0.6
    """
    segments = command.script_parts[1].split(os.sep)
    if segments[-1] == '':
        del segments[-1]
    # Python 2 needs the unicode-aware getcwd variant.
    cwd = os.getcwdu() if six.PY2 else os.getcwd()
    for segment in segments:
        if segment == ".":
            continue
        if segment == "..":
            cwd = os.path.split(cwd)[0]
            continue
        close = get_close_matches(segment, _get_sub_dirs(cwd),
                                  cutoff=MAX_ALLOWED_DIFF)
        if close:
            cwd = os.path.join(cwd, close[0])
        else:
            # No directory was similar enough: fall back to mkdir behaviour.
            return cd_mkdir.get_new_command(command)
    return u'cd "{0}"'.format(cwd)
# This rule is active unless the user explicitly disables it.
enabled_by_default = True
| {
"repo_name": "mlk/thefuck",
"path": "thefuck/rules/cd_correction.py",
"copies": "4",
"size": "1747",
"license": "mit",
"hash": -1179209368808488000,
"line_mean": 29.1206896552,
"line_max": 96,
"alpha_frac": 0.6382369777,
"autogenerated": false,
"ratio": 3.5799180327868854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6218155010486885,
"avg_score": null,
"num_lines": null
} |
"""Attempts to spellcheck and correct failed cd commands"""
import os
import six
from thefuck.specific.sudo import sudo_support
from thefuck.rules import cd_mkdir
from thefuck.utils import for_app, get_close_matches
__author__ = "mmussomele"
# get_close_matches cutoff: candidates whose similarity ratio falls below
# this are discarded (despite the name, it acts as a minimum).
MAX_ALLOWED_DIFF = 0.6
def _get_sub_dirs(parent):
"""Returns a list of the child directories of the given parent directory"""
return [child for child in os.listdir(parent) if os.path.isdir(os.path.join(parent, child))]
@sudo_support
@for_app('cd')
def match(command):
    """Match function copied from cd_mkdir.py"""
    if not command.script.startswith('cd '):
        return False
    out = command.output.lower()
    return any(marker in out for marker in (
        'no such file or directory',
        'cd: can\'t cd to',
        'does not exist',
    ))
@sudo_support
def get_new_command(command):
    """
    Attempt to rebuild the path string by spellchecking the directories.
    If it fails (i.e. no directories are a close enough match), then it
    defaults to the rules of cd_mkdir.
    Change sensitivity by changing MAX_ALLOWED_DIFF. Default value is 0.6
    """
    steps = command.script_parts[1].split(os.sep)
    if steps[-1] == '':
        steps = steps[:-1]
    # An absolute path starts with an empty first segment.
    if steps[0] == '':
        cwd = os.sep
        steps = steps[1:]
    elif six.PY2:
        # Python 2 needs the unicode-aware getcwd variant.
        cwd = os.getcwdu()
    else:
        cwd = os.getcwd()
    for step in steps:
        if step == ".":
            continue
        if step == "..":
            cwd = os.path.split(cwd)[0]
            continue
        candidates = get_close_matches(step, _get_sub_dirs(cwd),
                                       cutoff=MAX_ALLOWED_DIFF)
        if not candidates:
            # No directory was similar enough: fall back to mkdir behaviour.
            return cd_mkdir.get_new_command(command)
        cwd = os.path.join(cwd, candidates[0])
    return u'cd "{0}"'.format(cwd)
| {
"repo_name": "nvbn/thefuck",
"path": "thefuck/rules/cd_correction.py",
"copies": "3",
"size": "1843",
"license": "mit",
"hash": -1145849367275746400,
"line_mean": 29.2131147541,
"line_max": 96,
"alpha_frac": 0.6169289202,
"autogenerated": false,
"ratio": 3.585603112840467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003380091262464086,
"num_lines": 61
} |
""" Attempt to clone https://github.com/michaeldv/awesome_print
Usage:
from awesome_print import ap
ap(object)
"""
import __builtin__
from types import *
# Output mode: 'ansi' emits color escape codes; 'plain' disables them.
mode = 'ansi'
def ap(*args):
    """Pretty-prints each argument using this module's format().
    (Python 2 syntax: uses the print statement.)"""
    for arg in args:
        print format(arg)
def indent(level):
    """Returns the leading whitespace for the given nesting *level*."""
    return level * '  '
def format(obj, level = 0):
    """Renders *obj* as a colored, indented string, recursing into
    tuples, lists and dicts. *level* is the current nesting depth.

    NOTE: shadows the built-in format(); Python 2 only (relies on the
    type constants from `from types import *` and __builtin__).
    """
    # Shadows the built-in 'type'; __builtin__.type reaches the real one.
    type = __builtin__.type(obj)
    if type is NoneType:
        return red('None')
    if type is TypeType:
        # NOTE(review): classes fall through to the generic str() at the
        # bottom — this branch is effectively a placeholder.
        pass
    if type is BooleanType:
        return green(str(obj))
    if type in [StringType, UnicodeType]:
        return yellow(str(obj))
    if type in [IntType, LongType, FloatType, ComplexType]:
        return bold_blue(str(obj))
    if type in (TupleType, ListType):
        open, close = ('(', ')') if type is TupleType else ('[', ']')
        # NOTE(review): 'is 0' relies on CPython small-int caching;
        # '== 0' is what's meant.
        if len(obj) is 0:
            return open + close
        s = []
        i = 0
        # Width of the widest index, so "[ i]" labels right-align.
        width = str(len(str(len(obj))))
        for e in obj:
            s.append(('%s[%' + width + 'd] %s') % \
                (indent(level + 1), i, format(e, level + 1)))
            i+=1
        return open + "\n" + \
            ",\n".join(s) + \
            "\n" + indent(level) + close
    if type is DictType:
        if len(obj) is 0:
            return '{}'
        # Pad keys to the widest rendered key so the values line up.
        width = str(max([flen(format(k)) for k in obj.keys()]))
        s = []
        for k in obj.keys():
            v = obj[k]
            s.append(('%s%' + width + 's: %s') % \
                (indent(level + 1), format(k), format(v, level + 1)))
        return '{' + "\n" + \
            ",\n".join(s) + \
            "\n" + indent(level) + '}'
    if type is LambdaType:
        return str(obj)
    # Anything unrecognised: fall back to its plain string form.
    return str(obj)
def flen(str):
    """Returns the width of the widest line in a (possibly multi-line) string."""
    return max(map(len, str.split("\n")))
# ANSI color helpers: each foreground color gets a plain and a bold variant.
# All of them shadow the built-in name 'str' with their parameter, kept for
# backward compatibility with positional/keyword callers.
def black(str):
    return color(str, '30')
def dark_gray(str):
    return bold(str, '30')
def red(str):
    return color(str, '31')
def bold_red(str):
    return bold(str, '31')
def green(str):
    return color(str, '32')
# BUG FIX: this was a second 'def green' that silently shadowed the plain
# variant above; following the pattern of every other color pair, it is
# clearly meant to be bold_green.
def bold_green(str):
    return bold(str, '32')
def yellow(str):
    return color(str, '33')
def bold_yellow(str):
    return bold(str, '33')
def blue(str):
    return color(str, '34')
def bold_blue(str):
    return bold(str, '34')
def purple(str):
    return color(str, '35')
def bold_purple(str):
    return bold(str, '35')
def cyan(str):
    return color(str, '36')
def bold_cyan(str):
    return bold(str, '36')
def light_gray(str):
    return color(str, '37')
def white(str):
    return bold(str, '37')
def color(str, color, intensity='0'):
    """Wraps *str* in an ANSI escape for the given color code, unless the
    module-level mode is 'plain'."""
    if mode == 'plain':
        return str
    return '\033['+intensity+';'+color+'m'+str+'\033[0m'
def bold(str, col):
    """Like color(), but with bold intensity."""
    if mode == 'plain':
        return str
    return color(str, col, '1')
| {
"repo_name": "smazhara/awesome_print",
"path": "awesome_print/awesome_print.py",
"copies": "1",
"size": "2738",
"license": "mit",
"hash": 4212010509238425600,
"line_mean": 19.5864661654,
"line_max": 73,
"alpha_frac": 0.5178962747,
"autogenerated": false,
"ratio": 3.183720930232558,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4201617204932558,
"avg_score": null,
"num_lines": null
} |
"""Attempt to determine the current user's "system" directories"""
try:
## raise ImportError
from win32com.shell import shell, shellcon
except ImportError:
shell = None
try:
## raise ImportError
import _winreg
except ImportError:
_winreg = None
import os, sys
## The registry keys where the SHGetFolderPath values appear to be stored
r"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
r"HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
def _winreg_getShellFolder( name ):
    """Get a shell folder by string name from the registry"""
    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    try:
        # QueryValueEx returns (value, registry_type); only the value is
        # wanted. Should check that it's valid? How?
        return _winreg.QueryValueEx(key, name)[0]
    finally:
        # Close the key even if the lookup raises.
        _winreg.CloseKey(key)
def shell_getShellFolder( type ):
    """Get a shell folder by shell-constant from COM interface"""
    # NOTE: the parameter shadows the built-in 'type'; it is a CSIDL_*
    # constant from win32com.shell.shellcon.
    return shell.SHGetFolderPath(
        0,# null hwnd
        type, # the (roaming) appdata path
        0,# null access token (no impersonation)
        0 # want current value, shellcon.SHGFP_TYPE_CURRENT isn't available, this seems to work
    )
def appdatadirectory( ):
    """Attempt to retrieve the current user's app-data directory

    This is the location where application-specific files should be
    stored. On *nix systems this is the HOME directory; on Win32 it is
    the "Application Data" directory (applications normally create a
    sub-directory inside it).

    Resolution order: Win32 COM shell interface, then direct registry
    access, then the APPDATA/HOME environment variables, then ``~``
    expansion. Raises OSError when everything fails.

    XXX should os.environ['home'] override Win32 queries or vice-versa?
    """
    if shell:
        # Win32 with the win32all extensions available: best case.
        return shell_getShellFolder(shellcon.CSIDL_APPDATA)
    if _winreg:
        # Win32 without the shell COM interface: direct registry access,
        # likely to fail on Win98/Me.
        return _winreg_getShellFolder('AppData')
    # okay, what if for some reason _winreg is missing? would we want to
    # allow ctypes?
    # Default case: look for well-known names in the environment.
    for key in ('appdata', 'home'):
        if key in os.environ:
            return os.environ[key]
    # Someone's being naughty: see if ~ expands to a real directory.
    fallback = os.path.abspath(os.path.expanduser('~/'))
    if os.path.exists(fallback):
        return fallback
    raise OSError("""Unable to determine user's application-data directory""")
if __name__ == "__main__":
    # Smoke test: show the resolved directory. (Python 2 print syntax.)
    print 'AppData', appdatadirectory()
| {
"repo_name": "alexus37/AugmentedRealityChess",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGLContext/browser/homedirectory.py",
"copies": "2",
"size": "2711",
"license": "mit",
"hash": 6004529105698446000,
"line_mean": 34.6842105263,
"line_max": 95,
"alpha_frac": 0.6790852084,
"autogenerated": false,
"ratio": 3.9692532942898975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5648338502689897,
"avg_score": null,
"num_lines": null
} |
# Attempt to find faces and draw rectangles around them using openCV
# Dan Kolbman
import sys, cv2
def detect( path ):
    """Detects faces (and eyes within each face) in the image at *path*,
    draws rectangles around them and shows the result until a key is
    pressed. Returns False when grayscale conversion fails; otherwise
    returns None. (Python 2 syntax: uses the print statement.)
    """
    img = cv2.imread( path )
    # Cascade XML files are loaded from the current working directory.
    face_cascade = cv2.CascadeClassifier( 'haarcascade_frontalface_default.xml' )
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
    try:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    except cv2.error:
        # Some images have problems being converted to grayscale
        print 'Couldn\'t convert to grayscale'
        return False
    # NOTE(review): the cv2.cv namespace looks like OpenCV 2.x API —
    # confirm this still imports under the deployed OpenCV version.
    faces = face_cascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(30, 30),
        flags = cv2.cv.CV_HAAR_SCALE_IMAGE
    )
    for (x,y,w,h) in faces:
        # Draws a square around the face
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        # Search for eyes only inside the detected face region.
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray, 1.1, 5)
        for (ex,ey,ew,eh) in eyes:
            cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,0,255),2)
    print 'Found', len(faces),'faces'
    # Display the image
    cv2.imshow('img',img)
    # Wait for user to press a key
    k = cv2.waitKey()
    # Close everything
    cv2.destroyAllWindows()
################################################################################
# Script entry point. NOTE(review): there is no `if __name__ == '__main__'`
# guard, so this also runs on import.
if(len(sys.argv) < 2):
    print 'Usage: python face_detect.py <image.jpg>'
else:
    detect( sys.argv[1] )
| {
"repo_name": "dankolbman/CleverTind",
"path": "face_detect.py",
"copies": "1",
"size": "1355",
"license": "mit",
"hash": 1808044439759640000,
"line_mean": 26.6530612245,
"line_max": 80,
"alpha_frac": 0.6184501845,
"autogenerated": false,
"ratio": 2.913978494623656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4032428679123656,
"avg_score": null,
"num_lines": null
} |
"""Attempt to generate templates for module reference with Sphinx
XXX - we exclude extension modules
To include extension modules, first identify them as valid in the
``_uri2path`` method, then handle them in the ``_parse_module`` script.
We get functions and classes by parsing the text of .py files.
Alternatively we could import the modules for discovery, and we'd have
to do that for extension modules. This would involve changing the
``_parse_module`` method to work via import and introspection, and
might involve changing ``discover_modules`` (which determines which
files are modules, and therefore which module URIs will be passed to
``_parse_module``).
NOTE: this is a modified version of a script originally shipped with the
PyMVPA project, which we've adapted for NIPY use. PyMVPA is an MIT-licensed
project."""
from __future__ import print_function
# Stdlib imports
import os
import re
# Functions and classes
class ApiDocWriter(object):
''' Class for automatic detection and parsing of API docs
to Sphinx-parsable reST format'''
# only separating first two levels
rst_section_levels = ['*', '=', '-', '~', '^']
    def __init__(self,
                 package_name,
                 rst_extension='.rst',
                 package_skip_patterns=None,
                 module_skip_patterns=None,
                 ):
        ''' Initialize package for parsing

        Parameters
        ----------
        package_name : string
            Name of the top-level package. *package_name* must be the
            name of an importable package
        rst_extension : string, optional
            Extension for reST files, default '.rst'
        package_skip_patterns : None or sequence of {strings, regexps}
            Sequence of strings giving URIs of packages to be excluded
            Operates on the package path, starting at (including) the
            first dot in the package path, after *package_name* - so,
            if *package_name* is ``sphinx``, then ``sphinx.util`` will
            result in ``.util`` being passed for searching by these
            regexps. If is None, gives default. Default is:
            ``['\\.tests$']``
        module_skip_patterns : None or sequence
            Sequence of strings giving URIs of modules to be excluded
            Operates on the module name including preceding URI path,
            back to the first dot after *package_name*. For example
            ``sphinx.util.console`` results in the string to search of
            ``.util.console``
            If is None, gives default. Default is:
            ``['\\.setup$', '\\._']``
        '''
        # None sentinels instead of mutable list defaults.
        if package_skip_patterns is None:
            package_skip_patterns = ['\\.tests$']
        if module_skip_patterns is None:
            module_skip_patterns = ['\\.setup$', '\\._']
        # Goes through the package_name property, which imports the package
        # and records its root path (see set_package_name).
        self.package_name = package_name
        self.rst_extension = rst_extension
        self.package_skip_patterns = package_skip_patterns
        self.module_skip_patterns = module_skip_patterns
    def get_package_name(self):
        """Getter half of the ``package_name`` property."""
        return self._package_name
def set_package_name(self, package_name):
''' Set package_name
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> docwriter.root_path == sphinx.__path__[0]
True
>>> docwriter.package_name = 'docutils'
>>> import docutils
>>> docwriter.root_path == docutils.__path__[0]
True
'''
# It's also possible to imagine caching the module parsing here
self._package_name = package_name
self.root_module = __import__(package_name)
self.root_path = self.root_module.__path__[0]
self.written_modules = None
package_name = property(get_package_name, set_package_name, None,
'get/set package_name')
def _get_object_name(self, line):
''' Get second token in line
>>> docwriter = ApiDocWriter('sphinx')
>>> docwriter._get_object_name(" def func(): ")
'func'
>>> docwriter._get_object_name(" class Klass(object): ")
'Klass'
>>> docwriter._get_object_name(" class Klass: ")
'Klass'
'''
name = line.split()[1].split('(')[0].strip()
# in case we have classes which are not derived from object
# ie. old style classes
return name.rstrip(':')
def _uri2path(self, uri):
''' Convert uri to absolute filepath
Parameters
----------
uri : string
URI of python module to return path for
Returns
-------
path : None or string
Returns None if there is no valid path for this URI
Otherwise returns absolute file system path for URI
Examples
--------
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> modpath = sphinx.__path__[0]
>>> res = docwriter._uri2path('sphinx.builder')
>>> res == os.path.join(modpath, 'builder.py')
True
>>> res = docwriter._uri2path('sphinx')
>>> res == os.path.join(modpath, '__init__.py')
True
>>> docwriter._uri2path('sphinx.does_not_exist')
'''
if uri == self.package_name:
return os.path.join(self.root_path, '__init__.py')
path = uri.replace('.', os.path.sep)
path = path.replace(self.package_name + os.path.sep, '')
path = os.path.join(self.root_path, path)
# XXX maybe check for extensions as well?
if os.path.exists(path + '.py'): # file
path += '.py'
elif os.path.exists(os.path.join(path, '__init__.py')):
path = os.path.join(path, '__init__.py')
else:
return None
return path
def _path2uri(self, dirpath):
''' Convert directory path to uri '''
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
return relpath.replace(os.path.sep, '.')
def _parse_module(self, uri):
''' Parse module defined in *uri* '''
filename = self._uri2path(uri)
if filename is None:
# nothing that we could handle here.
return ([],[])
f = open(filename, 'rt')
functions, classes = self._parse_lines(f)
f.close()
return functions, classes
def _parse_lines(self, linesource):
''' Parse lines of text for functions and classes '''
functions = []
classes = []
for line in linesource:
if line.startswith('def ') and line.count('('):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_'):
functions.append(name)
elif line.startswith('class '):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_'):
classes.append(name)
else:
pass
functions.sort()
classes.sort()
return functions, classes
def generate_api_doc(self, uri):
'''Make autodoc documentation template string for a module
Parameters
----------
uri : string
python location of module - e.g 'sphinx.builder'
Returns
-------
S : string
Contents of API doc
'''
# get the names of all classes and functions
functions, classes = self._parse_module(uri)
if not len(functions) and not len(classes):
print('WARNING: Empty -',uri) # dbg
return ''
# Make a shorter version of the uri that omits the package name for
# titles
uri_short = re.sub(r'^%s\.' % self.package_name,'',uri)
ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
chap_title = uri_short
ad += (chap_title+'\n'+ self.rst_section_levels[1] * len(chap_title)
+ '\n\n')
# Set the chapter title to read 'module' for all modules except for the
# main packages
if '.' in uri:
title = 'Module: :mod:`' + uri_short + '`'
else:
title = ':mod:`' + uri_short + '`'
ad += title + '\n' + self.rst_section_levels[2] * len(title)
if len(classes):
ad += '\nInheritance diagram for ``%s``:\n\n' % uri
ad += '.. inheritance-diagram:: %s \n' % uri
ad += ' :parts: 3\n'
ad += '\n.. automodule:: ' + uri + '\n'
ad += '\n.. currentmodule:: ' + uri + '\n'
multi_class = len(classes) > 1
multi_fx = len(functions) > 1
if multi_class:
ad += '\n' + 'Classes' + '\n' + \
self.rst_section_levels[2] * 7 + '\n'
elif len(classes) and multi_fx:
ad += '\n' + 'Class' + '\n' + \
self.rst_section_levels[2] * 5 + '\n'
for c in classes:
ad += '\n:class:`' + c + '`\n' \
+ self.rst_section_levels[multi_class + 2 ] * \
(len(c)+9) + '\n\n'
ad += '\n.. autoclass:: ' + c + '\n'
# must NOT exclude from index to keep cross-refs working
ad += ' :members:\n' \
' :undoc-members:\n' \
' :show-inheritance:\n' \
' :inherited-members:\n' \
'\n' \
' .. automethod:: __init__\n'
if multi_fx:
ad += '\n' + 'Functions' + '\n' + \
self.rst_section_levels[2] * 9 + '\n\n'
elif len(functions) and multi_class:
ad += '\n' + 'Function' + '\n' + \
self.rst_section_levels[2] * 8 + '\n\n'
for f in functions:
# must NOT exclude from index to keep cross-refs working
ad += '\n.. autofunction:: ' + uri + '.' + f + '\n\n'
return ad
def _survives_exclude(self, matchstr, match_type):
''' Returns True if *matchstr* does not match patterns
``self.package_name`` removed from front of string if present
Examples
--------
>>> dw = ApiDocWriter('sphinx')
>>> dw._survives_exclude('sphinx.okpkg', 'package')
True
>>> dw.package_skip_patterns.append('^\\.badpkg$')
>>> dw._survives_exclude('sphinx.badpkg', 'package')
False
>>> dw._survives_exclude('sphinx.badpkg', 'module')
True
>>> dw._survives_exclude('sphinx.badmod', 'module')
True
>>> dw.module_skip_patterns.append('^\\.badmod$')
>>> dw._survives_exclude('sphinx.badmod', 'module')
False
'''
if match_type == 'module':
patterns = self.module_skip_patterns
elif match_type == 'package':
patterns = self.package_skip_patterns
else:
raise ValueError('Cannot interpret match type "%s"'
% match_type)
# Match to URI without package name
L = len(self.package_name)
if matchstr[:L] == self.package_name:
matchstr = matchstr[L:]
for pat in patterns:
try:
pat.search
except AttributeError:
pat = re.compile(pat)
if pat.search(matchstr):
return False
return True
def discover_modules(self):
''' Return module sequence discovered from ``self.package_name``
Parameters
----------
None
Returns
-------
mods : sequence
Sequence of module names within ``self.package_name``
Examples
--------
>>> dw = ApiDocWriter('sphinx')
>>> mods = dw.discover_modules()
>>> 'sphinx.util' in mods
True
>>> dw.package_skip_patterns.append('\.util$')
>>> 'sphinx.util' in dw.discover_modules()
False
>>>
'''
modules = [self.package_name]
# raw directory parsing
for dirpath, dirnames, filenames in os.walk(self.root_path):
# Check directory names for packages
root_uri = self._path2uri(os.path.join(self.root_path,
dirpath))
for dirname in dirnames[:]: # copy list - we modify inplace
package_uri = '.'.join((root_uri, dirname))
if (self._uri2path(package_uri) and
self._survives_exclude(package_uri, 'package')):
modules.append(package_uri)
else:
dirnames.remove(dirname)
# Check filenames for modules
for filename in filenames:
module_name = filename[:-3]
module_uri = '.'.join((root_uri, module_name))
if (self._uri2path(module_uri) and
self._survives_exclude(module_uri, 'module')):
modules.append(module_uri)
return sorted(modules)
def write_modules_api(self, modules,outdir):
# write the list
written_modules = []
for m in modules:
api_str = self.generate_api_doc(m)
if not api_str:
continue
# write out to file
outfile = os.path.join(outdir,
m + self.rst_extension)
fileobj = open(outfile, 'wt')
fileobj.write(api_str)
fileobj.close()
written_modules.append(m)
self.written_modules = written_modules
def write_api_docs(self, outdir):
"""Generate API reST files.
Parameters
----------
outdir : string
Directory name in which to store files
We create automatic filenames for each module
Returns
-------
None
Notes
-----
Sets self.written_modules to list of written modules
"""
if not os.path.exists(outdir):
os.mkdir(outdir)
# compose list of modules
modules = self.discover_modules()
self.write_modules_api(modules,outdir)
def write_index(self, outdir, froot='gen', relative_to=None):
"""Make a reST API index file from written files
Parameters
----------
path : string
Filename to write index to
outdir : string
Directory to which to write generated index file
froot : string, optional
root (filename without extension) of filename to write to
Defaults to 'gen'. We add ``self.rst_extension``.
relative_to : string
path to which written filenames are relative. This
component of the written file path will be removed from
outdir, in the generated index. Default is None, meaning,
leave path as it is.
"""
if self.written_modules is None:
raise ValueError('No modules written')
# Get full filename path
path = os.path.join(outdir, froot+self.rst_extension)
# Path written into index is relative to rootpath
if relative_to is not None:
relpath = outdir.replace(relative_to + os.path.sep, '')
else:
relpath = outdir
idx = open(path,'wt')
w = idx.write
w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
w('.. toctree::\n\n')
for f in self.written_modules:
w(' %s\n' % os.path.join(relpath,f))
idx.close()
| {
"repo_name": "e-koch/pyspeckit",
"path": "docs/sphinxext/apigen.py",
"copies": "8",
"size": "15658",
"license": "mit",
"hash": -3882542394503865300,
"line_mean": 35.5841121495,
"line_max": 79,
"alpha_frac": 0.5342955678,
"autogenerated": false,
"ratio": 4.292214912280702,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8826510480080703,
"avg_score": null,
"num_lines": null
} |
"""Attempt to generate templates for module reference with Sphinx
XXX - we exclude extension modules
To include extension modules, first identify them as valid in the
``_uri2path`` method, then handle them in the ``_parse_module`` script.
We get functions and classes by parsing the text of .py files.
Alternatively we could import the modules for discovery, and we'd have
to do that for extension modules. This would involve changing the
``_parse_module`` method to work via import and introspection, and
might involve changing ``discover_modules`` (which determines which
files are modules, and therefore which module URIs will be passed to
``_parse_module``).
NOTE: this is a modified version of a script originally shipped with the
PyMVPA project, which we've adapted for NIPY use. PyMVPA is an MIT-licensed
project."""
from __future__ import print_function
#from builtins import object
# Stdlib imports
import inspect
import os
import re
import sys
import tempfile
import warnings
from nipype.interfaces.base import BaseInterface
from nipype.pipeline.engine import Workflow
from nipype.utils.misc import trim
#from github import get_file_url
def get_file_url(arg):
    """Stub standing in for the commented-out ``github.get_file_url``.

    The real helper would map *arg* (an interface class or function) to the
    URL of its source file; this placeholder always returns None, so the
    'Link to code' entries rendered from it carry no target.
    """
    return None
# Functions and classes
class InterfaceHelpWriter(object):
    ''' Class for automatic detection and parsing of API docs
    to Sphinx-parsable reST format'''
    # only separating first two levels
    rst_section_levels = ['*', '=', '-', '~', '^']
    def __init__(self,
                 package_name,
                 rst_extension='.rst',
                 package_skip_patterns=None,
                 module_skip_patterns=None,
                 class_skip_patterns=None
                 ):
        ''' Initialize package for parsing
        Parameters
        ----------
        package_name : string
            Name of the top-level package. *package_name* must be the
            name of an importable package
        rst_extension : string, optional
            Extension for reST files, default '.rst'
        package_skip_patterns : None or sequence of {strings, regexps}
            Sequence of strings giving URIs of packages to be excluded
            Operates on the package path, starting at (including) the
            first dot in the package path, after *package_name* - so,
            if *package_name* is ``sphinx``, then ``sphinx.util`` will
            result in ``.util`` being passed for searching by these
            regexps. If is None, gives default. Default is:
            ['\.tests$']
        module_skip_patterns : None or sequence
            Sequence of strings giving URIs of modules to be excluded
            Operates on the module name including preceding URI path,
            back to the first dot after *package_name*. For example
            ``sphinx.util.console`` results in the string to search of
            ``.util.console``
            If is None, gives default. Default is:
            ['\.setup$', '\._']
        class_skip_patterns : None or sequence
            Sequence of strings giving classes to be excluded
            Default is: None
        '''
        if package_skip_patterns is None:
            package_skip_patterns = ['\\.tests$']
        if module_skip_patterns is None:
            module_skip_patterns = ['\\.setup$', '\\._']
        # NOTE(review): truthiness test, so an explicitly passed empty
        # sequence is also replaced by [] -- harmless but subtle.
        if class_skip_patterns:
            self.class_skip_patterns = class_skip_patterns
        else:
            self.class_skip_patterns = []
        # Assigning package_name triggers the property setter below, which
        # imports the package and records its filesystem root.
        self.package_name = package_name
        self.rst_extension = rst_extension
        self.package_skip_patterns = package_skip_patterns
        self.module_skip_patterns = module_skip_patterns
    def get_package_name(self):
        # Accessor backing the ``package_name`` property defined below.
        return self._package_name
    def set_package_name(self, package_name):
        ''' Set package_name
        >>> docwriter = ApiDocWriter('sphinx')
        >>> import sphinx
        >>> docwriter.root_path == sphinx.__path__[0]
        True
        >>> docwriter.package_name = 'docutils'
        >>> import docutils
        >>> docwriter.root_path == docutils.__path__[0]
        True
        '''
        # NOTE(review): the doctests above still reference ApiDocWriter,
        # the class this writer was adapted from.
        # It's also possible to imagine caching the module parsing here
        self._package_name = package_name
        self.root_module = __import__(package_name)
        self.root_path = self.root_module.__path__[0]
        self.written_modules = None
    package_name = property(get_package_name, set_package_name, None,
                            'get/set package_name')
    def _get_object_name(self, line):
        ''' Get second token in line
        >>> docwriter = ApiDocWriter('sphinx')
        >>> docwriter._get_object_name(" def func(): ")
        'func'
        >>> docwriter._get_object_name(" class Klass(object): ")
        'Klass'
        >>> docwriter._get_object_name(" class Klass: ")
        'Klass'
        '''
        # NOTE(review): assumes the line has at least two tokens; a bare
        # 'def ' line would raise IndexError.
        name = line.split()[1].split('(')[0].strip()
        # in case we have classes which are not derived from object
        # ie. old style classes
        return name.rstrip(':')
    def _uri2path(self, uri):
        ''' Convert uri to absolute filepath
        Parameters
        ----------
        uri : string
            URI of python module to return path for
        Returns
        -------
        path : None or string
            Returns None if there is no valid path for this URI
            Otherwise returns absolute file system path for URI
        Examples
        --------
        >>> docwriter = ApiDocWriter('sphinx')
        >>> import sphinx
        >>> modpath = sphinx.__path__[0]
        >>> res = docwriter._uri2path('sphinx.builder')
        >>> res == os.path.join(modpath, 'builder.py')
        True
        >>> res = docwriter._uri2path('sphinx')
        >>> res == os.path.join(modpath, '__init__.py')
        True
        >>> docwriter._uri2path('sphinx.does_not_exist')
        '''
        if uri == self.package_name:
            return os.path.join(self.root_path, '__init__.py')
        path = uri.replace('.', os.path.sep)
        path = path.replace(self.package_name + os.path.sep, '')
        path = os.path.join(self.root_path, path)
        # XXX maybe check for extensions as well?
        if os.path.exists(path + '.py'): # file
            path += '.py'
        elif os.path.exists(os.path.join(path, '__init__.py')):
            path = os.path.join(path, '__init__.py')
        else:
            return None
        return path
    def _path2uri(self, dirpath):
        ''' Convert directory path to uri '''
        # Swap the filesystem root for the package name, then turn path
        # separators into dots to obtain the dotted module URI.
        relpath = dirpath.replace(self.root_path, self.package_name)
        if relpath.startswith(os.path.sep):
            relpath = relpath[1:]
        return relpath.replace(os.path.sep, '.')
    def _parse_module(self, uri):
        ''' Parse module defined in *uri* '''
        filename = self._uri2path(uri)
        if filename is None:
            # nothing that we could handle here.
            return ([], [])
        # NOTE(review): opened with the platform default encoding and
        # closed manually rather than via a context manager.
        f = open(filename, 'rt')
        functions, classes = self._parse_lines(f, uri)
        f.close()
        return functions, classes
    def _parse_lines(self, linesource, module):
        ''' Parse lines of text for functions and classes '''
        # Purely textual scan: only column-0 'def'/'class' statements are
        # detected. Unlike ApiDocWriter, classes are additionally filtered
        # through the class skip patterns via _survives_exclude.
        functions = []
        classes = []
        for line in linesource:
            if line.startswith('def ') and line.count('('):
                # exclude private stuff
                name = self._get_object_name(line)
                if not name.startswith('_'):
                    functions.append(name)
            elif line.startswith('class '):
                # exclude private stuff
                name = self._get_object_name(line)
                if not name.startswith('_') and \
                        self._survives_exclude('.'.join((module, name)),
                                               'class'):
                    classes.append(name)
            else:
                pass
        functions.sort()
        classes.sort()
        return functions, classes
    def _write_graph_section(self, fname, title):
        # Inline the graphviz dot file *fname* as a '.. graphviz::' section,
        # then clean up the dot file and its rendered companion image.
        ad = '\n%s\n%s\n\n' % (title, self.rst_section_levels[3] * len(title))
        ad += '.. graphviz::\n\n'
        fhandle = open(fname)
        for line in fhandle:
            ad += '\t' + line + '\n'
        fhandle.close()
        os.remove(fname)
        # NOTE(review): assumes write_graph also produced '<fname>.png';
        # if not, this raises OSError.
        os.remove(fname + ".png")
        return ad
    def generate_api_doc(self, uri):
        '''Make autodoc documentation template string for a module
        Parameters
        ----------
        uri : string
            python location of module - e.g 'sphinx.builder'
        Returns
        -------
        S : string
            Contents of API doc
        '''
        # get the names of all classes and functions
        functions, classes = self._parse_module(uri)
        # edit only Classes documentation
        # NOTE(review): this rebinding discards the parsed function names,
        # so the loop below never executes and workflows/helper_functions
        # always stay empty -- only classes get documented. Confirm whether
        # that is still the intent.
        functions = []
        workflows = []
        helper_functions = []
        for function in functions:
            try:
                __import__(uri)
                finst = sys.modules[uri].__dict__[function]
            except TypeError:
                continue
            try:
                # Calling the function tells workflow factories apart from
                # plain helpers: factories return a Workflow instance.
                workflow = finst()
            except Exception:
                helper_functions.append((function, finst))
                continue
            if isinstance(workflow, Workflow):
                workflows.append((workflow, function, finst))
        if not classes and not workflows and not helper_functions:
            print('WARNING: Empty -', uri) # dbg
            return ''
        # Make a shorter version of the uri that omits the package name for
        # titles
        uri_short = re.sub(r'^%s\.' % self.package_name, '', uri)
        # uri_short = uri
        ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
        chap_title = uri_short
        ad += (chap_title + '\n' +
               self.rst_section_levels[1] * len(chap_title) + '\n\n')
        # Set the chapter title to read 'module' for all modules except for the
        # main packages
        # if '.' in uri:
        # title = 'Module: :mod:`' + uri_short + '`'
        # else:
        # title = ':mod:`' + uri_short + '`'
        # ad += title + '\n' + self.rst_section_levels[2] * len(title)
        # ad += '\n' + 'Classes' + '\n' + \
        # self.rst_section_levels[2] * 7 + '\n'
        for c in classes:
            __import__(uri)
            print(c)
            try:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    classinst = sys.modules[uri].__dict__[c]
            except Exception as inst:
                print(inst)
                continue
            # Only document nipype interfaces; skip everything else.
            if not issubclass(classinst, BaseInterface):
                continue
            label = uri + '.' + c + ':'
            ad += '\n.. _%s\n\n' % label
            ad += '\n.. index:: %s\n\n' % c
            ad += c + '\n' + self.rst_section_levels[2] * len(c) + '\n\n'
            ad += "`Link to code <%s>`__\n\n" % get_file_url(classinst)
            # The interface's own help text supplies the body.
            ad += trim(classinst.help(returnhelp=True),
                       self.rst_section_levels[3]) + '\n'
        if workflows or helper_functions:
            ad += '\n.. module:: %s\n\n' % uri
        for workflow, name, finst in workflows:
            label = ':func:`' + name + '`'
            ad += '\n.. _%s:\n\n' % (uri + '.' + name)
            ad += '\n'.join((label, self.rst_section_levels[2] * len(label)))
            ad += "\n\n`Link to code <%s>`__\n\n" % get_file_url(finst)
            helpstr = trim(finst.__doc__, self.rst_section_levels[3])
            ad += '\n\n' + helpstr + '\n\n'
            """
            # use sphinx autodoc for function signature
            ad += '\n.. _%s:\n\n' % (uri + '.' + name)
            ad += '.. autofunction:: %s\n\n' % name
            """
            (_, fname) = tempfile.mkstemp(suffix=".dot")
            workflow.write_graph(dotfilename=fname, graph2use='hierarchical')
            ad += self._write_graph_section(fname, 'Graph') + '\n'
        for name, finst in helper_functions:
            label = ':func:`' + name + '`'
            ad += '\n.. _%s:\n\n' % (uri + '.' + name)
            ad += '\n'.join((label, self.rst_section_levels[2] * len(label)))
            ad += "\n\n`Link to code <%s>`__\n\n" % get_file_url(finst)
            helpstr = trim(finst.__doc__, self.rst_section_levels[3])
            ad += '\n\n' + helpstr + '\n\n'
        return ad
    def _survives_exclude(self, matchstr, match_type):
        ''' Returns True if *matchstr* does not match patterns
        ``self.package_name`` removed from front of string if present
        Examples
        --------
        >>> dw = ApiDocWriter('sphinx')
        >>> dw._survives_exclude('sphinx.okpkg', 'package')
        True
        >>> dw.package_skip_patterns.append('^\\.badpkg$')
        >>> dw._survives_exclude('sphinx.badpkg', 'package')
        False
        >>> dw._survives_exclude('sphinx.badpkg', 'module')
        True
        >>> dw._survives_exclude('sphinx.badmod', 'module')
        True
        >>> dw.module_skip_patterns.append('^\\.badmod$')
        >>> dw._survives_exclude('sphinx.badmod', 'module')
        False
        '''
        if match_type == 'module':
            patterns = self.module_skip_patterns
        elif match_type == 'package':
            patterns = self.package_skip_patterns
        elif match_type == 'class':
            patterns = self.class_skip_patterns
        else:
            raise ValueError('Cannot interpret match type "%s"'
                             % match_type)
        # Match to URI without package name
        L = len(self.package_name)
        if matchstr[:L] == self.package_name:
            matchstr = matchstr[L:]
        for pat in patterns:
            try:
                # Patterns may be pre-compiled regexps or plain strings;
                # strings are compiled on demand here.
                pat.search
            except AttributeError:
                pat = re.compile(pat)
            if pat.search(matchstr):
                return False
        return True
    def discover_modules(self):
        ''' Return module sequence discovered from ``self.package_name``
        Parameters
        ----------
        None
        Returns
        -------
        mods : sequence
            Sequence of module names within ``self.package_name``
        Examples
        --------
        >>> dw = ApiDocWriter('sphinx')
        >>> mods = dw.discover_modules()
        >>> 'sphinx.util' in mods
        True
        >>> dw.package_skip_patterns.append('\.util$')
        >>> 'sphinx.util' in dw.discover_modules()
        False
        >>>
        '''
        modules = [self.package_name]
        # raw directory parsing
        for dirpath, dirnames, filenames in os.walk(self.root_path):
            # Check directory names for packages
            root_uri = self._path2uri(os.path.join(self.root_path,
                                                   dirpath))
            for dirname in dirnames[:]: # copy list - we modify inplace
                package_uri = '.'.join((root_uri, dirname))
                if (self._uri2path(package_uri) and
                    self._survives_exclude(package_uri, 'package')):
                    modules.append(package_uri)
                else:
                    dirnames.remove(dirname)
            # Check filenames for modules
            # NOTE(review): filename[:-3] assumes a '.py' suffix; other
            # files whose stem matches an existing module (e.g. 'mod.pyc')
            # can add duplicate URIs, and sorted() below keeps duplicates.
            for filename in filenames:
                module_name = filename[:-3]
                module_uri = '.'.join((root_uri, module_name))
                if (self._uri2path(module_uri) and
                    self._survives_exclude(module_uri, 'module')):
                    modules.append(module_uri)
        return sorted(modules)
    def write_modules_api(self, modules, outdir):
        # write the list
        written_modules = []
        for m in modules:
            api_str = self.generate_api_doc(m)
            if not api_str:
                # Module produced no documentation; skip it entirely.
                continue
            # NOTE(review): str.replace returns a new string -- the result
            # is discarded here, so this call (and the debug print below)
            # has no effect on the output. Likely a bug.
            api_str.replace('nipype', '')
            print('nipype' in api_str)
            # write out to file
            outfile = os.path.join(outdir,
                                   m + self.rst_extension)
            fileobj = open(outfile, 'wt')
            fileobj.write(api_str)
            fileobj.close()
            written_modules.append(m)
        self.written_modules = written_modules
    def write_api_docs(self, outdir):
        """Generate API reST files.
        Parameters
        ----------
        outdir : string
            Directory name in which to store files
            We create automatic filenames for each module
        Returns
        -------
        None
        Notes
        -----
        Sets self.written_modules to list of written modules
        """
        if not os.path.exists(outdir):
            os.mkdir(outdir)
        # compose list of modules
        modules = self.discover_modules()
        self.write_modules_api(modules, outdir)
    def write_index(self, outdir, froot='gen', relative_to=None):
        """Make a reST API index file from written files
        Parameters
        ----------
        path : string
            Filename to write index to
        outdir : string
            Directory to which to write generated index file
        froot : string, optional
            root (filename without extension) of filename to write to
            Defaults to 'gen'. We add ``self.rst_extension``.
        relative_to : string
            path to which written filenames are relative. This
            component of the written file path will be removed from
            outdir, in the generated index. Default is None, meaning,
            leave path as it is.
        """
        if self.written_modules is None:
            raise ValueError('No modules written')
        # Get full filename path
        path = os.path.join(outdir, froot + self.rst_extension)
        # Path written into index is relative to rootpath
        if relative_to is not None:
            # NOTE(review): plain string replace; only meaningful when
            # *outdir* lives underneath *relative_to*.
            relpath = outdir.replace(relative_to + os.path.sep, '')
        else:
            relpath = outdir
        idx = open(path, 'wt')
        w = idx.write
        w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
        w('.. toctree::\n')
        w(' :maxdepth: 2\n\n')
        for f in self.written_modules:
            w(' %s\n' % os.path.join(relpath, f))
        idx.close()
"repo_name": "process-asl/process-asl",
"path": "tools/interfacedocgen.py",
"copies": "2",
"size": "18245",
"license": "bsd-3-clause",
"hash": 8976197156278363000,
"line_mean": 36.3893442623,
"line_max": 79,
"alpha_frac": 0.5348862702,
"autogenerated": false,
"ratio": 4.255889899696758,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005770029650050651,
"num_lines": 488
} |
""" Attempt to implement a reactive system in Python. With that I mean
a system in which signals are bound implicitly, as in Shiny.
The Signal is the core element here. It is like the Property in
HasProps. A function can easily be turned into a Signal object by
decorating it.
Other than Properties, signals have a function associated with them.
that compute stuff (maybe rename signal to behavior). They also cache their
value.
"""
import re
import sys
import time
import inspect
class Signal:
    """Cache the result of a wrapped function and track invalidation.

    Calling the instance evaluates the wrapped callable only when the
    cached value has been invalidated via ``set_dirty``; otherwise the
    cached result is returned as-is (note: new call arguments are ignored
    while the cache is clean). Downstream signals registered in
    ``_dependers`` are invalidated and eagerly re-evaluated whenever this
    one is marked dirty.
    """
    def __init__(self, fun):
        # Wrapped callable plus cache state; subclasses rely on these names.
        self._fun = fun
        self._value = None
        self._dirty = True
        self._dependers = []
    def __call__(self, *args):
        # Lazy evaluation: recompute only after an invalidation.
        if not self._dirty:
            return self._value
        self._value = self._fun(*args)
        self._dirty = False
        return self._value
    def set_dirty(self):
        # Invalidate our cache, then propagate downstream: each dependent
        # is marked dirty and immediately re-evaluated.
        self._dirty = True
        for downstream in self._dependers:
            downstream.set_dirty()
            downstream()
class Input(Signal):
    """A source signal whose value is assigned directly via ``set``.

    There is no real computation: the wrapped callable is a do-nothing
    placeholder that yields None.
    """
    def __init__(self):
        super().__init__(lambda x=None: None)
    def set(self, value):
        # Store the new value, then push the invalidation downstream.
        # NOTE: set_dirty also marks *this* signal dirty, so a subsequent
        # direct call recomputes via the placeholder and returns None.
        self._value = value
        self.set_dirty()
class Output(Signal):
    # Placeholder for a dedicated output-signal type; currently identical
    # to a plain Signal.
    pass # I don't think that I need this?
def check_deps(fun, locals, globals):
    """ Analyse the source code of fun to get the signals that the reactive
    function depends on. It then registers the function at these signals.

    *locals* and *globals* are the caller's namespaces, used to resolve
    the object names that appear in the source text.
    """
    # Get source of function and find uses of inputs
    # todo: use AST parsing instead
    # NOTE(review): purely textual matching -- only literal
    # ``name.get_signal('sig')`` spellings are detected; aliases or
    # computed signal names are missed.
    s = inspect.getsource(fun._fun)
    matches = re.findall(r'([a-zA-Z0-9_.]+?)\.get_signal\([\'\"](\w+?)[\'\"]\)', s)
    fun._nmatches = 0
    # print('found %i deps on %r' % (len(matches), fun))
    # For each used input, try to retrieve the actual object
    for match in matches:
        # Resolve the dotted object name in the caller's scope, locals first.
        ob = locals.get(match[0], globals.get(match[0], None))
        if ob is None:
            print('could not locate dependency %r' % match[0])
        else:
            ob._bind_signal(match[1], fun)
            fun._nmatches += 1
            # print('bound signal for', ob)
    #dep = getattr(ob, 'input_'+match[1])
    #print('found dep ', dep)
    # Detect outputs
    #matches = re.findall(r'([a-zA-Z0-9_.]+?)\.set_output\([\'\"](\w+?)[\'\"]\,', s)
    # Detect calls
    if fun._nmatches:
        # Any Signal that the function calls directly also counts as an
        # upstream dependency; register for eager re-evaluation.
        matches = re.findall(r'([a-zA-Z0-9_.]+?)\(', s)
        for match in matches:
            ob = locals.get(match[0], globals.get(match[0], None))
            if isinstance(ob, Signal):
                ob._dependers.append(fun)
    # # For each used input, try to retrieve the actual object
    # for match in matches:
    # ob = locals.get(match[0], globals.get(match[0], None))
    # if ob is None:
    # print('could not locate dependency %r' % match[0])
    # else:
    # ob._bind_signal(match[1], fun2)
    # NOTE(review): ``matches`` here is whichever findall ran last, so the
    # stored count's meaning depends on the branch taken above.
    fun._deps_checked = len(matches)
def react(fun):
    """ decorator

    Wrap *fun* in a Signal and register it with every signal it appears
    to use, detected by scanning its source against the caller's frame
    (see ``check_deps``). If no dependency is found, the original
    function is returned unchanged so it can be retried later (e.g. as a
    bound method in ``Reactive.__init__``).
    """
    # fun should be called, when any of its deps gets called
    # can I get that info?
    # Probably with sys.settrace(), but that is CPython specific.
    # Evaluating source code via inspect?
    # Evaluating the AST?
    # -> We can detect "input.slider" or something, but can we detect
    # what object its called on? What is "w"?
    # Note: only works on Python implementations that have a stack
    _frame = sys._getframe(1)
    # todo: from trellis:
    # if isinstance(rule, types.FunctionType): # only pick up name if a function
    # if frame.f_locals.get(rule.__name__) is rule: # and locally-defined!
    # name = name or rule.__name__
    if not isinstance(fun, Signal):
        fun = Signal(fun)
    # Resolve dependencies against the caller's local/global namespaces.
    check_deps(fun, _frame.f_locals, _frame.f_globals)
    if fun._nmatches:
        return fun
    else:
        return fun._fun # return original, it's probably a method that we should try later
class Reactive:
    """ Base class for classes that can have signals and reactive
    methods.

    Subclasses declare their signals as (name, initial_value) pairs in
    the ``SIGNALS`` class attribute. Reactive methods (decorated with
    ``react``) get their dependency detection retried per instance at
    construction time, once ``self`` is resolvable.
    """
    # (name, initial value) pairs copied into self._signals per instance.
    SIGNALS = []

    def __init__(self):
        # name -> current value of each declared signal
        self._signals = {}
        # name -> list of downstream functions/Signals to refresh on change
        self._downstream = {}
        for name, val in self.SIGNALS:
            self._signals[name] = val
        # ``react`` on an unbound method at class-creation time could not
        # resolve ``self``; methods it marked with ``_deps_checked`` are
        # retried here on the bound method.
        for name in dir(self.__class__):
            cls_ob = getattr(self.__class__, name)
            if hasattr(cls_ob, '_deps_checked'):
                fun = getattr(self, name)
                # print('re-trying reactive')
                react(fun)

    def _emit_signal(self, name, value):
        """Store *value* for signal *name* and eagerly refresh dependents."""
        self._signals[name] = value
        for f in self._downstream.get(name, ()):
            f.set_dirty()
            f()

    def _bind_signal(self, name, fun):
        """Register *fun* to be refreshed when signal *name* changes."""
        funcs = self._downstream.setdefault(name, [])
        if fun not in funcs:
            funcs.append(fun)

    def bind_signals(self, *args):
        """Explicitly bind a function to one or more named signals.

        Usable either directly, ``obj.bind_signals('name', fun)``, or as
        a decorator factory, ``@obj.bind_signals('name')``. The function
        is wrapped in a ``Signal`` if it is not one already, and the
        wrapper is returned.
        """
        # Alternative: explicit binding
        def bind(fun):
            if not isinstance(fun, Signal):
                fun = Signal(fun)
            for name in names:
                funcs = self._downstream.setdefault(name, [])
                if fun not in funcs:
                    funcs.append(fun)
            return fun
        fun = None
        names = []
        for arg in args:
            if callable(arg):
                fun = arg
            else:
                names.append(arg)
        print('binding ', names)
        if fun is None:
            return bind
        else:
            # BUG FIX: previously called the builtin ``bin()`` on the
            # function (a TypeError at runtime); the intent is to apply
            # the local ``bind`` closure defined above.
            return bind(fun)

    def get_signal(self, name):
        """Return the current value of signal *name*.

        The disabled block below is an abandoned experiment in detecting
        the calling function via stack inspection.
        """
        # i = inspect.getframeinfo(inspect.currentframe())
        if False:
            s = inspect.stack()
            caller = s[1]
            print(caller[0].f_locals.keys())
            print(caller[0].f_globals.keys())
            id = caller[1], caller[2], caller[3]
            # if 'self' in f_locals:
            #     fun = f_locals()
            self.caller = caller
            self.caller2 = sys._getframe(1)
            fun = caller[0].f_globals[id[-1]]
            print(id, fun)
            self._bind_signal(name, fun)
        return self._signals[name]

    def set_output(self, name, value):
        """Dispatch *value* to this instance's ``on_<name>`` handler."""
        # def xx(fun):
        #     def yy():
        #         value = fun()
        #         f = getattr(self, 'on_' + name)
        #         f(value)
        #     fun()
        #     return yy
        # return xx
        f = getattr(self, 'on_' + name)
        f(value)
print('-----')
class Widget(Reactive):
    # Two slider source signals, both initially 0.
    SIGNALS = [('slider1', 0), ('slider2', 0)]
    def manual_slider1(self, v):
        """ Simulate changing a slider value.
        """
        # if this is called, outputs should be shown
        # todo: also store latest value
        self._emit_signal('slider1', v)
    def manual_slider2(self, v):
        # Same as manual_slider1, for the second slider.
        self._emit_signal('slider2', v)
    def on_show(self, val):
        # Handler invoked via set_output('show', ...).
        print('hooray!', val)
class Widget1(Widget):
    @react
    def bla1(self):
        # NOTE(review): at class-creation time ``react`` cannot resolve
        # ``self`` in the defining scope; Reactive.__init__ retries the
        # binding on the bound method.
        x = self.get_signal('slider1') * 3
        self.set_output('show', x)
# Demo 1: implicit dependency detection. ``react`` scans each function's
# source text for signal accesses on w and wires the dependencies up
# automatically.
w = Widget1()
@react
def something_that_takes_long():
    # when slider1 changes, it should invoke bla! (and other inputs/signals that depend on it
    print('this may take a while')
    time.sleep(1)
    return w.get_signal('slider1') * 2
@react
def bla():
    # to an output.
    x = something_that_takes_long()
    x += w.get_signal('slider2')
    w.set_output('show', x - 1)
    # todo: set_output, or return and connect somehow?
print('-----')
class Widget2(Widget):
    # Variant intended for explicit binding (see the bind_signals demo
    # below) instead of ``react``-based source scanning.
    def on_slider1_change(self): # but can only bind to single change
        x = self.get_signal('slider1') * 3
        self.set_output('show', x)
    # # maybe this? but then slider1 of what?
    # @binding('slider1', 'slider2')
    # def xxx(self):
    # pass
# Demo 2: explicit binding via ``bind_signals`` instead of source scanning.
# NOTE(review): these redefine the demo-1 names; the earlier Signal
# objects remain registered inside w's machinery.
w2 = Widget2()
@w2.bind_signals('slider1')
def something_that_takes_long():
    # when slider1 changes, it should invoke bla! (and other inputs/signals that depend on it
    print('this may take a while')
    time.sleep(1)
    return w2.get_signal('slider1') * 2
#@some_widget.bind_signals('foo')
#@some_other_widget.bind_signals('bar')
@w2.bind_signals('slider2')
# todo: w2.bind_signals('slider2', something_that_takes_long)
def bla():
    # to an output.
    x = something_that_takes_long()
    x += w2.get_signal('slider2')
    w2.set_output('show', x - 1)
    # todo: set_output, or return and connect somehow?
print('-----')
class Temp2(Reactive):
    """ Simple example of object that has signals for temperature in both
    Celsius and Fahrenheit. Changing either automatically changes the other.
    """
    # Initial values chosen so both scales agree: 0 C == 32 F.
    SIGNALS = [('C', 0), ('F', 32)]
# Demo 3: two-way coupling between the Celsius and Fahrenheit signals.
t2 = Temp2()
# HACK: expose the instance as a module-level ``self`` so the closures
# below read it as if they were methods.
self = t2
@t2.bind_signals('F')
def _c2f():
    # Writes into _signals directly instead of _emit_signal -- presumably
    # to avoid re-triggering the other converter in a loop; TODO confirm.
    self._signals['C'] = (self.get_signal('F')-32) / 1.8
@t2.bind_signals('C')
def _f2c():
    self._signals['F'] = self.get_signal('C') * 1.8 + 32
@t2.bind_signals('C', 'F')
def _show():
    print('C:', self.get_signal('C'))
    print('F:', self.get_signal('F'))
| {
"repo_name": "synmnstr/flexx",
"path": "exp/reactive.py",
"copies": "22",
"size": "9230",
"license": "bsd-2-clause",
"hash": 2199809718498553900,
"line_mean": 26.3076923077,
"line_max": 93,
"alpha_frac": 0.5511375948,
"autogenerated": false,
"ratio": 3.65689381933439,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""Attempt to implement the Multilinear Jigsaw Puzzles as described
in Appendix A of the paper [0].
!!! ATTENTION !!!
There are several issues and this implementation is NOT working
correctly for now.
!!! ATTENTION !!!
0. Parameter Choices:
Generally the correct choice of parameters and the relation between
them seems to be a bit underspecified. While there is a 'Setting
Parameters' paragraph in Appendix A, it relies heavily on Big O
notation and was a bit confusing to us in other regards (e.g.
why is m an effective security parameter, even though there already
exists the dedicated security parameter lambda?)
1. a mod g:
The first step of encoding an element a is to calculate
"a_hat = a modulo (g) to create a small polynomial". We could
not think of a better canonical representative for this reduction
than a itself, which makes us believe that either the modulo reduction
is redundant or the authors had another canonical representative in
mind (a polynomial) which is unclear how to compute.
2. Size of m:
In the already mentioned 'Setting Parameters' paragraph, it is said that
k = m^delta even though k is actually an input parameter. If we infer
from this that m = k^(1/delta) we run into massive performance problems
for even the tiniest RBPs. We tried to use constant values for m (see
_getM) until we realized that this will completely change the value of
q, q^(7/8), p etc.
3. Application of mod p:
It remains unclear how the (mod p) which is inherent to the computations
over Z_p that 'plaintext' RBPs perform is applied in the encoding.
Currently computations in the encoding work as expected unless the
results grow larger than p.
This is most probably related to (0), (1) and (2).
4. Error Terms:
When encoding an element a, the second step is to choose an error
term e, so that a_hat + e*g is 'discrete Gaussian centered at the
origin'. We are not entirely sure how to ensure this. Currently we
only choose small random coefficients for the error terms.
[0] S. Garg, C. Gentry, S. Halevi, M. Raykova, A. Sahai, and B. Waters. "Candidate
indistinguishability obfuscation and functional encryption for all circuits." In:
Foundations of Computer Science (FOCS), 2013 IEEE 54th Annual Symposium on.
IEEE. 2013, pp. 40-49.
"""
from sage.all import *
from operator import mul
import logging
import math
class JigsawPuzzle(object):
    """Multilinear Jigsaw Puzzles as defined in Appendix A of [0].

    Elements are encoded at "levels" (subsets of range(k)); a product of
    encodings whose levels partition range(k) can be tested for zero with
    the zero-test parameter ``pzt``.  See the module docstring for the
    known open issues with this implementation.
    """
    def __init__(self, k, secParam, dimensionality=None, delta=0.5, epsilon=0.55):
        """Generate all parameters of the puzzle.

        Args:
            k: multilinearity degree, i.e. the number of denominators z_i.
            secParam: security parameter lambda; p must exceed 2^secParam.
            dimensionality: explicit ring dimension m (a power of two), or
                None to derive m from k and delta as described in the paper.
            delta: constant, 0 < delta < epsilon < 1.  Smaller delta means
                larger m; g also depends on m and delta.
            epsilon: constant, delta < epsilon < 1.  Smaller epsilon means
                smaller q.
        """
        logging.info("--- Starting Jigsaw Puzzle generation ----")
        self.x = SR.var('x')
        #--- constants ---
        #0 < delta < epsilon < 1
        self.delta = delta
        self.epsilon = epsilon
        self.k = k
        self.secParam = secParam
        #Dimensionality m
        self.m = self.__getM(dimensionality)
        logging.debug('m: %d'%self.m)
        #choose 'large' random prime q, approximately 2^O(m^epsilon)
        #(reuse self.m rather than recomputing __getM)
        q_exp = int(self.m**self.epsilon)*10
        self.q = random_prime(2**(q_exp+1), lbound=2**q_exp)
        logging.debug('%d < q < %d ==> q: %d'%(2**q_exp, 2**(q_exp+1), self.q))
        #generate ring R = Z[X]/(X^m+1) and R_q = Z_q[X]/(X^m+1) (m = dimension parameter)
        self.R = QuotientRing(ZZ[self.x], self.x**self.m+1)
        self.t = self.R.gen()
        self.R_q = QuotientRing(Integers(self.q)[self.x], self.x**self.m+1)
        #choose small random polynomial g in R with |R/(g)| a large prime p, + more conditions
        self.g = self.__chooseG()
        logging.debug('g_bound: %d => g: %s' % (2**int(self.m**self.delta), self.g))
        logging.debug('p: %d'%self.getP())
        #choose k random invertible polynomials z_1, z_2, ..., z_k
        self.zList = self.__getZList()
        #generate zero test element
        self.pzt = self.__getPzt()
        logging.debug('Zero test element: %s' % self.pzt)
        logging.info("--- Jigsaw Puzzle generation successful ---")

    def encode(self, a, levelSet):
        """Encode the element a at the level described by levelSet.

        Args:
            a: plaintext ring element to encode.
            levelSet: iterable of indices in range(self.k) naming the level.

        Returns:
            An element of R_q: (a^ + e*g) / prod(z_i for i in levelSet).
        """
        assert(all(i<self.k for i in levelSet))
        #reduce a modulo (g) to get small polynomial a^ in R
        aHat = self.R(a).mod(self.g)
        #choose error e so that a^+e*g is discrete Gaussian centered at the
        #origin; all coefficients smaller than 2^O(m^delta)
        bound = 2**(int(self.m**self.delta)/2)
        coeffs = [ZZ.random_element(0, bound) for _ in range(self.m)]
        #use enumerate with a fresh name: the previous list comprehension
        #reused `a` and (under Python 2 scoping) clobbered the parameter
        error = sum(c*self.t**i for i, c in enumerate(coeffs))
        #return: (a^ + e*g) / product of all z_i (i elem S)
        numerator = self.R_q(aHat + error*self.g)
        denominator = self.R_q(reduce(mul, [self.zList[i] for i in levelSet]))
        return numerator/denominator

    def encodeMatrix(self, mat, levelSet):
        """Encode all elements of matrix mat at the level levelSet."""
        enc = self.encode
        return mat.apply_map(lambda z: enc(z, levelSet))

    def isZero(self, u):
        """Test if u is a valid encoding of 0 at the highest level."""
        logging.info('Starting zero test.')
        #u*pzt should reduce to only h*e when u encodes zero
        v = self.R_q(u*self.pzt)
        #v is "small" iff its (canonical embedding) Euclidean norm is
        #smaller than q^(7/8)
        norm = float(sum([int(a)**2 for a in v.list()]))**(0.5)
        ret = norm < (self.q**(7/8.0))
        logging.info('Zero test result: %f < %f => %s'%(norm, self.q**(7/8.0), ret))
        return ret

    def getP(self):
        """Return the prime p that identifies the plaintext space of the input."""
        return self.p

    def elementNorm(self, u):
        """Debug Helper: Calculate the norm of one element"""
        v = self.R_q(u*self.pzt)
        norm = float(sum([int(a)**2 for a in v.list()]))**(0.5)
        return norm

    def __getZList(self):
        """Return self.k random polynomials over self.R_q, all invertible."""
        ret = []
        while len(ret) < self.k:
            candidate = self.R_q.random_element()
            try:
                #the division raises when candidate is not invertible;
                #catching Exception (instead of a bare except) keeps
                #KeyboardInterrupt/SystemExit working
                1/candidate
                ret.append(candidate)
            except Exception:
                pass
        return ret

    def __chooseG(self):
        """Choose a 'small' polynomial g subject to several conditions.

        Conditions:
         - each coefficient should be smaller than 2^O(m^delta)
         - |R/(g)| is a large prime p (p > 2^secParam)
         - g^-1 when viewed in Q[X]/(X^m+1) is sufficiently small
        """
        qr = QuotientRing(QQ[self.x], self.x**self.m+1)
        cond = False
        bound = 2**(int(self.m**self.delta))
        while not cond:
            #all coefficients smaller than 2^O(m^delta)
            coeffs = [ZZ.random_element(0, bound) for _ in range(self.m)]
            g = sum(c*self.t**i for i, c in enumerate(coeffs))
            #condition: |R/(g)| is a large prime p (p > 2^secParam)
            self.p = g.norm()
            cond = self.p > 2**self.secParam and self.p in Primes()
            #condition: g^-1 viewed in Q[X]/(X^m+1) sufficiently small
            if cond:
                g_1 = qr(g)**(-1)
                cond = all(abs(c) < bound for c in g_1.list())
        return g

    def __getPzt(self):
        """Generate the zero test parameter pzt = h * prod(z_i) / g."""
        #random 'mid-size' polynomial, chosen from a discrete Gaussian in R,
        #coefficients are of size roughly q^(2/3)
        coeffs = [ZZ.random_element(int(0.9*self.q**(2/3.0)), int(1.1*self.q**(2/3.0)))
                  for _ in range(self.m)]
        h = sum(c*self.t**i for i, c in enumerate(coeffs))
        return h*reduce(mul, self.zList)/self.g

    def __getM(self, dim):
        """Choose the ring dimension m.

        Args:
            dim: None -> use the standard derivation from the paper;
                otherwise use dim directly (must be a positive power of two).
        """
        if dim is None:
            #k should be = m^delta -> m = k^(1/delta), rounded down to a
            #power of two (requires the module-level `import math`)
            ret = self.k**(1/self.delta)
            ret = 2**int(math.log(ret, 2))
        else:
            assert(dim > 0)
            #must be a power of two
            assert((dim & (dim - 1)) == 0)
            #use constant for reasonable runtimes
            ret = dim
        return ret

    def __str__(self):
        return 'Multilinear Jigsaw Puzzle with k: %d, P: %d, q: %d' % (self.k, self.getP(), self.q)
if __name__ == '__main__':
length = 5
puzzle = JigsawPuzzle(length+2, 2, dimensionality=2**3, delta=0.4, epsilon=0.5)
ring = Integers(puzzle.getP())
print puzzle
print 'q^(7/8): %f' % (puzzle.q**(7/8.0))
zeroEnc = puzzle.encode(ring(0), range(length+2))
print 'encryption of zero: %s'%zeroEnc
print 'is zero? %s'%puzzle.isZero(zeroEnc) | {
"repo_name": "tum-i22/indistinguishability-obfuscation",
"path": "obfusc8/mjp.py",
"copies": "1",
"size": "8222",
"license": "apache-2.0",
"hash": 3352941483547593700,
"line_mean": 32.5632653061,
"line_max": 111,
"alpha_frac": 0.6630990027,
"autogenerated": false,
"ratio": 2.8818787241500177,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8275602468351023,
"avg_score": 0.1538750516997989,
"num_lines": 245
} |
"""Attempt to install all datasets into all database management systems
This module, when run, attempts to install datasets from all Retriever scripts
in the /scripts folder (except for those listed in IGNORE), for each engine in
engine_list in retriever.engines. In other words, it runs trys to install using
all possible combinations of database platform and script and checks to
see if there are any errors. It does not check the values in the database.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
from imp import reload
from retriever.engines import engine_list, choose_engine
from retriever.lib.scripts import MODULE_LIST, SCRIPT_LIST
reload(sys)
if hasattr(sys, 'setdefaultencoding'):
sys.setdefaultencoding('latin-1')
if os.name == "nt":
os_password = "Password12!"
else:
os_password = ""
MODULE_LIST = MODULE_LIST()
if len(sys.argv) > 1:
engine_list = [
e for e in engine_list
if e.name in sys.argv[1:] or
e.abbreviation in sys.argv[1:]
]
if os.path.exists("test_all"):
os.system("rm -r test_all")
os.makedirs("test_all")
os.chdir("test_all")
dbfile = os.path.normpath(os.path.join(os.getcwd(), 'testdb.sqlite'))
engine_test = {
"postgres": {'engine': 'postgres',
'user': 'postgres',
'password': os_password,
'host': 'localhost',
'port': 5432,
'database': 'postgres',
'database_name': 'testschema',
'table_name': '{db}.{table}'},
"mysql": {'engine': 'mysql',
'user': 'travis',
'password': '',
'host': 'localhost',
'port': 3306,
'database_name': 'testdb',
'table_name': '{db}.{table}'},
"xml": {'engine': 'xml',
'table_name': 'output_file_{table}.xml'},
"json": {'engine': 'json',
'table_name': 'output_file_{table}.json'},
"csv": {'engine': 'csv',
'table_name': 'output_file_{table}.csv'},
"sqlite": {'engine': 'sqlite',
'file': dbfile, 'table_name': '{db}_{table}'}
}
SCRIPT_LIST = SCRIPT_LIST()
TEST_ENGINES = {}
IGNORE = ["forest-inventory-analysis", "bioclim", "prism-climate", "vertnet", "NPN", "mammal-super-tree"]
IGNORE = [dataset.lower() for dataset in IGNORE]
for engine in engine_list:
if engine.abbreviation in engine_test:
try:
opts = engine_test[engine.abbreviation]
TEST_ENGINES[engine.abbreviation] = choose_engine(opts)
except:
TEST_ENGINES[engine.abbreviation] = None
pass
errors = []
for module in MODULE_LIST:
for (key, value) in list(TEST_ENGINES.items()):
if module.SCRIPT.name.lower() not in IGNORE:
if value != None:
print("==>", module.__name__, value.name, "..........", module.SCRIPT.name)
try:
module.SCRIPT.download(value)
except KeyboardInterrupt:
pass
except Exception as e:
print("ERROR.")
errors.append((key, module.__name__, e))
else:
errors.append((key, "No connection detected......" + module.SCRIPT.name))
print('')
if errors:
print("Engine, Dataset, Error")
for error in errors:
print(error)
else:
print("All tests passed")
| {
"repo_name": "goelakash/retriever",
"path": "retriever/try_install_all.py",
"copies": "1",
"size": "3460",
"license": "mit",
"hash": 1641011411192851200,
"line_mean": 30.7431192661,
"line_max": 105,
"alpha_frac": 0.5757225434,
"autogenerated": false,
"ratio": 3.7814207650273226,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9851306245494937,
"avg_score": 0.0011674125864769421,
"num_lines": 109
} |
"""Attempt to make the FeedPlatform command line tools available as
a Django management subcommand.
"""
# Introducing a 2.5 dependency here so we can name our django
# command "feedplatform" (i.e. same as the top-level module).
# Unfortunately, Django names commands always after their filename.
from __future__ import absolute_import
from feedplatform.management import \
call_command as fp_call_command, \
get_command as fp_get_command, \
get_commands as fp_get_commands, \
BaseCommand as fp_BaseCommand, \
CommandError as fp_CommandError, \
UnknownCommandError as fp_UnknownCommandError, \
ManagementUtility as fp_ManagementUtility
import sys, os
from django.core.management.base import BaseCommand
from django.core.management import LaxOptionParser
# The --config and --pythonpath options are useless when run
# through the django integration command; the config is fixed,
# and django's management interface itself already handled a
# --pythonpath option, if set. So we remove them.
# Rebuild the option list without the options Django already covers.
fp_BaseCommand.option_list = tuple(
    option for option in fp_BaseCommand.option_list
    if option.dest not in ('config', 'pythonpath'))
def _get_command_list():
    """Return a help string listing every available subcommand."""
    return "Available subcommands:\n" + ''.join(
        '\n %s' % name for name in fp_get_commands())
class Command(BaseCommand):
    # Django management command that forwards everything to the
    # FeedPlatform management tool (Python 2 code base).
    help = "Gateway to the FeedPlatform management tool.\n\n" + _get_command_list()
    args = '[SUBCOMMAND]'

    def create_parser(self, prog_name, subcommand):
        # LaxOptionParser will ignore argument errors. We need this since
        # all the options that are intended for the FeedPlatform command
        # are not supported by this Django-wrapper command and would
        # otherwise cause it to fail.
        parser = super(Command, self).create_parser(prog_name, subcommand)
        # Swapping the class in place keeps all parser state while changing
        # only the error-handling behaviour.
        parser.__class__ = LaxOptionParser
        return parser

    def handle(self, *args, **options):
        """
        When this is called, ``options`` will contain the valid
        Django-level options that have been found, including those
        potentially supported by this (Django)-command.

        ``args`` contains everything else, the arguments as well as all
        the unsupported options, which we want to give to the
        FeedPlatform-level command. If that can't handle them either,
        then we can raise an error.

        So for example, the following call:

            ./manage.py feedplatform run --daemonize --pythonpath .

        results in:

            args = ('run', '--daemonize')
            options = {'pythonpath': '.', 'traceback': None, 'settings': None}
        """
        try:
            subcommand = args[0]
        except IndexError:
            # No subcommand given at all; bail out with a hint.
            sys.stdout.write('Subcommand needed. Use "help" for usage.\n')
            sys.exit(1)
        try:
            # special case the "help" command, since the default version
            # by is unaware of the wrapping and it's subcommand status and
            # displays the "Usage: ..." line incorrectly.
            if subcommand == 'help':
                if len(args) <= 1:
                    sys.stdout.write(_get_command_list()+"\n\n")
                else:
                    # let the feedplatform command print it's own help
                    fp_get_command(args[1]).print_help('feedplatform', args[1])
                sys.exit(1)
            else:
                # forward to feedplatform handler
                fp_get_command(subcommand).run_from_argv(
                    sys.argv[:1] + [subcommand] + list(args[1:]))
        except fp_UnknownCommandError, e:
            self._fail("Unknown subcommand: %s\n" %e.name)
        except fp_CommandError, e:
            self._fail("%s\n" %e)

    def _fail(self, msg):
        # Report the error and exit non-zero.  NOTE(review): msg goes to
        # stdout while the usage hint goes to stderr -- confirm intentional.
        sys.stdout.write(msg)
        sys.stderr.write("Type '%s feedplatform help' for usage.\n" %
                         os.path.basename(sys.argv[0]))
sys.exit(1) | {
"repo_name": "miracle2k/feedplatform",
"path": "feedplatform/integration/django/management/commands/feedplatform.py",
"copies": "1",
"size": "4101",
"license": "bsd-2-clause",
"hash": -2936878308026166000,
"line_mean": 36.3457943925,
"line_max": 83,
"alpha_frac": 0.6166788588,
"autogenerated": false,
"ratio": 4.307773109243698,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5424451968043698,
"avg_score": null,
"num_lines": null
} |
"""Attempt to manage the disaster that is IEM symlinking"""
import os
import datetime
from pyiem.util import logger
# Module-level logger shared by all helpers in this script.
LOG = logger()

# Symlink table: each entry is [LINK, TARGET].  workflow() ensures LINK is
# a symlink pointing at TARGET, repointing it when necessary.
# LINK , TARGET
PAIRS = [
    ["/mesonet/data/merra2", "/mnt/mesonet2/data/merra2"],
    ["/mesonet/nawips", "/mnt/mesonet2/gempak"],
    ["/mesonet/scripts", "/mnt/mesonet2/scripts"],
    ["/mesonet/wepp", "/mnt/mesonet2/idep"],
    ["/mesonet/ARCHIVE/gempak", "/mnt/mesonet2/longterm/gempak"],
    ["/mesonet/ARCHIVE/raw", "/mnt/mesonet2/longterm/raw"],
    ["/mesonet/ARCHIVE/rer", "/mnt/mesonet/ARCHIVE/rer"],
    ["/mesonet/data/dotcams", "/mnt/mesonet2/data/dotcams"],
    ["/mesonet/data/gempak", "/mnt/mesonet2/data/gempak"],
    ["/mesonet/data/iemre", "/mnt/mesonet2/data/iemre"],
    ["/mesonet/data/prism", "/mnt/mesonet2/data/prism"],
    ["/mesonet/data/incoming", "/mnt/mesonet2/data/incoming"],
    ["/mesonet/data/madis", "/mnt/mesonet2/data/madis"],
    ["/mesonet/data/model", "/mnt/mesonet2/data/model"],
    ["/mesonet/data/ndfd", "/mnt/mesonet2/data/ndfd"],
    ["/mesonet/data/nexrad", "/data/gempak/nexrad"],
    ["/mesonet/data/smos", "/mnt/mesonet2/data/smos"],
    ["/mesonet/data/stage4", "/mnt/mesonet2/data/stage4"],
    ["/mesonet/data/text", "/mnt/mesonet2/data/text"],
    ["/mesonet/share/cases", "/mnt/mesonet/share/cases"],
    ["/mesonet/share/climodat", "/mnt/mesonet2/share/climodat"],
    ["/mesonet/share/features", "/mnt/mesonet/share/features"],
    ["/mesonet/share/frost", "/mnt/mesonet/share/frost"],
    ["/mesonet/share/iemmaps", "/mnt/mesonet/share/iemmaps"],
    ["/mesonet/share/lapses", "/mnt/mesonet/share/lapses"],
    ["/mesonet/share/pickup", "/mnt/mesonet2/share/pickup"],
    ["/mesonet/share/pics", "/mnt/mesonet/share/pics"],
    ["/mesonet/share/present", "/mnt/mesonet/share/present"],
    ["/mesonet/share/usage", "/mnt/mesonet/share/usage"],
    ["/mesonet/share/windrose", "/mnt/mesonet2/share/windrose"],
]
def workflow(link, target):
    """Ensure ``link`` is a symlink pointing at directory ``target``.

    Repoints the link when it exists but targets somewhere else; refuses to
    act when the target directory is missing or the link is a real directory.
    """
    if not os.path.isdir(target):
        LOG.info("ERROR: link target: %s is not found", target)
        return
    is_link = os.path.islink(link)
    if not is_link and os.path.isdir(link):
        LOG.info("ERROR: symlink: %s is already a directory!", link)
        return
    if is_link:
        if os.path.realpath(link) == target:
            # Already points at the right place; nothing to do.
            return
        os.unlink(link)
    LOG.info("%s -> %s", link, target)
    os.symlink(target, link)
def main():
    """Go Main"""
    # Make sure the top-level folders this script manages exist.
    for subdir in ("share", "ARCHIVE", "data"):
        path = "/mesonet/%s" % (subdir,)
        if not os.path.isdir(path):
            os.makedirs(path)
    # Quasi dynamic generation of /mesonet/ARCHIVE/data/YYYY links
    if not os.path.isdir("/mesonet/ARCHIVE/data"):
        os.makedirs("/mesonet/ARCHIVE/data")
    # Years through 2019 live on mtarchive3; 2020 onward on archive00.
    for year in range(1893, datetime.date.today().year + 2):
        if year < 2020:
            target = "/mnt/mtarchive3/ARCHIVE/data/%s" % (year,)
        else:
            target = "/mnt/archive00/ARCHIVE/data/%s" % (year,)
        workflow("/mesonet/ARCHIVE/data/%s" % (year,), target)
    for (link, target) in PAIRS:
        workflow(link, target)
if __name__ == "__main__":
    # Script entry point.
    main()
| {
"repo_name": "akrherz/iem",
"path": "deployment/symlink_manager.py",
"copies": "1",
"size": "3305",
"license": "mit",
"hash": -5575315607649993000,
"line_mean": 37.8823529412,
"line_max": 68,
"alpha_frac": 0.619667171,
"autogenerated": false,
"ratio": 2.9430097951914513,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40626769661914514,
"avg_score": null,
"num_lines": null
} |
"""Attempt to rerun all feature plots to see what I broke."""
import datetime
import sys
from multiprocessing import Pool
from bs4 import BeautifulSoup
import requests
import pandas as pd
from pyiem.util import get_dbconn, logger
LOG = logger()
def run_plot(uri):
    """Fetch an autoplot page and its generated image; report success.

    Args:
        uri: app-relative URL of the autoplot to exercise.

    Returns:
        bool: True when the page (and its image, if present) was generated
        without a hard failure.
    """
    uri = "http://iem.local/%s" % (uri,)
    try:
        res = requests.get(uri, timeout=600)
        soup = BeautifulSoup(res.content, "html.parser")
        img = soup.find_all(id="theimage")
        if not img:
            # Page rendered without an image element; nothing more to check.
            return True
        uri = "http://iem.local%s" % (img[0]["src"],)
        res = requests.get(uri, timeout=600)
    except requests.exceptions.ReadTimeout:
        # uri[16:] strips the "http://iem.local" prefix for readability.
        print("%s -> Read Timeout" % (uri[16:],))
        return False
    # Known failures likely due to missing data
    if res.status_code == 400:
        return True
    if res.status_code == 504:
        print("%s -> HTTP: %s (timeout)" % (uri, res.status_code))
        return False
    # Fix: res.content is bytes, so the old `res.content == ""` comparison
    # was always False and empty responses were silently treated as OK.
    if res.status_code != 200 or not res.content:
        print(
            "%s -> HTTP: %s len(content): %s"
            % (uri[16:], res.status_code, len(res.content))
        )
        return False
    return True
def workflow(entry):
    """Run one queued (id, url) entry; return [id, url, seconds-or-False]."""
    started = datetime.datetime.now()
    if run_plot(entry[1]) is False:
        return [entry[0], entry[1], False]
    elapsed = (datetime.datetime.now() - started).total_seconds()
    return [entry[0], entry[1], elapsed]
def main():
    """Rerun every feature autoplot and report failures and timings.

    Exits with status 1 when any plot fails to generate.
    """
    pgconn = get_dbconn("mesosite")
    cursor = pgconn.cursor()
    cursor.execute(
        """
        SELECT date(valid), appurl from feature
        WHERE appurl ~* '/plotting/auto/'
        ORDER by valid ASC
        """
    )
    queue = [row for row in cursor]
    LOG.info("found %s features", len(queue))
    timing = []
    failed = []
    # Fix: the pool was never closed/joined; the context manager guarantees
    # the workers are reaped even if the reporting below raises.
    with Pool(4) as pool:
        for res in pool.imap_unordered(workflow, queue):
            if res[2] is False:
                failed.append({"i": res[0], "fmt": res[1]})
                continue
            timing.append({"i": res[0], "fmt": res[1], "secs": res[2]})
    if not timing:
        print("WARNING: no timing results found!")
        return
    # Show the five slowest plots.
    df = pd.DataFrame(timing)
    df.set_index("i", inplace=True)
    df.sort_values("secs", ascending=False, inplace=True)
    print(df.head(5))
    if failed:
        print("Failures:")
        for f in failed:
            print("%s %s" % (f["i"], f["fmt"]))
        sys.exit(1)
if __name__ == "__main__":
    # Script entry point.
    main()
| {
"repo_name": "akrherz/iem",
"path": "test/run_feature_autoplots.py",
"copies": "1",
"size": "2536",
"license": "mit",
"hash": 8241654511635232000,
"line_mean": 26.2688172043,
"line_max": 67,
"alpha_frac": 0.5611198738,
"autogenerated": false,
"ratio": 3.556802244039271,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4617922117839271,
"avg_score": null,
"num_lines": null
} |
"""Attempt to tabulate single qubit gates required to generate a target 2Q gate
with a product A k A."""
from functools import reduce
from typing import Tuple, Sequence, List, NamedTuple
from dataclasses import dataclass
import numpy as np
import cirq
from cirq import value
from cirq._compat import proper_repr, proper_eq
from cirq_google.optimizers.two_qubit_gates.math_utils import (
kak_vector_infidelity,
vector_kron,
weyl_chamber_mesh,
random_qubit_unitary,
kak_vector_to_unitary,
)
_SingleQubitGatePair = Tuple[np.ndarray, np.ndarray]
class TwoQubitGateCompilation(NamedTuple):
    r"""Represents a compilation of a target 2-qubit with respect to a base
    gate.

    This object encodes the relationship between 4x4 unitary operators

    U_target ~ k_N · U_base · k_{N-1} · ... · k_1 · U_base · k_0

    where U_target, U_base are 2-local and k_j are 1-local.

    Attributes:
        base_gate_unitary: 4x4 unitary denoting U_base above.
        target_gate: 4x4 unitary denoting U_target above.
        local_unitaries: Sequence of 2-tuples
            $(k_{00}, k_{01}), (k_{10}, k_{11}) \ldots$ where
            $k_j = k_{j0} \otimes k_{j1}$ in the product above.
            Each $k_{j0}, k_{j1}$ is a 2x2 unitary.
        actual_gate: 4x4 unitary denoting the right hand side above, ideally
            equal to U_target.
        success: Whether actual_gate is expected to be close to U_target.
    """

    base_gate_unitary: np.ndarray
    target_gate: np.ndarray
    local_unitaries: Tuple[_SingleQubitGatePair, ...]
    actual_gate: np.ndarray
    success: bool
@dataclass
class GateTabulation:
    """A 2-qubit gate compiler based on precomputing/tabulating gate products."""

    base_gate: np.ndarray  # Base two qubit gate. (4x4 unitary)
    # Sequence of KAK vectors, ideally "dense" in the Weyl chamber. Shape (N,3).
    kak_vecs: np.ndarray
    # Sequence of 1-local operations required to achieve a given KAK vector.
    # Index j corresponds to KAK_vecs[j], and is of the form
    # ( (u0[0],u1[0]), (u0[1],u1[1]), ...) where u0[k] is the kth single qubit
    # unitary acting on qubit 0 (similarly for u1)
    single_qubit_gates: Sequence[Sequence[_SingleQubitGatePair]]
    max_expected_infidelity: float  # Defined using entanglement fidelity.
    summary: str  # Text summarizing the results of the tabulation procedure.
    # Any KAK vectors which are expected to be compilable (within infidelity
    # max_expected_infidelity) using 2 or 3 base gates.
    missed_points: Tuple[np.ndarray, ...]

    def compile_two_qubit_gate(self, unitary: np.ndarray) -> TwoQubitGateCompilation:
        r"""Compute single qubit gates required to compile a desired unitary.

        Given a desired unitary U, this computes the sequence of 1-local gates
        $k_j$ such that the product

        $k_{n-1} A k_{n-2} A ... k_1 A k_0$

        is close to U. Here A is the base_gate of the tabulation.

        Args:
            unitary: Unitary (U above) to compile.

        Returns:
            A TwoQubitGateCompilation object encoding the required local
            unitaries and resulting product above.
        """
        unitary = np.asarray(unitary)
        kak_vec = cirq.kak_vector(unitary, check_preconditions=False)
        infidelities = kak_vector_infidelity(kak_vec, self.kak_vecs, ignore_equivalent_vectors=True)
        # Compile against the tabulated entry nearest (in infidelity) to U.
        nearest_ind = infidelities.argmin()

        success = infidelities[nearest_ind] < self.max_expected_infidelity

        # shape (n,2,2,2)
        inner_gates = np.array(self.single_qubit_gates[nearest_ind])

        if inner_gates.size == 0:  # Only need base gate
            kR, kL, actual = _outer_locals_for_unitary(unitary, self.base_gate)
            return TwoQubitGateCompilation(self.base_gate, unitary, (kR, kL), actual, success)

        # reshape to operators on 2 qubits, (n,4,4)
        inner_gates = vector_kron(inner_gates[..., 0, :, :], inner_gates[..., 1, :, :])

        assert inner_gates.ndim == 3
        # Fold the alternating base gate / local gate product left-to-right.
        inner_product = reduce(lambda a, b: self.base_gate @ b @ a, inner_gates, self.base_gate)
        kR, kL, actual = _outer_locals_for_unitary(unitary, inner_product)

        # Sandwich the tabulated inner cycles between the outer locals.
        out = [kR]
        out.extend(self.single_qubit_gates[nearest_ind])
        out.append(kL)

        return TwoQubitGateCompilation(self.base_gate, unitary, tuple(out), actual, success)

    def _json_dict_(self):
        # Serialization hook for cirq's JSON protocol.
        return {
            'cirq_type': self.__class__.__name__,
            'base_gate': self.base_gate.tolist(),
            'kak_vecs': self.kak_vecs.tolist(),
            'single_qubit_gates': self.single_qubit_gates,
            'max_expected_infidelity': self.max_expected_infidelity,
            'summary': self.summary,
            'missed_points': self.missed_points,
        }

    def __repr__(self) -> str:
        # Construct the repr for single_qubit_gates, which is a sequence of
        # sequences of tuples of NumPy arrays, which needs to be encoded with
        # proper_repr.
        numpy_single_qubit_gates = []
        for single_qubit_gate in self.single_qubit_gates:
            gate_repr = [
                f"({proper_repr(pair[0])}, {proper_repr(pair[1])})" for pair in single_qubit_gate
            ]
            numpy_single_qubit_gates.append(f"[{','.join(gate_repr)}]")

        return (
            f'cirq_google.optimizers.two_qubit_gates.gate_compilation'
            f'.GateTabulation({proper_repr(self.base_gate)}, '
            f'{proper_repr(self.kak_vecs)}, '
            f'[{",".join(numpy_single_qubit_gates)}], '
            f' {proper_repr(self.max_expected_infidelity)}, '
            f'{proper_repr(self.summary)}, '
            f'{proper_repr(self.missed_points)})'
        )

    def __eq__(self, other):
        # Field-wise equality; numpy fields need array_equal, the nested
        # single_qubit_gates structure needs proper_eq.
        if not isinstance(other, type(self)):
            return NotImplemented
        return (
            np.array_equal(self.base_gate, other.base_gate)
            and np.array_equal(self.kak_vecs, other.kak_vecs)
            and proper_eq(self.single_qubit_gates, other.single_qubit_gates)
            and self.max_expected_infidelity == other.max_expected_infidelity
            and self.summary == other.summary
            and np.array_equal(self.missed_points, other.missed_points)
        )

    @classmethod
    def _from_json_dict_(
        cls,
        base_gate,
        kak_vecs,
        single_qubit_gates,
        max_expected_infidelity,
        summary,
        missed_points,
        **kwargs,
    ):
        # Deserialization hook: rebuild the nested numpy structure from the
        # plain lists stored in JSON.
        numpy_single_qubit_gates = []
        for single_qubit_gate in single_qubit_gates:
            numpy_single_qubit_gate = []
            for pair in single_qubit_gate:
                numpy_tuple = (np.array(pair[0]), np.array(pair[1]))
                numpy_single_qubit_gate.append(numpy_tuple)
            numpy_single_qubit_gates.append(numpy_single_qubit_gate)

        return cls(
            base_gate=np.array(base_gate),
            kak_vecs=np.array(kak_vecs),
            single_qubit_gates=numpy_single_qubit_gates,
            max_expected_infidelity=max_expected_infidelity,
            summary=summary,
            missed_points=missed_points,
        )
def _outer_locals_for_unitary(
    target: np.ndarray, base: np.ndarray
) -> Tuple[_SingleQubitGatePair, _SingleQubitGatePair, np.ndarray]:
    """Local unitaries mapping between locally equivalent 2-local unitaries.

    Finds the left and right 1-local unitaries kL, kR such that

    U_target = kL @ U_base @ kR

    Args:
        target: The unitary to which we want to map.
        base: The base unitary which maps to target.

    Returns:
        kR: The right 1-local unitaries in the equation above, expressed as
            2-tuples of (2x2) single qubit unitaries.
        kL: The left 1-local unitaries in the equation above, expressed as
            2-tuples of (2x2) single qubit unitaries.
        actual: The outcome of kL @ base @ kR
    """
    t_dec = cirq.kak_decomposition(target)
    b_dec = cirq.kak_decomposition(base)

    # From the KAK decompositions, kLt At kRt = kL kLb Ab kRb kR.  When the
    # interaction parts agree (At == Ab) this gives, per qubit,
    #   kL = kLt kLb^dagger   and   kR = kRb^dagger kRt.
    kL = tuple(
        t_op @ b_op.conj().T
        for t_op, b_op in zip(
            t_dec.single_qubit_operations_after, b_dec.single_qubit_operations_after
        )
    )
    kR = tuple(
        b_op.conj().T @ t_op
        for t_op, b_op in zip(
            t_dec.single_qubit_operations_before, b_dec.single_qubit_operations_before
        )
    )

    actual = np.kron(*kL) @ base @ np.kron(*kR)
    actual *= np.conj(t_dec.global_phase)
    return kR, kL, actual
class _TabulationStepResult(NamedTuple):
    """Result of one tabulation pass: newly kept vectors and their cycles."""

    # Generated KAK vectors that are uniquely close to at least one mesh point.
    kept_kaks: List[np.ndarray]
    # The corresponding single qubit unitaries required to obtain the desired
    # KAK vectors.
    kept_cycles: List[Tuple[_SingleQubitGatePair, ...]]
def _tabulate_kak_vectors(
    *,
    already_tabulated: np.ndarray,
    base_gate: np.ndarray,
    max_dist: float,
    kak_mesh: np.ndarray,
    local_unitary_pairs: Sequence[_SingleQubitGatePair],
) -> _TabulationStepResult:
    """Tabulate KAK vectors from products of local unitaries with a base gate.

    Args:
        already_tabulated: Record of which KAK vectors have already been
            tabulated. kak_mesh[i] has been calculated if i is in tabulation.
        base_gate: The base 2 qubit gate used in the gate product.
        max_dist: The largest allowed Pauli error between a generated 2Q
            unitary and a KAK vector mesh point that it is tabulated to.
        kak_mesh: Sequence of KAK vectors filling the Weyl chamber whose
            nearest neighbor distance is about 2*max_error.
        local_unitary_pairs: Sequence of 2-tuples of single qubit unitary
            tensors, each of shape (N,2,2).

    Returns:
        The newly tabulated KAK vectors and the local unitaries used to generate
        them. This function also updates already_tabulated to include the
        indices of these vectors (within kak_mesh).
    """
    shapes = {pair[0].shape for pair in local_unitary_pairs}
    # Fix: the second update previously re-added pair[0] shapes, so a
    # mismatched shape in the second member of a pair went undetected.
    shapes.update({pair[1].shape for pair in local_unitary_pairs})
    assert len(shapes) == 1
    assert len(shapes.pop()) == 3

    # Generate products
    local_cycles = np.array([vector_kron(*pairs) for pairs in local_unitary_pairs])

    prods = np.einsum('ab,...bc,cd', base_gate, local_cycles[0], base_gate)
    for local_cycle in local_cycles[1:]:
        np.einsum('ab,...bc,...cd', base_gate, local_cycle, prods, out=prods)

    kak_vectors = cirq.kak_vector(prods, check_preconditions=False)

    kept_kaks = []
    kept_cycles = []

    for ind, vec in enumerate(kak_vectors):
        # The L2 distance is an upper bound to the locally invariant distance,
        # but it's much faster to compute.
        dists = np.sqrt(np.sum((kak_mesh - vec) ** 2, axis=-1))
        close = (dists < max_dist).nonzero()[0]
        assert close.shape[0] in (0, 1), f'close.shape: {close.shape}'

        cycles_for_gate = tuple((k_0[ind], k_1[ind]) for k_0, k_1 in local_unitary_pairs)

        # Add the vector and its cycles to the tabulation if it's not already
        # tabulated.
        if not np.all(already_tabulated[close]):
            already_tabulated[close] = True
            kept_kaks.append(vec)
            kept_cycles.append(cycles_for_gate)

    return _TabulationStepResult(kept_kaks, kept_cycles)
def gate_product_tabulation(
    base_gate: np.ndarray,
    max_infidelity: float,
    *,
    sample_scaling: int = 50,
    allow_missed_points: bool = True,
    random_state: cirq.RANDOM_STATE_OR_SEED_LIKE = None,
) -> GateTabulation:
    r"""Generate a GateTabulation for a base two qubit unitary.
    Args:
        base_gate: The base gate of the tabulation.
        max_infidelity: Sets the desired density of tabulated product unitaries.
            The typical nearest neighbor Euclidean spacing (of the KAK vectors)
            will be on the order of $\sqrt{max\_infidelity}$. Thus the number of
            tabulated points will scale as $max\_infidelity^{-3/2}$.
        sample_scaling: Relative number of random gate products to use in the
            tabulation. The total number of random local unitaries scales as
            ~ $max\_infidelity^{-3/2} * sample\_scaling$. Must be positive.
        random_state: Random state or random state seed.
        allow_missed_points: If True, the tabulation is allowed to conclude
            even if not all points in the Weyl chamber are expected to be
            compilable using 2 or 3 base gates. Otherwise an error is raised
            in this case.
    Returns:
        A GateTabulation object used to compile new two-qubit gates from
        products of the base gate with 1-local unitaries.
    Raises:
        ValueError: If allow_missed_points is False and some mesh point of
            the Weyl chamber could not be reached with up to 3 base gates.
    """
    rng = value.parse_random_state(random_state)
    assert 1 / 2 > max_infidelity > 0
    # Mesh spacing chosen so that nearest-neighbor KAK distance tracks
    # sqrt(max_infidelity) (see docstring scaling argument).
    spacing = np.sqrt(max_infidelity / 3)
    mesh_points = weyl_chamber_mesh(spacing)
    # Number of random gate products to sample over in constructing the
    # tabulation. This has to be at least the number of mesh points, as
    # a single product can only be associated with one mesh point.
    assert sample_scaling > 0, 'Input sample_scaling must positive.'
    num_mesh_points = mesh_points.shape[0]
    num_samples = num_mesh_points * sample_scaling
    # include the base gate itself
    kak_vecs = [cirq.kak_vector(base_gate, check_preconditions=False)]
    # Entry i holds the single-qubit gate pairs used to reach kak_vecs[i];
    # the base gate alone needs no local gates, hence the empty tuple.
    sq_cycles: List[Tuple[_SingleQubitGatePair, ...]] = [()]
    # Tabulate gates that are close to gates in the mesh
    u_locals_0 = random_qubit_unitary((num_samples,), rng=rng)
    u_locals_1 = random_qubit_unitary((num_samples,), rng=rng)
    tabulated_kak_inds = np.zeros((num_mesh_points,), dtype=bool)
    # Products are associated with a mesh point only within half the mesh
    # spacing, so each product maps to at most one mesh point.
    tabulation_cutoff = 0.5 * spacing
    # First pass: single products base_gate . k . base_gate (2 base gates).
    out = _tabulate_kak_vectors(
        already_tabulated=tabulated_kak_inds,
        base_gate=base_gate,
        max_dist=tabulation_cutoff,
        kak_mesh=mesh_points,
        local_unitary_pairs=[(u_locals_0, u_locals_1)],
    )
    kak_vecs.extend(out.kept_kaks)
    sq_cycles.extend(out.kept_cycles)
    # Will be used later for getting missing KAK vectors.
    kak_vecs_single = np.array(kak_vecs)
    sq_cycles_single = list(sq_cycles)
    summary = (
        f'Fraction of Weyl chamber reached with 2 gates'
        f': {tabulated_kak_inds.sum() / num_mesh_points :.3f}'
    )
    # repeat for double products
    # Multiply by the same local unitary in the gate product
    out = _tabulate_kak_vectors(
        already_tabulated=tabulated_kak_inds,
        base_gate=base_gate,
        max_dist=tabulation_cutoff,
        kak_mesh=mesh_points,
        local_unitary_pairs=[(u_locals_0, u_locals_1)] * 2,
    )
    kak_vecs.extend(out.kept_kaks)
    sq_cycles.extend(out.kept_cycles)
    summary += (
        f'\nFraction of Weyl chamber reached with 2 gates and 3 gates'
        f'(same single qubit): '
        f'{tabulated_kak_inds.sum() / num_mesh_points :.3f}'
    )
    # If all KAK vectors in the mesh have been tabulated, return.
    missing_vec_inds = np.logical_not(tabulated_kak_inds).nonzero()[0]
    if not np.any(missing_vec_inds):
        # coverage: ignore
        return GateTabulation(base_gate, np.array(kak_vecs), sq_cycles, max_infidelity, summary, ())
    # Run through remaining KAK vectors that don't have products and try to
    # correct them
    u_locals_0p = random_qubit_unitary((100,), rng=rng)
    u_locals_1p = random_qubit_unitary((100,), rng=rng)
    u_locals = vector_kron(u_locals_0p, u_locals_1p)
    # Loop through the mesh points that have not yet been tabulated.
    # Consider their nonlocal parts A and compute products of the form
    # base_gate^\dagger k A
    # Compare the KAK vector of any of those products to the already tabulated
    # KAK vectors from single products of the form
    # base_gate k0 base_gate.
    # If they are close, then base_gate^\dagger k A ~ base_gate k0 base_gate
    # So we may compute the outer local unitaries kL, kR such that
    # base_gate^\dagger k A = kL base_gate k0 base_gate kR
    # A = k^\dagger base_gate kL base_gate k0 base_gate kR
    # the single-qubit unitary kL is the one we need to get the desired
    # KAK vector.
    missed_points = []
    base_gate_dag = base_gate.conj().T
    for ind in missing_vec_inds:
        missing_vec = mesh_points[ind]
        # Unitary A we wish to solve for
        missing_unitary = kak_vector_to_unitary(missing_vec)
        # Products of the from base_gate^\dagger k A
        products = np.einsum('ab,...bc,cd', base_gate_dag, u_locals, missing_unitary)
        # KAK vectors for these products
        kaks = cirq.kak_vector(products, check_preconditions=False)
        kaks = kaks[..., np.newaxis, :]
        # Check if any of the product KAK vectors are close to a previously
        # tabulated KAK vector
        dists2 = np.sum((kaks - kak_vecs_single) ** 2, axis=-1)
        min_dist_inds = np.unravel_index(dists2.argmin(), dists2.shape)
        min_dist = np.sqrt(dists2[min_dist_inds])
        if min_dist < tabulation_cutoff:
            # If so, compute the single qubit unitary k_L such that
            # base_gate^\dagger k A = kL base_gate k0 base_gate kR
            # where k0 is the old (previously tabulated) single qubit unitary
            # and k is one of the single qubit unitaries used above.
            # Indices below are for k, k0 respectively
            new_ind, old_ind = min_dist_inds
            # Special case where the RHS is just base_gate (no single qubit
            # gates yet applied). I.e. base_gate^\dagger k A ~ base_gate
            # which implies base_gate^\dagger k A = k_L base_gate k_R
            new_product = products[new_ind]
            if old_ind == 0:
                assert not sq_cycles_single[old_ind]
                base_product = base_gate
                _, kL, actual = _outer_locals_for_unitary(new_product, base_product)
                # Add to the enumeration
                sq_cycles.append((kL,))
            else:  # typical case mentioned above
                assert len(sq_cycles_single[old_ind]) == 1
                old_sq_cycle = sq_cycles_single[old_ind][0]
                old_k = np.kron(*old_sq_cycle)
                base_product = base_gate @ old_k @ base_gate
                _, kL, actual = _outer_locals_for_unitary(new_product, base_product)
                # Add to the enumeration
                sq_cycles.append((old_sq_cycle, kL))
            kak_vecs.append(cirq.kak_vector(base_gate @ actual, check_preconditions=False))
        elif not allow_missed_points:
            raise ValueError(f'Failed to tabulate a KAK vector near {missing_vec}')
        else:
            missed_points.append(missing_vec)
    kak_vecs = np.array(kak_vecs)
    summary += (
        f'\nFraction of Weyl chamber reached with 2 gates and 3 gates '
        f'(after patchup)'
        f': {(len(kak_vecs) - 1) / num_mesh_points :.3f}'
    )
    return GateTabulation(
        base_gate, kak_vecs, sq_cycles, max_infidelity, summary, tuple(missed_points)
    )
| {
"repo_name": "quantumlib/Cirq",
"path": "cirq-google/cirq_google/optimizers/two_qubit_gates/gate_compilation.py",
"copies": "1",
"size": "19289",
"license": "apache-2.0",
"hash": 5839971014964772000,
"line_mean": 39.4255765199,
"line_max": 100,
"alpha_frac": 0.6343929886,
"autogenerated": false,
"ratio": 3.3576527947065995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4492045783306599,
"avg_score": null,
"num_lines": null
} |
# Attemp using words to paragraph : (Features) Vector averaging method
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import gensim, re
from nltk.corpus import stopwords
# IMDB sentiment data: tab-separated files; quoting=3 (csv.QUOTE_NONE)
# because the raw review text contains unescaped double quotes.
train = pd.read_csv( "../data/labeledTrainData.tsv", delimiter="\t", quoting=3 )
test = pd.read_csv( "../data/testData.tsv", delimiter="\t", quoting=3 )
def makeFeatureVec(words, model, num_features):
# Function to average all of the word vectors in a given
# paragraph
#
# Pre-initialize an empty numpy array (for speed)
featureVec = np.zeros((num_features,),dtype="float32")
#
nwords = 0.
#
# Index2word is a list that contains the names of the words in
# the model's vocabulary. Convert it to a set, for speed
index2word_set = set(model.index2word)
#
# Loop over each word in the review and, if it is in the model's
# vocaublary, add its feature vector to the total
for word in words:
if word in index2word_set:
nwords = nwords + 1
featureVec = np.add(featureVec,model[word])
#
# Divide the result by the number of words to get the average
featureVec = np.divide(featureVec,nwords)
return featureVec
def getAvgFeaturesVecs(reviews, model, num_features):
    """Compute an average word-vector for every review.

    Args:
        reviews: List of tokenized reviews (each a list of word strings).
        model: Word2vec-style model, passed through to makeFeatureVec.
        num_features: Dimensionality of the word vectors.

    Returns:
        A (len(reviews), num_features) float32 array whose i-th row is the
        averaged vector of reviews[i].
    """
    reviewFeatureVecs = np.zeros((len(reviews), num_features), dtype="float32")
    for counter, review in enumerate(reviews):
        # Progress indicator every 1000 reviews. The original printed
        # len(review) -- the word count of the *current* review -- instead
        # of the total number of reviews; fixed to len(reviews).
        if counter % 1000 == 0:
            print ("Review %d of %d" % (counter, len(reviews)))
        reviewFeatureVecs[counter] = makeFeatureVec(review, model, num_features)
    return reviewFeatureVecs
def review_to_wordlist( review, remove_stopwords=False ):
    """Turn one raw review document into a list of lowercase word tokens.

    Args:
        review: Raw review text, possibly containing HTML markup.
        remove_stopwords: When True, drop English stop words (NLTK list).

    Returns:
        A list of lowercase alphabetic tokens.
    """
    # Strip HTML markup first, then drop every non-letter character.
    text = BeautifulSoup(review).get_text()
    text = re.sub("[^a-zA-Z]"," ", text)
    # Lowercase and tokenize on whitespace.
    words = text.lower().split()
    # Stop-word removal is opt-in: it hurts some downstream models
    # (e.g. sequence models) but helps bag-of-words averaging.
    if remove_stopwords:
        stops = set(stopwords.words("english"))
        words = [w for w in words if w not in stops]
    return words
# Load the previously trained word2vec model (300 dims, min word count 40,
# context window 10 -- per the filename).
model = gensim.models.Word2Vec.load('300features_40minwords_10context')
num_features = 300
# Tokenize the training reviews with stop words removed: frequent function
# words would otherwise dominate the averaged vectors.
clean_train_reviews = []
for review in train["review"]:
    clean_train_reviews.append(review_to_wordlist(review, remove_stopwords=True))
trainDataVecs = getAvgFeaturesVecs(clean_train_reviews, model, num_features)
print ("Creating average feature vecs for the test reviews")
clean_test_reviews = []
for review in test["review"]:
    clean_test_reviews.append(review_to_wordlist(review, remove_stopwords=True))
testDataVecs = getAvgFeaturesVecs(clean_test_reviews, model, num_features)
from sklearn.ensemble import RandomForestClassifier
# Fit a 100-tree random forest on the averaged vectors.
forest = RandomForestClassifier(n_estimators=100)
print ("Fitting a random forest to labelled training data...")
forest = forest.fit(trainDataVecs, train["sentiment"])
# test and extract results
result = forest.predict(testDataVecs)
# Write predictions in the Kaggle submission format (id, sentiment).
output = pd.DataFrame(data={'id':test['id'], 'sentiment':result})
output.to_csv("../output/Word2Vec_AvgVectors.csv", index=False, quoting=3)
| {
"repo_name": "switchkiller/ml_imdb",
"path": "src/word2vec_vectorAveraging.py",
"copies": "1",
"size": "3350",
"license": "mit",
"hash": -629967639560238000,
"line_mean": 33.8958333333,
"line_max": 84,
"alpha_frac": 0.6919402985,
"autogenerated": false,
"ratio": 3.5828877005347595,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9751365361855606,
"avg_score": 0.0046925274358305645,
"num_lines": 96
} |
import time
class AttendanceManager(object):
    """Records event attendance against cards in a card store.

    Attendance for an event is stored on each card under the key
    ``attended_<eventKey>``; the first time a card is marked attended,
    the ``onAttend`` callback is invoked with the card's key.
    """

    # Card fields copied through from the caller-supplied updates dict.
    prefillKeys = ["first_name", "last_name", "tags", "badge_icons"]

    def __init__(self, cardStore, eventKey, onAttend):
        self.cardStore = cardStore
        self.onAttend = onAttend
        self.attendKey = "attended_" + eventKey

    def prefill(self, key):
        """Fetch the prefillable fields for the card identified by *key*."""
        return self.cardStore.getCard(key, self.prefillKeys)

    def attend(self, key, updates):
        """Mark card *key* as attended now, storing its prefill fields.

        Fires ``onAttend(key)`` only on a card's first attendance.
        """
        previous = self.cardStore.getCard(key, [self.attendKey])
        first_time = previous[self.attendKey] is None
        # Only whitelisted fields from `updates` are persisted; missing
        # ones are stored as None.
        card_updates = {k: updates.get(k, None) for k in self.prefillKeys}
        card_updates[self.attendKey] = int(time.time())
        self.cardStore.updateCard(key, card_updates)
        if first_time:
            self.onAttend(key)
| {
"repo_name": "novas0x2a/devhouse",
"path": "attendance.py",
"copies": "1",
"size": "2164",
"license": "mit",
"hash": 3559714082186833400,
"line_mean": 39.0740740741,
"line_max": 73,
"alpha_frac": 0.7218114603,
"autogenerated": false,
"ratio": 3.857397504456328,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5079208964756328,
"avg_score": null,
"num_lines": null
} |
"""attendance tweaks
Revision ID: 44d9d3bf2abb
Revises: 3559c36f279d
Create Date: 2015-07-21 17:28:29.177619
"""
# revision identifiers, used by Alembic.
revision = '44d9d3bf2abb'
down_revision = '3559c36f279d'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Replace free-form attendance tracking with a constrained enum column.

    NOTE(review): the TRUNCATE discards all existing attendance rows --
    presumably acceptable when this migration was written; confirm before
    running against data that must be preserved.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.execute("TRUNCATE TABLE committee_meeting_attendance")
    # Drop and recreate alternate_member so it becomes NOT NULL with a
    # server-side default of false.
    op.drop_column('committee_meeting_attendance', 'alternate_member')
    op.execute("CREATE TYPE meeting_attendance_enum AS ENUM ('A', 'AP', 'DE', 'L', 'LDE', 'P', 'Y')")
    op.add_column('committee_meeting_attendance', sa.Column('alternate_member', sa.Boolean(), server_default=sa.text(u'false'), nullable=False))
    op.add_column('committee_meeting_attendance', sa.Column('attendance', sa.Enum('A', 'AP', 'DE', 'L', 'LDE', 'P', 'Y', name='meeting_attendance_enum'), nullable=False))
    ### end Alembic commands ###
def downgrade():
    """Revert the attendance enum column and restore the nullable boolean."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('committee_meeting_attendance', 'attendance')
    # The enum type must be dropped explicitly; dropping the column alone
    # leaves the type behind in PostgreSQL.
    op.execute("DROP TYPE meeting_attendance_enum")
    op.drop_column('committee_meeting_attendance', 'alternate_member')
    op.add_column('committee_meeting_attendance', sa.Column('alternate_member', sa.Boolean(), nullable=True))
    ### end Alembic commands ###
| {
"repo_name": "Code4SA/pmg-cms-2",
"path": "migrations/versions/44d9d3bf2abb_attendance_tweaks.py",
"copies": "1",
"size": "1380",
"license": "apache-2.0",
"hash": 73121187074579250,
"line_mean": 38.4285714286,
"line_max": 170,
"alpha_frac": 0.7014492754,
"autogenerated": false,
"ratio": 3.3014354066985647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9473078921563944,
"avg_score": 0.0059611521069240355,
"num_lines": 35
} |
# Python 2 lesson script: basic `for` loops over lists and ranges.
attendees = ['Alison', 'Carolyn', 'Hannah']
for person in attendees: #for "name of variable that we're going to use as we go through this list" in "this list"
    print person
    #person = 'Alison', print person
    #person = 'Carolyn', print person
# range(5) yields 0..4, so this prints five numbers starting at 0.
for cow in range(5):
    print cow
print '\n\n\n\n\n'
days_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
for day in days_of_week:
    print day
##print days_of_week[0]
##print days_of_week[1]
##print days_of_week[2]
for week in range(1,5): #[1,2,3,4]
    print "Week {0}".format(week)
print '\n\n\n\n\n'
##for week in range(1,5): #[1,2,3,4]
##    print "Week {0}".format(week)
##    for day in days_of_week:
##        print day
months = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
##for cake in months:
##    print '\n'
##    print cake
##
##    for week in range(1,5): #[1,2,3,4]
##        print "\tWeek {0}".format(week)
##
##        for day in days_of_week:
##            print "\t\t", day
print '\n\n\n\n\n'
# enumerate pairs each item with its zero-based index; +1 makes it human-friendly.
for index, day in enumerate(days_of_week): #or, for dog, day in ...
    print "Day {0}: {1}".format(index + 1, day)
| {
"repo_name": "codelikeagirlcny/python-lessons-cny",
"path": "code-exercises-etc/section_05_(loops)/ajm.loops_basic-for.20170715.py",
"copies": "1",
"size": "1246",
"license": "mit",
"hash": -5606292625204184000,
"line_mean": 14.7721518987,
"line_max": 131,
"alpha_frac": 0.5722311396,
"autogenerated": false,
"ratio": 2.5585215605749485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36307527001749484,
"avg_score": null,
"num_lines": null
} |
##attendees = ['Hannah', 'Kseniya', 'Sarah', 'Christy']
##print [0]
##print [1]
##print [2]
##
##number_of_attendees = len(attendees)
##print number_of_attendees
##
###append, add one item at a time to the end of the list
##attendees.append ('Alison')
##print attendees
##
##attendees_ages = []
##attendees_ages.append(29)
##attendees_ages.append(33)
##print attendees_ages
##
##attendees_ages[0] = 30
##print attendees_ages
##
##days_week = ['Monday','Tuesday']
##days_week.append('Wednesday')
##days_week.append('Thursday')
##days_week.append('Friday')
##days_week.append('Saturday')
##days_week.append('Sunday')
##print days_week
##print len(days_week)
##
###.pop by default removes last item in list
##days_week.pop() #or days_week.pop(), same thing without index
##
##print days_week
##print len(days_week)
##
##day = days_week.pop(3)
##print day
##
##print "\nHere is the months exercise:"
##months = ['January','February']
##month3 = 'March'
##months.append(month3)
##months.extend(['April','May','June','July','August','September','October','November','December'])
##months.insert (0, 'New 1st month of the year!')
##print months.pop(12)
##print months
##
###split exercise
##print "Here is the split example - making an address into a list, separating at each space."
##address = "1133 19th St NW Washington, DC 20036"
##address_as_list = address.split(" ")
##print address_as_list
##
###in keyword allows checking whether a value exists in the list
##print 'ann' in 'Shannon'
##name = 'Shannon'
##print 'ann' in name
##if 'Wednesday' in days_week:
## print "Wednesday is a day of the week!"
##print 'Frankenstein' in address
##
##print "Hello there, {0}!".format(raw_input('Enter your name.'))
##attendees = []
##attendee1_name = raw_input('Provide the first attendee name.')
##print attendee1_name
##attendees.append(attendee1_name)
##print attendees
##print len(attendees)
##print "Hello there, {0}! Please enter an address.".format(raw_input('Enter your name.'))
##
##addresses = []
##addressesNW = []
##addressesNE = []
##addressesSE = []
##addressesSW = []
##
##address1 = raw_input('Provide the first address.')
##
##addresses.append(address1)
##if ' NW ' in address1 or ' nw ' in address1:
## addressesNW.append(address1)
##elif ' NE ' in address1 or ' ne ' in address1:
## addressesNE.append(address1)
##elif ' SE ' in address1 or ' se ' in address1:
## addressesSE.append(address1)
##elif ' SW ' in address1 or 'sw ' in address1:
## addressesSW.append(address1)
##else:
## print "Your address is not located in a quadrant in Washington, DC."
##
##address2 = raw_input('Provide the second address.')
##
##addresses.append(address2)
##if ' NW ' in address2 or ' nw ' in address2:
## addressesNW.append(address2)
##elif ' NE ' in address2 or ' ne ' in address2:
## addressesNE.append(address2)
##elif ' SE ' in address2 or ' se ' in address2:
## addressesSE.append(address2)
##elif ' SW ' in address2 or ' sw ' in address2:
## addressesSW.append(address2)
##else:
## print "Your address is not located in a quadrant in Washington, DC."
##
##address3 = raw_input('Provide the third address.')
##if ' NW ' in address3 or ' nw ' in address3:
## addressesNW.append(address3)
##elif ' NE ' in address3 or ' ne ' in address3:
## addressesNE.append(address3)
##elif ' SE ' in address3 or ' se ' in address3:
## addressesSE.append(address3)
##elif ' SW ' in address3 or ' sw ' in address3:
## addressesSW.append(address3)
##else:
## print "Your address is not located in a quadrant in Washington, DC."
##
##print "There are {0} addresses in the list \"addressesNW.\" They are: {1}.".format(len(addressesNW), addressesNW)
##print "There are {0} addresses in the list \"addressesNE.\" They are: {1}.".format(len(addressesNE), addressesNE)
##print "There are {0} addresses in the list \"addressesSE.\" They are: {1}.".format(len(addressesSE), addressesSE)
##print "There are {0} addresses in the list \"addressesSW.\" They are: {1}.".format(len(addressesSW), addressesSW)
##print len(addresses)
##
###loops iterate through lists... for variable in range, "variable" is a local variable, set at each iteration in a loop
##print range(10)
##my_list = range(10)
##for cow in range(10):
## #cow = first iten in the list you're asking me to iterate through... which is 0
## #print that first
## print cow
print "Hello there, {0}! Please enter an address.".format(raw_input('Enter your name.'))
addresses = []
addressesNW = []
addressesNE = []
addressesSE = []
addressesSW = []
for address in range(3):
address = raw_input('Provide the first address.')
addresses.append(address)
if ' NW ' in address or ' nw ' in address:
addressesNW.append(address1)
elif ' NE ' in address or ' ne ' in address:
addressesNE.append(address1)
elif ' SE ' in address or ' se ' in address:
addressesSE.append(address1)
elif ' SW ' in address or 'sw ' in address:
addressesSW.append(address1)
else:
print "Your address is not located in a quadrant in Washington, DC."
print "There are {0} addresses in the list \"addressesNW.\" They are: {1}.".format(len(addressesNW), addressesNW)
print "There are {0} addresses in the list \"addressesNE.\" They are: {1}.".format(len(addressesNE), addressesNE)
print "There are {0} addresses in the list \"addressesSE.\" They are: {1}.".format(len(addressesSE), addressesSE)
print "There are {0} addresses in the list \"addressesSW.\" They are: {1}.".format(len(addressesSW), addressesSW)
print "There are {0} addresses in the list \"addresses.\"".format(len(addresses))
for month in months_in_year:
print month
for week in range(1, 5):
print "Week {0}".format(week)
for day in days_of_week:
print day
#enumerate
twitter_handle = ['@one','@two','@three']
for index, handle in enumerate(twitter_handle):
print index
print handle
at_sign_index = handle.find('@')
twitter_handles[index] = handle[at_sign_index + 1:]
#zip allows you to do a for loop to use each item in mutiple lists all at once; stops when it runs out of matches
| {
"repo_name": "hannahkwarren/CLaG-Sp2016",
"path": "code-exercises-etc/section_02_(strings)/HKW-Lesson2.py",
"copies": "1",
"size": "6087",
"license": "mit",
"hash": 8736106151340556000,
"line_mean": 32.8166666667,
"line_max": 120,
"alpha_frac": 0.6771808773,
"autogenerated": false,
"ratio": 3.102446483180428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4279627360480428,
"avg_score": null,
"num_lines": null
} |
# ATTENTION: Add a file called settings.py with appropriate settings
# as imported below.
import tweepy
from settings import consumer_key, consumer_secret, access_token, access_token_secret, hashtag
print ("Authenticating.")
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
print ("Authenticated.")
print ("Now fetching tweets.")
# Lazily page through recent English tweets matching the configured hashtag.
tweets = tweepy.Cursor(api.search,
                       q=hashtag,
                       count=100,
                       result_type="recent",
                       include_entities=True,
                       lang="en").items()
print ("fetched all tweets")
# print(tweets)
print ()
# counter maps user id -> number of tweets seen; users caches the user
# objects so names can be shown in the leaderboard.
counter = {}
users = {}
index_lulz = 0
for t in tweets:
    if t.user.id in counter:
        counter[t.user.id] += 1
    else:
        counter[t.user.id] = 1
        users[t.user.id] = t.user
    # Hard cap: only the first ~501 tweets are counted, to bound API usage.
    index_lulz +=1
    if index_lulz>500: break
# Sort users by tweet count, descending.
tuple_list = counter.items()
tuple_list = sorted(tuple_list, key=lambda x: x[1], reverse=True)
name_list = [(users[x[0]].name, x[1]) for x in tuple_list]
i = 1
print ("Leaderboard:")
print ("-"*80)
# Show the top ten tweeters.
for name, count in name_list[:10]:
    print ("{0}. {1} with {2} tweets".format(i, name, count))
    i += 1
| {
"repo_name": "svineet/TweetCounter",
"path": "main.py",
"copies": "1",
"size": "1265",
"license": "mit",
"hash": 4483028826675521500,
"line_mean": 24.8163265306,
"line_max": 94,
"alpha_frac": 0.6134387352,
"autogenerated": false,
"ratio": 3.1944444444444446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9153320848574855,
"avg_score": 0.030912466213918084,
"num_lines": 49
} |
"""Attention combination strategies.
This modules implements attention combination strategies for multi-encoder
scenario when we may want to combine the hidden states of the encoders in
more complicated fashion.
Currently there are two attention combination strategies flat and hierarchical
(see paper `Attention Combination Strategies for Multi-Source
Sequence-to-Sequence Learning <https://arxiv.org/pdf/1704.06567.pdf>`_).
The combination strategies may use the sentinel mechanism which allows the
decoder not to attend to the, and extract information on its own hidden state
(see paper `Knowing when to Look: Adaptive Attention via a Visual Sentinel for
Image Captioning <https://arxiv.org/pdf/1612.01887.pdf>`_).
"""
from typing import Any, List, Union, Type, Tuple, NamedTuple
import tensorflow as tf
from neuralmonkey.dataset import Dataset
from neuralmonkey.decoding_function import (BaseAttention, AttentionLoopState,
empty_attention_loop_state)
from neuralmonkey.model.model_part import ModelPart, FeedDict
from neuralmonkey.encoders.attentive import Attentive
from neuralmonkey.checking import assert_shape
from neuralmonkey.nn.projection import linear
class EncoderWrapper(ModelPart, Attentive):
    """Wrapper doing attention combination behaving as a single encoder.

    Bundles several encoders behind the interface of a single one, so
    that from the decoder's point of view there is a single attention
    object producing a single context vector per decoding step.
    """

    def __init__(self,
                 name: str,
                 encoders: List[Any],
                 attention_type: Type,
                 attention_state_size: int,
                 use_sentinels=False,
                 share_attn_projections=False) -> None:
        """Initialize the encoder wrapper.

        Args:
            name: Name of the encoder / its scope.
            encoders: List of encoders to be wrapped.
            attention_type: Type of the attention combination.
            attention_state_size: Dimension of the state projection used
                when computing attention energies.
            use_sentinels: Whether to add the sentinel mechanism to the
                attention combination.
            share_attn_projections: Whether the hidden-state projection is
                shared between the energy computation and the context
                vector computation.
        """
        ModelPart.__init__(self, name, None, None)
        Attentive.__init__(self, attention_type)

        self.encoders = encoders
        self._attention_type = attention_type
        self._attention_state_size = attention_state_size
        self._use_sentinels = use_sentinels
        self._share_attn_projections = share_attn_projections

        # The wrapper's "encoded" state is all encoder states concatenated
        # along the feature axis.
        self.encoded = tf.concat([e.encoded for e in encoders], 1)

    def create_attention_object(self):
        """Instantiate the attention combination over the wrapped encoders."""
        return self._attention_type(
            self.encoders,
            self._attention_state_size,
            "attention_{}".format(self.name),
            use_sentinels=self._use_sentinels,
            share_projections=self._share_attn_projections)

    def feed_dict(self, dataset: Dataset, train: bool) -> FeedDict:
        """Feed nothing; the wrapped encoders provide their own feeds."""
        return {}

    @property
    def _attention_tensor(self):
        raise NotImplementedError(
            "Encoder wrapper does not contain the attention tensor")

    @property
    def _attention_mask(self):
        raise NotImplementedError(
            "Encoder wrapper does not contain the attention mask")
class MultiAttention(BaseAttention):
    """Base class for attention combination."""
    # pylint: disable=unused-argument
    def __init__(self,
                 encoders: List[Attentive],
                 attention_state_size: int,
                 scope: Union[tf.VariableScope, str],
                 share_projections: bool = False,
                 use_sentinels: bool = False) -> None:
        """Create the combined attention.

        Args:
            encoders: Encoders whose attention is combined.
            attention_state_size: Dimension of the common projection space
                in which attention energies are computed.
            scope: Variable scope (or its name) for this attention's
                variables.
            share_projections: Whether to share the projections used for
                energy computation and for context vector computation.
            use_sentinels: Whether to add the sentinel vector as an extra
                attendable state.
        """
        super().__init__(scope, None, attention_state_size)
        self._encoders = encoders
        # Attention distributions collected step by step (for inspection
        # and/or regularization by subclasses).
        self.attentions_in_time = []  # type: List[tf.Tensor]
        self._share_projections = share_projections
        self._use_sentinels = use_sentinels
        with tf.variable_scope(self.scope):
            # Energy-scoring vector "v" of the Bahdanau-style attention.
            self.attn_v = tf.get_variable(
                "attn_v", [1, 1, self.attention_state_size],
                initializer=tf.random_normal_initializer(stddev=.001))
    # pylint: enable=unused-argument
    def attention(self, decoder_state, decoder_prev_state,
                  decoder_input, _, step):
        """Get context vector for given decoder state."""
        raise NotImplementedError("Abstract method")
    @property
    def attn_size(self):
        # The produced context vector lives in the projection space.
        return self.attention_state_size
    def _vector_logit(self,
                      projected_decoder_state: tf.Tensor,
                      vector_value: tf.Tensor,
                      scope: str) -> tf.Tensor:
        """Get logit for a single vector, e.g., sentinel vector.

        Returns a pair ``(projected_vector_for_context, logit)``; note the
        return annotation above understates this (it is a 2-tuple).
        """
        assert_shape(projected_decoder_state, [-1, 1, -1])
        assert_shape(vector_value, [-1, -1])
        with tf.variable_scope("{}_logit".format(scope)):
            vector_bias = tf.get_variable(
                "vector_bias", [],
                initializer=tf.constant_initializer(0.0))
            proj_vector_for_logit = tf.expand_dims(
                linear(vector_value, self.attention_state_size,
                       scope="vector_projection"), 1)
            # A separate projection for the context computation, unless
            # projections are shared.
            if self._share_projections:
                proj_vector_for_ctx = proj_vector_for_logit
            else:
                proj_vector_for_ctx = tf.expand_dims(
                    linear(vector_value, self.attention_state_size,
                           scope="vector_ctx_proj"), 1)
            vector_logit = tf.reduce_sum(
                self.attn_v *
                tf.tanh(projected_decoder_state + proj_vector_for_logit),
                [2]) + vector_bias
            assert_shape(vector_logit, [-1, 1])
            return proj_vector_for_ctx, vector_logit
class FlatMultiAttention(MultiAttention):
"""Flat attention combination strategy.
Using this attention combination strategy, hidden states of the encoders
are first projected to the same space (different projection for different
encoders) and then we compute a joint distribution over all the hidden
states. The context vector is then a weighted sum of another / then
projection of the encoders hidden states. The sentinel vector can be added
as an additional hidden state.
See equations 8 to 10 in the Attention Combination Strategies paper.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# pylint: disable=protected-access
self._encoders_tensors = [e._attention_tensor for e in self._encoders]
self._encoders_masks = [e._attention_mask for e in self._encoders]
# pylint: enable=protected-access
for e_m in self._encoders_masks:
assert_shape(e_m, [-1, -1])
for e_t in self._encoders_tensors:
assert_shape(e_t, [-1, -1, -1])
with tf.variable_scope(self.scope):
self.encoder_projections_for_logits = \
self.get_encoder_projections("logits_projections")
self.encoder_attn_biases = [
tf.get_variable(name="attn_bias_{}".format(i),
shape=[],
initializer=tf.constant_initializer(0.))
for i in range(len(self._encoders_tensors))]
if self._share_projections:
self.encoder_projections_for_ctx = \
self.encoder_projections_for_logits
else:
self.encoder_projections_for_ctx = \
self.get_encoder_projections("context_projections")
if self._use_sentinels:
self._encoders_masks.append(
tf.ones([tf.shape(self._encoders_masks[0])[0], 1]))
self.masks_concat = tf.concat(self._encoders_masks, 1)
def initial_loop_state(self) -> AttentionLoopState:
return empty_attention_loop_state()
def get_encoder_projections(self, scope):
encoder_projections = []
with tf.variable_scope(scope):
for i, encoder_tensor in enumerate(self._encoders_tensors):
encoder_state_size = encoder_tensor.get_shape()[2].value
encoder_tensor_shape = tf.shape(encoder_tensor)
proj_matrix = tf.get_variable(
"proj_matrix_{}".format(i),
[encoder_state_size, self.attention_state_size],
initializer=tf.random_normal_initializer(stddev=0.001))
proj_bias = tf.get_variable(
"proj_bias_{}".format(i),
shape=[self.attention_state_size],
initializer=tf.zeros_initializer())
encoder_tensor_2d = tf.reshape(
encoder_tensor, [-1, encoder_state_size])
projected_2d = tf.matmul(
encoder_tensor_2d, proj_matrix) + proj_bias
assert_shape(projected_2d, [-1, self.attention_state_size])
projection = tf.reshape(
projected_2d, [encoder_tensor_shape[0],
encoder_tensor_shape[1],
self.attention_state_size])
encoder_projections.append(projection)
return encoder_projections
# pylint: disable=too-many-locals
def attention(self,
decoder_state: tf.Tensor,
decoder_prev_state: tf.Tensor,
decoder_input: tf.Tensor,
loop_state: AttentionLoopState,
step: tf.Tensor) -> Tuple[tf.Tensor, AttentionLoopState]:
with tf.variable_scope(self.scope):
projected_state = linear(decoder_state, self.attention_state_size)
projected_state = tf.expand_dims(projected_state, 1)
assert_shape(projected_state, [-1, 1, self.attention_state_size])
logits = []
for proj, bias in zip(self.encoder_projections_for_logits,
self.encoder_attn_biases):
logits.append(tf.reduce_sum(
self.attn_v * tf.tanh(projected_state + proj), [2]) + bias)
if self._use_sentinels:
sentinel_value = _sentinel(decoder_state,
decoder_prev_state,
decoder_input)
projected_sentinel, sentinel_logit = self._vector_logit(
projected_state, sentinel_value, scope="sentinel")
logits.append(sentinel_logit)
attentions = self._renorm_softmax(tf.concat(logits, 1))
self.attentions_in_time.append(attentions)
if self._use_sentinels:
tiled_encoder_projections = self._tile_encoders_for_beamsearch(
projected_sentinel)
projections_concat = tf.concat(
tiled_encoder_projections + [projected_sentinel], 1)
else:
projections_concat = tf.concat(
self.encoder_projections_for_ctx, 1)
contexts = tf.reduce_sum(
tf.expand_dims(attentions, 2) * projections_concat, [1])
next_loop_state = AttentionLoopState(
contexts=loop_state.contexts.write(step, contexts),
weights=loop_state.weights.write(step, attentions))
return contexts, next_loop_state
# pylint: enable=too-many-locals
def _tile_encoders_for_beamsearch(self, projected_sentinel):
sentinel_batch_size = tf.shape(projected_sentinel)[0]
encoders_batch_size = tf.shape(
self.encoder_projections_for_ctx[0])[0]
modulo = tf.mod(sentinel_batch_size, encoders_batch_size)
with tf.control_dependencies([tf.assert_equal(modulo, 0)]):
beam_size = tf.div(sentinel_batch_size,
encoders_batch_size)
return [tf.tile(proj, [beam_size, 1, 1])
for proj in self.encoder_projections_for_ctx]
def _renorm_softmax(self, logits):
"""Renormalized softmax wrt. attention mask."""
softmax_concat = tf.nn.softmax(logits) * self.masks_concat
norm = tf.reduce_sum(softmax_concat, 1, keep_dims=True) + 1e-8
attentions = softmax_concat / norm
return attentions
    def finalize_loop(self, key: str,
                      last_loop_state: AttentionLoopState) -> None:
        """Stack the accumulated attention weights into ``self.histories``."""
        # TODO factorization of the flat distribution across encoders
        # could take place here.
        self.histories[key] = last_loop_state.weights.stack()
def _sentinel(state, prev_state, input_):
    """Compute the sentinel vector by gating the current decoder state."""
    with tf.variable_scope("sentinel"):
        state_dim = state.get_shape()[-1].value
        # The gate is a sigmoid of a linear map of the previous state
        # concatenated with the current input.
        gate_input = tf.concat([prev_state, input_], 1)
        gate = tf.nn.sigmoid(linear(gate_input, state_dim))
        gated_state = gate * state
        assert_shape(gated_state, [-1, state_dim])
        return gated_state
# pylint: disable=invalid-name
# Loop state of the hierarchical attention: one child loop state per
# encoder-level attention object, plus the loop state of the top-level
# (second-stage) attention itself.
HierarchicalLoopState = NamedTuple(
    "HierarchicalLoopState",
    [("child_loop_states", List),
     ("loop_state", AttentionLoopState)])
# pylint: enable=invalid-name
class HierarchicalMultiAttention(MultiAttention):
    """Hierarchical attention combination.

    Hierarchical attention combination strategy first computes the context
    vector for each encoder separately using whatever attention type the
    encoders have. After that it computes a second attention over the
    resulting context vectors and optionally the sentinel vector.

    See equations 6 and 7 in the Attention Combination Strategies paper.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        with tf.variable_scope(self.scope):
            # One first-stage attention object per encoder.
            self._attn_objs = [
                e.create_attention_object() for e in self._encoders]

    def initial_loop_state(self) -> HierarchicalLoopState:
        """Build the initial nested loop state (children + top level)."""
        return HierarchicalLoopState(
            child_loop_states=[a.initial_loop_state()
                               for a in self._attn_objs],
            loop_state=empty_attention_loop_state())

    # pylint: disable=too-many-locals
    def attention(self,
                  decoder_state: tf.Tensor,
                  decoder_prev_state: tf.Tensor,
                  decoder_input: tf.Tensor,
                  loop_state: HierarchicalLoopState,
                  step: tf.Tensor) -> Tuple[tf.Tensor, HierarchicalLoopState]:
        """Run the two-stage attention and update the nested loop state."""
        with tf.variable_scope(self.scope):
            projected_state = linear(decoder_state, self.attention_state_size)
            projected_state = tf.expand_dims(projected_state, 1)
            assert_shape(projected_state, [-1, 1, self.attention_state_size])
            # Stage 1: each encoder's own attention yields a context vector
            # and an updated child loop state.
            attn_ctx_vectors, child_loop_states = zip(*[
                a.attention(decoder_state, decoder_prev_state, decoder_input,
                            ls, step)
                for a, ls in zip(self._attn_objs,
                                 loop_state.child_loop_states)])
            # Stage 2: project each context vector and score it against the
            # projected decoder state.
            proj_ctxs, attn_logits = [list(t) for t in zip(*[
                self._vector_logit(projected_state,
                                   ctx_vec, scope=enc.name)  # type: ignore
                for ctx_vec, enc in zip(attn_ctx_vectors, self._encoders)])]
            if self._use_sentinels:
                sentinel_value = _sentinel(decoder_state,
                                           decoder_prev_state,
                                           decoder_input)
                proj_sentinel, sentinel_logit = self._vector_logit(
                    projected_state, sentinel_value, scope="sentinel")
                proj_ctxs.append(proj_sentinel)
                attn_logits.append(sentinel_logit)
            attention_distr = tf.nn.softmax(tf.concat(attn_logits, 1))
            self.attentions_in_time.append(attention_distr)
            if self._share_projections:
                # Reuse the logit projections as the output contexts.
                output_cxts = proj_ctxs
            else:
                # Separate projection parameters for the output contexts.
                output_cxts = [
                    tf.expand_dims(
                        linear(ctx_vec, self.attention_state_size,
                               scope="proj_attn_{}".format(
                                   enc.name)), 1)  # type: ignore
                    for ctx_vec, enc in zip(attn_ctx_vectors, self._encoders)]
                if self._use_sentinels:
                    output_cxts.append(tf.expand_dims(
                        linear(sentinel_value, self.attention_state_size,
                               scope="proj_sentinel"), 1))
            # Weighted sum of the (projected) per-encoder contexts.
            projections_concat = tf.concat(output_cxts, 1)
            context = tf.reduce_sum(
                tf.expand_dims(attention_distr, 2) * projections_concat, [1])
            prev_loop_state = loop_state.loop_state
            next_contexts = prev_loop_state.contexts.write(step, context)
            next_weights = prev_loop_state.weights.write(step, attention_distr)
            next_loop_state = AttentionLoopState(
                contexts=next_contexts,
                weights=next_weights)
            next_hier_loop_state = HierarchicalLoopState(
                child_loop_states=list(child_loop_states),
                loop_state=next_loop_state)
            return context, next_hier_loop_state
    # pylint: enable=too-many-locals

    def finalize_loop(self, key: str, last_loop_state: Any) -> None:
        """Finalize children first, then store the top-level weights."""
        for c_attention, c_loop_state in zip(
                self._attn_objs, last_loop_state.child_loop_states):
            c_attention.finalize_loop(key, c_loop_state)
        self.histories[key] = last_loop_state.loop_state.weights.stack()
| {
"repo_name": "bastings/neuralmonkey",
"path": "neuralmonkey/encoders/encoder_wrapper.py",
"copies": "1",
"size": "18095",
"license": "bsd-3-clause",
"hash": -6862849683702879000,
"line_mean": 40.0317460317,
"line_max": 79,
"alpha_frac": 0.5879524731,
"autogenerated": false,
"ratio": 4.2209003965477025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5308852869647702,
"avg_score": null,
"num_lines": null
} |
"""AttentionDecoderCorr takes an array of correlations and outputs attention.
This code is based on Matlab code published here:
https://github.com/sinamiran/Real-Time-Tracking-of-Selective-Auditory-Attention
Based on the work:
S. Miran, S. Akram, A. Sheikhattar, J. Z. Simon, T. Zhang, and B. Babadi,
Real-Time Tracking of Selective Auditory Attention from M/EEG: A Bayesian
Filtering Approach, Frontiers in Neuroscience, Vol. 12, pp. 262, May 2018
and
S. Akram, J. Z. Simon, S. A. Shamma, and B. Babadi,
A State-Space Model for Decoding Auditory Attentional Modulation from MEG in a
Competing-Speaker Environment, 2014 Neural Information Processing Systems,
Dec 2014, Montreal, QC, Canada.
"""
import itertools
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
def _check_aligned_array(name, value, decision):
  """Raise TypeError unless value is an ndarray matching decision's length.

  Args:
    name: Argument name to use in the error messages.
    value: The object to validate.
    decision: Reference array whose length value must match.

  Raises:
    TypeError: if value is not an np.ndarray or has a different length.
  """
  if not isinstance(value, np.ndarray):
    raise TypeError('Argument %s must be an np array, not %s' %
                    (name, type(value)))
  elif len(decision) != len(value):
    raise TypeError('Input %s must match length of decision,'
                    ' not %d and %d' % (name, len(decision), len(value)))


def plot_aad_results(decision, attention_flag=None,
                     decision_upper=None, decision_lower=None,
                     t=None, xlabel='Time (frames)',
                     ylabel='Prob of Speaker 1',
                     title='AAD Decoding Result',
                     linecolor='blue'):
  """Plots the results of an attention decoding experiment.

  Show decision variable, along with upper and lower probability bounds.
  Note: the graph is not cleared, so multiple results can be plotted on the
  same axis.

  Args:
    decision: estimate for attention direction to speaker 1
    attention_flag: ground truth indication of attended speaker: 0 or 1
    decision_upper: positive delta probability of SSD estimate
    decision_lower: minus delta probability of SSD estimate
    t: optional time axis, otherwise x-axis is in terms of frames
    xlabel: Label for the x-axis of the graph
    ylabel: Label for the y-axis of the graph
    title: Title of the graph
    linecolor: Color of the line, and then lightColor for the confidence
      bounds.

  Raises:
    TypeError: if an array argument is not an np.ndarray, or its length does
      not match decision's.
  """
  # All arguments share one validation helper.  Checking decision against
  # itself only exercises the isinstance test (lengths trivially match).
  _check_aligned_array('decision', decision, decision)
  if attention_flag is not None:
    _check_aligned_array('attention_flag', attention_flag, decision)
  if decision_upper is not None:
    _check_aligned_array('decision_upper', decision_upper, decision)
  if decision_lower is not None:
    _check_aligned_array('decision_lower', decision_lower, decision)
  if t is not None:
    _check_aligned_array('t', t, decision)
  else:  # Default is sample based time axis.
    t = np.arange(len(decision))
  plt.plot(t, decision, linecolor)
  plt.xlabel(xlabel)
  plt.ylabel(ylabel)
  plt.title(title)
  if decision_upper is not None and decision_lower is not None:
    # Add a band for the confidence interval (e.g. 'lightblue' for 'blue').
    plt.fill_between(t, decision_upper, decision_lower, color='light'+linecolor)
  # Check to see if the attention signal has two values, indicating that
  # there is at least one attention switch.
  if attention_flag is not None and (np.sum(attention_flag == 0) and
                                     np.sum(attention_flag != 0)):
    # Add gray boxes from top to bottom for every other attention section.
    axis_limits = plt.axis()
    start_index = 0
    attention_flag = list(attention_flag)
    for attention_value, values in itertools.groupby(attention_flag):
      duration = len(list(values))
      if attention_value:  # Draw a gray box where attention value is != 0
        rect = patches.Rectangle((t[start_index], axis_limits[2]),
                                 t[start_index + duration - 1] - t[start_index],
                                 axis_limits[3]-axis_limits[2],
                                 facecolor='lightgray',
                                 alpha=0.5)
        plt.gca().add_patch(rect)
      start_index += duration
class AttentionDecoder(object):
  """Baseline attention decoder: winner takes all on mean correlation."""

  def attention(self, r1, r2):
    """Pick speaker 1 iff its mean correlation exceeds speaker 2's.

    Returns a (decision, 0, 0) tuple; the zeros stand in for the
    confidence deltas reported by fancier decoders.
    """
    mean_1 = np.mean(r1)
    mean_2 = np.mean(r2)
    return mean_1 > mean_2, 0, 0

  def tune(self, r1, r2):
    """An optional training step for tuning parameters."""
    del r1, r2
class StepAttentionDecoder(AttentionDecoder):
  """Decodes using a bit of hysteresis. Steps between 0.1 and 0.9.

  Maintains a state variable starting at 0.5.  Each call moves it up by
  0.1 when correlation 1 beats correlation 2 and down by 0.1 otherwise,
  clamped to the interval [0.1, 0.9].  The decision is speaker 1 while
  the state exceeds 0.5.
  """

  def __init__(self):
    # Running hysteresis state, always kept within [0.1, 0.9].
    self.state = 0.5

  def attention(self, r1, r2):
    """Calculate the attention decision using simple comparison at this time.

    Args:
      r1: Scalar signal indicating likelihood of speaker 1.
      r2: Scalar signal indicating likelihood of speaker 2.

    Returns:
      Boolean decision, plus empty confidence bounds (delta is zero for both
      upper and lower intervals.)
    """
    delta = 0.1 if np.mean(r1) > np.mean(r2) else -0.1
    # Only one of the two clamps can ever be active per update, so this is
    # equivalent to clamping just the side the step moved toward.
    self.state = min(0.9, max(0.1, self.state + delta))
    return self.state > 0.5, 0, 0
class StateSpaceAttentionDecoder(AttentionDecoder):
  """Object to contain the attention state decoding.

  Will expect an incoming stream of real-time correlations computed using
  some sort of decoding algorithm.  Implements the fixed-lag state-space
  (EM + Kalman smoothing) decoder of Miran et al. (2018); see the module
  docstring for references.
  """

  def __init__(self, outer_iter, inner_iter, newton_iter, fs_corr,
               forward_lag=0, backward_lag=13, offset=0.0):
    """Initializer.

    Args:
      outer_iter: number of iterations in the outer EM loop.
      inner_iter: number of iterations in the inner EM loop.
      newton_iter: number of iteration in the application of Newton's algo.
      fs_corr: sample rate of the correlations (not EEG data).
      forward_lag: amount of data used in frames after the prediction t to
        make the predictions.
      backward_lag: amount of data used in frames prior to the
        prediction t to make the predictions.
      offset: Temporary hack to move data away from negative #s, use positive
        1 or 2 to move things away from zero.
    """
    self._offset = offset
    self.outer_iter = outer_iter  # number of iterations in outer EM loop
    self.inner_iter = inner_iter  # number of iterations in inner EM loop
    self.newton_iter = newton_iter  # number of iterations in Newton Alg loop
    # parameters of the fixed-lag sliding window
    # NOTE(review): fs_corr is stored but never read inside this class --
    # confirm downstream consumers before removing.
    self.fs_corr = fs_corr
    self.forward_lag = forward_lag
    self.backward_lag = backward_lag
    self.k_f = self.forward_lag
    self.k_b = self.backward_lag
    self.k_w = self.k_f + self.k_b + 1  # sliding window size of the decoding
    print('StateSpaceAttentionDecoder init: k_f is %d, k_b is %d' %
          (self.k_f, self.k_b))
    # 95% confidence intervals (changed from 90%)
    self.c0 = 1.96
    # parameters of the inverse-gamma prior on the state-space variances
    self.mean_p = 0.2
    self.var_p = 5
    self.a_0 = 2 + self.mean_p**2 / self.var_p
    self.b_0 = self.mean_p * (self.a_0 - 1)
    # Keep track of the number of times the attention function is called to
    # easily track when to update the correlation vectors and state estimates
    self.calls = 0
    # track the computed correlations
    self.r1 = []
    self.r2 = []
    # track the estimated parameters of the state space model, initialised as
    # below.
    self.z_smoothed = []
    self.eta_smoothed = []
    self.z_dyn = []
    self.eta_dyn = []
    for _ in range(self.k_w):
      self.z_smoothed.append(0.0)
      self.z_dyn.append(0.0)
      self.eta_smoothed.append(0.3)
      self.eta_dyn.append(0.0)
    # set the degree of smoothness of attention over the window
    self.lambda_state = 1.0
    # initialise the Kalman filtering variables used for smoothing
    self.z_k_k = np.zeros((self.k_w+1,))
    self.sig_k_k = np.zeros((self.k_w+1,))
    self.z_k_k_1 = np.zeros((self.k_w+1,))
    self.sig_k_k_1 = np.zeros((self.k_w+1))
    self.z_k_k_cap = np.zeros((self.k_w+1,))
    self.sig_k_k_cap = np.zeros((self.k_w+1,))
    self.sm = np.zeros((self.k_w,))
    # Default tuned prior hyperparameters of the attended and unattended
    # Log-Normal distributions
    self.alpha_0 = [6.4113e+02, 4.0434e+03]
    self.beta_0 = [3.7581e+02, 6.2791e+03]
    self.mu_0 = [-0.3994, -1.5103]
    self.rho_d = [1.7060, 0.64395]
    self.mu_d = [-0.3994, -1.5103]

  def tune(self, r1, r2):
    """A more user friendly name, to mirror super class' name."""
    return self.tune_log_normal_priors(r1, r2)

  def tune_log_normal_priors(self, r1, r2):
    """Tune the prior distributions' parameters on some initial data.

    Find the MLE estimate of the log normal parameters for the attended and
    unattended log-normal distributions. Source1 MUST be the attended
    speaker in the training phase

    Args:
      r1: an initial 1d vector of correlations for the attended speaker.
      r2: an initial 1d vector of correlations for the unattended speaker.
    """
    # Normalize correlations between 0 and 1.
    abs_r1 = np.absolute(np.asarray(r1) + self._offset)
    abs_r2 = np.absolute(np.asarray(r2) + self._offset)
    n = abs_r1.shape[0]
    # Compute the mean and precision of the attended speaker r-values
    u_a = np.sum(abs_r1)/n
    v_a = np.sum((abs_r1-u_a)**2)/n
    rho_a = 1/np.log(v_a/u_a**2 + 1)
    mu_a = np.log(u_a) - 0.5/rho_a
    # Compute the mean and precision of the unattended speaker r-values
    # These equations implement the parameter estimation in the Wikipedia page
    # https://en.wikipedia.org/wiki/Log-normal_distribution#Estimation_of_parameters
    # using the sample mean and variances.
    u_u = np.sum(abs_r2)/n
    v_u = np.sum((abs_r2-u_u)**2)/n
    rho_u = 1/np.log(v_u/u_u**2 + 1)  # This is actually 1.0/variance
    mu_u = np.log(u_u) - 0.5/rho_u
    # Initialised attended and unattended Log-Normal distribution parameters
    # From Behtash: mu is the mean of the log-normal and rho is the "precision
    # parameter", which is the inverse of the variance. It is notationally more
    # convenient to parameterize the log-normal density with the inverse of the
    # variance.
    #
    self.rho_d = [rho_a, rho_u]
    self.mu_d = [mu_a, mu_u]
    self.mu_0 = [mu_a, mu_u]
    # tuned prior hyperparameters of the attended and unattended Log-Normal
    # distributions, these were computed by the UMD researchers on their data by
    # cross-validation. They appear hardcoded in the original MATLAB code and
    # seem to produce fine results.
    self.alpha_0 = [6.4113e+02, 4.0434e+03]
    self.beta_0 = [3.7581e+02, 6.2791e+03]

  def attention(self, r1, r2):
    """Compute the attentional state after receiving two new correlations.

    Returns the mean and error bounded prediction of the attentional state
    given the history of the correlation values for the two speakers and the
    previous attention, which are stored in AttentionDecoderCorr, along with
    the two new correlation values r1, r2, corresponding to speaker 1 and 2.

    Args:
      r1: a new correlation value for speaker 1.
      r2: a new correlation value for speaker 2.

    Returns:
      Tuple: (mean, lowerbound, upperbound)
    """
    self.calls += 1
    self.r1.append(np.abs(r1 + self._offset))
    self.r2.append(np.abs(r2 + self._offset))
    # Only decode once a full sliding window of correlations is available.
    if self.calls >= self.k_w:
      r1 = np.array(self.r1[-self.k_w:])
      r2 = np.array(self.r2[-self.k_w:])
      z = np.array(self.z_smoothed[-self.k_w:])
      eta = np.array(self.eta_smoothed[-self.k_w:])
      # Begin Outer EM loop
      for _ in range(self.outer_iter):
        # Calculating epsilon_k's in the current iteration (E-Step).
        # p_11/p_12 (p_21/p_22) are log-normal densities of speaker 1 (2)
        # correlations under the attended/unattended parameters.
        p_11 = (1.0/r1)*np.sqrt(self.rho_d[0])*np.exp(
            -0.5*self.rho_d[0]*(np.log(r1)-self.mu_d[0])**2)
        p_12 = (1.0/r1)*np.sqrt(self.rho_d[1])*np.exp(
            -0.5*self.rho_d[1]*(np.log(r1)-self.mu_d[1])**2)
        p_21 = (1.0/r2)*np.sqrt(self.rho_d[1])*np.exp(
            -0.5*self.rho_d[1]*(np.log(r2)-self.mu_d[1])**2)
        p_22 = (1.0/r2)*np.sqrt(self.rho_d[0])*np.exp(
            -0.5*self.rho_d[0]*(np.log(r2)-self.mu_d[0])**2)
        p = 1.0/(1.0+np.exp(-z))
        ep = (p*p_11*p_21)/(p*p_11*p_21+(1.0-p)*p_12*p_22)
        # distribution parameters update (M-step)
        self.mu_d[0] = (np.sum(ep*np.log(r1)+(1.0-ep)*np.log(r2)) +
                        self.k_w*self.mu_0[0])/(2.0*self.k_w)
        self.mu_d[1] = (np.sum(ep*np.log(r2)+(1.0-ep)*np.log(r1)) +
                        self.k_w*self.mu_0[1])/(2.0*self.k_w)
        self.rho_d[0] = (2.0*self.k_w*self.alpha_0[0])/ \
            (np.sum(ep*((np.log(r1)-self.mu_d[0])**2)+
                    (1.0-ep)*((np.log(r2)-self.mu_d[0])**2))+
             self.k_w*(2.0*self.beta_0[0]+(self.mu_d[0]-self.mu_0[0])**2))
        self.rho_d[1] = (2.0*self.k_w*self.alpha_0[1])/ \
            (np.sum(ep*((np.log(r2)-self.mu_d[1])**2)+
                    (1.0-ep)*((np.log(r1)-self.mu_d[1])**2))+
             self.k_w*(2.0*self.beta_0[1]+(self.mu_d[1]-self.mu_0[1])**2))
        # begin inner EM loop
        for _ in range(self.inner_iter):
          # Filtering
          for k in range(1, self.k_w+1):
            self.z_k_k_1[k] = self.lambda_state*self.z_k_k[k-1]
            self.sig_k_k_1[k] = self.lambda_state**2*self.sig_k_k[k-1]+eta[k-1]
            # Newton's Algorithm: solve the per-step posterior mode.
            for _ in range(self.newton_iter):
              self.z_k_k[k] = self.z_k_k[k]- \
                  (self.z_k_k[k] - self.z_k_k_1[k] -
                   self.sig_k_k_1[k]*(ep[k-1] -
                                      np.exp(self.z_k_k[k])/
                                      (1+np.exp(self.z_k_k[k]))))/ \
                  (1 + self.sig_k_k_1[k]*np.exp(self.z_k_k[k])/
                   ((1+np.exp(self.z_k_k[k]))**2))
            self.sig_k_k[k] = 1.0/ (1.0/self.sig_k_k_1[k] +
                                    np.exp(self.z_k_k[k])/
                                    ((1+np.exp(self.z_k_k[k]))**2))
          # Smoothing (backward pass over the window)
          self.z_k_k_cap[self.k_w] = self.z_k_k[self.k_w]
          self.sig_k_k_cap[self.k_w] = self.sig_k_k[self.k_w]
          # sig_k_k_cap vs sig_k_k?
          for k in range(self.k_w):
            self.sm[k] = self.sig_k_k[k]*self.lambda_state/self.sig_k_k_1[k+1]
            self.z_k_k_cap[k] = self.z_k_k[k] + self.sm[k]*(self.z_k_k_cap[k+1]-
                                                            self.z_k_k_1[k+1])
            self.sig_k_k_cap[k] = self.sig_k_k[k] + self.sm[k]**2* \
                (self.sig_k_k_cap[k+1]-self.sig_k_k_1[k+1])
          self.z_k_k[0] = self.z_k_k_cap[0]
          self.sig_k_k[0] = self.sig_k_k_cap[0]
          # MAP update of the state-space variances under the inverse-gamma
          # prior (a_0, b_0).
          eta = ((self.z_k_k_cap[1:]-self.z_k_k_cap[:-1])**2+
                 self.sig_k_k_cap[1:]+self.sig_k_k_cap[:-1]-
                 2.0*self.sig_k_k_cap[1:]*self.sm+2*self.b_0)/(1+2*(self.a_0+1))
        z = self.z_k_k_cap[1:]
      # Updated the z's and eta's
      self.z_smoothed += list(self.z_k_k_cap[1:])
      self.eta_smoothed += list(eta)
      self.z_k_k[0] = self.z_k_k_cap[1]
      # Report the fixed-lag estimate (k_f frames behind the newest sample).
      self.z_dyn.append(self.z_smoothed[-1 - self.k_f])
      self.eta_dyn.append(self.eta_smoothed[-1 - self.k_f])
      # Map the logit state and its c0-sigma bounds through the sigmoid.
      return (1.0/(1+np.exp(-self.z_dyn[-1])),
              1.0/(1+np.exp(-self.z_dyn[-1]-self.c0*np.sqrt(self.eta_dyn[-1]))),
              1.0/(1+np.exp(-self.z_dyn[-1]+self.c0*np.sqrt(self.eta_dyn[-1]))))
    return (0.5, 0.5, 0.5)  # No information, so return undecided.
def create_attention_decoder(type_name, window_step=100, frame_rate=100.0,
                             ssd_offset=0.0):
  """Creates any of the attention decoders, based on a name string.

  Args:
    type_name: One of wta, stepped or ssd to indicate the desired decoder type.
    window_step: How many frames between the start of each window
    frame_rate: The sampling rate of frames in frames/second.
    ssd_offset: How much to offset the values of the input correlation, in
      order to prevent correlations going negative, which doesn't fit the
      log-normal model.

  Returns:
    The desired type of Attention Decoder.

  Raises:
    ValueError: when type_name does not name a known decoder.
  """
  if type_name == 'wta':
    return AttentionDecoder()
  if type_name in ('stepped', 'step'):
    return StepAttentionDecoder()
  if type_name == 'ssd':
    # EM / Newton iteration counts for the state-space decoder, plus the
    # correlation frame rate derived from the window parameters.
    return StateSpaceAttentionDecoder(
        outer_iter=20, inner_iter=1, newton_iter=10,
        fs_corr=window_step * float(frame_rate) / 2.0, offset=ssd_offset)
  raise ValueError('Unknown type (%s) requested from create_attention_decoder' %
                   type_name)
| {
"repo_name": "google/telluride_decoding",
"path": "telluride_decoding/attention_decoder.py",
"copies": "1",
"size": "17663",
"license": "apache-2.0",
"hash": 5979323538553667000,
"line_mean": 38.4263392857,
"line_max": 86,
"alpha_frac": 0.6087301138,
"autogenerated": false,
"ratio": 3.131737588652482,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9215314592410786,
"avg_score": 0.00503062200833912,
"num_lines": 448
} |
""" Attention Factory
Hacked together by / Copyright 2021 Ross Wightman
"""
import torch
from functools import partial
from .bottleneck_attn import BottleneckAttn
from .cbam import CbamModule, LightCbamModule
from .eca import EcaModule, CecaModule
from .gather_excite import GatherExcite
from .global_context import GlobalContext
from .halo_attn import HaloAttn
from .involution import Involution
from .lambda_layer import LambdaLayer
from .non_local_attn import NonLocalAttn, BatNonLocalAttn
from .selective_kernel import SelectiveKernel
from .split_attn import SplitAttn
from .squeeze_excite import SEModule, EffectiveSEModule
from .swin_attn import WindowAttention
def get_attn(attn_type):
    """Resolve an attention spec to a module class (or pass an instance through).

    Args:
        attn_type: One of
            * an instantiated ``torch.nn.Module`` -- returned unchanged,
            * ``None`` -- attention disabled, returns ``None``,
            * a bool -- ``True`` selects ``SEModule``, ``False`` returns ``None``,
            * a (case-insensitive) string naming a known attention layer,
            * anything else (typically a class/factory) -- returned as-is.

    Returns:
        A module class / factory, the given instance, or ``None``.
    """
    if isinstance(attn_type, torch.nn.Module):
        return attn_type
    if attn_type is None:
        return None
    if isinstance(attn_type, bool):
        return SEModule if attn_type else None
    if not isinstance(attn_type, str):
        # Assume a module class or factory was passed through directly.
        return attn_type

    key = attn_type.lower()
    # Built per call so that merely defining this function never touches the
    # layer classes.
    table = {
        # Lightweight attention modules (channel and/or coarse spatial).
        # Typically added to existing network architecture blocks in addition
        # to existing convolutions.
        'se': SEModule,
        'ese': EffectiveSEModule,
        'eca': EcaModule,
        'ecam': partial(EcaModule, use_mlp=True),
        'ceca': CecaModule,
        'ge': GatherExcite,
        'gc': GlobalContext,
        'cbam': CbamModule,
        'lcbam': LightCbamModule,
        # Attention / attention-like modules w/ significant params.
        # Typically replace some of the existing workhorse convs in a network
        # architecture. All of these accept a stride argument and can
        # spatially downsample the input.
        'sk': SelectiveKernel,
        'splat': SplitAttn,
        # Self-attention / attention-like modules w/ significant compute
        # and/or params.
        'lambda': LambdaLayer,
        'bottleneck': BottleneckAttn,
        'halo': HaloAttn,
        'swin': WindowAttention,
        'involution': Involution,
        'nl': NonLocalAttn,
        'bat': BatNonLocalAttn,
    }
    # Woops!
    assert key in table, "Invalid attn module (%s)" % key
    return table[key]
def create_attn(attn_type, channels, **kwargs):
    """Instantiate the attention module selected by ``attn_type``, or None."""
    module_cls = get_attn(attn_type)
    if module_cls is None:
        return None
    # NOTE: it's expected the first (positional) argument of all attention
    # layers is the # input channels
    return module_cls(channels, **kwargs)
| {
"repo_name": "rwightman/pytorch-image-models",
"path": "timm/models/layers/create_attn.py",
"copies": "1",
"size": "3633",
"license": "apache-2.0",
"hash": -3660059667459771400,
"line_mean": 38.064516129,
"line_max": 109,
"alpha_frac": 0.6072116708,
"autogenerated": false,
"ratio": 4.105084745762712,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007732434742719923,
"num_lines": 93
} |
"""Attention mechanisms.
This module defines the interface of attention mechanisms and a few
concrete implementations. For a gentle introduction and usage examples see
the tutorial TODO.
An attention mechanism decides to what part of the input to pay attention.
It is typically used as a component of a recurrent network, though one can
imagine it used in other conditions as well. When the input is big and has
certain structure, for instance when it is sequence or an image, an
attention mechanism can be applied to extract only information which is
relevant for the network in its current state.
For the purpose of documentation clarity, we fix the following terminology
in this file:
* *network* is the network, typically a recurrent one, which
uses the attention mechanism.
* The network has *states*. Using this word in plural might seem weird, but
some recurrent networks like :class:`~blocks.bricks.recurrent.LSTM` do
have several states.
* The big structured input, to which the attention mechanism is applied,
is called the *attended*. When it has variable structure, e.g. a sequence
of variable length, there might be a *mask* associated with it.
* The information extracted by the attention from the attended is called
*glimpse*, more specifically *glimpses* because there might be a few
pieces of this information.
Using this terminology, the attention mechanism computes glimpses
given the states of the network and the attended.
An example: in the machine translation network from [BCB]_ the attended is
a sequence of so-called annotations, that is states of a bidirectional
network that was driven by word embeddings of the source sentence. The
attention mechanism assigns weights to the annotations. The weighted sum of
the annotations is further used by the translation network to predict the
next word of the generated translation. The weights and the weighted sum
are the glimpses. A generalized attention mechanism for this paper is
represented here as :class:`SequenceContentAttention`.
"""
from abc import ABCMeta, abstractmethod
from theano import tensor
from six import add_metaclass
from blocks.bricks import (Brick, Initializable, Sequence,
Feedforward, Linear, Tanh)
from blocks.bricks.base import lazy, application
from blocks.bricks.parallel import Parallel, Distribute
from blocks.bricks.recurrent import recurrent, BaseRecurrent
from blocks.utils import dict_union, dict_subset, pack
class AbstractAttention(Brick):
    """The common interface for attention bricks.

    First, see the module-level docstring for terminology.

    A generic attention mechanism functions as follows. Its inputs are the
    states of the network and the attended. Given these two it produces
    so-called *glimpses*, that is it extracts information from the attended
    which is necessary for the network in its current states

    For computational reasons we separate the process described above into
    two stages:

    1. The preprocessing stage, :meth:`preprocess`, includes computation
    that do not involve the state. Those can be often performed in advance.
    The outcome of this stage is called *preprocessed_attended*.

    2. The main stage, :meth:`take_glimpses`, includes all the rest.

    When an attention mechanism is applied sequentially, some glimpses from
    the previous step might be necessary to compute the new ones. A
    typical example for that is when the focus position from the previous
    step is required. In such cases :meth:`take_glimpses` should specify
    such need in its interface (its docstring explains how to do that). In
    addition :meth:`initial_glimpses` should specify some sensible
    initialization for the glimpses to be carried over.

    .. todo::

        Only single attended is currently allowed.

        :meth:`preprocess` and :meth:`initial_glimpses` might end up
        needing masks, which are currently not provided for them.

    Parameters
    ----------
    state_names : list
        The names of the network states.
    state_dims : list
        The state dimensions corresponding to `state_names`.
    attended_dim : int
        The dimension of the attended.

    Attributes
    ----------
    state_names : list
    state_dims : list
    attended_dim : int

    """
    @lazy(allocation=['state_names', 'state_dims', 'attended_dim'])
    def __init__(self, state_names, state_dims, attended_dim, **kwargs):
        self.state_names = state_names
        self.state_dims = state_dims
        self.attended_dim = attended_dim
        super(AbstractAttention, self).__init__(**kwargs)

    @application(inputs=['attended'], outputs=['preprocessed_attended'])
    def preprocess(self, attended):
        """Perform the preprocessing of the attended.

        Stage 1 of the attention mechanism, see :class:`AbstractAttention`
        docstring for an explanation of stages. The default implementation
        simply returns attended.

        Parameters
        ----------
        attended : :class:`~theano.Variable`
            The attended.

        Returns
        -------
        preprocessed_attended : :class:`~theano.Variable`
            The preprocessed attended.

        """
        # Identity by default; subclasses override to precompute state-free
        # transformations of the attended.
        return attended

    @abstractmethod
    def take_glimpses(self, attended, preprocessed_attended=None,
                      attended_mask=None, **kwargs):
        r"""Extract glimpses from the attended given the current states.

        Stage 2 of the attention mechanism, see :class:`AbstractAttention`
        for an explanation of stages. If `preprocessed_attended` is not
        given, should trigger the stage 1.

        This application method *must* declare its inputs and outputs.
        The glimpses to be carried over are identified by their presence
        in both inputs and outputs list. The attended *must* be the first
        input, the preprocessed attended *must* be the second one.

        Parameters
        ----------
        attended : :class:`~theano.Variable`
            The attended.
        preprocessed_attended : :class:`~theano.Variable`, optional
            The preprocessed attended computed by :meth:`preprocess`. When
            not given, :meth:`preprocess` should be called.
        attended_mask : :class:`~theano.Variable`, optional
            The mask for the attended. This is required in the case of
            padded structured output, e.g. when a number of sequences are
            force to be the same length. The mask identifies position of
            the `attended` that actually contain information.
        \*\*kwargs : dict
            Includes the states and the glimpses to be carried over from
            the previous step in the case when the attention mechanism is
            applied sequentially.

        """
        pass

    @abstractmethod
    def initial_glimpses(self, batch_size, attended):
        """Return sensible initial values for carried over glimpses.

        Parameters
        ----------
        batch_size : int or :class:`~theano.Variable`
            The batch size.
        attended : :class:`~theano.Variable`
            The attended.

        Returns
        -------
        initial_glimpses : list of :class:`~theano.Variable`
            The initial values for the requested glimpses. These might
            simply consist of zeros or be somehow extracted from
            the attended.

        """
        pass

    def get_dim(self, name):
        """Return the dimension of the named input/output role."""
        if name in ['attended', 'preprocessed_attended']:
            return self.attended_dim
        if name in ['attended_mask']:
            # Masks carry no feature dimension.
            return 0
        return super(AbstractAttention, self).get_dim(name)
class GenericSequenceAttention(AbstractAttention):
    """Logic common for sequence attention mechanisms."""
    @application
    def compute_weights(self, energies, attended_mask):
        """Compute weights from energies in softmax-like fashion.

        .. todo ::

            Use :class:`~blocks.bricks.Softmax`.

        Parameters
        ----------
        energies : :class:`~theano.Variable`
            The energies. Must be of the same shape as the mask.
        attended_mask : :class:`~theano.Variable`
            The mask for the attended. The index in the sequence must be
            the first dimension.

        Returns
        -------
        weights : :class:`~theano.Variable`
            Summing to 1 non-negative weights of the same shape
            as `energies`.

        """
        # Stabilize energies first and then exponentiate
        energies = energies - energies.max(axis=0)
        unnormalized_weights = tensor.exp(energies)
        # NOTE(review): if attended_mask were None, this branch is skipped
        # but the tensor.all(...) below would still fail -- presumably a
        # mask is always supplied here; confirm with callers.
        if attended_mask:
            unnormalized_weights *= attended_mask
        # If mask consists of all zeros use 1 as the normalization coefficient
        normalization = (unnormalized_weights.sum(axis=0) +
                         tensor.all(1 - attended_mask, axis=0))
        return unnormalized_weights / normalization

    @application
    def compute_weighted_averages(self, weights, attended):
        """Compute weighted averages of the attended sequence vectors.

        Parameters
        ----------
        weights : :class:`~theano.Variable`
            The weights. The shape must be equal to the attended shape
            without the last dimension.
        attended : :class:`~theano.Variable`
            The attended. The index in the sequence must be the first
            dimension.

        Returns
        -------
        weighted_averages : :class:`~theano.Variable`
            The weighted averages of the attended elements. The shape
            is equal to the attended shape with the first dimension
            dropped.

        """
        # Broadcast the weights over the feature axis, then sum out time.
        return (tensor.shape_padright(weights) * attended).sum(axis=0)
class SequenceContentAttention(GenericSequenceAttention, Initializable):
    """Attention mechanism that looks for relevant content in a sequence.

    This is the attention mechanism used in [BCB]_. The idea in a nutshell:

    1. The states and the sequence are transformed independently,
    2. The transformed states are summed with every transformed sequence
       element to obtain *match vectors*,
    3. A match vector is transformed into a single number interpreted as
       *energy*,
    4. Energies are normalized in softmax-like fashion. The resulting
       summing to one weights are called *attention weights*,
    5. Weighted average of the sequence elements with attention weights
       is computed.

    In terms of the :class:`AbstractAttention` documentation, the sequence
    is the attended. The weighted averages from 5 and the attention
    weights from 4 form the set of glimpses produced by this attention
    mechanism.

    Parameters
    ----------
    state_names : list of str
        The names of the network states.
    attended_dim : int
        The dimension of the sequence elements.
    match_dim : int
        The dimension of the match vector.
    state_transformer : :class:`.Brick`
        A prototype for state transformations. If ``None``,
        a linear transformation is used.
    attended_transformer : :class:`.Feedforward`
        The transformation to be applied to the sequence. If ``None`` an
        affine transformation is used.
    energy_computer : :class:`.Feedforward`
        Computes energy from the match vector. If ``None``, an affine
        transformations preceeded by :math:`tanh` is used.

    Notes
    -----
    See :class:`.Initializable` for initialization parameters.

    .. [BCB] Dzmitry Bahdanau, Kyunghyun Cho and Yoshua Bengio. Neural
       Machine Translation by Jointly Learning to Align and Translate.

    """
    @lazy(allocation=['match_dim'])
    def __init__(self, match_dim, state_transformer=None,
                 attended_transformer=None, energy_computer=None, **kwargs):
        super(SequenceContentAttention, self).__init__(**kwargs)
        if not state_transformer:
            # Default: bias-free linear map of each state into the
            # match space.
            state_transformer = Linear(use_bias=False)
        self.match_dim = match_dim
        self.state_transformer = state_transformer
        # One transformer per network state, all sharing the prototype.
        self.state_transformers = Parallel(input_names=self.state_names,
                                           prototype=state_transformer,
                                           name="state_trans")
        if not attended_transformer:
            attended_transformer = Linear(name="preprocess")
        if not energy_computer:
            energy_computer = ShallowEnergyComputer(name="energy_comp")
        self.attended_transformer = attended_transformer
        self.energy_computer = energy_computer
        self.children = [self.state_transformers, attended_transformer,
                         energy_computer]

    def _push_allocation_config(self):
        # Every state is mapped into the shared match space; the energy
        # computer reduces a match vector to a single scalar.
        self.state_transformers.input_dims = self.state_dims
        self.state_transformers.output_dims = [self.match_dim
                                               for name in self.state_names]
        self.attended_transformer.input_dim = self.attended_dim
        self.attended_transformer.output_dim = self.match_dim
        self.energy_computer.input_dim = self.match_dim
        self.energy_computer.output_dim = 1

    @application
    def compute_energies(self, attended, preprocessed_attended, states):
        # `preprocessed_attended` is optional; recompute it on demand.
        if not preprocessed_attended:
            preprocessed_attended = self.preprocess(attended)
        transformed_states = self.state_transformers.apply(as_dict=True,
                                                           **states)
        # Broadcasting of transformed states should be done automatically
        # (sum() starts from the preprocessed attended, adding every
        # transformed state to each sequence position).
        match_vectors = sum(transformed_states.values(),
                            preprocessed_attended)
        # Drop the trailing singleton dimension produced by the
        # scalar-valued energy computer.
        energies = self.energy_computer.apply(match_vectors).reshape(
            match_vectors.shape[:-1], ndim=match_vectors.ndim - 1)
        return energies

    @application(outputs=['weighted_averages', 'weights'])
    def take_glimpses(self, attended, preprocessed_attended=None,
                      attended_mask=None, **states):
        r"""Compute attention weights and produce glimpses.

        Parameters
        ----------
        attended : :class:`~tensor.TensorVariable`
            The sequence, time is the 1-st dimension.
        preprocessed_attended : :class:`~tensor.TensorVariable`
            The preprocessed sequence. If ``None``, is computed by calling
            :meth:`preprocess`.
        attended_mask : :class:`~tensor.TensorVariable`
            A 0/1 mask specifying available data. 0 means that the
            corresponding sequence element is fake.
        \*\*states
            The states of the network.

        Returns
        -------
        weighted_averages : :class:`~theano.Variable`
            Linear combinations of sequence elements with the attention
            weights.
        weights : :class:`~theano.Variable`
            The attention weights. The first dimension is batch, the second
            is time.

        """
        energies = self.compute_energies(attended, preprocessed_attended,
                                         states)
        weights = self.compute_weights(energies, attended_mask)
        weighted_averages = self.compute_weighted_averages(weights, attended)
        # Transpose so that batch comes first in the returned weights.
        return weighted_averages, weights.T

    @take_glimpses.property('inputs')
    def take_glimpses_inputs(self):
        return (['attended', 'preprocessed_attended', 'attended_mask'] +
                self.state_names)

    @application(outputs=['weighted_averages', 'weights'])
    def initial_glimpses(self, batch_size, attended):
        # Start from all-zero averages and all-zero attention weights.
        return [tensor.zeros((batch_size, self.attended_dim)),
                tensor.zeros((batch_size, attended.shape[0]))]

    @application(inputs=['attended'], outputs=['preprocessed_attended'])
    def preprocess(self, attended):
        """Preprocess the sequence for computing attention weights.

        Parameters
        ----------
        attended : :class:`~tensor.TensorVariable`
            The attended sequence, time is the 1-st dimension.

        """
        return self.attended_transformer.apply(attended)

    def get_dim(self, name):
        if name in ['weighted_averages']:
            return self.attended_dim
        if name in ['weights']:
            # Weights are per-position scalars.
            return 0
        return super(SequenceContentAttention, self).get_dim(name)
class ShallowEnergyComputer(Sequence, Initializable, Feedforward):
    """A simple energy computer: first tanh, then weighted sum."""
    @lazy()
    def __init__(self, use_bias=False, **kwargs):
        # The pipeline: an element-wise tanh followed by a single
        # linear layer that reduces each match vector to one number.
        steps = [Tanh().apply, Linear(use_bias=use_bias).apply]
        super(ShallowEnergyComputer, self).__init__(steps, **kwargs)

    @property
    def input_dim(self):
        """Input dimension, delegated to the wrapped linear layer."""
        linear = self.children[1]
        return linear.input_dim

    @input_dim.setter
    def input_dim(self, value):
        linear = self.children[1]
        linear.input_dim = value

    @property
    def output_dim(self):
        """Output dimension, delegated to the wrapped linear layer."""
        linear = self.children[1]
        return linear.output_dim

    @output_dim.setter
    def output_dim(self, value):
        linear = self.children[1]
        linear.output_dim = value
@add_metaclass(ABCMeta)
class AbstractAttentionRecurrent(BaseRecurrent):
    """The interface for attention-equipped recurrent transitions.

    When a recurrent network is equipped with an attention mechanism its
    transition typically consists of two steps: (1) the glimpses are taken
    by the attention mechanism and (2) the next states are computed using
    the current states and the glimpses. It is required for certain
    usecases (such as sequence generator) that apart from a do-it-all
    recurrent application method interfaces for the first step and
    the second steps of the transition are provided.

    """
    # See :class:`AttentionRecurrent` below for the reference
    # implementation of this interface.
    @abstractmethod
    def apply(self, **kwargs):
        """Compute next states taking glimpses on the way."""
        pass

    @abstractmethod
    def take_glimpses(self, **kwargs):
        """Compute glimpses given the current states."""
        pass

    @abstractmethod
    def compute_states(self, **kwargs):
        """Compute next states given current states and glimpses."""
        pass
class AttentionRecurrent(AbstractAttentionRecurrent, Initializable):
    """Combines an attention mechanism and a recurrent transition.

    This brick equips a recurrent transition with an attention mechanism.
    In order to do this two more contexts are added: one to be attended and
    a mask for it. It is also possible to use the contexts of the given
    recurrent transition for these purposes and not add any new ones,
    see `add_context` parameter.

    At the beginning of each step attention mechanism produces glimpses;
    these glimpses together with the current states are used to compute the
    next state and finish the transition. In some cases glimpses from the
    previous steps are also necessary for the attention mechanism, e.g.
    in order to focus on an area close to the one from the previous step.
    This is also supported: such glimpses become states of the new
    transition.

    To let the user control the way glimpses are used, this brick also
    takes a "distribute" brick as parameter that distributes the
    information from glimpses across the sequential inputs of the wrapped
    recurrent transition.

    Parameters
    ----------
    transition : :class:`.BaseRecurrent`
        The recurrent transition.
    attention : :class:`.Brick`
        The attention mechanism.
    distribute : :class:`.Brick`, optional
        Distributes the information from glimpses across the input
        sequences of the transition. By default a :class:`.Distribute` is
        used, and those inputs containing the "mask" substring in their
        name are not affected.
    add_contexts : bool, optional
        If ``True``, new contexts for the attended and the attended mask
        are added to this transition, otherwise existing contexts of the
        wrapped transition are used. ``True`` by default.
    attended_name : str
        The name of the attended context. If ``None``, "attended"
        or the first context of the recurrent transition is used
        depending on the value of `add_contents` flag.
    attended_mask_name : str
        The name of the mask for the attended context. If ``None``,
        "attended_mask" or the second context of the recurrent transition
        is used depending on the value of `add_contents` flag.

    Notes
    -----
    See :class:`.Initializable` for initialization parameters.

    Wrapping your recurrent brick with this class makes all the
    states mandatory. If you feel this is a limitation for you, try
    to make it better! This restriction does not apply to sequences
    and contexts: those keep being as optional as they were for
    your brick.

    Those coming to Blocks from Groundhog might recognize that this is
    a `RecurrentLayerWithSearch`, but on steroids :)

    """
    def __init__(self, transition, attention, distribute=None,
                 add_contexts=True,
                 attended_name=None, attended_mask_name=None,
                 **kwargs):
        super(AttentionRecurrent, self).__init__(**kwargs)
        # Variable names are taken from the wrapped transition's apply.
        self._sequence_names = list(transition.apply.sequences)
        self._state_names = list(transition.apply.states)
        self._context_names = list(transition.apply.contexts)
        if add_contexts:
            if not attended_name:
                attended_name = 'attended'
            if not attended_mask_name:
                attended_mask_name = 'attended_mask'
            self._context_names += [attended_name, attended_mask_name]
        else:
            # Reuse the wrapped transition's first two contexts as the
            # attended and its mask.
            attended_name = self._context_names[0]
            attended_mask_name = self._context_names[1]
        if not distribute:
            # By default distribute the first glimpse over every
            # non-mask input sequence of the transition.
            normal_inputs = [name for name in self._sequence_names
                             if 'mask' not in name]
            distribute = Distribute(normal_inputs,
                                    attention.take_glimpses.outputs[0])
        self.transition = transition
        self.attention = attention
        self.distribute = distribute
        self.add_contexts = add_contexts
        self.attended_name = attended_name
        self.attended_mask_name = attended_mask_name
        self.preprocessed_attended_name = "preprocessed_" + self.attended_name
        self._glimpse_names = self.attention.take_glimpses.outputs
        # We need to determine which glimpses are fed back.
        # Currently we extract it from `take_glimpses` signature.
        self.previous_glimpses_needed = [
            name for name in self._glimpse_names
            if name in self.attention.take_glimpses.inputs]
        self.children = [self.transition, self.attention, self.distribute]

    def _push_allocation_config(self):
        # Propagate dimensions between the wrapped bricks before
        # allocation.
        self.attention.state_dims = self.transition.get_dims(
            self.attention.state_names)
        self.attention.attended_dim = self.get_dim(self.attended_name)
        self.distribute.source_dim = self.attention.get_dim(
            self.distribute.source_name)
        self.distribute.target_dims = self.transition.get_dims(
            self.distribute.target_names)

    @application
    def take_glimpses(self, **kwargs):
        r"""Compute glimpses with the attention mechanism.

        A thin wrapper over `self.attention.take_glimpses`: takes care
        of choosing and renaming the necessary arguments.

        Parameters
        ----------
        \*\*kwargs
            Must contain the attended, previous step states and glimpses.
            Can optionaly contain the attended mask and the preprocessed
            attended.

        Returns
        -------
        glimpses : list of :class:`~tensor.TensorVariable`
            Current step glimpses.

        """
        states = dict_subset(kwargs, self._state_names, pop=True)
        glimpses = dict_subset(kwargs, self._glimpse_names, pop=True)
        # Only the glimpses listed in `previous_glimpses_needed` are fed
        # back into the attention brick.
        glimpses_needed = dict_subset(glimpses, self.previous_glimpses_needed)
        result = self.attention.take_glimpses(
            kwargs.pop(self.attended_name),
            kwargs.pop(self.preprocessed_attended_name, None),
            kwargs.pop(self.attended_mask_name, None),
            **dict_union(states, glimpses_needed))
        # At this point kwargs may contain additional items.
        # e.g. AttentionRecurrent.transition.apply.contexts
        return result

    @take_glimpses.property('outputs')
    def take_glimpses_outputs(self):
        return self._glimpse_names

    @application
    def compute_states(self, **kwargs):
        r"""Compute current states when glimpses have already been computed.

        Combines an application of the `distribute` that alter the
        sequential inputs of the wrapped transition and an application of
        the wrapped transition. All unknown keyword arguments go to
        the wrapped transition.

        Parameters
        ----------
        \*\*kwargs
            Should contain everything what `self.transition` needs
            and in addition the current glimpses.

        Returns
        -------
        current_states : list of :class:`~tensor.TensorVariable`
            Current states computed by `self.transition`.

        """
        # make sure we are not popping the mask
        normal_inputs = [name for name in self._sequence_names
                         if 'mask' not in name]
        sequences = dict_subset(kwargs, normal_inputs, pop=True)
        glimpses = dict_subset(kwargs, self._glimpse_names, pop=True)
        if self.add_contexts:
            kwargs.pop(self.attended_name)
            # attended_mask_name can be optional
            kwargs.pop(self.attended_mask_name, None)
        # Mix glimpse information into the input sequences before the
        # wrapped transition consumes them.
        sequences.update(self.distribute.apply(
            as_dict=True, **dict_subset(dict_union(sequences, glimpses),
                                        self.distribute.apply.inputs)))
        current_states = self.transition.apply(
            iterate=False, as_list=True,
            **dict_union(sequences, kwargs))
        return current_states

    @compute_states.property('outputs')
    def compute_states_outputs(self):
        return self._state_names

    @recurrent
    def do_apply(self, **kwargs):
        r"""Process a sequence attending the attended context every step.

        In addition to the original sequence this method also requires
        its preprocessed version, the one computed by the `preprocess`
        method of the attention mechanism. Unknown keyword arguments
        are passed to the wrapped transition.

        Parameters
        ----------
        \*\*kwargs
            Should contain current inputs, previous step states, contexts,
            the preprocessed attended context, previous step glimpses.

        Returns
        -------
        outputs : list of :class:`~tensor.TensorVariable`
            The current step states and glimpses.

        """
        attended = kwargs[self.attended_name]
        preprocessed_attended = kwargs.pop(self.preprocessed_attended_name)
        attended_mask = kwargs.get(self.attended_mask_name)
        sequences = dict_subset(kwargs, self._sequence_names, pop=True,
                                must_have=False)
        states = dict_subset(kwargs, self._state_names, pop=True)
        glimpses = dict_subset(kwargs, self._glimpse_names, pop=True)
        # Step 1: new glimpses from the previous states and glimpses.
        current_glimpses = self.take_glimpses(
            as_dict=True,
            **dict_union(
                states, glimpses,
                {self.attended_name: attended,
                 self.attended_mask_name: attended_mask,
                 self.preprocessed_attended_name: preprocessed_attended}))
        # Step 2: next states from the current glimpses.
        current_states = self.compute_states(
            as_list=True,
            **dict_union(sequences, states, current_glimpses, kwargs))
        return current_states + list(current_glimpses.values())

    @do_apply.property('sequences')
    def do_apply_sequences(self):
        return self._sequence_names

    @do_apply.property('contexts')
    def do_apply_contexts(self):
        return self._context_names + [self.preprocessed_attended_name]

    @do_apply.property('states')
    def do_apply_states(self):
        # Fed-back glimpses are carried as extra recurrent states.
        return self._state_names + self._glimpse_names

    @do_apply.property('outputs')
    def do_apply_outputs(self):
        return self._state_names + self._glimpse_names

    @application
    def apply(self, **kwargs):
        """Preprocess a sequence attending the attended context at every step.

        Preprocesses the attended context and runs :meth:`do_apply`. See
        :meth:`do_apply` documentation for further information.

        """
        preprocessed_attended = self.attention.preprocess(
            kwargs[self.attended_name])
        return self.do_apply(
            **dict_union(kwargs,
                         {self.preprocessed_attended_name:
                          preprocessed_attended}))

    @apply.delegate
    def apply_delegate(self):
        # TODO: Nice interface for this trick?
        return self.do_apply.__get__(self, None)

    @apply.property('contexts')
    def apply_contexts(self):
        return self._context_names

    @application
    def initial_states(self, batch_size, **kwargs):
        # Wrapped transition's initial states followed by the attention
        # mechanism's initial glimpses (matching do_apply.states order).
        return (pack(self.transition.initial_states(
            batch_size, **kwargs)) +
            pack(self.attention.initial_glimpses(
                batch_size, kwargs[self.attended_name])))

    @initial_states.property('outputs')
    def initial_states_outputs(self):
        return self.do_apply.states

    def get_dim(self, name):
        if name in self._glimpse_names:
            return self.attention.get_dim(name)
        if name == self.preprocessed_attended_name:
            (original_name,) = self.attention.preprocess.outputs
            return self.attention.get_dim(original_name)
        if self.add_contexts:
            if name == self.attended_name:
                return self.attention.get_dim(
                    self.attention.take_glimpses.inputs[0])
            if name == self.attended_mask_name:
                # Masks are elementwise, hence dimension 0.
                return 0
        return self.transition.get_dim(name)
| {
"repo_name": "nke001/attention-lvcsr",
"path": "libs/blocks/blocks/bricks/attention.py",
"copies": "1",
"size": "30120",
"license": "mit",
"hash": 5477704904373049000,
"line_mean": 38.4757536042,
"line_max": 78,
"alpha_frac": 0.6508300133,
"autogenerated": false,
"ratio": 4.386194844910441,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 763
} |
# ATTENTION: The code in this file is highly EXPERIMENTAL.
# Adventurous users should note that the APIs will probably change.
"""onnx optimizer
This enables users to optimize their models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import onnx
import onnx.onnx_cpp2py_export.optimizer as C
from onnx import ModelProto
from typing import Text, Sequence
"""Apply the optimization on the serialized ModelProto.
Arguments:
input (ModelProto): model
names (list of string): list of optimization names
Return:
return (ModelProto) optimized model
Supported pass names:
-- nop
-- eliminate_identity
-- eliminate_nop_transpose
-- eliminate_unused_initializer
-- fuse_consecutive_squeezes
-- fuse_consecutive_transposes
-- fuse_add_bias_into_conv
-- fuse_transpose_into_gemm
"""
# Re-export the C extension helper that lists the available pass names.
get_available_passes = C.get_available_passes
def optimize(model, passes=None):  # type: (ModelProto, Sequence[Text]) -> ModelProto
    """Apply optimization passes to `model` and return the result.

    Arguments:
        model (ModelProto): the model to optimize
        passes (sequence of str): names of the passes to apply (see the
            module-level list of supported pass names); when ``None`` or
            empty, a small default set is used

    Return:
        (ModelProto) optimized model

    Raises:
        ValueError: if `model` is not a ModelProto
    """
    if not isinstance(model, ModelProto):
        raise ValueError('Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model)))
    # Use a None sentinel instead of the original mutable default
    # argument; an empty sequence selects the same default passes.
    if not passes:
        passes = ['eliminate_nop_transpose',
                  'fuse_consecutive_transposes',
                  'fuse_transpose_into_gemm']
    model_str = model.SerializeToString()
    # The optimizer operates on the serialized proto in the C extension.
    optimized_model_str = C.optimize(model_str, passes)
    return onnx.load_from_string(optimized_model_str)
| {
"repo_name": "mlperf/training_results_v0.6",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/onnx-tensorrt/third_party/onnx/onnx/optimizer.py",
"copies": "1",
"size": "1526",
"license": "apache-2.0",
"hash": 4766287612232548000,
"line_mean": 28.9215686275,
"line_max": 101,
"alpha_frac": 0.7090432503,
"autogenerated": false,
"ratio": 3.777227722772277,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9984142670770965,
"avg_score": 0.0004256604602625364,
"num_lines": 51
} |
# at terminal window, run:
# python track_uscis_rd.py <rd_process_center argument> <rd_date_start argument> <rd_date_end argument> <rd_form argument>
import re
import mechanize
from lxml import html
from bs4 import BeautifulSoup
import os
import sys
from termcolor import colored
import glob
from time import gmtime, strftime
import datetime
import copy
# --- Script configuration (defaults; some overridable via CLI below) ---
rd_url = 'http://www.mycasetracker.org/index.php?dest=rd'
rd_process_center = "SRC"            # USCIS processing-center code
rd_form = "I131"                     # form type to track
rd_date_start = "2016-10-03"         # first receipt date (YYYY-MM-DD)
rd_date_end = "2016-10-08"           # last receipt date (YYYY-MM-DD)
received_key = 'Case Was Received'   # row labels in the scraped tables
approved_key = 'Case Was Approved'
log_file_dir = '/logs'               # appended to os.getcwd() when used
back_look = 1                        # days back for the "previous" log
# Create the local logs directory on first run.
if not os.path.exists(os.getcwd()+log_file_dir):
    os.system("mkdir logs")
# Optional CLI overrides; see the usage line at the top of the file:
#   track_uscis_rd.py <rd_process_center> <rd_date_start> <rd_date_end> <rd_form>
# Bug fix: arguments were previously read from argv[2:5] (skipping
# argv[1]) and the start/end dates were assigned to an unused `rd_date`
# variable, so the documented overrides never took effect.
if len(sys.argv) > 1:
    if sys.argv[1] is not None:
        rd_process_center = sys.argv[1]
if len(sys.argv) > 2:
    if sys.argv[2] is not None:
        rd_date_start = sys.argv[2]
if len(sys.argv) > 3:
    if sys.argv[3] is not None:
        rd_date_end = sys.argv[3]
if len(sys.argv) > 4:
    if sys.argv[4] is not None:
        rd_form = sys.argv[4]
# Build the list of receipt dates (YYYY-MM-DD) between start and end.
# NOTE(review): only the day-of-month is compared, so the range must not
# span a month boundary -- TODO confirm/extend.
day_length = int(rd_date_end[-2:]) - int(rd_date_start[-2:])
rd_dates = []
new_info = []
for i in range(0, day_length):
    new_day = int(rd_date_start[-2:]) + i
    # Bug fix: single-digit days must be zero-padded ("2016-10-03");
    # the original test was inverted (`new_day > 9` got the "0" prefix),
    # producing dates like "2016-10-3" and "2016-10-012".
    if new_day < 10:
        rd_date = rd_date_start[:-2] + "0" + str(new_day)
    else:
        rd_date = rd_date_start[:-2] + str(new_day)
    rd_dates.append(rd_date)
# Open the tracker's query form and submit it via mechanize.
br = mechanize.Browser()
br.open(rd_url)
# The query form is the one carrying a "class" attribute.
for form in br.forms():
    if "class" in form.attrs.keys():
        br.form = form
br.form.find_control("in_Form").value=[rd_form]
# NOTE(review): the query is submitted once with whatever value the date
# loop above left in `rd_date` -- TODO confirm whether this section was
# meant to run once per entry of `rd_dates`.
br.form.find_control("in_RD").value=rd_date
response = br.submit()
# tree = html.fromstring(response.read())
rd_soup = BeautifulSoup(response.read(),'html.parser')
rd_soup_tables = (rd_soup.find("div", class_="ym-cbox ym-clearfix")).find_all("table")
# fetch new info
# The page lists a summary table per processing center, each followed by
# a detail table. When the summary matches the requested center, parse
# the detail rows into {row_label: [number, percent, start, end]}.
for i,table in enumerate(rd_soup_tables):
    table_head = table.find("thead")
    if table_head.find("b") is not None:
        process_center = table_head.find("b").text.split(" ")[1]
        if process_center == rd_process_center:
            general_info = []
            detailed_info = {}
            for td_tag in table.find("tr").find_all("td"):
                content = td_tag.find("b").text
                general_info.append(content)
            # The detail table immediately follows its summary table.
            detail_table = rd_soup_tables[i+1]
            for table_row in detail_table.find_all("tr"):
                this_row_info = table_row.find_all("td")
                info_type = this_row_info[0].text
                number = this_row_info[1].text
                percent = this_row_info[2].text
                start_date = this_row_info[3].text
                end_date = this_row_info[4].text
                detailed_info[info_type] = [number, percent, start_date, end_date]
            # Keep only the "received" and "approved" rows.
            new_info.append([detailed_info[received_key],detailed_info[approved_key]])
# read the most recent file info from log file
log_files = glob.glob(os.getcwd()+log_file_dir+"/*.log")
last_time_date = None
if len(log_files) > 0:
    # most_recent_file = min(glob.iglob(os.getcwd()+log_file_dir+'/*.log'),key=os.path.getctime)
    # sort log_files:
    # NOTE(review): glob already returns absolute paths here, so
    # os.path.join(log_file_dir, x) simply returns x -- works only by
    # accident of os.path.join semantics with absolute second arguments.
    log_files.sort(key=lambda x: os.stat(os.path.join(log_file_dir,x)).st_mtime)
    log_files = log_files[::-1]  # newest first
    # Pick the newest log that is not from today and at least
    # `back_look` days away.
    # NOTE(review): the day-of-month arithmetic below breaks across
    # month boundaries -- TODO confirm intended behaviour.
    for log_file in log_files:
        this_file_date = datetime.datetime.fromtimestamp(\
            os.path.getctime(log_file))
        if this_file_date.year == datetime.datetime.now().year \
            and this_file_date.month == datetime.datetime.now().month \
            and this_file_date.day == datetime.datetime.now().day:
            continue
        elif datetime.datetime.now().day - this_file_date.day >= back_look or \
            this_file_date.day - datetime.datetime.now().day >= back_look:
            last_time_date = this_file_date
            most_recent_file = log_file
            break
    old_dates = None
    old_info = None
    # NOTE(review): if no log file satisfied the loop above,
    # `most_recent_file` is unbound here and this raises NameError.
    with open(most_recent_file) as f:
        lines = f.readlines()
        old_dates = []
        old_info = []
        # Each line: "<date>, received(...), approved(...)".
        for line in lines:
            line_content = line.split(",")
            old_dates.append(line_content[0])
            old_info.append(line_content[1:])
        f.close()
# save new info into log file
# date, number of received(percentage,date range),number of approved (percentage,date range)
new_file_path = os.getcwd()+log_file_dir+'/'+"date_range"+rd_date_start+"_"+rd_date_end+\
    strftime("_inqury_%Y_%m_%d-%H_%M_%S", gmtime())+".log"
with open(new_file_path,'w') as f:
    # One CSV-ish line per receipt date:
    #   date, received(percent;start;end), approved(percent;start;end)
    for t,date in enumerate(rd_dates):
        f.write(date+", "+new_info[t][0][0]+"("+new_info[t][0][1]+";"+new_info[t][0][2]+";"+\
            new_info[t][0][3]+")" + ", " +new_info[t][1][0]+"("+new_info[t][1][1]+";"+\
            new_info[t][1][2]+";"+new_info[t][1][3]+")"+"\n")
    # Redundant inside `with` (the context manager closes the file).
    f.close()
#display content
# Python 2 print statements; termcolor renders the ANSI colours.
print colored('#################################### USCIS Tracker RD ####################################','red',attrs=['bold'])
print colored('# Status of current inquiry:'+strftime("_inqury_%Y_%m_%d-%H_%M_%S", gmtime()),\
    'blue',attrs=['bold'])
for rd_time, new_info_entry in enumerate(new_info):
    print ''
    print colored('# RD: '+rd_dates[rd_time]+' Approved: '+new_info_entry[1][0]+'('+new_info_entry[1][1]+'%'+' in date range: '+ \
        new_info_entry[1][2]+'--'+new_info_entry[1][3]+')', 'white', attrs=['bold','underline'])
    print colored('# ------------------ Received: '+new_info_entry[0][0]+'('+new_info_entry[0][1]+'%'+' in date range: '+ \
        new_info_entry[0][2]+'--'+new_info_entry[0][3]+')', 'yellow', attrs=['bold'])
print ''
print '------------------------------------------------------------------------------------------'
# NOTE(review): if no previous log was found, `last_time_date` is None
# and `old_info` is undefined -- the lines below would then fail.
print colored('# Status of previous inquiry on '+str(last_time_date.year)+"-"+str(last_time_date.month)+"-"+str(last_time_date.day),\
    'blue',attrs=['bold'])
for rd_time, old_info_entry in enumerate(old_info):
    print ''
    print colored('# RD: '+old_dates[rd_time]+ ' Approved: '+ old_info_entry[1], 'white', attrs=['bold','underline'])
    print colored('# ---------- Received: '+ old_info_entry[0],'yellow' ,attrs=['bold'])
| {
"repo_name": "tianchuliang/my_util",
"path": "track_uscis_rd.py",
"copies": "1",
"size": "5590",
"license": "mit",
"hash": 4430383382748590000,
"line_mean": 33.9375,
"line_max": 133,
"alpha_frac": 0.6241502683,
"autogenerated": false,
"ratio": 2.7442317133038783,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38683819816038784,
"avg_score": null,
"num_lines": null
} |
# at terminal window, run:
# python track_uscis_receipt_no.py <receipt_num_start> <number_of_receipts> <target_form_type>
import re
import mechanize
from lxml import html
from bs4 import BeautifulSoup
import os
import sys
from termcolor import colored
import glob
from time import gmtime, strftime
import datetime
import copy
# --- Script configuration (defaults; overridable via CLI below) ---
receipt_url = 'http://www.mycasetracker.org/index.php?dest=receipt'
receipt_num_start = 'SRC1790011000'       # first receipt number to query
processing_center = receipt_num_start[:3] # 3-letter center prefix
number_of_receipts = '300'                # receipts per query interval
number_of_intervals = 6                   # how many intervals to scan
received_key = 'Case Was Received'        # row labels in the result tables
approved_key = 'Case Was Approved'
target_form_type = 'I131'                 # form type to report
# Optional CLI overrides; see the usage line at the top of the file:
#   track_uscis_receipt_no.py <receipt_num_start> <number_of_receipts> <target_form_type>
if len(sys.argv) > 1:
    if sys.argv[1] is not None:
        # Bug fix: the first argument used to be assigned to a
        # misspelled `receipt_no_start`, so the override was ignored.
        receipt_num_start = sys.argv[1]
        # Keep the center prefix in sync with the overridden number.
        processing_center = receipt_num_start[:3]
if len(sys.argv) > 2:
    if sys.argv[2] is not None:
        number_of_receipts = sys.argv[2]
if len(sys.argv) > 3:
    if sys.argv[3] is not None:
        target_form_type = sys.argv[3]
new_info = []
receipt_starts = []
# Query the tracker once per receipt-number interval.
for interval in range(0,number_of_intervals):
    current_start = processing_center+ str(int(receipt_num_start[3:]) + interval * int(number_of_receipts))
    receipt_starts.append(current_start)
    br = mechanize.Browser()
    br.open(receipt_url)
    # The query form is the one carrying a "class" attribute.
    for form in br.forms():
        if 'class' in form.attrs.keys():
            br.form = form
    br.form.find_control("in_Receipt").value = current_start
    br.form.find_control("in_Num").value = number_of_receipts
    response = br.submit()
    rd_soup = BeautifulSoup(response.read(),'html.parser')
    rd_soup_tables = (rd_soup.find("div", class_="ym-cbox ym-clearfix")).find_all("table")
    # A summary table per form type, each followed by its detail table.
    for i,table in enumerate(rd_soup_tables):
        table_head = table.find("thead")
        if table_head.find("b") is not None:
            form_type = table_head.find("b").text.split(" ")[1]
            if form_type == target_form_type:
                general_info = []
                detailed_info = {}
                for td_tag in table.find("tr").find_all("td"):
                    content = td_tag.find("b").text
                    general_info.append(content)
                detail_table = rd_soup_tables[i+1]
                # Keep only "received"/"approved" rows, stored as
                # {label: [number, percent, start_date, end_date]}.
                for table_row in detail_table.find_all("tr"):
                    this_row_info = table_row.find_all("td")
                    info_type = this_row_info[0].text
                    if info_type != received_key and info_type != approved_key:
                        continue
                    else:
                        number = this_row_info[1].text
                        percent = this_row_info[2].text
                        start_date = this_row_info[3].text
                        end_date = this_row_info[4].text
                        detailed_info[info_type] = [number, percent, start_date, end_date]
                new_info.append(detailed_info)
# Render the per-interval results (Python 2 print statements;
# termcolor renders the ANSI colours).
print colored('#################################### USCIS Tracker By Receipt Number ####################################','red',attrs=['bold'])
for i,info in enumerate(new_info):
    print colored('###########################'+target_form_type+'#######################################')
    print colored('# Receipt number: '+receipt_starts[i]+'--'+processing_center+str( int(receipt_starts[i][3:]) + int(number_of_receipts)), 'blue',attrs=['bold'])
    # Each info dict holds only the received/approved rows.
    for key in info.keys():
        if key == approved_key:
            print colored('# Approved: '+info[key][0]+' '+info[key][1]+' '+info[key][2]+' '+info[key][3], 'white', attrs=['bold','underline'])
        if key == received_key:
            print colored('# Received: '+info[key][0]+' '+info[key][1]+' '+info[key][2]+' '+info[key][3], 'yellow', attrs=['bold'])
"repo_name": "tianchuliang/my_util",
"path": "track_uscis_receipt_no.py",
"copies": "1",
"size": "3226",
"license": "mit",
"hash": 5610696897448368000,
"line_mean": 32.6145833333,
"line_max": 160,
"alpha_frac": 0.6385616863,
"autogenerated": false,
"ratio": 2.938069216757741,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8866226484208046,
"avg_score": 0.0420808837699388,
"num_lines": 96
} |
# At the 64-bit Anaconda prompt, "activate my-rdkit-env" first.
import sys
from rdkit import Chem
from io import StringIO
import common
class MyAromaticSmilesWriter(common.AromaticSmilesWriter):
    """Emits RDKit's non-canonical aromatic SMILES for each input."""
    def getoutput(self, smi):
        # An unparseable SMILES is reported as an empty string.
        mol = Chem.MolFromSmiles(smi)
        return "" if mol is None else Chem.MolToSmiles(mol, canonical=False)
class MyHydrogenCounter(common.HydrogenCounter):
    """Hydrogen counter that classifies RDKit parse failures.

    RDKit reports most SMILES problems on its native error log rather
    than by raising, so the log is routed through a ``StringIO`` that
    replaces ``sys.stderr`` and is inspected after every parse attempt.
    """
    def __init__(self, name):
        super(MyHydrogenCounter, self).__init__(name)
        # Route RDKit's C++ logging through Python's stderr...
        Chem.WrapLogs()
        # ...and capture stderr so per-SMILES errors can be inspected.
        # NOTE: this redirects sys.stderr for the whole process.
        # (Removed a dead `old_stderr` local: the original saved the
        # previous stderr into a local variable and never used it.)
        self.sio = sys.stderr = StringIO()

    def getoutput(self, smi):
        """Return ([per-atom H counts], None), or (None, failure label)."""
        m = Chem.MolFromSmiles(smi)
        err = self.sio.getvalue()
        if err:
            # Start a fresh capture buffer so this parse's errors do not
            # leak into the next call.
            self.sio = sys.stderr = StringIO()
            if "Can't kekulize" in err:
                return None, "Kekulization_failure"
            elif "Explicit valence" in err:
                return None, "Bad_valence"
            elif "SMILES Parse Error" in err:
                return None, "SMILES_parse_error"
            elif "Aromatic bonds on non aromatic atom" in err:
                return None, "Aromatic_bonds_on_non_aromatic_atom"
            elif "non-ring" in err and "marked aromatic" in err:
                return None, "Non_ring_atom_marked_aromatic"
            # Unrecognized error text: surface it but fall through, since
            # the molecule may still have parsed (e.g. a mere warning).
            print("**ERROR NOT CAPTURED from %s\n%s " % (smi, err))
        if m is None:
            return None, "No_output"
        # False -> count both implicit and explicit hydrogens.
        return [atom.GetTotalNumHs(False) for atom in m.GetAtoms()], None
class MyStereoSmilesWriter(common.StereoSmilesWriter):
    """Emits RDKit's canonical (stereo-aware) SMILES for each input."""
    def getoutput(self, smi):
        # An unparseable SMILES is reported as an empty string.
        mol = Chem.MolFromSmiles(smi)
        return "" if mol is None else Chem.MolToSmiles(mol, canonical=True)
if __name__ == "__main__":
myname = "rdkit_2018.03.1"
# MyAromaticSmilesWriter(myname).main()
# MyHydrogenCounter(myname).main()
MyStereoSmilesWriter(myname).main()
| {
"repo_name": "nextmovesoftware/smilesreading",
"path": "scripts/RDKit.py",
"copies": "1",
"size": "1948",
"license": "bsd-2-clause",
"hash": -7929839737922268000,
"line_mean": 33.4181818182,
"line_max": 73,
"alpha_frac": 0.5882956879,
"autogenerated": false,
"ratio": 3.4055944055944054,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9425080569684882,
"avg_score": 0.013761904761904762,
"num_lines": 55
} |
# at the moment, this file isn't used. All configuration parameters are located in the cralgo.py file
import operator as op
import itertools
# Initial conditions
start_time = '01-01-2017 00:00' # MM-DD-YYYY HH:MM format (military time). GMT timezone
end_time = '01-01-2018 00:00' # MM-DD-YYYY HH:MM format (military time). GMT timezone
period = 86400 # in seconds, valid values are 300, 900, 1800, 7200, 14400, 86400
currency_pair = 'USDT_ETH'
instrument_1_qty = 500
instrument_2_qty = 0
candle_data_filename = 'data/USDT_BTC_900s_1_day_candle_data.csv'
output_filename ='run_tests/' + currency_pair + '_' + 'complete2.csv'
#parameters
# Each parameter is a list of candidate values; the grid below is the
# Cartesian product of all of them.
window_1 = [5]
window_2 = [10]
volume = [10000]
parameters = [window_1, window_2, volume]
# creating all combinations
combinations = list(itertools.product(*parameters))
for window_1, window_2, volume in combinations:
    # Technical-indicator identifiers, e.g. "ema_5" / "ema_10".
    tech_1 = 'ema_'+str(window_1)
    tech_2 = 'ema_'+str(window_2)
    tech_3 = 'bollinger_20_2'
    # Strategy tree: buy when fast EMA > slow EMA and volume is high;
    # sell on the opposite EMA crossing with the same volume filter.
    # NOTE(review): `ema` and `bollinger` are not defined or imported
    # anywhere in this file, so running it raises NameError (the header
    # comment says this file is currently unused). TODO confirm where
    # these were meant to come from.
    strategy = {
        'buy' : {'op' : op.and_,
            'a' : {'op' : op.gt, 'a' : tech_1, 'b' : tech_2},
            'b' : {'op' : op.gt, 'a' : 'volume', 'b' : volume}},
        'sell' : {'op' : op.and_,
            'a' : {'op' : op.lt, 'a' : tech_1, 'b' : tech_2},
            'b' : {'op' : op.gt, 'a' : 'volume', 'b' : volume}},
        'technicals' : {tech_1 : {'type' : ema, 'window' : window_1 },
            tech_2 : {'type' : ema, 'window' : window_2 },
            tech_3 : {'type' : bollinger, 'window' : 20, 'K' : 2}}}
    # Building dictionary of arguments
| {
"repo_name": "Shutch/Cralgo",
"path": "cralgo/config.py",
"copies": "1",
"size": "1678",
"license": "mit",
"hash": -1884570414998841900,
"line_mean": 38.023255814,
"line_max": 102,
"alpha_frac": 0.5399284863,
"autogenerated": false,
"ratio": 2.975177304964539,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40151057912645394,
"avg_score": null,
"num_lines": null
} |
# At the time of writing, unicurses just doesn't work on Windows (pdcurses.dll
# lookup is wrong)
# This module provides escape sequences for terminal colors
import os
# ANSI SGR escape sequences as plain str, not bytes: under Python 3,
# interpolating a bytes literal into a "%s" str format yields the literal
# text "b'\\x1b[31m'" instead of the escape code. Plain str literals behave
# identically on Python 2 as well, so this is backward-compatible.
_DEFAULT = '\x1B[0m'
class _ColorMethod(object):
    """Callable descriptor that wraps a message in one SGR color code."""
    def __init__(self, code):
        self._code = code
    def __get__(self, obj, type=None):
        # Attribute access on _Colors (class or instance) returns the
        # callable itself, so `colors.red("x")` works.
        return self
    def __call__(self, msg):
        # Colorize only while the module-level `colors` switch is on;
        # otherwise pass the message through untouched.
        if colors._enabled:
            return "%s%s%s" % (self._code, msg, _DEFAULT)
        else:
            return msg
class _Colors(object):
    """Namespace of color helpers with a global enable/disable switch."""
    def __init__(self):
        self._enabled = None
        self.auto()
    def auto(self):
        # Crude heuristic: assume a color-capable Unix-like terminal when
        # PATH contains '/bin:'. Guard against PATH being unset — os.getenv
        # would return None and the `in` test would raise TypeError.
        if '/bin:' in (os.getenv('PATH') or ''):
            self.enable(True)
        else:
            self.enable(False)
    def enable(self, mode=True):
        self._enabled = mode
    black = _ColorMethod('\x1B[30m')
    red = _ColorMethod('\x1B[31m')
    green = _ColorMethod('\x1B[32m')
    yellow = _ColorMethod('\x1B[33m')
    blue = _ColorMethod('\x1B[34m')
    magenta = _ColorMethod('\x1B[35m')
    cyan = _ColorMethod('\x1B[36m')
    white = _ColorMethod('\x1B[37m')
colors = _Colors()
| {
"repo_name": "pombredanne/timyd",
"path": "timyd/console.py",
"copies": "1",
"size": "1115",
"license": "mit",
"hash": -4738208748202939000,
"line_mean": 21.7551020408,
"line_max": 78,
"alpha_frac": 0.5730941704,
"autogenerated": false,
"ratio": 3.1766381766381766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42497323470381765,
"avg_score": null,
"num_lines": null
} |
# At the top.
import sphinx_bootstrap_theme
# -*- coding: utf-8 -*-
# NOTE(review): a coding cookie is only effective on line 1 or 2 (PEP 263);
# here it is decorative. Harmless on Python 3, which defaults to UTF-8.
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../src/python'))
# Imported here so the package is importable for autodoc; the sys.path
# insert above must run first.
import psychrolib
# -- Project information -----------------------------------------------------
project = u'PsychroLib'
copyright = u'2018, D. Thevenard and D. Meyer'
author = u'D. Thevenard and D. Meyer'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.coverage',
              'sphinx.ext.napoleon',
              'sphinx_autodoc_typehints',
              'sphinx.ext.viewcode',
              'm2r']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'api_docs'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    # Navigation bar title. (Default: ``project`` value)
    'navbar_title': "PsychroLib",
    # A list of tuples containing pages or urls to link to.
    # Valid tuples should be in the following forms:
    #    (name, page)                 # a link to a page
    #    (name, "/aa/bb", 1)          # a link to an arbitrary relative url
    #    (name, "http://example.com", True) # arbitrary absolute url
    # Note the "1" or "True" value above as the third argument to indicate
    # an arbitrary url.
    # 'navbar_links': [
    #     ("Examples", "examples"),
    #     ("Link", "http://example.com", True),
    # ],
    'navbar_links': [
        ("Get PsychroLib", "https://github.com/psychrometrics/psychrolib", True),
    ],
    # Global TOC depth for "site" navbar tab. (Default: 1)
    # Switching to -1 shows all levels.
    # 'globaltoc_depth': -1
    # Include hidden TOCs in Site navbar?
    #
    # Note: If this is "false", you cannot have mixed ``:hidden:`` and
    # non-hidden ``toctree`` directives in the same page, or else the build
    # will break.
    #
    # Values: "true" (default) or "false"
    'globaltoc_includehidden': "false",
    # HTML navbar class (Default: "navbar") to attach to <div> element.
    # For black navbar, do "navbar navbar-inverse"
    'navbar_class': "navbar",
    # Fix navigation bar to top of page?
    # Values: "true" (default) or "false"
    'navbar_fixed_top': "true",
    # Location of link to source.
    # Options are "nav" (default), "footer" or anything else to exclude.
    'source_link_position': "",
    # Bootswatch (http://bootswatch.com/) theme.
    #
    # Options are nothing (default) or the name of a valid theme such
    # such as "cosmo" or "sandstone".
    #
    # Example themes:
    # * flatly
    # * sandstone (v3 only)
    # * united
    # * yeti (v3 only)
    'bootswatch_theme': "yeti",
    # Choose Bootstrap version.
    # Values: "3" (default) or "2" (in quotes)
    'bootstrap_version': "3",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# Keep the same order as in the source file.
autodoc_member_order = 'bysource' | {
"repo_name": "psychrometrics/psychrolib",
"path": "docs/sphinx/conf.py",
"copies": "1",
"size": "5453",
"license": "mit",
"hash": -1123750895907104900,
"line_mean": 32.8757763975,
"line_max": 82,
"alpha_frac": 0.6464331561,
"autogenerated": false,
"ratio": 3.7374914324880053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9878563251957595,
"avg_score": 0.0010722673260822077,
"num_lines": 161
} |
"""AtThirty URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
import os
# Route table: the blog app is mounted at the site root, with notes/lab/admin
# sub-apps under their own prefixes. Order matters: Django tries patterns
# top-to-bottom and falls through includes whose inner patterns don't match.
urlpatterns = [
    #### NOTE: keep this marker line (original author: "this line don't delete!!!")
    #url(r'^admin/', include(admin.site.urls)),
    # url(r'^$', adminViews.index, name='index'),
    url(r'^', include('app.blog.urls')),
    url(r'^notes/', include('app.note.urls')),
    url(r'^lab/',include('app.lab.urls')),
    url(r'^blog_admin/', include('app.sysAdmin.urls')),
    #url(r'^dashboard/', adminViews.index, name='index'),
    #url(r'^reports/', adminViews.reports, name='reports'),
    #url(r'^guides/', adminViews.guides, name='guides'),
    #url(r'^charts/', adminViews.charts, name='charts'),
    #url(r'^shortcodes/', adminViews.shortcodes, name='shortcodes'),
    #url(r'^schedules/', adminViews.schedules, name='schedules'),
    #url(r'^get_sche_events/$',adminViews.get_sche_events),
    #url(r'^css/(?P<path>.*)$' , 'django.views.static.serve',
    #    {'document_root': settings.GLOBAL_CSS_DIR} ) ,
    #url(r'^js/(?P<path>.*)$' , 'django.views.static.serve',
    #    {'document_root': settings.GLOBAL_JS_DIR} ) ,
    #url(r'^img/(?P<path>.*)$' , 'django.views.static.serve',
    #    {'document_root': settings.GLOBAL_IMG_DIR} ) ,
    #url(r'^font/(?P<path>.*)$' , 'django.views.static.serve',
    #    {'document_root': settings.GLOBAL_FONT_DIR} ) ,
]
| {
"repo_name": "AjayHao/AtThirty",
"path": "AtThirty/urls.py",
"copies": "1",
"size": "2021",
"license": "mit",
"hash": -6534309756124921000,
"line_mean": 42,
"line_max": 77,
"alpha_frac": 0.6259277585,
"autogenerated": false,
"ratio": 3.2336,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43595277585,
"avg_score": null,
"num_lines": null
} |
# at this stage in the book we haven't actually installed matplotlib,
# comment this out if you need to
from matplotlib import pyplot as plt
##########################
# #
# FINDING KEY CONNECTORS #
# #
##########################
# The DataSciencester "social graph": one dict per member.
users = [
    { "id": 0, "name": "Hero" },
    { "id": 1, "name": "Dunn" },
    { "id": 2, "name": "Sue" },
    { "id": 3, "name": "Chi" },
    { "id": 4, "name": "Thor" },
    { "id": 5, "name": "Clive" },
    { "id": 6, "name": "Hicks" },
    { "id": 7, "name": "Devin" },
    { "id": 8, "name": "Kate" },
    { "id": 9, "name": "Klein" },
    { "id": 10, "name": "Jen" }
]
# Friendship edges as (user_id, user_id) pairs.
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
               (4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# Start every user with an empty friend list, then wire up both ends of
# every edge (friendship is symmetric).
for u in users:
    u["friends"] = []
for a, b in friendships:
    users[a]["friends"].append(users[b])
    users[b]["friends"].append(users[a])
def number_of_friends(user):
    """Return how many friends *user* has (length of its "friends" list)."""
    return len(user["friends"])
# Network-wide totals and the mean number of connections per user.
total_connections = sum(map(number_of_friends, users))
num_users = len(users)
avg_connections = total_connections / num_users
################################
# #
# DATA SCIENTISTS YOU MAY KNOW #
# #
################################
def friends_of_friend_ids_bad(user):
    """Naive "friend of a friend" ids — includes the user and direct friends."""
    ids = []
    for friend in user["friends"]:
        for foaf in friend["friends"]:
            ids.append(foaf["id"])
    return ids
from collections import Counter                     # not loaded by default
def not_the_same(user, other_user):
    """Two users differ exactly when their ids differ."""
    return user["id"] != other_user["id"]
def not_friends(user, other_user):
    """True when *other_user* appears nowhere in user["friends"]."""
    for friend in user["friends"]:
        if not not_the_same(friend, other_user):
            return False
    return True
def friends_of_friend_ids(user):
    """Count mutual acquaintances, excluding the user and existing friends."""
    return Counter(foaf["id"]
                   for friend in user["friends"]
                   for foaf in friend["friends"]
                   if not_the_same(user, foaf) and not_friends(user, foaf))
# Smoke check from the book: mutual acquaintances of user 3 (Chi).
print(friends_of_friend_ids(users[3])) # Counter({0: 2, 5: 1})
# (user_id, interest) pairs declared by each user.
interests = [
    (0, "Hadoop"), (0, "Big Data"), (0, "HBase"), (0, "Java"),
    (0, "Spark"), (0, "Storm"), (0, "Cassandra"),
    (1, "NoSQL"), (1, "MongoDB"), (1, "Cassandra"), (1, "HBase"),
    (1, "Postgres"), (2, "Python"), (2, "scikit-learn"), (2, "scipy"),
    (2, "numpy"), (2, "statsmodels"), (2, "pandas"), (3, "R"), (3, "Python"),
    (3, "statistics"), (3, "regression"), (3, "probability"),
    (4, "machine learning"), (4, "regression"), (4, "decision trees"),
    (4, "libsvm"), (5, "Python"), (5, "R"), (5, "Java"), (5, "C++"),
    (5, "Haskell"), (5, "programming languages"), (6, "statistics"),
    (6, "probability"), (6, "mathematics"), (6, "theory"),
    (7, "machine learning"), (7, "scikit-learn"), (7, "Mahout"),
    (7, "neural networks"), (8, "neural networks"), (8, "deep learning"),
    (8, "Big Data"), (8, "artificial intelligence"), (9, "Hadoop"),
    (9, "Java"), (9, "MapReduce"), (9, "Big Data")
]
def data_scientists_who_like(target_interest):
    """Return the ids of every user who listed *target_interest* (linear scan)."""
    return [uid for uid, topic in interests if topic == target_interest]
from collections import defaultdict
# Invert the (user_id, interest) pairs in both directions, in one pass:
# user_ids_by_interest:  interest -> [user_id, ...]
# interests_by_user_id:  user_id  -> [interest, ...]
user_ids_by_interest = defaultdict(list)
interests_by_user_id = defaultdict(list)
for user_id, interest in interests:
    user_ids_by_interest[interest].append(user_id)
    interests_by_user_id[user_id].append(interest)
def most_common_interests_with(user_id):
    """Count users sharing at least one interest with *user_id*, keyed by id.

    Bug fix: the original referenced the nonexistent globals
    `interests_by_user` / `users_by_interest` (and indexed with the literal
    string "user_id" rather than the argument), raising NameError on any
    call. The inverted indexes built above are `interests_by_user_id` and
    `user_ids_by_interest`.
    """
    return Counter(interested_user_id
                   for interest in interests_by_user_id[user_id]
                   for interested_user_id in user_ids_by_interest[interest]
                   if interested_user_id != user_id)
###########################
# #
# SALARIES AND EXPERIENCE #
# #
###########################
# (annual salary, years of tenure) pairs for ten data scientists.
salaries_and_tenures = [(83000, 8.7), (88000, 8.1),
                        (48000, 0.7), (76000, 6),
                        (69000, 6.5), (76000, 7.5),
                        (60000, 2.5), (83000, 10),
                        (48000, 1.9), (63000, 4.2)]
def make_chart_salaries_by_tenure():
    """Scatter-plot salary against tenure (uses matplotlib's pyplot)."""
    xs = [tenure for salary, tenure in salaries_and_tenures]
    ys = [salary for salary, tenure in salaries_and_tenures]
    plt.scatter(xs, ys)
    plt.xlabel("Years Experience")
    plt.ylabel("Salary")
    plt.show()
# Group salaries by exact tenure, then average within each group.
salary_by_tenure = defaultdict(list)
for salary, tenure in salaries_and_tenures:
    salary_by_tenure[tenure].append(salary)
average_salary_by_tenure = {
    tenure: sum(salaries) / len(salaries)
    for tenure, salaries in salary_by_tenure.items()
}
def tenure_bucket(tenure):
    """Map a tenure in years onto one of three coarse buckets."""
    if tenure < 2:
        return "less than two"
    if tenure < 5:
        return "between two and five"
    return "more than five"
# Same averaging, but grouped by the coarse buckets instead of exact tenure.
salary_by_tenure_bucket = defaultdict(list)
for salary, tenure in salaries_and_tenures:
    salary_by_tenure_bucket[tenure_bucket(tenure)].append(salary)
average_salary_by_bucket = {
    bucket: sum(salaries) / len(salaries)
    for bucket, salaries in salary_by_tenure_bucket.items()
}
#################
# #
# PAID_ACCOUNTS #
# #
#################
def predict_paid_or_unpaid(years_experience):
    """Crude account-type model: mid-career users (3.0 <= y < 8.5) are "unpaid"."""
    if 3.0 <= years_experience < 8.5:
        return "unpaid"
    return "paid"
######################
# #
# TOPICS OF INTEREST #
# #
######################
# Word frequencies across all interest strings (lower-cased, split on
# whitespace), used for the "topics of interest" section.
words_and_counts = Counter(word
                           for _, interest in interests
                           for word in interest.lower().split())
if __name__ == "__main__":
    # Demo driver: print the chapter's results section by section. The exact
    # stdout sequence is the observable behavior of this script.
    print()
    print("######################")
    print("#")
    print("# FINDING KEY CONNECTORS")
    print("#")
    print("######################")
    print()
    print("total connections", total_connections)
    print("number of users", num_users)
    print("average connections", total_connections / num_users)
    print()
    # create a list (user_id, number_of_friends)
    num_friends_by_id = [(user["id"], number_of_friends(user))
                         for user in users]
    print("users sorted by number of friends:")
    print(sorted(num_friends_by_id,
                 key=lambda pair: pair[1], # by number of friends
                 reverse=True)) # largest to smallest
    print()
    print("######################")
    print("#")
    print("# DATA SCIENTISTS YOU MAY KNOW")
    print("#")
    print("######################")
    print()
    print("friends of friends bad for user 0:", friends_of_friend_ids_bad(users[0]))
    print("friends of friends for user 3:", friends_of_friend_ids(users[3]))
    print()
    print("######################")
    print("#")
    print("# SALARIES AND TENURES")
    print("#")
    print("######################")
    print()
    print("average salary by tenure", average_salary_by_tenure)
    print("average salary by tenure bucket", average_salary_by_bucket)
    print()
    print("######################")
    print("#")
    print("# MOST COMMON WORDS")
    print("#")
    print("######################")
    print()
    # Only show words mentioned by more than one (user, interest) pair.
    for word, count in words_and_counts.most_common():
        if count > 1:
            print(word, count)
| {
"repo_name": "trenton3983/Data_Science_from_Scratch",
"path": "code-python3/introduction.py",
"copies": "6",
"size": "8194",
"license": "unlicense",
"hash": 8313053104128269000,
"line_mean": 31.3873517787,
"line_max": 87,
"alpha_frac": 0.5400292897,
"autogenerated": false,
"ratio": 3.336319218241042,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006803333443151593,
"num_lines": 253
} |
"""attic URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
# Route table for the attic project: login at the root, session/query
# management views under their own paths.
# NOTE(review): string view references ('attic.views.log_in', ...) are the
# Django <= 1.9 style (docstring above targets 1.8); they were removed in
# Django 1.10+, where callables must be passed instead.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', 'attic.views.log_in', name='login'),
    url(r'^home/', 'attic.views.home', name='home'),
    url(r'^logout/', 'attic.views.log_out', name='logout'),
    url(r'^profile/(?P<session_id>.*)$', 'attic.views.profile', name='profile'),
    url(r'^reset-session/', 'attic.views.reset_session', name='reset_session'),
    url(r'^delete-session/(?P<session_id>.*)$', 'attic.views.delete_session', name='delete_session'),
    url(r'^delete-query/(?P<query_id>.*)$', 'attic.views.delete_query', name='delete_query'),
    url(r'^delete-query-from-profile/(?P<query_id>.*)$', 'attic.views.delete_query_from_profile', name='delete_query'),
    url(r'^modify/', 'attic.views.modify', name='modify'),
]
| {
"repo_name": "SkySchermer/uweclang",
"path": "django/src/attic/urls.py",
"copies": "1",
"size": "1456",
"license": "mit",
"hash": -9031290404592725000,
"line_mean": 47.5333333333,
"line_max": 119,
"alpha_frac": 0.6641483516,
"autogenerated": false,
"ratio": 3.20704845814978,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.437119680974978,
"avg_score": null,
"num_lines": null
} |
"""attitude constraint corrections

Rebuilds the two foreign keys on users_attitude so that deleting a user or
a repo cascades to the dependent attitude rows; downgrade restores the
plain (non-cascading) constraints.

Revision ID: 420f9b8b9e
Revises: 162f93d4393
Create Date: 2015-06-24 16:52:02.606637
"""
# revision identifiers, used by Alembic.
revision = '420f9b8b9e'
down_revision = '162f93d4393'
branch_labels = None
depends_on = None
from alembic import op
def upgrade():
    # Constraints must be dropped before they can be recreated with
    # ON DELETE CASCADE.
    op.drop_constraint('fk_repos_id', 'users_attitude', type_='foreignkey')
    op.drop_constraint('fk_users_id', 'users_attitude', type_='foreignkey')
    op.create_foreign_key(
        'fk_users_id', 'users_attitude', 'users', ['user_id'], ['id'], ondelete='CASCADE'
    )
    op.create_foreign_key(
        'fk_repos_id', 'users_attitude', 'repos', ['repo_id'], ['id'], ondelete='CASCADE'
    )
def downgrade():
    # Reverse of upgrade(): recreate the constraints without ondelete.
    op.drop_constraint('fk_repos_id', 'users_attitude', type_='foreignkey')
    op.drop_constraint('fk_users_id', 'users_attitude', type_='foreignkey')
    op.create_foreign_key('fk_users_id', 'users_attitude', 'users', ['user_id'], ['id'])
    op.create_foreign_key('fk_repos_id', 'users_attitude', 'repos', ['repo_id'], ['id'])
| {
"repo_name": "kkamkou/gitmostwanted.com",
"path": "migration/versions/420f9b8b9e_attitude_constraint_corrections.py",
"copies": "1",
"size": "1048",
"license": "mit",
"hash": 5627134195796560000,
"line_mean": 31.75,
"line_max": 89,
"alpha_frac": 0.6583969466,
"autogenerated": false,
"ratio": 2.952112676056338,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4110509622656338,
"avg_score": null,
"num_lines": null
} |
''' Attitude discipline for CADRE. '''
from six.moves import range
import numpy as np
from openmdao.core.component import Component
from CADRE.kinematics import computepositionrotd, computepositionrotdjacobian
# Allow non-standard variable names for scientific calc
# pylint: disable=C0103
class Attitude_Angular(Component):
    """ Calculates angular velocity vector from the satellite's orientation
    matrix and its derivative.
    """
    def __init__(self, n=2):
        super(Attitude_Angular, self).__init__()
        # n: number of time points in the trajectory.
        self.n = n
        # Inputs
        self.add_param('O_BI', np.zeros((3, 3, n)), units="unitless",
                       desc="Rotation matrix from body-fixed frame to Earth-centered "
                       "inertial frame over time")
        self.add_param('Odot_BI', np.zeros((3, 3, n)), units="unitless",
                       desc="First derivative of O_BI over time")
        # Outputs
        self.add_output('w_B', np.zeros((3, n)), units="1/s",
                        desc="Angular velocity vector in body-fixed frame over time")
        # Pre-allocated Jacobian storage, filled by jacobian():
        # dw_dOdot[i, k, :, :] = d w_B[k, i] / d Odot_BI[:, :, i] (and same for dw_dO).
        self.dw_dOdot = np.zeros((n, 3, 3, 3))
        self.dw_dO = np.zeros((n, 3, 3, 3))
    def solve_nonlinear(self, params, unknowns, resids):
        """ Calculate output. """
        O_BI = params['O_BI']
        Odot_BI = params['Odot_BI']
        w_B = unknowns['w_B']
        # Each component pairs one row of Odot_BI with a cyclically-shifted
        # row of O_BI at the same time step.
        for i in range(0, self.n):
            w_B[0, i] = np.dot(Odot_BI[2, :, i], O_BI[1, :, i])
            w_B[1, i] = np.dot(Odot_BI[0, :, i], O_BI[2, :, i])
            w_B[2, i] = np.dot(Odot_BI[1, :, i], O_BI[0, :, i])
    def jacobian(self, params, unknowns, resids):
        """ Calculate and save derivatives. (i.e., Jacobian) """
        O_BI = params['O_BI']
        Odot_BI = params['Odot_BI']
        # The dot products above are bilinear, so each partial is simply the
        # other factor's row.
        for i in range(0, self.n):
            self.dw_dOdot[i, 0, 2, :] = O_BI[1, :, i]
            self.dw_dO[i, 0, 1, :] = Odot_BI[2, :, i]
            self.dw_dOdot[i, 1, 0, :] = O_BI[2, :, i]
            self.dw_dO[i, 1, 2, :] = Odot_BI[0, :, i]
            self.dw_dOdot[i, 2, 1, :] = O_BI[0, :, i]
            self.dw_dO[i, 2, 0, :] = Odot_BI[1, :, i]
    def apply_linear(self, params, unknowns, dparams, dunknowns, dresids, mode):
        """ Matrix-vector product with the Jacobian. """
        dw_B = dresids['w_B']
        if mode == 'fwd':
            # Forward mode: accumulate J * d(inputs) into d(residuals).
            for k in range(3):
                for i in range(3):
                    for j in range(3):
                        if 'O_BI' in dparams:
                            dw_B[k, :] += self.dw_dO[:, k, i, j] * \
                                dparams['O_BI'][i, j, :]
                        if 'Odot_BI' in dparams:
                            dw_B[k, :] += self.dw_dOdot[:, k, i, j] * \
                                dparams['Odot_BI'][i, j, :]
        else:
            # Reverse mode: accumulate J^T * d(residuals) into d(inputs).
            for k in range(3):
                for i in range(3):
                    for j in range(3):
                        if 'O_BI' in dparams:
                            dparams['O_BI'][i, j, :] += self.dw_dO[:, k, i, j] * \
                                dw_B[k, :]
                        if 'Odot_BI' in dparams:
                            dparams['Odot_BI'][i, j, :] += self.dw_dOdot[:, k, i, j] * \
                                dw_B[k, :]
class Attitude_AngularRates(Component):
    """ Calculates time derivative of angular velocity vector.

    Uses one-sided differences at the two endpoints and central differences
    in the interior, all scaled by the step size h.
    """
    def __init__(self, n=2, h=28.8):
        super(Attitude_AngularRates, self).__init__()
        # n: number of time points; h: time step between them
        # (presumably seconds — TODO confirm against the trajectory setup).
        self.n = n
        # Inputs
        self.add_param('w_B', np.zeros((3, n)), units="1/s",
                       desc="Angular velocity vector in body-fixed frame over time")
        # Outputs
        self.add_output('wdot_B', np.zeros((3, n)), units="1/s**2",
                        desc="Time derivative of w_B over time")
        self.h = h
    def solve_nonlinear(self, params, unknowns, resids):
        """ Calculate output. """
        w_B = params['w_B']
        h = self.h
        wdot_B = unknowns['wdot_B']
        # Forward difference at the first point, central in the interior,
        # backward at the last point; divide by h once at the end.
        wdot_B[:, 0] = w_B[:, 1] - w_B[:, 0]
        wdot_B[:, 1:-1] = (w_B[:, 2:] - w_B[:, :-2])/ 2.0
        wdot_B[:, -1] = w_B[:, -1] - w_B[:, -2]
        wdot_B *= 1.0/h
    def apply_linear(self, params, unknowns, dparams, dunknowns, dresids, mode):
        """ Matrix-vector product with the Jacobian. """
        h = self.h
        dwdot_B = dresids['wdot_B']
        if mode == 'fwd':
            # Forward mode mirrors the finite-difference stencil directly.
            if 'w_B' in dparams:
                dwdot_B[:, 0] += dparams['w_B'][:, 1] / h
                dwdot_B[:, 0] -= dparams['w_B'][:, 0] / h
                dwdot_B[:, 1:-1] += dparams['w_B'][:, 2:] / 2.0 / h
                dwdot_B[:, 1:-1] -= dparams['w_B'][:, :-2] / 2.0 / h
                dwdot_B[:, -1] += dparams['w_B'][:, -1] / h
                dwdot_B[:, -1] -= dparams['w_B'][:, -2] / h
        else:
            # Reverse mode: transpose of the stencil, accumulated into a
            # scratch array first so overlapping slices add correctly.
            if 'w_B' in dparams:
                w_B = np.zeros(dparams['w_B'].shape)
                w_B[:, 1] += dwdot_B[:, 0] / h
                w_B[:, 0] -= dwdot_B[:, 0] / h
                w_B[:, 2:] += dwdot_B[:, 1:-1] / 2.0 / h
                w_B[:, :-2] -= dwdot_B[:, 1:-1] / 2.0 / h
                w_B[:, -1] += dwdot_B[:, -1] / h
                w_B[:, -2] -= dwdot_B[:, -1] / h
                dparams['w_B'] += w_B
class Attitude_Attitude(Component):
    """ Coordinate transformation from the inertial plane to the rolled
    (forward facing) plane.
    """
    # Constant derivative of the cross-product matrix vx with respect to
    # each component of v: dvx_dv[:, :, k] = d(vx)/d(v[k]).
    dvx_dv = np.zeros((3, 3, 3))
    dvx_dv[0, :, 0] = (0., 0., 0.)
    dvx_dv[1, :, 0] = (0., 0., -1.)
    dvx_dv[2, :, 0] = (0., 1., 0.)
    dvx_dv[0, :, 1] = (0., 0., 1.)
    dvx_dv[1, :, 1] = (0., 0., 0.)
    dvx_dv[2, :, 1] = (-1., 0., 0.)
    dvx_dv[0, :, 2] = (0., -1., 0.)
    dvx_dv[1, :, 2] = (1., 0., 0.)
    dvx_dv[2, :, 2] = (0., 0., 0.)
    def __init__(self, n=2):
        super(Attitude_Attitude, self).__init__()
        self.n = n
        # Inputs
        self.add_param('r_e2b_I', np.zeros((6, n)), units="unitless",
                       desc="Position and velocity vector from earth to satellite in "
                       "Earth-centered inertial frame over time")
        # Outputs
        self.add_output('O_RI', np.zeros((3, 3, n)), units="unitless",
                        desc="Rotation matrix from rolled body-fixed frame to "
                        "Earth-centered inertial frame over time")
        # dO_dr[i, :, :, :] = d O_RI[:, :, i] / d r_e2b_I[:, i], filled by jacobian().
        self.dO_dr = np.zeros((n, 3, 3, 6))
    def solve_nonlinear(self, params, unknowns, resids):
        """ Calculate output. """
        r_e2b_I = params['r_e2b_I']
        O_RI = unknowns['O_RI']
        O_RI[:] = np.zeros(O_RI.shape)
        for i in range(0, self.n):
            # Split state into position r and velocity v, then normalize both.
            r = r_e2b_I[0:3, i]
            v = r_e2b_I[3:, i]
            normr = np.sqrt(np.dot(r, r))
            normv = np.sqrt(np.dot(v, v))
            # Prevent overflow
            if normr < 1e-10:
                normr = 1e-10
            if normv < 1e-10:
                normv = 1e-10
            r = r / normr
            v = v / normv
            # vx is the cross-product (skew-symmetric) matrix of v,
            # so iB = v x r and jB = -v x iB; third row is -v.
            vx = np.zeros((3, 3))
            vx[0, :] = (0., -v[2], v[1])
            vx[1, :] = (v[2], 0., -v[0])
            vx[2, :] = (-v[1], v[0], 0.)
            iB = np.dot(vx, r)
            jB = -np.dot(vx, iB)
            O_RI[0, :, i] = iB
            O_RI[1, :, i] = jB
            O_RI[2, :, i] = -v
    def jacobian(self, params, unknowns, resids):
        """ Calculate and save derivatives. (i.e., Jacobian) """
        r_e2b_I = params['r_e2b_I']
        diB_dv = np.zeros((3, 3))
        djB_dv = np.zeros((3, 3))
        for i in range(0, self.n):
            r = r_e2b_I[0:3, i]
            v = r_e2b_I[3:, i]
            normr = np.sqrt(np.dot(r, r))
            normv = np.sqrt(np.dot(v, v))
            # Prevent overflow
            if normr < 1e-10:
                normr = 1e-10
            if normv < 1e-10:
                normv = 1e-10
            r = r / normr
            v = v / normv
            # Derivatives of the unit vectors w.r.t. the raw (unnormalized)
            # position/velocity: d(r/|r|)/dr = I/|r| - r r^T / |r|^3.
            dr_dr = np.zeros((3, 3))
            dv_dv = np.zeros((3, 3))
            for k in range(0, 3):
                dr_dr[k, k] += 1.0 / normr
                dv_dv[k, k] += 1.0 / normv
                dr_dr[:, k] -= r_e2b_I[
                    0:3, i] * r_e2b_I[k, i] / normr ** 3
                dv_dv[:, k] -= r_e2b_I[
                    3:, i] * r_e2b_I[3 + k, i] / normv ** 3
            vx = np.zeros((3, 3))
            vx[0, :] = (0., -v[2], v[1])
            vx[1, :] = (v[2], 0., -v[0])
            vx[2, :] = (-v[1], v[0], 0.)
            iB = np.dot(vx, r)
            # Chain rule through iB = vx(v) . r and jB = -vx(v) . iB,
            # using the constant class-level dvx_dv tensor.
            diB_dr = vx
            diB_dv[:, 0] = np.dot(self.dvx_dv[:, :, 0], r)
            diB_dv[:, 1] = np.dot(self.dvx_dv[:, :, 1], r)
            diB_dv[:, 2] = np.dot(self.dvx_dv[:, :, 2], r)
            djB_diB = -vx
            djB_dv[:, 0] = -np.dot(self.dvx_dv[:, :, 0], iB)
            djB_dv[:, 1] = -np.dot(self.dvx_dv[:, :, 1], iB)
            djB_dv[:, 2] = -np.dot(self.dvx_dv[:, :, 2], iB)
            self.dO_dr[i, 0, :, 0:3] = np.dot(diB_dr, dr_dr)
            self.dO_dr[i, 0, :, 3:] = np.dot(diB_dv, dv_dv)
            self.dO_dr[i, 1, :, 0:3] = np.dot(np.dot(djB_diB, diB_dr), dr_dr)
            self.dO_dr[i, 1, :, 3:] = np.dot(np.dot(djB_diB, diB_dv) + djB_dv,
                                             dv_dv)
            self.dO_dr[i, 2, :, 3:] = -dv_dv
    def apply_linear(self, params, unknowns, dparams, dunknowns, dresids, mode):
        """ Matrix-vector product with the Jacobian. """
        dO_RI = dresids['O_RI']
        if mode == 'fwd':
            # Forward mode: J * d(r_e2b_I) accumulated into dO_RI.
            for k in range(3):
                for j in range(3):
                    for i in range(6):
                        dO_RI[k, j, :] += self.dO_dr[:, k, j, i] * \
                            dparams['r_e2b_I'][i, :]
        else:
            # Reverse mode: J^T * dO_RI accumulated into d(r_e2b_I).
            for k in range(3):
                for j in range(3):
                    for i in range(6):
                        dparams['r_e2b_I'][i, :] += self.dO_dr[:, k, j, i] * \
                            dO_RI[k, j, :]
class Attitude_Roll(Component):
    """ Calculates the body-fixed orientation matrix.

    O_BR is a rotation by the roll angle Gamma about the third body axis
    (cos/sin in the upper-left 2x2 block, 1 in the [2, 2] entry).
    """
    def __init__(self, n=2):
        super(Attitude_Roll, self).__init__()
        self.n = n
        # Inputs
        self.add_param('Gamma', np.zeros(n), units="rad",
                       desc="Satellite roll angle over time")
        # Outputs
        self.add_output('O_BR', np.zeros((3, 3, n)), units="unitless",
                        desc="Rotation matrix from body-fixed frame to rolled "
                        "body-fixed frame over time")
        # dO_dg[i, :, :] = d O_BR[:, :, i] / d Gamma[i], filled by jacobian().
        self.dO_dg = np.zeros((n, 3, 3))
    def solve_nonlinear(self, params, unknowns, resids):
        """ Calculate output. """
        Gamma = params['Gamma']
        O_BR = unknowns['O_BR']
        O_BR[:] = np.zeros((3, 3, self.n))
        O_BR[0, 0, :] = np.cos(Gamma)
        O_BR[0, 1, :] = np.sin(Gamma)
        O_BR[1, 0, :] = -O_BR[0, 1, :]
        O_BR[1, 1, :] = O_BR[0, 0, :]
        O_BR[2, 2, :] = np.ones(self.n)
    def jacobian(self, params, unknowns, resids):
        """ Calculate and save derivatives. (i.e., Jacobian) """
        Gamma = params['Gamma']
        # Elementwise derivative of the rotation block: d(cos)=-sin, d(sin)=cos.
        self.dO_dg = np.zeros((self.n, 3, 3))
        self.dO_dg[:, 0, 0] = -np.sin(Gamma)
        self.dO_dg[:, 0, 1] = np.cos(Gamma)
        self.dO_dg[:, 1, 0] = -self.dO_dg[:, 0, 1]
        self.dO_dg[:, 1, 1] = self.dO_dg[:, 0, 0]
    def apply_linear(self, params, unknowns, dparams, dunknowns, dresids, mode):
        """ Matrix-vector product with the Jacobian. """
        dO_BR = dresids['O_BR']
        if mode == 'fwd':
            # Forward: dO_BR += (dO/dGamma) * dGamma, elementwise over time.
            for k in range(3):
                for j in range(3):
                    dO_BR[k, j, :] += self.dO_dg[:, k, j] * \
                        dparams['Gamma']
        else:
            # Reverse: dGamma += sum over matrix entries of (dO/dGamma) * dO_BR.
            for k in range(3):
                for j in range(3):
                    dparams['Gamma'] += self.dO_dg[:, k, j] * \
                        dO_BR[k, j, :]
class Attitude_RotationMtx(Component):
    """ Multiplies transformations to produce the orientation matrix of the
    body frame with respect to inertial: O_BI = O_BR . O_RI per time step.
    """
    def __init__(self, n=2):
        super(Attitude_RotationMtx, self).__init__()
        self.n = n
        # Inputs
        self.add_param('O_BR', np.zeros((3, 3, n)), units="unitless",
                       desc="Rotation matrix from body-fixed frame to rolled "
                       "body-fixed frame over time")
        self.add_param('O_RI', np.zeros((3, 3, n)), units="unitless",
                       desc="Rotation matrix from rolled body-fixed "
                       "frame to Earth-centered inertial frame over time")
        # Outputs
        self.add_output('O_BI', np.zeros((3, 3, n)), units="unitless",
                        desc="Rotation matrix from body-fixed frame to "
                        "Earth-centered inertial frame over time")
    def solve_nonlinear(self, params, unknowns, resids):
        """ Calculate output. """
        O_BR = params['O_BR']
        O_RI = params['O_RI']
        O_BI = unknowns['O_BI']
        # Compose the two rotations at each time step.
        for i in range(0, self.n):
            O_BI[:, :, i] = np.dot(O_BR[:, :, i], O_RI[:, :, i])
    def apply_linear(self, params, unknowns, dparams, dunknowns, dresids, mode):
        """ Matrix-vector product with the Jacobian.

        The product rule gives dO_BI = dO_BR . O_RI + O_BR . dO_RI; the loops
        below spell out that matrix product entry by entry (u, v) with the
        contraction index k.
        """
        dO_BI = dresids['O_BI']
        O_BR = params['O_BR']
        O_RI = params['O_RI']
        if mode == 'fwd':
            for u in range(3):
                for v in range(3):
                    for k in range(3):
                        if 'O_RI' in dparams:
                            dO_BI[u, v, :] += O_BR[u, k, :] * \
                                dparams['O_RI'][k, v, :]
                        if 'O_BR' in dparams:
                            dO_BI[u, v, :] += dparams['O_BR'][u, k, :] * \
                                O_RI[k, v, :]
        else:
            # Reverse mode: transpose accumulation of the same products.
            for u in range(3):
                for v in range(3):
                    for k in range(3):
                        if 'O_RI' in dparams:
                            dparams['O_RI'][k, v, :] += O_BR[u, k, :] * \
                                dO_BI[u, v, :]
                        if 'O_BR' in dparams:
                            dparams['O_BR'][u, k, :] += dO_BI[u, v, :] * \
                                O_RI[k, v, :]
class Attitude_RotationMtxRates(Component):
    """ Calculates time derivative of body frame orientation matrix.

    Same finite-difference stencil as Attitude_AngularRates: one-sided at
    the endpoints, central in the interior, scaled by the step size h.
    """
    def __init__(self, n=2, h=28.2):
        super(Attitude_RotationMtxRates, self).__init__()
        # NOTE(review): the default h here (28.2) differs from
        # Attitude_AngularRates' default (28.8) — verify which step size the
        # callers actually pass; one of the two literals looks like a typo.
        self.n = n
        self.h = h
        # Inputs
        self.add_param('O_BI', np.zeros((3, 3, n)), units="unitless",
                       desc="Rotation matrix from body-fixed frame to Earth-centered "
                       "inertial frame over time")
        # Outputs
        self.add_output('Odot_BI', np.zeros((3, 3, n)), units="unitless",
                        desc="First derivative of O_BI over time")
    def solve_nonlinear(self, params, unknowns, resids):
        """ Calculate output. """
        O_BI = params['O_BI']
        h = self.h
        Odot_BI = unknowns['Odot_BI']
        # Forward difference at the first step, central in the interior,
        # backward at the last step; divide by h once at the end.
        Odot_BI[:, :, 0] = O_BI[:, :, 1]
        Odot_BI[:, :, 0] -= O_BI[:, :, 0]
        Odot_BI[:, :, 1:-1] = O_BI[:, :, 2:] / 2.0
        Odot_BI[:, :, 1:-1] -= O_BI[:, :, :-2] / 2.0
        Odot_BI[:, :, -1] = O_BI[:, :, -1]
        Odot_BI[:, :, -1] -= O_BI[:, :, -2]
        Odot_BI *= 1.0/h
    def apply_linear(self, params, unknowns, dparams, dunknowns, dresids, mode):
        """ Matrix-vector product with the Jacobian. """
        dOdot_BI = dresids['Odot_BI']
        h = self.h
        if mode == 'fwd':
            # Forward mode mirrors the finite-difference stencil directly.
            dO_BI = dparams['O_BI']
            dOdot_BI[:, :, 0] += dO_BI[:, :, 1] / h
            dOdot_BI[:, :, 0] -= dO_BI[:, :, 0] / h
            dOdot_BI[:, :, 1:-1] += dO_BI[:, :, 2:] / (2.0*h)
            dOdot_BI[:, :, 1:-1] -= dO_BI[:, :, :-2] / (2.0*h)
            dOdot_BI[:, :, -1] += dO_BI[:, :, -1] / h
            dOdot_BI[:, :, -1] -= dO_BI[:, :, -2] / h
        else:
            # Reverse mode: transpose of the stencil, accumulated into a
            # scratch array first so overlapping slices add correctly.
            dO_BI = np.zeros(dparams['O_BI'].shape)
            dO_BI[:, :, 1] += dOdot_BI[:, :, 0] / h
            dO_BI[:, :, 0] -= dOdot_BI[:, :, 0] / h
            dO_BI[:, :, 2:] += dOdot_BI[:, :, 1:-1] / (2.0*h)
            dO_BI[:, :, :-2] -= dOdot_BI[:, :, 1:-1] / (2.0*h)
            dO_BI[:, :, -1] += dOdot_BI[:, :, -1] / h
            dO_BI[:, :, -2] -= dOdot_BI[:, :, -1] / h
            dparams['O_BI'] += dO_BI
class Attitude_Sideslip(Component):
""" Determine velocity in the body frame."""
def __init__(self, n=2):
super(Attitude_Sideslip, self).__init__()
self.n = n
# Inputs
self.add_param('r_e2b_I', np.zeros((6, n)), units="unitless",
desc="Position and velocity vector from earth to satellite "
"in Earth-centered inertial frame over time")
self.add_param('O_BI', np.zeros((3, 3, n)), units="unitless",
desc="Rotation matrix from body-fixed frame to "
"Earth-centered inertial frame over time")
# Outputs
self.add_output('v_e2b_B', np.zeros((3, n)), units="m/s",
desc="Velocity vector from earth to satellite"
"in body-fixed frame over time")
def solve_nonlinear(self, params, unknowns, resids):
""" Calculate output. """
r_e2b_I = params['r_e2b_I']
O_BI = params['O_BI']
v_e2b_B = unknowns['v_e2b_B']
v_e2b_B[:] = computepositionrotd(self.n, r_e2b_I[3:, :], O_BI)
def jacobian(self, params, unknowns, resids):
""" Calculate and save derivatives. (i.e., Jacobian) """
r_e2b_I = params['r_e2b_I']
O_BI = params['O_BI']
self.J1, self.J2 = computepositionrotdjacobian(self.n,
r_e2b_I[3:, :],
O_BI)
def apply_linear(self, params, unknowns, dparams, dunknowns, dresids, mode):
""" Matrix-vector product with the Jacobian. """
dv_e2b_B = dresids['v_e2b_B']
if mode == 'fwd':
for k in range(3):
if 'O_BI' in params:
for u in range(3):
for v in range(3):
dv_e2b_B[k, :] += self.J1[:, k, u, v] * \
dparams['O_BI'][u, v, :]
if 'r_e2b_I' in params:
for j in range(3):
dv_e2b_B[k, :] += self.J2[:, k, j] * \
dparams['r_e2b_I'][3+j, :]
else:
for k in range(3):
if 'O_BI' in params:
for u in range(3):
for v in range(3):
dparams['O_BI'][u, v, :] += self.J1[:, k, u, v] * \
dv_e2b_B[k, :]
if 'r_e2b_I' in params:
for j in range(3):
dparams['r_e2b_I'][3+j, :] += self.J2[:, k, j] * \
dv_e2b_B[k, :]
class Attitude_Torque(Component):
""" Compute the required reaction wheel tourque."""
J = np.zeros((3, 3))
J[0, :] = (0.018, 0., 0.)
J[1, :] = (0., 0.018, 0.)
J[2, :] = (0., 0., 0.006)
def __init__(self, n=2):
super(Attitude_Torque, self).__init__()
self.n = n
# Inputs
self.add_param('w_B', np.zeros((3, n)), units="1/s",
desc="Angular velocity in body-fixed frame over time")
self.add_param('wdot_B', np.zeros((3, n)), units="1/s**2",
desc="Time derivative of w_B over time")
# Outputs
self.add_output('T_tot', np.zeros((3, n)), units="N*m",
desc="Total reaction wheel torque over time")
self.dT_dwdot = np.zeros((n, 3, 3))
self.dwx_dw = np.zeros((3, 3, 3))
self.dwx_dw[0, :, 0] = (0., 0., 0.)
self.dwx_dw[1, :, 0] = (0., 0., -1.)
self.dwx_dw[2, :, 0] = (0., 1., 0.)
self.dwx_dw[0, :, 1] = (0., 0., 1.)
self.dwx_dw[1, :, 1] = (0., 0., 0.)
self.dwx_dw[2, :, 1] = (-1., 0, 0.)
self.dwx_dw[0, :, 2] = (0., -1., 0)
self.dwx_dw[1, :, 2] = (1., 0., 0.)
self.dwx_dw[2, :, 2] = (0., 0., 0.)
def solve_nonlinear(self, params, unknowns, resids):
""" Calculate output. """
w_B = params['w_B']
wdot_B = params['wdot_B']
T_tot = unknowns['T_tot']
wx = np.zeros((3, 3))
for i in range(0, self.n):
wx[0, :] = (0., -w_B[2, i], w_B[1, i])
wx[1, :] = (w_B[2, i], 0., -w_B[0, i])
wx[2, :] = (-w_B[1, i], w_B[0, i], 0.)
T_tot[:, i] = np.dot(self.J, wdot_B[:, i]) + \
np.dot(wx, np.dot(self.J, w_B[:, i]))
def jacobian(self, params, unknowns, resids):
""" Calculate and save derivatives. (i.e., Jacobian) """
w_B = params['w_B']
self.dT_dw = np.zeros((self.n, 3, 3))
wx = np.zeros((3, 3))
for i in range(0, self.n):
wx[0, :] = (0., -w_B[2, i], w_B[1, i])
wx[1, :] = (w_B[2, i], 0., -w_B[0, i])
wx[2, :] = (-w_B[1, i], w_B[0, i], 0.)
self.dT_dwdot[i, :, :] = self.J
self.dT_dw[i, :, :] = np.dot(wx, self.J)
for k in range(0, 3):
self.dT_dw[i, :, k] += np.dot(self.dwx_dw[:, :, k],
np.dot(self.J, w_B[:, i]))
def apply_linear(self, params, unknowns, dparams, dunknowns, dresids, mode):
""" Matrix-vector product with the Jacobian. """
dT_tot = dresids['T_tot']
if mode == 'fwd':
for k in range(3):
for j in range(3):
if 'w_B' in dparams:
dT_tot[k, :] += self.dT_dw[:, k, j] * \
dparams['w_B'][j, :]
if 'wdot_B' in dparams:
dT_tot[k, :] += self.dT_dwdot[:, k, j] * \
dparams['wdot_B'][j, :]
else:
for k in range(3):
for j in range(3):
if 'w_B' in dparams:
dparams['w_B'][j, :] += self.dT_dw[:, k, j] * \
dT_tot[k, :]
if 'wdot_B' in dparams:
dparams['wdot_B'][j, :] += self.dT_dwdot[:, k, j] * \
dT_tot[k, :]
| {
"repo_name": "hschilling/CADRE-1",
"path": "src/CADRE/attitude.py",
"copies": "1",
"size": "22488",
"license": "apache-2.0",
"hash": -5147041392915192000,
"line_mean": 32.7657657658,
"line_max": 88,
"alpha_frac": 0.4174226254,
"autogenerated": false,
"ratio": 3.0339989206691853,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39514215460691854,
"avg_score": null,
"num_lines": null
} |
"""Attitude reconstruction example."""
import yaipopt
import numpy as np
import sympy
import sym2num
from numpy import ma
from scipy import integrate, interpolate, signal, stats
from ceacoest import kalman, sde, utils
class SymbolicModel(sde.SymbolicModel):
"""Symbolic SDE model."""
var_names = {'t', 'x', 'y', 'q', 'c'}
"""Name of model variables."""
function_names = {'f', 'g', 'h', 'R', 'x0', 'Px0'}
"""Name of the model functions."""
t = 't'
"""Time variable."""
x = ['ax', 'ay', 'az', 'p', 'q', 'r', 'phi', 'theta', 'psi']
"""State vector."""
y = ['ax_meas', 'ay_meas', 'az_meas', 'p_meas', 'q_meas', 'r_meas',
'magx_meas', 'magy_meas', 'magz_meas']
"""Measurement vector."""
q = ['magex', 'magez', 'maghx', 'maghy', 'maghz',
'pbias', 'qbias', 'rbias',
'accel_png', 'omega_png']
"""Parameter vector."""
c = ['g0', 'ax0', 'ay0', 'az0', 'p0', 'q0', 'r0', 'phi0', 'theta0', 'psi0',
'ax0_std', 'ay0_std', 'az0_std', 'p0_std', 'q0_std', 'r0_std',
'phi0_std', 'theta0_std', 'psi0_std',
'omega_mng', 'mag_mng', 'accel_mng']
"""Constants vector."""
def f(self, t, x, q, c):
"""Drift function."""
s = self.symbols(t=t, x=x, q=q, c=c)
cphi = sympy.cos(s.phi)
sphi = sympy.sin(s.phi)
cth = sympy.cos(s.theta)
sth = sympy.sin(s.theta)
tth = sympy.tan(s.theta)
derivs = dict(
p=0, q=0, r=0, ax=0, ay=0, az=0,
phi=s.p + tth*(sphi*s.q + cphi*s.r),
theta=cphi*s.q - sphi*s.r,
psi=sphi/cth*s.q + cphi/cth*s.r,
)
return self.pack('x', derivs)
def g(self, t, x, q, c):
"""Diffusion matrix."""
s = self.symbols(t=t, x=x, q=q, c=c)
g = np.zeros((x.size, 6), object)
g[[0, 1, 2], [0, 1, 2]] = s.accel_png
g[[3, 4, 5], [3, 4, 5]] = s.omega_png
return g
def h(self, t, x, q, c):
"""Measurement function."""
s = self.symbols(t=t, x=x, q=q, c=c)
cphi = sympy.cos(s.phi)
sphi = sympy.sin(s.phi)
cth = sympy.cos(s.theta)
sth = sympy.sin(s.theta)
cpsi = sympy.cos(s.psi)
spsi = sympy.sin(s.psi)
e2b = np.array(
[[cth*cpsi, cth*spsi, -sth],
[-cphi*spsi + sphi*sth*cpsi, cphi*cpsi + sphi*sth*spsi, sphi*cth],
[sphi*spsi + cphi*sth*cpsi, -sphi*cpsi + cphi*sth*spsi, cphi*cth]],
dtype=object
)
magh = [s.maghx, s.maghy, s.maghz]
mag = e2b.dot([s.magex, 0, s.magez]) + magh
gb = np.dot(e2b, [0, 0, s.g0])
meas = dict(
magx_meas=mag[0], magy_meas=mag[1], magz_meas=mag[2],
p_meas=s.p + s.pbias,
q_meas=s.q + s.qbias,
r_meas=s.r + s.rbias,
ax_meas=s.ax - gb[0],
ay_meas=s.ay - gb[1],
az_meas=s.az - gb[2],
)
return self.pack('y', meas)
def R(self, q, c):
"""Measurement function."""
s = self.symbols(q=q, c=c)
std = dict(
ax_meas=s.accel_mng, ay_meas=s.accel_mng, az_meas=s.accel_mng,
p_meas=s.omega_mng, q_meas=s.omega_mng, r_meas=s.omega_mng,
magx_meas=s.mag_mng, magy_meas=s.mag_mng, magz_meas=s.mag_mng,
)
return np.diag(self.pack('y', std) ** 2)
def x0(self, q, c):
"""Initial state."""
s = self.symbols(q=q, c=c)
x0 = {
'ax': s.ax0, 'ay': s.ay0, 'az': s.az0,
'p': s.p0, 'q': s.q0, 'r': s.r0,
'phi': s.phi0, 'theta': s.theta0, 'psi': s.psi0,
}
return self.pack('x', x0)
def Px0(self, q, c):
"""Initial state covariance."""
s = self.symbols(q=q, c=c)
x0_std = {
'ax': s.ax0_std, 'ay': s.ay0_std, 'az': s.az0_std,
'p': s.p0_std, 'q': s.q0_std, 'r': s.r0_std,
'phi': s.phi0_std, 'theta': s.theta0_std, 'psi': s.psi0_std,
}
return np.diag(self.pack('x', x0_std)**2)
class SymbolicDTModel(SymbolicModel, sde.ItoTaylorAS15DiscretizedModel):
derivatives = [('df_dx', 'f', 'x'), ('df_dq', 'f', 'q'),
('d2f_dx2', 'df_dx', 'x'),
('d2f_dx_dq', 'df_dx', 'q'),
('d2f_dq2', 'df_dq', 'q'),
('dQ_dx', 'Q', 'x'), ('dQ_dq', 'Q', 'q'),
('d2Q_dx2', 'dQ_dx', 'x'),
('d2Q_dx_dq', 'dQ_dx', 'q'),
('d2Q_dq2', 'dQ_dq', 'q'),
('dh_dx', 'h', 'x'), ('dh_dq', 'h', 'q'),
('d2h_dx2', 'dh_dx', 'x'),
('d2h_dx_dq', 'dh_dx', 'q'),
('d2h_dq2', 'dh_dq', 'q'),
('dR_dq', 'R', 'q'), ('d2R_dq2', 'dR_dq', 'q'),
('dx0_dq', 'x0', 'q'), ('d2x0_dq2', 'dx0_dq', 'q'),
('dPx0_dq', 'Px0', 'q'), ('d2Px0_dq2', 'dPx0_dq', 'q')]
"""List of the model function derivatives to calculate / generate."""
dt = 'dt'
"""Discretization time step."""
k = 'k'
"""Discretized sample index."""
generated_name = "GeneratedDTModel"
"""Name of the generated class."""
meta = 'ceacoest.sde.DiscretizedModel.meta'
"""Generated model metaclass."""
@property
def imports(self):
return super().imports + ('import ceacoest.sde',)
sym_model = SymbolicModel()
sym_dt_model = SymbolicDTModel()
printer = sym2num.ScipyPrinter()
GeneratedDTModel = sym2num.class_obj(sym_dt_model, printer)
def sim():
np.random.seed(0)
# Generate the time vector
dt = 0.05
N = int(30 / dt)
k = np.arange(N)
t = k * dt
# Generate the accelerations and angular velocities
[b, a] = signal.butter(2, 0.05)
omega_png = 0.01
accel_png = 0.1
omega_seed = np.cumsum(np.random.randn(3, N), -1)
accel_seed = np.cumsum(np.random.randn(3, N), -1)
omega = signal.lfilter(b, a, np.sqrt(dt) * omega_png * omega_seed)
accel = signal.lfilter(b, a, np.sqrt(dt) * accel_png * accel_seed)
# Integrate the angular velocities to obtain the attitude
omega_int = interpolate.interp1d(t, omega, fill_value='extrapolate')
def odefun(angles, t):
[phi, theta, psi] = angles
[p, q, r] = omega_int(t)
cphi = np.cos(phi)
sphi = np.sin(phi)
cth = np.cos(theta)
sth = np.sin(theta)
tth = np.tan(theta)
phidot = p + tth*(sphi*q + cphi*r)
thetadot = cphi*q - sphi*r
psidot = sphi/cth*q + cphi/cth*r
return [phidot, thetadot, psidot]
angles = integrate.odeint(odefun, [0,0,0], t)
# Generate the nominal model
nominal = dict(
g0=9.80665, accel_png=accel_png, omega_png=omega_png,
accel_mng=0.5, omega_mng=0.01, mag_mng=1e-6,
magex=18.982e-6, magez=-13.6305e-6,
maghx=1e-6, maghy=-1e-6, maghz=0,
pbias=0.02, qbias=-0.02, rbias=0,
ax0_std=1, ay0_std=1, az0_std=1,
p0_std=0.1, q0_std=0.1, r0_std=0.1,
phi0_std=0.1, theta0_std=0.1, psi0_std=0.1,
)
c = GeneratedDTModel.pack('c', nominal)
q = GeneratedDTModel.pack('q', nominal)
model = GeneratedDTModel({'q': q, 'c': c, 'dt': dt}, {'t': t})
x = np.vstack((accel, omega, angles.T)).T
v = np.random.multivariate_normal(np.zeros(model.ny), model.R(), N)
y = ma.asarray(model.h(k, x) + v)
y[np.arange(N) % 2 != 0] = ma.masked
return model, t, x, y, q
def pem(model, q0):
def merit(q, new=None):
mq = model.parametrize(q=q)
kf = kalman.DTUnscentedFilter(mq)
return kf.pem_merit(y)
def grad(q, new=None):
mq = model.parametrize(q=q)
kf = kalman.DTUnscentedFilter(mq)
return kf.pem_gradient(y)
hess_inds = np.tril_indices(model.nq)
def hess(q, new_q=1, obj_factor=1, lmult=1, new_lmult=1):
mq = model.parametrize(q=q)
kf = kalman.DTUnscentedFilter(mq)
return obj_factor * kf.pem_hessian(y)[hess_inds]
q_lb = dict(accel_png=1e-4, omega_png=1e-4,
accel_mng=0, omega_mng=0, mag_mng=0)
q_ub = dict()
q_fix = dict()
q_bounds = [model.pack('q', dict(q_lb, **q_fix), fill=-np.inf),
model.pack('q', dict(q_ub, **q_fix), fill=np.inf)]
problem = yaipopt.Problem(q_bounds, merit, grad,
hess=hess, hess_inds=hess_inds)
problem.num_option(b'obj_scaling_factor', -1)
(qopt, solinfo) = problem.solve(q0)
return problem, qopt, solinfo
if __name__ == '__main__':
    # Initial parameter guess for the optimizer.
    guess = dict(magex=18.982e-6, magez=-13.6305e-6,
                 accel_png=0.1, omega_png=0.01,
                 accel_mng=0.5, omega_mng=0.01, mag_mng=1e-6)
    q0 = GeneratedDTModel.pack('q', guess)
    # Simulate, estimate the parameters, then smooth with the optimum.
    model, t, x, y, q = sim()
    problem, qopt, solinfo = pem(model, q0)
    mopt = model.parametrize(q=qopt)
    kfopt = kalman.DTUnscentedFilter(mopt)
    [xs, Pxs] = kfopt.smooth(y)
| {
"repo_name": "dimasad/ceacoest",
"path": "examples/attitude.py",
"copies": "2",
"size": "9068",
"license": "mit",
"hash": 4090559878153489400,
"line_mean": 32.7100371747,
"line_max": 80,
"alpha_frac": 0.4996691663,
"autogenerated": false,
"ratio": 2.6049985636311406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.899589428884705,
"avg_score": 0.02175468821681834,
"num_lines": 269
} |
"""Attitude reconstruction of an ArduPilot Mega."""
import os
import re
import yaipopt
import numpy as np
import sympy
import sym2num
from numpy import ma
from scipy import interpolate
from ceacoest import kalman, sde, utils
class SymbolicModel(sde.SymbolicModel):
"""Symbolic SDE model."""
var_names = {'t', 'x', 'y', 'q', 'c'}
"""Name of model variables."""
function_names = {'f', 'g', 'h', 'R', 'x0'}
"""Name of the model functions."""
t = 't'
"""Time variable."""
x = ['ax', 'ay', 'az', 'p', 'q', 'r', 'phi', 'theta', 'psi']
"""State vector."""
y = ['ax_meas', 'ay_meas', 'az_meas', 'p_meas', 'q_meas', 'r_meas',
'magx_meas', 'magy_meas', 'magz_meas']
"""Measurement vector."""
q = [#'acc_png', 'omega_png', 'acc_mng', 'omega_mng', 'mag_mng',
'magex', 'magez', 'maghx', 'maghy', 'maghz',
'maggy', 'maggz', 'maggxy', 'maggxz', 'maggyz',
'pbias', 'qbias', 'rbias',
'ax0', 'ay0', 'az0', 'p0', 'q0', 'r0', 'phi0', 'theta0', 'psi0']
"""Parameter vector."""
c = ['g0', 'acc_png', 'omega_png', 'acc_mng', 'omega_mng', 'mag_mng']
"""Constants vector."""
def f(self, t, x, q, c):
"""Drift function."""
s = self.symbols(t=t, x=x, q=q, c=c)
cphi = sympy.cos(s.phi)
sphi = sympy.sin(s.phi)
cth = sympy.cos(s.theta)
sth = sympy.sin(s.theta)
tth = sympy.tan(s.theta)
derivs = dict(
p=0, q=0, r=0, ax=0, ay=0, az=0,
phi=s.p + tth*(sphi*s.q + cphi*s.r),
theta=cphi*s.q -sphi*s.r,
psi=sphi/cth*s.q + cphi/cth*s.r,
)
return self.pack('x', derivs)
def g(self, t, x, q, c):
"""Diffusion matrix."""
s = self.symbols(t=t, x=x, q=q, c=c)
g = np.zeros((x.size, 6), object)
g[[0, 1, 2], [0, 1, 2]] = s.acc_png
g[[3, 4, 5], [3, 4, 5]] = s.omega_png
return g
def h(self, t, x, q, c):
"""Measurement function."""
s = self.symbols(t=t, x=x, q=q, c=c)
cphi = sympy.cos(s.phi)
sphi = sympy.sin(s.phi)
cth = sympy.cos(s.theta)
sth = sympy.sin(s.theta)
cpsi = sympy.cos(s.psi)
spsi = sympy.sin(s.psi)
e2b = np.array(
[[cth*cpsi, cth*spsi, -sth],
[-cphi*spsi + sphi*sth*cpsi, cphi*cpsi + sphi*sth*spsi, sphi*cth],
[sphi*spsi + cphi*sth*cpsi, -sphi*cpsi + cphi*sth*spsi, cphi*cth]],
dtype=object
)
magg = np.array(
[[1, s.maggxy, s.maggxz],
[s.maggxy, s.maggy, s.maggyz],
[s.maggxz, s.maggyz, s.maggz]],
dtype=object
)
magh = [s.maghx, s.maghy, s.maghz]
mag = magg.dot(e2b).dot([s.magex, 0, s.magez]) + magh
gb = np.dot(e2b, [0, 0, s.g0])
meas = dict(
magx_meas=mag[0], magy_meas=mag[1], magz_meas=mag[2],
p_meas=s.p + s.pbias,
q_meas=s.q + s.qbias,
r_meas=s.r + s.rbias,
ax_meas=s.ax - gb[0],
ay_meas=s.ay - gb[1],
az_meas=s.az - gb[2],
)
return self.pack('y', meas)
def R(self, q, c):
"""Measurement function."""
s = self.symbols(q=q, c=c)
std = dict(
ax_meas=s.acc_mng, ay_meas=s.acc_mng, az_meas=s.acc_mng,
p_meas=s.omega_mng, q_meas=s.omega_mng, r_meas=s.omega_mng,
magx_meas=s.mag_mng, magy_meas=s.mag_mng, magz_meas=s.mag_mng,
)
return np.diag(self.pack('y', std) ** 2)
def x0(self, q, c):
"""Initial state."""
s = self.symbols(q=q, c=c)
x0 = {
'ax': s.ax0, 'ay': s.ay0, 'az': s.az0,
'p': s.p0, 'q': s.q0, 'r': s.r0,
'phi': s.phi0, 'theta': s.theta0, 'psi': s.psi0,
}
return self.pack('x', x0)
class SymbolicDTModel(SymbolicModel, sde.ItoTaylorAS15DiscretizedModel):
derivatives = [('df_dx', 'f', 'x'), ('df_dq', 'f', 'q'),
('d2f_dx2', 'df_dx', 'x'),
('d2f_dx_dq', 'df_dx', 'q'),
('d2f_dq2', 'df_dq', 'q'),
('dQ_dx', 'Q', 'x'), ('dQ_dq', 'Q', 'q'),
('d2Q_dx2', 'dQ_dx', 'x'),
('d2Q_dx_dq', 'dQ_dx', 'q'),
('d2Q_dq2', 'dQ_dq', 'q'),
('dh_dx', 'h', 'x'), ('dh_dq', 'h', 'q'),
('d2h_dx2', 'dh_dx', 'x'),
('d2h_dx_dq', 'dh_dx', 'q'),
('d2h_dq2', 'dh_dq', 'q'),
('dR_dq', 'R', 'q'), ('d2R_dq2', 'dR_dq', 'q'),
('dx0_dq', 'x0', 'q'), ('d2x0_dq2', 'dx0_dq', 'q')]
"""List of the model function derivatives to calculate / generate."""
dt = 'dt'
"""Discretization time step."""
k = 'k'
"""Discretized sample index."""
generated_name = "GeneratedDTModel"
"""Name of the generated class."""
meta = 'ceacoest.sde.DiscretizedModel.meta'
"""Generated model metaclass."""
@property
def imports(self):
return super().imports + ('import ceacoest.sde',)
sym_model = SymbolicModel()
sym_dt_model = SymbolicDTModel()
printer = sym2num.ScipyPrinter()
GeneratedDTModel = sym2num.class_obj(sym_dt_model, printer)
def load_data():
# Read the log file
module_dir = os.path.dirname(__file__)
filepath = os.path.join(module_dir, 'data', 'apm.log')
lines = open(filepath).read().splitlines()
# Parse the data
data = dict(MAG=[], IMU=[], ATT=[])
for line in lines:
msgid, *fields = re.split(',\s*', line)
if msgid in data:
data[msgid].append([float(f) for f in fields])
data = {key: np.asarray(val) for key, val in data.items()}
imu = data['IMU']
mag = data['MAG']
# Build the output array
t = np.sort(np.hstack((imu[:, 0], mag[:, 0])))
imu_inds = np.array([tk in imu[:, 0] for tk in t])
mag_inds = np.array([tk in mag[:, 0] for tk in t])
y = ma.masked_all((t.size, GeneratedDTModel.ny))
y[imu_inds, :6] = imu[:, [4, 5, 6, 1, 2, 3]]
y[mag_inds, 6:] = mag[:, [1, 2, 3]]
t *= 1e-3
# Select the experiment interval
range_ = np.s_[905:1800]#np.s_[900:1800]
t = t[range_]
y = y[range_]
assert np.unique(t).size == t.size
return t, y, data
def pem(t, y, data):
# Build the initial state and covariance
imu = data['IMU']
att = data['ATT']
d2r = np.pi / 180
omega0 = interpolate.interp1d(imu[:, 0]*1e-3, imu[:,[1,2,3]],axis=0)(t[0])
ang0 = interpolate.interp1d(att[:, 0]*1e-3, att[:,[2,4,6]]*d2r,axis=0)(t[0])
Px0 = np.diag(np.repeat(1e-3 ** 2, GeneratedDTModel.nx))
# Instantiate the model
given = dict(
g0=9.81,
acc_png=0.1, omega_png=0.1,
acc_mng=0.01, omega_mng=5.5e-4, mag_mng=2,
pbias=0, qbias=0, rbias=0,
magex=109, magez=-76, maghx=0, maghy=0, maghz=34,
maggy=1, maggz=1, maggxy=0, maggxz=0, maggyz=0,
p0=omega0[0], q0=omega0[1], r0=omega0[2],
phi0=ang0[0], theta0=ang0[1], psi0=ang0[2]+0.3941,
)
q0 = GeneratedDTModel.pack('q', given)
c = GeneratedDTModel.pack('c', given)
dt = np.diff(t)
params = dict(q=q0, c=c)
sampled = dict(dt=dt, t=t)
model = GeneratedDTModel(params, sampled)
def merit(q, new=None):
mq = model.parametrize(q=q)
kf = kalman.DTUnscentedKalmanFilter(mq, Px=Px0)
return kf.pem_merit(y)
def grad(q, new=None):
mq = model.parametrize(q=q)
kf = kalman.DTUnscentedKalmanFilter(mq, Px=Px0)
return kf.pem_gradient(y)
hess_inds = np.tril_indices(model.nq)
def hess(q, new_q=1, obj_factor=1, lmult=1, new_lmult=1):
mq = model.parametrize(q=q)
kf = kalman.DTUnscentedKalmanFilter(mq, Px=Px0)
return obj_factor * kf.pem_hessian(y)[hess_inds]
q_lb = dict(acc_png=0, omega_png=0,
acc_mng=0, omega_mng=0, mag_mng=0,
ax0=-5, ay0=-5, az0=-5,
phi0=-np.pi, psi0=0, theta0=-np.pi/2)
q_ub = dict(ax0=5, ay0=5, az0=5,
phi0=np.pi, psi0=2*np.pi, theta0=np.pi/2)
q_fix = dict()
q_bounds = [model.pack('q', dict(q_lb, **q_fix), fill=-np.inf),
model.pack('q', dict(q_ub, **q_fix), fill=np.inf)]
problem = yaipopt.Problem(q_bounds, merit, grad,
hess=hess, hess_inds=hess_inds)
problem.num_option(b'obj_scaling_factor', -1)
(qopt, solinfo) = problem.solve(q0)
return problem, qopt, solinfo, model, q0, Px0
if __name__ == '__main__':
    # Load the flight log; estimation is then driven from here.
    [t, y, data] = load_data()
| {
"repo_name": "dimasad/apm-estimation",
"path": "attitude.py",
"copies": "1",
"size": "8734",
"license": "mit",
"hash": -7044896596579060000,
"line_mean": 32.4636015326,
"line_max": 80,
"alpha_frac": 0.4965651477,
"autogenerated": false,
"ratio": 2.6362813160277696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36328464637277696,
"avg_score": null,
"num_lines": null
} |
"""Attitude reconstruction of an Ascention Technology trakSTAR sensor."""
import os
import yaipopt
import numpy as np
import sympy
import sym2num
from numpy import ma
from scipy import io
from ceacoest import kalman, sde, utils
class SymbolicModel(sde.SymbolicModel):
"""Symbolic SDE model."""
var_names = {'t', 'x', 'y', 'q', 'c'}
"""Name of model variables."""
function_names = {'f', 'g', 'h', 'R'}
"""Name of the model functions."""
t = 't'
"""Time variable."""
x = ['phi', 'theta', 'psi', 'p', 'q', 'r']
"""State vector."""
y = ['phi_meas', 'theta_meas', 'psi_meas']
"""Measurement vector."""
q = ['angvel_png', 'ang_meas_std']
"""Parameter vector."""
c = []
"""Constants vector."""
def f(self, t, x, q, c):
"""Drift function."""
s = self.symbols(t=t, x=x, q=q, c=c)
sinphi = sympy.sin(s.phi)
cosphi = sympy.cos(s.phi)
costheta = sympy.cos(s.theta)
tantheta = sympy.tan(s.theta)
derivs = dict(
phi=s.p + s.q*tantheta*sinphi + s.r*tantheta*cosphi,
theta=s.q*cosphi - s.r*sinphi,
psi=s.q*sinphi/costheta + s.r*cosphi/costheta,
p=0, q=0, r=0
)
return self.pack('x', derivs)
def g(self, t, x, q, c):
"""Diffusion matrix."""
s = self.symbols(t=t, x=x, q=q, c=c)
g = np.zeros((x.size, 3), object)
g[[3, 4, 5], [0, 1, 2]] = s.angvel_png
return g
def h(self, t, x, q, c):
"""Measurement function."""
s = self.symbols(t=t, x=x, q=q, c=c)
meas = dict(phi_meas=s.phi, theta_meas=s.theta, psi_meas=s.psi)
return self.pack('y', meas)
def R(self, q, c):
"""Measurement function."""
s = self.symbols(q=q, c=c)
R = np.diag(np.repeat([s.ang_meas_std], 3))**2
return R
class SymbolicDTModel(SymbolicModel, sde.ItoTaylorAS15DiscretizedModel):
derivatives = [('df_dx', 'f', 'x'), ('df_dq', 'f', 'q'),
('d2f_dx2', 'df_dx', 'x'),
('d2f_dx_dq', 'df_dx', 'q'),
('d2f_dq2', 'df_dq', 'q'),
('dQ_dx', 'Q', 'x'), ('dQ_dq', 'Q', 'q'),
('d2Q_dx2', 'dQ_dx', 'x'),
('d2Q_dx_dq', 'dQ_dx', 'q'),
('d2Q_dq2', 'dQ_dq', 'q'),
('dh_dx', 'h', 'x'), ('dh_dq', 'h', 'q'),
('d2h_dx2', 'dh_dx', 'x'),
('d2h_dx_dq', 'dh_dx', 'q'),
('d2h_dq2', 'dh_dq', 'q'),
('dR_dq', 'R', 'q'), ('d2R_dq2', 'dR_dq', 'q')]
"""List of the model function derivatives to calculate / generate."""
dt = 'dt'
"""Discretization time step."""
k = 'k'
"""Discretized sample index."""
generated_name = "GeneratedDTModel"
"""Name of the generated class."""
meta = 'ceacoest.sde.DiscretizedModel.meta'
"""Generated model metaclass."""
@property
def imports(self):
return super().imports + ('import ceacoest.sde',)
sym_model = SymbolicModel()
sym_dt_model = SymbolicDTModel()
printer = sym2num.ScipyPrinter()
GeneratedDTModel = sym2num.class_obj(sym_dt_model, printer)
def load_data():
filepath = '20140520AC1301FREE01_PY_09.bin'
interval = slice(95, 400)
upsample = 4
data = io.loadmat(filepath)
dt = (data['time'].flat[1] - data['time'].flat[0]) / upsample
q0, q1, q2, q3 = data['q'][interval].T
phi = np.arctan2(2*(q0*q1 + q2*q3), 1 - 2*(q1**2 + q2**2))
theta = np.arcsin(2*(q0*q2 - q1*q3))
psi = np.arctan2(2*(q0*q3 + q1*q2), 1 - 2*(q2**2 + q3**2))
N = len(q0)
t = np.arange(N * upsample) * dt
y_dict = dict(phi_meas=phi, theta_meas=theta, psi_meas=psi)
y = ma.masked_all((N * upsample, GeneratedDTModel.ny))
y[::upsample] = GeneratedDTModel.pack('y', y_dict, fill=np.zeros_like(q0))
return t, y
def pem(t, y):
# Instantiate the model
given = dict(
ang_meas_std=0.054*np.pi/180, angvel_png=3,
)
dt = t[1] - t[0]
q0 = GeneratedDTModel.pack('q', given)
c = GeneratedDTModel.pack('c', given)
params = dict(q=q0, c=c, dt=dt)
sampled = dict(t=t)
model = GeneratedDTModel(params, sampled)
x0 = np.zeros(GeneratedDTModel.nx)
x0[:3] = y[0, :3]
Px0 = np.diag(np.repeat([1e-3, 1e-3], 3))
def merit(q, new=None):
mq = model.parametrize(q=q)
kf = kalman.DTUnscentedKalmanFilter(mq, x0, Px0)
return kf.pem_merit(y)
def grad(q, new=None):
mq = model.parametrize(q=q)
kf = kalman.DTUnscentedKalmanFilter(mq, x0, Px0)
return kf.pem_gradient(y)
hess_inds = np.tril_indices(model.nq)
def hess(q, new_q=1, obj_factor=1, lmult=1, new_lmult=1):
mq = model.parametrize(q=q)
kf = kalman.DTUnscentedKalmanFilter(mq, x0, Px0)
return obj_factor * kf.pem_hessian(y)[hess_inds]
q_lb = dict(ang_meas_std=0, angvel_png=0)
q_ub = dict()
q_fix = dict(ang_meas_std=given['ang_meas_std'])
q_bounds = [model.pack('q', dict(q_lb, **q_fix), fill=-np.inf),
model.pack('q', dict(q_ub, **q_fix), fill=np.inf)]
problem = yaipopt.Problem(q_bounds, merit, grad,
hess=hess, hess_inds=hess_inds)
problem.num_option(b'obj_scaling_factor', -1)
(qopt, solinfo) = problem.solve(q0)
return problem, qopt, solinfo, model, q0, x0, Px0
if __name__ == '__main__':
    # Load the measurements and run the parameter estimation.
    [t, y] = load_data()
    [problem, qopt, solinfo, model, q0, x0, Px0] = pem(t, y)
| {
"repo_name": "dimasad/corujas",
"path": "pem.py",
"copies": "1",
"size": "5626",
"license": "mit",
"hash": 408456086512305660,
"line_mean": 30.2555555556,
"line_max": 78,
"alpha_frac": 0.522929257,
"autogenerated": false,
"ratio": 2.757843137254902,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37807723942549015,
"avg_score": null,
"num_lines": null
} |
"""A Ttk Notebook with close buttons.
Based on an example by patthoyts, http://paste.tclers.tk/896
"""
import datetime
import os
import Tkinter as tk
import ttk
import os.path
from tkFileDialog import askopenfilename
import tkSimpleDialog
import configuration
import pcwg_tool
import Analysis
from gui.utils import *
import gui.dataset_tab
import gui.analysis_tab
columnSeparator = "|"
filterSeparator = "#"
datePickerFormat = "%Y-%m-%d %H:%M"# "%d-%m-%Y %H:%M"
datePickerFormatDisplay = "[dd-mm-yyyy hh:mm]"
version = "0.5.13"
ExceptionType = Exception
#ExceptionType = None #comment this line before release
pcwg_inner_ranges = {'A': {'LTI': 0.08, 'UTI': 0.12, 'LSh': 0.05, 'USh': 0.25},
'B': {'LTI': 0.05, 'UTI': 0.09, 'LSh': 0.05, 'USh': 0.25},
'C': {'LTI': 0.1, 'UTI': 0.14, 'LSh': 0.1, 'USh': 0.3}}
class OpenRecent:
def __init__(self, fileOpener, path):
self.path = path
self.fileOpener = fileOpener
def __call__(self):
self.fileOpener.loadFile(self.path)
class ConfirmClose(tkSimpleDialog.Dialog):
def __init__(self, parent, name):
self.name = name
self.close = False
self.save = False
imgdir = os.path.join(os.path.dirname(__file__), 'img')
self.img_logo = tk.PhotoImage("img_logo", file=os.path.join(imgdir, 'logo.gif'))
tkSimpleDialog.Dialog.__init__(self, parent, "Confirm File Close")
def body(self, master):
tk.Label(master, image = self.img_logo).grid(column=0, row=0)
tk.Label(master, text="Do you want to save the changes you made to {0}?".format(self.name)).grid(column=1, row=0)
def buttonbox(self):
try:
self.attributes("-toolwindow",1) #only works on windows
except:
#self.overrideredirect(1) #removes whole frame
self.resizable(0,0) #stops maximising and resizing but can still be minimised
box = tk.Frame(self)
w = tk.Button(box, text="Don't Save", width=10, command=self.close_dont_save)
w.pack(side=tk.LEFT, padx=5, pady=5)
w = tk.Button(box, text="Cancel", width=10, command=self.cancel)
w.pack(side=tk.LEFT, padx=5, pady=5)
w = tk.Button(box, text="Save", width=10, command=self.close_and_save, default=tk.ACTIVE)
w.pack(side=tk.LEFT, padx=5, pady=5)
self.bind("<Return>", self.close_and_save)
self.bind("<Escape>", self.cancel)
box.pack()
def close_dont_save(self, event=None):
self.close = True
self.save = False
self.close_window()
def close_and_save(self, event=None):
self.close = True
self.save = True
self.close_window()
def cancel(self, event=None):
self.close = False
self.save = False
self.close_window()
def close_window(self):
self.parent.focus_set()
self.destroy()
class FileOpener:
def __init__(self, root, tabs, preferences):
self.root = root
self.tabs = tabs
self.preferences = preferences
def openFile(self):
fileName = self.SelectFile(parent=self.root, defaultextension=".xml")
self.loadFile(fileName)
def SelectFile(self, parent, defaultextension=None):
if len(self.preferences.workSpaceFolder) > 0:
return askopenfilename(parent=parent, initialdir=self.preferences.workSpaceFolder, defaultextension=defaultextension)
else:
return askopenfilename(parent=parent, defaultextension=defaultextension)
def loadFile(self, fileName):
if len(fileName) > 0:
detector = configuration.TypeDetector(fileName)
if detector.file_type == "analysis":
self.tabs.addAnalysis(fileName)
elif detector.file_type == "dataset":
self.tabs.addDataset(fileName)
else:
raise Exception("Unkown file type: {0}".format(detector.file_type))
self.preferences.addRecent(fileName)
def openMaximized(root):
w, h = root.winfo_screenwidth(), root.winfo_screenheight()
root.geometry("%dx%d+0+0" % (w, h))
def hello():
print "hello!"
def getTabID(notebook):
my_tabs = notebook.tabs()
tab_id = my_tabs[len(my_tabs) - 1]
return tab_id
class ClosableTab:
def __init__(self, notebook, fileName, console):
self.console = console
self.name = os.path.basename(fileName)
self.frame = tk.Frame(notebook)
notebook.add(self.frame, text=self.name, padding=3)
self.index = self.getTabIndex(notebook)
#self.status = status
self.isNew = False
self.titleColumn = 0
self.labelColumn = 1
self.inputColumn = 2
self.buttonColumn = 3
self.secondButtonColumn = 4
self.tipColumn = 5
self.messageColumn = 6
self.validations = []
self.row = 0
self.listboxEntries = {}
def close(self):
d = ConfirmClose(root, self.name)
if d.save:
self.save()
self.console.write("{0} saved".format(self.name))
return d.close
def getTabIndex(self, notebook):
my_tabs = notebook.tabs()
return len(my_tabs) - 1
def save(self):
pass
def addDatePickerEntry(self, master, title, validation, value, width = None):
if value != None:
if type(value) == str:
textValue = value
else:
textValue = value.strftime(datePickerFormat)
else:
textValue = None
entry = self.addEntry(master, title + " " + datePickerFormatDisplay, validation, textValue, width = width)
entry.entry.config(state=tk.DISABLED)
pickButton = tk.Button(master, text=".", command = DatePicker(self, entry, datePickerFormat), width=3, height=1)
pickButton.grid(row=(self.row-1), sticky=tk.N, column=self.inputColumn, padx = 160)
clearButton = tk.Button(master, text="x", command = ClearEntry(entry), width=3, height=1)
clearButton.grid(row=(self.row-1), sticky=tk.W, column=self.inputColumn, padx = 133)
entry.bindPickButton(pickButton)
return entry
def addPickerEntry(self, master, title, validation, value, width = None):
entry = self.addEntry(master, title, validation, value, width = width)
pickButton = tk.Button(master, text=".", command = ColumnPicker(self, entry), width=5, height=1)
pickButton.grid(row=(self.row-1), sticky=tk.E+tk.N, column=self.buttonColumn)
entry.bindPickButton(pickButton)
return entry
def addOption(self, master, title, options, value):
label = tk.Label(master, text=title)
label.grid(row=self.row, sticky=tk.W, column=self.labelColumn)
variable = tk.StringVar(master, value)
option = apply(tk.OptionMenu, (master, variable) + tuple(options))
option.grid(row=self.row, column=self.inputColumn, sticky=tk.W)
self.row += 1
return variable
def addListBox(self, master, title):
    """Add a scrollable listbox registered under *title*.

    Returns the ListBoxEntry bundling the listbox, its scrollbar and a
    tip label.  Increments self.row.
    """
    scrollbar = tk.Scrollbar(master, orient=tk.VERTICAL)
    tipLabel = tk.Label(master, text="")
    tipLabel.grid(row = self.row, sticky=tk.W, column=self.tipColumn)
    # BUG FIX: yscrollcommand must be the scrollbar's set method (cf.
    # Console below), not the Scrollbar widget itself -- passing the
    # widget breaks the scroll protocol at runtime.
    lb = tk.Listbox(master, yscrollcommand=scrollbar.set, selectmode=tk.EXTENDED, height=3)
    self.listboxEntries[title] = ListBoxEntry(lb, scrollbar, tipLabel)
    self.row += 1
    self.listboxEntries[title].scrollbar.configure(command=self.listboxEntries[title].listbox.yview)
    self.listboxEntries[title].scrollbar.grid(row=self.row, sticky=tk.W+tk.N+tk.S, column=self.titleColumn)
    # NOTE(review): the listbox itself is never grid()-ed here -- presumably
    # the caller places it; confirm before relying on this method alone.
    return self.listboxEntries[title]
def addCheckBox(self, master, title, value):
    """Add a labelled checkbox; return the IntVar tracking its state."""
    caption = tk.Label(master, text=title)
    caption.grid(row=self.row, sticky=tk.W, column=self.labelColumn)
    state = tk.IntVar(master, value)
    box = tk.Checkbutton(master, variable=state)
    box.grid(row=self.row, column=self.inputColumn, sticky=tk.W)
    self.row += 1
    return state
def addTitleRow(self, master, title):
    """Add a section-title row spanning the first two columns."""
    tk.Label(master, text=title).grid(row=self.row, sticky=tk.W, column=self.titleColumn, columnspan = 2)
    # add dummy label to stop the form shrinking when validation messages hide
    tk.Label(master, text = " " * 70).grid(row=self.row, sticky=tk.W, column=self.messageColumn)
    self.row += 1
def addEntry(self, master, title, validation, value, width = None):
    """Add a labelled text entry with optional per-keystroke validation.

    Returns a VariableEntry bundling the StringVar, the Entry widget and
    the tip label.  Increments self.row.
    """
    variable = tk.StringVar(master, value)
    label = tk.Label(master, text=title)
    label.grid(row = self.row, sticky=tk.W, column=self.labelColumn)
    tipLabel = tk.Label(master, text="")
    tipLabel.grid(row = self.row, sticky=tk.W, column=self.tipColumn)
    if validation != None:
        # Show the validation's message label in the message column and
        # register it so validate() can report failures for the whole form.
        validation.messageLabel.grid(row = self.row, sticky=tk.W, column=self.messageColumn)
        validation.title = title
        self.validations.append(validation)
        validationCommand = validation.CMD
    else:
        validationCommand = None
    # validate='key' makes Tk run the command on every keystroke.
    entry = tk.Entry(master, textvariable=variable, validate = 'key', validatecommand = validationCommand, width = width)
    entry.grid(row=self.row, column=self.inputColumn, sticky=tk.W)
    if validation != None:
        # Give the validation a back-reference to the entry it guards.
        validation.link(entry)
    self.row += 1
    return VariableEntry(variable, entry, tipLabel)
def addFileSaveAsEntry(self, master, title, validation, value, width = 60):
    """Add an entry plus a '...' button that opens a save-as file dialog."""
    # BUG FIX: the original passed an undefined name (showHideCommand) as an
    # extra positional argument, which addEntry does not accept -- every
    # call raised NameError.  Matches addFileOpenEntry below.
    variable = self.addEntry(master, title, validation, value, width)
    button = tk.Button(master, text="...", command = SetFileSaveAsCommand(master, variable), height=1)
    button.grid(row=(self.row - 1), sticky=tk.E+tk.W, column=self.buttonColumn)
    return variable
def addFileOpenEntry(self, master, title, validation, value, basePathVariable = None, width = 60):
    """Add an entry plus a '...' button that opens a file-open dialog."""
    variable = self.addEntry(master, title, validation, value, width)
    browse = tk.Button(master, text="...", command = SetFileOpenCommand(master, variable, basePathVariable), height=1)
    browse.grid(row=(self.row - 1), sticky=tk.E+tk.W, column=self.buttonColumn)
    return variable
def validate(self):
    """Check every registered validation.

    On failure, shows a warning listing each invalid field and returns 0;
    otherwise returns 1.
    """
    failures = []
    for validation in self.validations:
        if validation.valid:
            continue
        if isinstance(validation, ValidateDatasets):
            failures.append("Datasets error. \r")
        else:
            failures.append("%s (%s)\r" % (validation.title, validation.messageLabel['text']))
    if failures:
        tkMessageBox.showwarning(
            "Validation errors",
            "Illegal values, please review error messages and try again:\r %s" % "".join(failures)
        )
        return 0
    return 1
class ClosableTabs:
    """ttk Notebook whose tabs carry a working close button."""

    def __init__(self, parent, console):
        self.console = console
        self.loadImages()
        self.style = self.createClosableTabStyle()
        # Route notebook mouse events to the close-button handlers.
        parent.bind_class("TNotebook", "<ButtonPress-1>", self.btn_press, True)
        parent.bind_class("TNotebook", "<ButtonRelease-1>", self.btn_release)
        #add notebook (holds tabs)
        self.nb = ttk.Notebook(parent, style="ButtonNotebook")
        self.nb.pressed_index = None
        # Maps tab index -> tab object; consulted when a close button fires.
        self.tabs = {}

    def addAnalysis(self, fileName):
        """Open *fileName* in a new analysis tab and register it."""
        closableTab = gui.analysis_tab.AnalysisTab(self.nb, fileName, self.console)
        self.tabs[closableTab.index] = closableTab
        return closableTab

    def addDataset(self, fileName):
        """Open *fileName* in a new dataset tab and register it."""
        closableTab = gui.dataset_tab.DatasetTab(self.nb, fileName, self.console)
        self.tabs[closableTab.index] = closableTab
        return closableTab

    def loadImages(self):
        """Load close-button images; the references stop Tk discarding them."""
        imgdir = os.path.join(os.path.dirname(__file__), 'img')
        self.i1 = tk.PhotoImage("img_close", file=os.path.join(imgdir, 'close.gif'))
        self.i2 = tk.PhotoImage("img_closeactive",
                                file=os.path.join(imgdir, 'close_active.gif'))
        self.i3 = tk.PhotoImage("img_closepressed",
                                file=os.path.join(imgdir, 'close_pressed.gif'))

    def btn_press(self, event):
        """Arm the close button under the mouse, remembering which tab."""
        x, y, widget = event.x, event.y, event.widget
        elem = widget.identify(x, y)
        # BUG FIX: narrowed from a bare "except: pass" -- TclError (no tab
        # at the click position) is the expected failure here; anything
        # else should not be silently swallowed.
        try:
            index = widget.index("@%d,%d" % (x, y))
            if "close" in elem:
                widget.state(['pressed'])
                widget.pressed_index = index
        except tk.TclError:
            pass

    def close_tab(self, widget, index):
        """Ask the tab to close; on consent, remove it from the notebook."""
        tab = self.tabs[index]
        if tab.close():
            widget.forget(index)
            widget.event_generate("<<NotebookClosedTab>>")

    def btn_release(self, event):
        """Close the tab when press and release hit the same close button."""
        x, y, widget = event.x, event.y, event.widget
        if not widget.instate(['pressed']):
            return
        elem = widget.identify(x, y)
        index = widget.index("@%d,%d" % (x, y))
        if "close" in elem and widget.pressed_index == index:
            self.close_tab(widget, index)
        widget.state(["!pressed"])
        widget.pressed_index = None

    def createClosableTabStyle(self):
        """Build the ButtonNotebook style containing the close element."""
        style = ttk.Style()
        # The close element cycles through the three images by widget state.
        style.element_create("close", "image", "img_close",
                             ("active", "pressed", "!disabled", "img_closepressed"),
                             ("active", "!disabled", "img_closeactive"), border=8, sticky='')
        style.layout("ButtonNotebook", [("ButtonNotebook.client", {"sticky": "nswe"})])
        # Each tab: label plus close button inside padding/focus wrappers.
        style.layout("ButtonNotebook.Tab", [
            ("ButtonNotebook.tab", {"sticky": "nswe", "children":
                [("ButtonNotebook.padding", {"side": "top", "sticky": "nswe",
                                             "children":
                    [("ButtonNotebook.focus", {"side": "top", "sticky": "nswe",
                                               "children":
                        [("ButtonNotebook.label", {"side": "left", "sticky": ''}),
                         ("ButtonNotebook.close", {"side": "left", "sticky": ''})]
                    })]
                })]
            })]
        )
        return style
class ValidationTabs:
    """Notebook whose tabs can display a validity marker icon."""

    def __init__(self, parent):
        self.loadImages()
        # Notebook that will hold the validation tabs.
        self.nb = ttk.Notebook(parent)
        self.nb.pressed_index = None

    def add(self, name):
        """Create a tab called *name*; return its ValidationTab wrapper."""
        frame = tk.Frame(self.nb)
        self.nb.add(frame, text=name, padding=3)
        return ValidationTab(self.nb, getTabID(self.nb), frame, self.img_invalid)

    def loadImages(self):
        """Load the valid/invalid marker images from the img directory."""
        img_dir = os.path.join(os.path.dirname(__file__), 'img')
        self.img_valid = tk.PhotoImage("img_valid", file=os.path.join(img_dir, 'valid.gif'))
        self.img_invalid = tk.PhotoImage("img_invalid", file=os.path.join(img_dir, 'invalid.gif'))

    def pack(self):
        """Make the notebook fill its parent."""
        self.nb.pack(expand=1, fill='both')
class ValidationTab:
    """Wraps one notebook tab and toggles an 'invalid' marker icon on it."""

    def __init__(self, notebook, tab_id, frame, img_invalid):
        self.notebook = notebook
        self.tab_id = tab_id
        self.frame = frame
        self.img_invalid = img_invalid

    def validate(self, valid):
        """Show the invalid icon on the tab when *valid* is falsy, else clear it."""
        if valid:
            self.notebook.tab(self.tab_id, image = None)
        else:
            self.notebook.tab(self.tab_id, image = self.img_invalid, compound=tk.RIGHT)
class Console:
    """Scrolling message log: a listbox with a vertical scrollbar."""

    def __init__(self, parent):
        bar = tk.Scrollbar(parent, orient=tk.VERTICAL)
        self.listbox = tk.Listbox(parent, yscrollcommand=bar.set, selectmode=tk.EXTENDED)
        bar.configure(command=self.listbox.yview)
        self.listbox.pack(side=tk.LEFT, fill=tk.BOTH, expand=1, pady=5)
        bar.pack(side=tk.RIGHT, fill=tk.Y, pady=5)

    def write(self, line):
        """Append one entry (stringified) to the end of the console."""
        self.listbox.insert(tk.END, str(line))
class PCWG:
    """Top-level application window: tab area, console, and menu bar."""
    def __init__(self, root):
        self.root = root
        # Labels already added to the Open Recent menu (avoids duplicates).
        self.added_recents = []
        tab_frame = tk.Frame(root)
        console_frame = tk.Frame(root, background="grey")
        tab_frame.grid(row=0, column=0, sticky="nsew")
        console_frame.grid(row=1, column=0, sticky="nsew")
        # Only the tab row/column stretch when the window resizes.
        root.grid_rowconfigure(0, weight=1)
        root.grid_columnconfigure(0, weight=1)
        console = Console(console_frame)
        tabs = ClosableTabs(tab_frame, console)
        self.preferences = configuration.Preferences()
        self.fileOpener = FileOpener(root, tabs, self.preferences)
        self.addMenus(root)
        # Refresh the recent-files menu whenever preferences change.
        self.preferences.onRecentChange += self.addRecents
    def addMenus(self, root):
        """Build the File/Edit/Help menu bar and attach it to *root*."""
        #http://effbot.org/tkbook/menu.htm
        #add menu
        self.menubar = tk.Menu(root)
        # create a pulldown menu, and add it to the menu bar
        filemenu = tk.Menu(self.menubar)
        new_menu = tk.Menu(self.menubar)
        # NOTE(review): the New entries have no command wired up yet.
        new_menu.add_command(label="Analysis")
        new_menu.add_command(label="Dataset")
        new_menu.add_command(label="Portfolio")
        self.menubar.add_cascade(label="File", menu=filemenu)
        filemenu.add_cascade(label="New", menu=new_menu)
        filemenu.add_command(label="Open", command=self.fileOpener.openFile)
        self.recent_menu = tk.Menu(self.menubar)
        filemenu.add_cascade(label="Open Recent", menu=self.recent_menu)
        self.addRecents()
        filemenu.add_command(label="Save")
        filemenu.add_command(label="Save As")
        filemenu.add_separator()
        filemenu.add_command(label="Exit", command=root.quit)
        #analysis_menu.add_command(label="Analysis")
        #filemenu.add_command(label="Dataset")
        #filemenu.add_command(label="Portfolio")
        #filemenu.add_cascade(label="Analysis", menu=filemenu)
        # create more pulldown menus
        editmenu = tk.Menu(self.menubar, tearoff=0)
        editmenu.add_command(label="Cut", command=hello)
        editmenu.add_command(label="Copy", command=hello)
        editmenu.add_command(label="Paste", command=hello)
        self.menubar.add_cascade(label="Edit", menu=editmenu)
        helpmenu = tk.Menu(self.menubar, tearoff=0)
        helpmenu.add_command(label="About", command=hello)
        self.menubar.add_cascade(label="Help", menu=helpmenu)
        # display the menu
        root.config(menu=self.menubar)
    def addRecents(self):
        """Append any not-yet-listed recent files to the Open Recent menu."""
        for recent in self.preferences.recents:
            if recent not in self.added_recents:
                self.added_recents.append(recent)
                self.recent_menu.add_command(label=recent, command = OpenRecent(self.fileOpener, recent))
#start of main code
root = tk.Tk()
menu = PCWG(root)
openMaximized(root)
root.mainloop()
# Persist user preferences once the UI event loop exits.
menu.preferences.save()
| {
"repo_name": "peterdougstuart/PCWG",
"path": "test-gui/pcwg_tool_reborn.py",
"copies": "3",
"size": "19571",
"license": "mit",
"hash": -428638948834878500,
"line_mean": 30.8227642276,
"line_max": 137,
"alpha_frac": 0.5853558837,
"autogenerated": false,
"ratio": 3.7832978929054706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01011727179384996,
"num_lines": 615
} |
"""A Ttk Notebook with close buttons.
Based on an example by patthoyts, http://paste.tclers.tk/896
"""
import os
import Tkinter as tk
import ttk
import os.path
from tkFileDialog import *
import tkMessageBox
import tkSimpleDialog
#http://effbot.org/tkbook/menu.htm
class Analysis:
    """Stub model object for an analysis file (test-GUI placeholder)."""
    def __init__(self, fileName):
        self.fileName = fileName
        # Dummy display values shown in the analysis tab.
        self.s1 = "text1"
        self.s2 = "text2"
    def save(self):
        # Placeholder: real persistence is not implemented in this test GUI.
        print "save"
class Preferences:
    """Stub preferences store used by the test GUI."""

    def __init__(self):
        # No workspace folder configured by default.
        self.workSpaceFolder = ""

    def addRecent(self, path):
        """Record *path* as a recently opened file; stubbed out here."""
        pass
class Recent:
    """Callable menu command representing one recently opened file."""

    def __init__(self, file):
        # Path of the recent file this command would reopen.
        self.file = file

    def __call__(self):
        """Reopen the recent file; stubbed out here."""
        pass
class ConfirmClose(tkSimpleDialog.Dialog):
    """Modal "save changes?" dialog.

    After the dialog returns, .close says whether the tab may close and
    .save says whether it should be saved first.
    """
    def __init__(self, parent, name):
        self.name = name
        self.close = False
        self.save = False
        imgdir = os.path.join(os.path.dirname(__file__), 'img')
        # Keep a reference to the logo image so Tk doesn't discard it.
        self.img_logo = tk.PhotoImage("img_logo", file=os.path.join(imgdir, 'logo.gif'))
        # Dialog.__init__ blocks until the dialog is dismissed, so the
        # result attributes above must be initialised before this call.
        tkSimpleDialog.Dialog.__init__(self, parent, "Confirm File Close")
    def body(self, master):
        """Lay out the logo and the confirmation question."""
        tk.Label(master, image = self.img_logo).grid(column=0, row=0)
        tk.Label(master, text="Do you want to save the changes you made to {0}?".format(self.name)).grid(column=1, row=0)
    def buttonbox(self):
        """Replace the default OK/Cancel buttons with Don't Save/Cancel/Save."""
        try:
            self.attributes("-toolwindow",1) #only works on windows
        except:
            #self.overrideredirect(1) #removes whole frame
            self.resizable(0,0) #stops maximising and resizing but can still be minimised
        box = tk.Frame(self)
        w = tk.Button(box, text="Don't Save", width=10, command=self.close_dont_save)
        w.pack(side=tk.LEFT, padx=5, pady=5)
        w = tk.Button(box, text="Cancel", width=10, command=self.cancel)
        w.pack(side=tk.LEFT, padx=5, pady=5)
        w = tk.Button(box, text="Save", width=10, command=self.close_and_save, default=tk.ACTIVE)
        w.pack(side=tk.LEFT, padx=5, pady=5)
        # Enter saves and closes; Escape cancels.
        self.bind("<Return>", self.close_and_save)
        self.bind("<Escape>", self.cancel)
        box.pack()
    def close_dont_save(self, event=None):
        """Close the tab without saving."""
        self.close = True
        self.save = False
        self.close_window()
    def close_and_save(self, event=None):
        """Save and then close the tab."""
        self.close = True
        self.save = True
        self.close_window()
    def cancel(self, event=None):
        """Abort: neither save nor close."""
        self.close = False
        self.save = False
        self.close_window()
    def close_window(self):
        """Return focus to the parent window and destroy the dialog."""
        self.parent.focus_set()
        self.destroy()
class FileOpener:
    """Opens analysis files into tabs and records them as recent files."""

    def __init__(self, root, tabs, preferences):
        self.root = root
        self.tabs = tabs
        self.preferences = preferences

    def openFile(self):
        """Prompt for a file; open it in a new tab and remember it."""
        fileName = self.SelectFile(parent=self.root, defaultextension=".xml")
        if len(fileName) > 0:
            self.tabs.addAnalysis(fileName)
            self.preferences.addRecent(fileName)

    def SelectFile(self, parent, defaultextension=None):
        """Show an open-file dialog, starting in the workspace folder if set."""
        # BUG FIX: the original read a global 'preferences' (NameError at
        # runtime); the instance attribute set in __init__ is what was meant.
        if len(self.preferences.workSpaceFolder) > 0:
            return askopenfilename(parent=parent,
                                   initialdir=self.preferences.workSpaceFolder,
                                   defaultextension=defaultextension)
        return askopenfilename(parent=parent, defaultextension=defaultextension)
def openMaximized(root):
    """Resize *root* to fill the whole screen, anchored at the top-left."""
    width = root.winfo_screenwidth()
    height = root.winfo_screenheight()
    root.geometry("%dx%d+0+0" % (width, height))
def getRecent():
    """Return the hard-coded list of recent-file labels (test data)."""
    return ["One", "Two", "Three", "Four"]
def addRecent(recent_menu):
    """Append one menu command per recent file to *recent_menu*."""
    for label in getRecent():
        recent_menu.add_command(label=label, command = Recent(label))
def hello():
    # Placeholder callback for menu items that are not wired up yet.
    print "hello!"
def getTabID(notebook):
    """Return the Tk tab id of the notebook's most recently added tab."""
    return notebook.tabs()[-1]
class ClosableTab:
    """Base class for a notebook tab that prompts to save before closing."""

    def __init__(self, notebook, fileName, console):
        self.console = console
        self.name = os.path.basename(fileName)
        self.frame = tk.Frame(notebook)
        notebook.add(self.frame, text=self.name, padding=3)
        self.index = self.getTabIndex(notebook)

    def close(self):
        """Confirm closing; save (and log) first if the user asks.

        Returns True when the tab may actually be closed.
        """
        dialog = ConfirmClose(root, self.name)
        if dialog.save:
            self.save()
            self.console.write("{0} saved".format(self.name))
        return dialog.close

    def getTabIndex(self, notebook):
        """Return the index of the notebook's last (most recent) tab."""
        return len(notebook.tabs()) - 1

    def save(self):
        """Persist the tab's contents; subclasses override."""
        pass
class AnalysisTab(ClosableTab):
    """Closable tab displaying a (stub) analysis loaded from a file."""
    def __init__(self, notebook, fileName, console):
        ClosableTab.__init__(self, notebook, fileName, console)
        self.analysis = Analysis(fileName)
        # Nested notebook separating main and correction settings.
        sub_tabs = ValidationTabs(self.frame)
        main_frame = sub_tabs.add("Main Settings")
        correction_frame = sub_tabs.add("Correction Settings")
        s1Var = tk.StringVar()
        s2Var = tk.StringVar()
        s1Var.set(self.analysis.s1)
        s2Var.set(self.analysis.s2)
        square1Label = tk.Label(main_frame.frame, textvariable=s1Var)
        square1Label.grid(row=0, column=7)
        square2Label = tk.Label(main_frame.frame, textvariable=s2Var)
        square2Label.grid(row=0, column=6)
        sub_tabs.pack()
        # Demonstrates the invalid-marker icon on the first sub-tab.
        main_frame.validate(False)
        notebook.pack(expand=1, fill='both')
    def save(self):
        """Persist the analysis backing this tab."""
        self.analysis.save()
class ClosableTabs:
    """ttk Notebook whose tabs carry a working close button."""

    def __init__(self, parent, console):
        self.console = console
        self.loadImages()
        self.style = self.createClosableTabStyle()
        # Route notebook mouse events to the close-button handlers.
        parent.bind_class("TNotebook", "<ButtonPress-1>", self.btn_press, True)
        parent.bind_class("TNotebook", "<ButtonRelease-1>", self.btn_release)
        #add notebook (holds tabs)
        self.nb = ttk.Notebook(parent, style="ButtonNotebook")
        self.nb.pressed_index = None
        # Maps tab index -> tab object; consulted when a close button fires.
        self.tabs = {}

    def addAnalysis(self, fileName):
        """Open *fileName* in a new analysis tab and register it."""
        closableTab = AnalysisTab(self.nb, fileName, self.console)
        self.tabs[closableTab.index] = closableTab
        return closableTab

    def loadImages(self):
        """Load close-button images; the references stop Tk discarding them."""
        imgdir = os.path.join(os.path.dirname(__file__), 'img')
        self.i1 = tk.PhotoImage("img_close", file=os.path.join(imgdir, 'close.gif'))
        self.i2 = tk.PhotoImage("img_closeactive",
                                file=os.path.join(imgdir, 'close_active.gif'))
        self.i3 = tk.PhotoImage("img_closepressed",
                                file=os.path.join(imgdir, 'close_pressed.gif'))

    def btn_press(self, event):
        """Arm the close button under the mouse, remembering which tab."""
        x, y, widget = event.x, event.y, event.widget
        elem = widget.identify(x, y)
        # BUG FIX: narrowed from a bare "except: pass" -- TclError (no tab
        # at the click position) is the expected failure here; anything
        # else should not be silently swallowed.
        try:
            index = widget.index("@%d,%d" % (x, y))
            if "close" in elem:
                widget.state(['pressed'])
                widget.pressed_index = index
        except tk.TclError:
            pass

    def close_tab(self, widget, index):
        """Ask the tab to close; on consent, remove it from the notebook."""
        tab = self.tabs[index]
        if tab.close():
            widget.forget(index)
            widget.event_generate("<<NotebookClosedTab>>")

    def btn_release(self, event):
        """Close the tab when press and release hit the same close button."""
        x, y, widget = event.x, event.y, event.widget
        if not widget.instate(['pressed']):
            return
        elem = widget.identify(x, y)
        index = widget.index("@%d,%d" % (x, y))
        if "close" in elem and widget.pressed_index == index:
            self.close_tab(widget, index)
        widget.state(["!pressed"])
        widget.pressed_index = None

    def createClosableTabStyle(self):
        """Build the ButtonNotebook style containing the close element."""
        style = ttk.Style()
        # The close element cycles through the three images by widget state.
        style.element_create("close", "image", "img_close",
                             ("active", "pressed", "!disabled", "img_closepressed"),
                             ("active", "!disabled", "img_closeactive"), border=8, sticky='')
        style.layout("ButtonNotebook", [("ButtonNotebook.client", {"sticky": "nswe"})])
        # Each tab: label plus close button inside padding/focus wrappers.
        style.layout("ButtonNotebook.Tab", [
            ("ButtonNotebook.tab", {"sticky": "nswe", "children":
                [("ButtonNotebook.padding", {"side": "top", "sticky": "nswe",
                                             "children":
                    [("ButtonNotebook.focus", {"side": "top", "sticky": "nswe",
                                               "children":
                        [("ButtonNotebook.label", {"side": "left", "sticky": ''}),
                         ("ButtonNotebook.close", {"side": "left", "sticky": ''})]
                    })]
                })]
            })]
        )
        return style
class ValidationTabs:
    """Notebook whose tabs can display a validity marker icon."""

    def __init__(self, parent):
        self.loadImages()
        # Notebook that will hold the validation tabs.
        self.nb = ttk.Notebook(parent)
        self.nb.pressed_index = None

    def add(self, name):
        """Create a tab called *name*; return its ValidationTab wrapper."""
        frame = tk.Frame(self.nb)
        self.nb.add(frame, text=name, padding=3)
        return ValidationTab(self.nb, getTabID(self.nb), frame, self.img_invalid)

    def loadImages(self):
        """Load the valid/invalid marker images from the img directory."""
        img_dir = os.path.join(os.path.dirname(__file__), 'img')
        self.img_valid = tk.PhotoImage("img_valid", file=os.path.join(img_dir, 'valid.gif'))
        self.img_invalid = tk.PhotoImage("img_invalid", file=os.path.join(img_dir, 'invalid.gif'))

    def pack(self):
        """Make the notebook fill its parent."""
        self.nb.pack(expand=1, fill='both')
class ValidationTab:
    """Wraps one notebook tab and toggles an 'invalid' marker icon on it."""

    def __init__(self, notebook, tab_id, frame, img_invalid):
        self.notebook = notebook
        self.tab_id = tab_id
        self.frame = frame
        self.img_invalid = img_invalid

    def validate(self, valid):
        """Show the invalid icon on the tab when *valid* is falsy, else clear it."""
        if valid:
            self.notebook.tab(self.tab_id, image = None)
        else:
            self.notebook.tab(self.tab_id, image = self.img_invalid, compound=tk.RIGHT)
class Console:
    """Scrolling message log: a listbox with a vertical scrollbar."""

    def __init__(self, parent):
        bar = tk.Scrollbar(parent, orient=tk.VERTICAL)
        self.listbox = tk.Listbox(parent, yscrollcommand=bar.set, selectmode=tk.EXTENDED)
        bar.configure(command=self.listbox.yview)
        self.listbox.pack(side=tk.LEFT, fill=tk.BOTH, expand=1, pady=5)
        bar.pack(side=tk.RIGHT, fill=tk.Y, pady=5)

    def write(self, line):
        """Append one entry (stringified) to the end of the console."""
        self.listbox.insert(tk.END, str(line))
class PCWG:
def __init__(self, root):
self.root = root
tab_frame = tk.Frame(root)
console_frame = tk.Frame(root, background="grey")
tab_frame.grid(row=0, column=0, sticky="nsew")
console_frame.grid(row=1, column=0, sticky="nsew")
root.grid_rowconfigure(0, weight=1)
root.grid_columnconfigure(0, weight=1)
console = Console(console_frame)
tabs = ClosableTabs(tab_frame, console)
self.preferences = Preferences()
self.fileOpener = FileOpener(root, tabs, self.preferences)
self.addMenus(root)
def addMenus(self, root):
#add menu
self.menubar = tk.Menu(root)
# create a pulldown menu, and add it to the menu bar
filemenu = tk.Menu(self.menubar)
new_menu = tk.Menu(self.menubar)
new_menu.add_command(label="Analysis")
new_menu.add_command(label="Dataset")
new_menu.add_command(label="Portfolio")
self.menubar.add_cascade(label="File", menu=filemenu)
filemenu.add_cascade(label="New", menu=new_menu)
filemenu.add_command(label="Open", command=self.fileOpener.openFile)
recent_menu = tk.Menu(self.menubar)
addRecent(recent_menu)
filemenu.add_cascade(label="Open Recent", menu=recent_menu)
filemenu.add_command(label="Save")
filemenu.add_command(label="Save As")
filemenu.add_separator()
filemenu.add_command(label="Exit", command=root.quit)
#analysis_menu.add_command(label="Analysis")
#filemenu.add_command(label="Dataset")
#filemenu.add_command(label="Portfolio")
#filemenu.add_cascade(label="Analysis", menu=filemenu)
# create more pulldown menus
editmenu = tk.Menu(self.menubar, tearoff=0)
editmenu.add_command(label="Cut", command=hello)
editmenu.add_command(label="Copy", command=hello)
editmenu.add_command(label="Paste", command=hello)
self.menubar.add_cascade(label="Edit", menu=editmenu)
helpmenu = tk.Menu(self.menubar, tearoff=0)
helpmenu.add_command(label="About", command=hello)
self.menubar.add_cascade(label="Help", menu=helpmenu)
# display the menu
root.config(menu=self.menubar)
#start of main code
root = tk.Tk()
menu = PCWG(root)
# Fill the screen before the event loop starts.
openMaximized(root)
root.mainloop() | {
"repo_name": "lcameron05/PCWG",
"path": "test-gui/closable_Tab_with_menu.py",
"copies": "3",
"size": "12573",
"license": "mit",
"hash": -301020007839592400,
"line_mean": 26.7571743929,
"line_max": 132,
"alpha_frac": 0.5938121371,
"autogenerated": false,
"ratio": 3.5912596401028276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007102117948054506,
"num_lines": 453
} |
"""A Ttk Notebook with close buttons.
Based on an example by patthoyts, http://paste.tclers.tk/896
"""
import os
import Tkinter
import ttk
root = Tkinter.Tk()

# Load the close-button artwork; the module-level names keep the images
# referenced so Tk does not discard them.
imgdir = os.path.join(os.path.dirname(__file__), 'img')
i1 = Tkinter.PhotoImage("img_close", file=os.path.join(imgdir, 'close.gif'))
i2 = Tkinter.PhotoImage("img_closeactive",
    file=os.path.join(imgdir, 'close_active.gif'))
i3 = Tkinter.PhotoImage("img_closepressed",
    file=os.path.join(imgdir, 'close_pressed.gif'))

style = ttk.Style()
# The "close" element shows a different image depending on widget state.
style.element_create("close", "image", "img_close",
    ("active", "pressed", "!disabled", "img_closepressed"),
    ("active", "!disabled", "img_closeactive"), border=8, sticky='')
style.layout("ButtonNotebook", [("ButtonNotebook.client", {"sticky": "nswe"})])
# Each tab: a label plus a close element, wrapped in padding and focus.
style.layout("ButtonNotebook.Tab", [
    ("ButtonNotebook.tab", {"sticky": "nswe", "children":
        [("ButtonNotebook.padding", {"side": "top", "sticky": "nswe",
                                     "children":
            [("ButtonNotebook.focus", {"side": "top", "sticky": "nswe",
                                       "children":
                [("ButtonNotebook.label", {"side": "left", "sticky": ''}),
                 ("ButtonNotebook.close", {"side": "left", "sticky": ''})]
            })]
        })]
    })]
)
def btn_press(event):
    """Mark a tab's close button as pressed and remember which tab it was."""
    widget = event.widget
    element = widget.identify(event.x, event.y)
    index = widget.index("@%d,%d" % (event.x, event.y))
    if "close" in element:
        widget.state(['pressed'])
        widget.pressed_index = index
def btn_release(event):
    """Close the tab when press and release hit the same close button."""
    widget = event.widget
    if not widget.instate(['pressed']):
        return
    element = widget.identify(event.x, event.y)
    index = widget.index("@%d,%d" % (event.x, event.y))
    if "close" in element and widget.pressed_index == index:
        widget.forget(index)
        widget.event_generate("<<NotebookClosedTab>>")
    widget.state(["!pressed"])
    widget.pressed_index = None
# Route notebook button events to the close-button handlers above.
root.bind_class("TNotebook", "<ButtonPress-1>", btn_press, True)
root.bind_class("TNotebook", "<ButtonRelease-1>", btn_release)
# create a ttk notebook with our custom style, and add some tabs to it
nb = ttk.Notebook(width=200, height=200, style="ButtonNotebook")
nb.pressed_index = None
f1 = Tkinter.Frame(nb, background="red")
f2 = Tkinter.Frame(nb, background="green")
f3 = Tkinter.Frame(nb, background="blue")
nb.add(f1, text='Red', padding=3)
nb.add(f2, text='Green', padding=3)
nb.add(f3, text='Blue', padding=3)
nb.pack(expand=1, fill='both')
root.mainloop()
| {
"repo_name": "dexterx17/nodoSocket",
"path": "clients/Python-2.7.6/Demo/tkinter/ttk/notebook_closebtn.py",
"copies": "10",
"size": "2527",
"license": "mit",
"hash": -2158715972527956000,
"line_mean": 31.3974358974,
"line_max": 79,
"alpha_frac": 0.610605461,
"autogenerated": false,
"ratio": 3.2606451612903227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027377716970477155,
"num_lines": 78
} |
from Behaviour import Behaviour
from lwmath.Vector import Vector
class Attraction(Behaviour):
    """Pulls particles toward a target point, weakening with distance."""

    def __init__(self, radius=1000, strength=100.0):
        self.target = Vector()
        self.radius = radius
        self.strength = strength
        # Scratch vector reused by apply() to avoid per-call allocation.
        self._delta = Vector()
        # Also caches the squared radius used by apply().
        self.setRadius(self.radius)
        super(Attraction, self).__init__()

    def setRadius(self, radius):
        """Set the effective radius (and its cached square) of the behaviour."""
        self.radius = radius
        self.radiusSq = radius ** 2

    def apply(self, p, dt, index):
        """Accelerate particle *p* toward the target when it is in range."""
        # Vector pointing from the particle to the target.
        self._delta.copy(self.target).sub(p.pos)
        dist_sq = self._delta.mag_squared()
        # Skip particles essentially at the target or outside the radius.
        if 0.000001 < dist_sq < self.radiusSq:
            # Force falls off linearly with squared distance.
            self._delta.norm().scale(1.0 - dist_sq / self.radiusSq)
            p.acc.add(self._delta.scale(self.strength))
| {
"repo_name": "gregroper/Pycipia",
"path": "behaviour/Attraction.py",
"copies": "1",
"size": "1041",
"license": "mit",
"hash": -9118255832838071000,
"line_mean": 28.7428571429,
"line_max": 66,
"alpha_frac": 0.6013448607,
"autogenerated": false,
"ratio": 3.9283018867924526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5029646747492452,
"avg_score": null,
"num_lines": null
} |
# Attract mode Scriptlet for STTNG
from mpf.system.scriptlet import Scriptlet
from mpf.system.show_controller import Playlist
class Attract(Scriptlet):
    """Attract-mode scriptlet: lights the GI and rebalances ball devices."""

    def on_load(self):
        """Register start/stop handlers for the Attract machine-flow mode."""
        self.machine.events.add_handler('machineflow_Attract_start', self.start)
        self.machine.events.add_handler('machineflow_Attract_stop', self.stop)
        # Diverter PWM test hooks, kept for reference:
        # self.machine.switch_controller.add_switch_handler(
        #     switch_name='s_buyIn', state=1, ms=0,
        #     callback=self.enable_diverter)
        # self.machine.switch_controller.add_switch_handler(
        #     switch_name='s_buyIn', state=0, ms=0,
        #     callback=self.disable_diverter)

    def _balance_device(self, name):
        # Helper: leave ball device *name* holding exactly one ball --
        # eject any surplus, request a ball if it is empty.
        device = self.machine.balldevices[name]
        if device.balls > 1:
            device.eject(device.balls - 1)
        elif not device.balls:
            device.request_ball()

    def start(self):
        """Turn on all GI and rebalance the three VUK ball devices.

        The three identical copy-pasted stanzas of the original are now
        one call each to _balance_device.
        """
        for gi in self.machine.gi:
            gi.on()
        # Need to add some logic here, like "if not sw_leftLock = active then"
        self._balance_device('bd_leftVUK')
        self._balance_device('bd_leftCannonVUK')
        self._balance_device('bd_rightCannonVUK')

    def stop(self):
        """Nothing to tear down when attract mode ends."""
        pass

    def enable_diverter(self):
        """Pulse-width-modulate the under-playfield diverter coil (test aid)."""
        self.machine.coils['c_underDivertorBottom'].pwm(1,2)

    def disable_diverter(self):
        """Release the under-playfield diverter coil (test aid)."""
        self.machine.coils['c_underDivertorBottom'].disable()
| {
"repo_name": "GabeKnuth/STTNG",
"path": "scriptlets/attract.py",
"copies": "1",
"size": "2312",
"license": "mit",
"hash": -3370643827637617700,
"line_mean": 38.186440678,
"line_max": 81,
"alpha_frac": 0.6388408304,
"autogenerated": false,
"ratio": 3.312320916905444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9434785119253772,
"avg_score": 0.003275325610334302,
"num_lines": 59
} |
# Pipe-delimited seed data: each non-blank line is
# "<attribute name>|<value>|<value>|...".
ATTRIB_DATA = """
Faction|Amarr Empire|Caldari State|Gallente Federation|Minmatar Republic|Angel Cartel|Serpentis|Guristas|Sansha's Nation|Blood Raider Covenant|Outer Ring Excavations
Meta Group|T1|T2|Empire Faction|Pirate Faction|Deadspace
Profession|Combat|Mining|Industry|Exploration
Miscellaneous|Credit-Only|Monocle Prize|Player-designed
"""
def populate_default_attribs():
    """Seed the attrib / attrib_value tables from ATTRIB_DATA, skipping rows
    that already exist, then commit.

    NOTE(review): relies on a global `db` (DAL connection) defined elsewhere
    in the application -- confirm before reuse.
    """
    for line in ATTRIB_DATA.split('\n'):
        line = line.strip()
        if not line: continue
        line = line.split('|')
        # First field is the attribute name; the rest are its values.
        att_name = line[0]
        att_vals = line[1:]
        if db(db.attrib.name==att_name).count()==0:
            print "Adding attribute: ", att_name
            att_id = db.attrib.insert(name=att_name)
        else:
            print "Attribute already exists: ", att_name
            # NOTE(review): this branch assigns a Row, not a bare id --
            # presumably the DAL coerces it in the comparison below; verify.
            att_id = db(db.attrib.name==att_name).select().first()
        for val in att_vals:
            if db((db.attrib_value.name==val) & (db.attrib_value.attrib==att_id)).count()==0:
                print " -- Adding attribute value: ", val
                db.attrib_value.insert(attrib=att_id, name=val)
            else:
                print " -- Attribute value already exists: ", val
    db.commit()
    print 'Done.'
def nuke_attribs():
    """Delete ALL attribute, attribute-value and reel-metadata rows.

    Destructive development helper; commits immediately.
    """
    db.attrib.truncate()
    db.attrib_value.truncate()
    db.reel_metadata.truncate()
    db.commit()
| {
"repo_name": "fsufitch/eve-shipspinning",
"path": "models/default_attribs.py",
"copies": "1",
"size": "1345",
"license": "mit",
"hash": -9064605418605556000,
"line_mean": 38.5588235294,
"line_max": 165,
"alpha_frac": 0.6252788104,
"autogenerated": false,
"ratio": 3.3209876543209877,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44462664647209876,
"avg_score": null,
"num_lines": null
} |
#attribute 12dicts appropriately
#make dictionary loading choice
class WordList:
    '''
    Provides subsets of words from an internal dictionary of words
    Internal Dictionary based on 12dicts
    '''
    def __init__(self):
        self._words = self._load_words()

    def __iter__(self):
        # Iterating the list yields the unfiltered (default) word set.
        for word in self.filter_by():
            yield word

    def filter_by(self, min_length = 1, max_length = None,
                  plural_uncountables = True):
        """Yield dictionary words filtered by length and plurality.

        Words marked with '%' are plural uncountables; they are skipped
        entirely when plural_uncountables is False, otherwise yielded with
        the marker stripped.  Length limits apply to the stripped word.
        """
        for word in self._words:
            # Filter plural uncountables before stripping the marker.
            if not plural_uncountables and '%' in word:
                continue
            word = word.replace('%', '')
            # Filter by (post-strip) length.
            length = len(word)
            if min_length and (length < min_length):
                continue
            if max_length and (max_length < length):
                continue
            yield word

    def _load_words(self):
        import os
        if '__file__' in globals():  # path to this source file
            this_dir = os.path.abspath(os.path.dirname(__file__))
        else:  # relative path when running IDLE, etc.
            this_dir = ''
        # FIX: build the path with os.path.join instead of a hard-coded
        # backslash so it works outside Windows, and close the file
        # deterministically with a context manager (was leaked before).
        file_path = os.path.join(this_dir, 'dictionaries', '2of12inf.txt')
        with open(file_path) as f:
            return f.read().splitlines()
def main():
    """Print WordList samples for several length/plurality filters."""
    words = WordList()
    cases = [
        ('***********Filtered to 1',
         dict(min_length = 1, max_length = 1)),
        ('***********Filtered 18 to 20',
         dict(min_length = 18, max_length = 20)),
        ('***********Filtered 20 to unlimited',
         dict(min_length = 20)),
        ('***********Filtered 18 to 18 and exclude plural uncountable',
         dict(min_length = 18, max_length = 18, plural_uncountables = False)),
    ]
    for header, kwargs in cases:
        print(header)
        for word in words.filter_by(**kwargs):
            print(word)
if __name__ == '__main__':
main()
| {
"repo_name": "kobejohn/BookwormUtility",
"path": "wordlist.py",
"copies": "1",
"size": "2133",
"license": "mit",
"hash": -4297200148732603400,
"line_mean": 27.8243243243,
"line_max": 72,
"alpha_frac": 0.5410220347,
"autogenerated": false,
"ratio": 4.09404990403071,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014362184257829367,
"num_lines": 74
} |
""" Attribute and method access on Python objects from C++.
Note: std::cout type operations currently crash python...
Not sure what is up with this...
"""
from __future__ import absolute_import, print_function
import scipy.weave as weave
#----------------------------------------------------------------------------
# get/set attribute and call methods example
#----------------------------------------------------------------------------
class Foo(object):
    """Tiny demo object: a counter with an increment method."""

    def __init__(self):
        # Counter starts at one.
        self.val = 1

    def inc(self, amount):
        """Add *amount* to val and return the new value."""
        self.val += amount
        return self.val
# Exercise attribute get/set and method calls on a Python object from C++
# via scipy.weave inline code.
# NOTE(review): scipy.weave was removed from SciPy long ago; this demo only
# runs against historical SciPy releases.
obj = Foo()
code = """
py::tuple result(3);
int i = obj.attr("val");
result[0] = i;
py::tuple args(1);
args[0] = 2;
i = obj.mcall("inc",args);
result[1] = i;
obj.set_attr("val",5);
i = obj.attr("val");
result[2] = i;
return_val = result;
"""
print('initial, inc(2), set(5)/get:', weave.inline(code,['obj']))
#----------------------------------------------------------------------------
# indexing of values.
#----------------------------------------------------------------------------
from UserList import UserList
obj = UserList([1,[1,2],"hello"])
code = """
int i;
// find obj length and access each of its items
//std::cout << "UserList items: ";
//for(i = 0; i < obj.length(); i++)
// std::cout << obj[i].str() << " ";
//std::cout << std::endl;
// assign new values to each of its items
for(i = 0; i < obj.length(); i++)
obj[i] = "goodbye";
"""
weave.inline(code,['obj'])
print("obj with new values:", obj)
| {
"repo_name": "newemailjdm/scipy",
"path": "scipy/weave/examples/object.py",
"copies": "100",
"size": "1679",
"license": "bsd-3-clause",
"hash": 3781669792563363300,
"line_mean": 26.9833333333,
"line_max": 77,
"alpha_frac": 0.4377605718,
"autogenerated": false,
"ratio": 4.075242718446602,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005780568378607594,
"num_lines": 60
} |
""" Attribute and method access on Python objects from C++.
Note: std::cout type operations currently crash python...
Not sure what is up with this...
"""
import scipy.weave as weave
#----------------------------------------------------------------------------
# get/set attribute and call methods example
#----------------------------------------------------------------------------
class Foo(object):
    """Small example class; its ``val`` attribute and ``inc`` method are
    exercised from C++ through ``weave.inline``."""

    def __init__(self):
        # starting value inspected and mutated by the inline C++ code
        self.val = 1

    def inc(self, amount):
        """Increment ``val`` by ``amount``; return the new value."""
        self.val += amount
        current = self.val
        return current
# Python 2 variant of the attribute/method-access demo (note the
# print *statement* at the end).  The C++ snippet reads obj.val,
# calls obj.inc(2), then sets obj.val to 5, returning all three
# observed values as a tuple.
obj = Foo()
code = """
py::tuple result(3);
int i = obj.attr("val");
result[0] = i;
py::tuple args(1);
args[0] = 2;
i = obj.mcall("inc",args);
result[1] = i;
obj.set_attr("val",5);
i = obj.attr("val");
result[2] = i;
return_val = result;
"""
# Expected output: (1, 3, 5)
print 'initial, inc(2), set(5)/get:', weave.inline(code,['obj'])
#----------------------------------------------------------------------------
# indexing of values.
#----------------------------------------------------------------------------
# Python 2 variant of the sequence-indexing demo: C++ overwrites each
# element of a UserList in place.
from UserList import UserList
obj = UserList([1,[1,2],"hello"])
code = """
int i;
// find obj length and access each of its items
//std::cout << "UserList items: ";
//for(i = 0; i < obj.length(); i++)
//    std::cout << obj[i].str() << " ";
//std::cout << std::endl;
// assign new values to each of its items
for(i = 0; i < obj.length(); i++)
    obj[i] = "goodbye";
"""
weave.inline(code,['obj'])
print "obj with new values:", obj
| {
"repo_name": "jasonmccampbell/scipy-refactor",
"path": "scipy/weave/examples/object.py",
"copies": "12",
"size": "1619",
"license": "bsd-3-clause",
"hash": -4255223307918579000,
"line_mean": 27.9107142857,
"line_max": 77,
"alpha_frac": 0.427424336,
"autogenerated": false,
"ratio": 4.07808564231738,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00782429143497255,
"num_lines": 56
} |
# Attribute basis to MRL - (do not individually apply to MTP)
# http://www.music.mcgill.ca/~jason/mumt621/papers5/fujishima_1999.pdf ?
# https://www.jstor.org/stable/pdf/40285717.pdf
# Zicheng (Brian) Gao
from MusicRoll import *
import TensionModule
import PPMBasis
import numpy as np
import pickle
import pprint
from tkinter import Tk, Button, Frame, Canvas, Scrollbar
import tkinter.constants as Tkconstants
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
# from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
# Module-level switches.  NOTE: the double-underscore prefix does NOT
# trigger name mangling at module scope; these are ordinary globals read
# by debug_print() and do_basis_label().
__verbose = False          # gate for debug_print output
__block = True             # draw colored state blocks on the plot
__report_interval = 100    # print progress every N time slices
__hard_limit = 1000        # cap on slices processed per tape group
# parameters:
attn_decay = 0.3 # decay by this much from note onsets
n_smoothing = 0.0 # forward smoothing on notes
# candidates must be above b_ambiguous * most_likely_likelihood
# if there are multiple candidates, it is an ambiguous section
# lesser cuts more; higher makes ambiguous sections more likely
# thus, higher is actually more stringent
b_ambiguous = 0.4
# NOTE(review): do_basis_label rebinds max_gap locally, so this global
# value is never actually used by it — confirm whether that is intended.
max_gap = 4
thresh_conf = 0.25
# lower bound for continuation of current hypothesis
persistence = 0.25
def debug_print(*args):
    """Print ``args`` to stdout only when the module-level ``__verbose``
    flag is set.

    Bug fix: the original body called ``print(args)``, which printed the
    argument *tuple* itself (e.g. ``('got', 5, 0.7)``) rather than the
    values.  Unpacking restores normal ``print`` formatting.
    """
    if __verbose:
        print(*args)
def ema(seq, inertia):
    """Apply an exponential moving average over ``seq`` in place.

    Each element (after the first) becomes a blend of the previous
    smoothed value (weight ``inertia``) and its own value
    (weight ``1 - inertia``).  Returns the mutated array.
    """
    fresh_weight = 1 - inertia
    for idx in range(1, np.size(seq, 0)):
        seq[idx] = inertia * seq[idx - 1] + fresh_weight * seq[idx]
    return seq
def apply_with_window(origin, target, vfunction, width, wdirection = 'backwards'):
    """Fill ``target[i]`` with ``vfunction`` applied to a trailing window
    of ``origin``.

    For ``i < width`` the window is the truncated prefix ``origin[0:i]``
    (empty for ``i == 0`` — ``vfunction`` must tolerate an empty slice);
    afterwards it is the full window ``origin[i - width : i]``.

    Bug fix: only ``'backwards'`` was ever implemented, but the original
    silently left ``target`` untouched for any other ``wdirection``
    (including the documented ``'forwards'``).  That now raises instead
    of producing wrong results.  Also guards ``width`` larger than the
    input length, which previously indexed past the end of ``target``.
    """
    assert np.size(origin, 0) == np.size(target, 0)
    if wdirection != 'backwards':
        raise ValueError(
            "unsupported wdirection {0!r}: only 'backwards' is "
            "implemented".format(wdirection))
    n = np.size(origin, 0)
    # truncated windows at the start of the sequence
    for i in range(min(width, n)):
        target[i] = vfunction(origin[0:i])
    # full-width trailing windows for the remainder
    for i in range(width, n):
        target[i] = vfunction(origin[i - width:i])
def trim(vector, factor):
    """Binary mask (as floats) of entries within ``factor`` of the max.

    An entry survives (1.0) when it exceeds ``(1 - factor) * max(vector)``;
    everything else becomes 0.0.
    """
    threshold = (1 - factor) * np.max(vector)
    return (vector > threshold) * 1.0
def matches(belief, actual):
    """Score how well an observation matches a hypothesis.

    Returns ``1 - ||belief - actual||`` (Euclidean norm): 1.0 for a
    perfect match, decreasing — possibly below zero — with distance.
    """
    distance = np.linalg.norm(belief - actual)
    return 1 - distance
def do_basis_label(filename, metric = TensionModule.metric.dissonance):
    """Load a pickled MusicRoll, track a running tonal-basis hypothesis
    through each tape group's time slices, and plot tension, notes,
    basis likelihoods and the resulting labels.

    Side effects: prints progress, draws a matplotlib figure and blocks
    on ``plt.show()`` per tape group.  Labels are only plotted/stored in
    memory — the TODO at module end notes re-pickling is unimplemented.
    """
    pp = pprint.PrettyPrinter(indent=4)
    # assumes ``filename`` is a pickled MusicRoll produced elsewhere in
    # this project — TODO confirm format/version compatibility
    roll = pickle.load(open(filename, 'rb'))
    pp.pprint(vars(roll))
    tens_mod = TensionModule.TensionModule(metric)
    for tempo, group in roll.get_tape_groups().items():
        # NOTE(review): this rebinding makes max_gap local to the whole
        # function, shadowing the module-level max_gap = 4.
        max_gap = 2 * max([tape.min_common for tape in group])
        note_data = MusicRoll.combine_notes(group)
        # Array - newness matters - simulate attentional decay
        # @newness = 0 -> 1-k
        # @newness = 1 -> 1
        # factor = (1-k) + n * k
        #        = 1 - k + nk = 1 - k * (n - 1)
        orig_notes = note_data[...,0] * (1 - attn_decay * (note_data[...,1] - 1) )
        duration = min(np.size(orig_notes, 0), __hard_limit)
        # np.tile(..., 1) copies so the EMA mutation leaves orig_notes intact
        notes = ema(np.tile(orig_notes, 1), n_smoothing)[:duration]
        # smoothing?
        keys = np.r_[:np.size(notes, 1)]
        fig1 = plt.figure(1, figsize = (5, 5))
        # four stacked panels: tension, notes, labels, basis likelihoods
        grid = AxesGrid(fig1, 111,
                        nrows_ncols = (4, 1),
                        axes_pad = 0.05,
                        label_mode = "1",
                        )
        Plot_Bases = grid[3]
        basis_prob = np.zeros((duration, 12))       # likelihoods
        basis_label = np.zeros((duration, 1)) - 1   # labels (-1 = unlabeled)
        tension = np.zeros(duration)

        def axis_basis(quanta):
            # basis likelihood of one time slice's active pitches
            actives = keys[quanta > 0.0001] + 3 # due to midi pitch nonsense - 0 is C
            weights = quanta[quanta > 0.0001]
            return tens_mod.basis_likelihood(np.vstack((actives, weights)).T)

        def label(base, start, end):
            # record + draw a basis label over the half-open span
            if start == 0:
                start -= 1
            Plot_Bases.broken_barh([(start + 1, end - start - 1)], (base + 0.25 , 0.5), facecolors = 'red', alpha = 0.3, linewidth = 0)
            basis_label[start + 1:end] = base

        def bar(time):
            # thin vertical marker where a hypothesis was abandoned
            Plot_Bases.broken_barh([(time, 0.1)], (0, 12), facecolors = 'red', alpha = 0.7, linewidth = 0)

        def block(base, start, end, color):
            # colored state-debug block, suppressed when __block is False
            if __block:
                Plot_Bases.broken_barh([(start + 1, end - start)], (base + 0.25, 0.5), facecolors = color, alpha = 1.0, linewidth = 0)

        # go through the time slices...
        left = 0          # start of the span covered by the current hypothesis
        right = 0         # slice currently under consideration
        reach = 0         # gap-bridging lookahead
        candidate = None  # current basis hypothesis (None = searching)
        confidence = 0 # SHOULD BE USED

        def notes_in_time(start, end):
            return np.sum(note_data[start:end,:,1])

        def label_basis():
            # commit the current span: either the single candidate, or
            # every maximally-likely basis when still ambiguous
            if candidate == None:
                for base in TensionModule.v_basis[b_curr == np.max(b_curr)]:
                    label(base, left - 1, right + 1)
            else:
                label(candidate, left - 1, right + 1)

        def get_cand(notes):
            # return (candidate, confidence)
            return (trim(b_curr, b_ambiguous), np.max(b_curr))

        while right < duration and right < __hard_limit:
            # report
            if right % __report_interval == 0:
                print('{0}/{1}...'.format(right, duration))
            # If we didn't have a candidate, try to check for one
            if candidate == None:
                # get current hypothesis from accumulated notes
                confidence_factor = 1 - 1/(notes_in_time(left, right + 1) + 1)
                section = np.sum(notes[left:right+1], 0)
                b_curr = axis_basis(section) * confidence_factor
                basis_prob[right] = b_curr
                tension[right] = tens_mod.selfTension(section)
                (try_cand, try_conf) = get_cand(b_curr)
                # make sure there is only one candidate, and that it is confident enough
                if np.sum(try_cand) == 1 and try_conf > thresh_conf:
                    # found a candidate
                    debug_print('got', right, try_cand, try_conf)
                    block(-0.5, right - 1, right, 'purple')
                    candidate = TensionModule.v_basis[try_cand > b_ambiguous][0]
                    confidence = try_conf
                else:
                    # no candidate / still ambiguous
                    if np.sum(try_cand) > 1:
                        debug_print('non', right, 'multiple')
                    else:
                        debug_print('non', right, try_conf, '<', thresh_conf)
                    block(-0.5, right - 1, right, 'blue')
            # If there is a candidate, check to see if the next observed slice follows
            else:
                reach = 0
                similarity = -1
                # attempt to bridge gap if dissimilarity is seen
                while reach < max_gap and right + reach < duration and similarity < persistence:
                    # check if the following slice fits the hypothesis
                    # section = np.vstack((notes[left:right+1], notes[right + reach]))
                    # b_next = axis_basis(np.sum(section, 0))
                    b_next = axis_basis(notes[right + reach])
                    (try_cand, try_conf) = get_cand(b_next)
                    # similarity = matches(trim(b_curr, b_ambiguous, 1), b_next)
                    similarity = b_next[candidate]
                    debug_print('chk', right, right + reach, 'cnd', candidate, similarity)
                    reach += 1
                # exited due to similarity - can extend
                if similarity >= persistence or right + reach >= duration:
                    block(-0.5, right - 1, right, 'green')
                    debug_print('ext', right, similarity)
                    # all's right - we can aggregate this slice
                    section = np.sum(notes[left:right+1], 0)
                    # basis_prob[right] = b_curr = axis_basis(section)
                    basis_prob[right] = b_curr = axis_basis(notes[right])
                    tension[right] = tens_mod.selfTension(section)
                # a gap was found and was too large
                elif similarity < persistence and reach >= max_gap:
                    block(0, right - 1, right, 'yellow')
                    debug_print('rev', right, similarity, list(b_next))
                    bar(right)
                    # hypothesis ends: back up, commit the label, restart search
                    right -= 1
                    label_basis()
                    candidate = None
                    left = right + 1
            right += 1
        # back-label
        # label when hitting end
        label_basis()
        # basis_prob = np.apply_along_axis(axis_basis, 1, notes)
        # ema(basis_prob, 0.2)
        grid[0].set_title("{0} group {1}".format(roll.filepath, tempo))
        Plot_Bases.locator_params(axis='y', nbins = 12)
        min_note = min([tape.min_note for tape in group])
        max_note = max([tape.max_note for tape in group])
        # tension curve, note roll, labels, and raw basis likelihoods
        grid[0].plot(np.r_[:duration] + 0.5, 2 * tension / np.max(tension), 'k')
        grid[1].imshow(notes.T[min_note:max_note + 1],
                       interpolation = 'none',
                       cmap = plt.cm.Oranges,
                       origin = 'lower',
                       extent=[0, duration, 0, 12],
                       aspect = 0.5 * duration / 24)
        Plot_Bases.imshow(basis_prob.T,
                          interpolation = 'none',
                          cmap = plt.cm.Greys,
                          origin = 'lower',
                          extent=[0, duration, 0, 12],
                          aspect = 0.5 * duration / 24)
        grid[2].imshow(basis_label.T,
                       interpolation = 'none',
                       cmap = plt.cm.jet,
                       origin = 'lower',
                       extent=[0, duration, 0, 12],
                       aspect = 0.5 * duration / (24 * 3))
        # print(basis_label)
        plt.show()
# TODO label afterwards, and re-pickle
# This is really a classification problem that ought to be addressed with the proper tools
# get it to mark what actions it took - exploring, backlabelling
# prevent "consecutive self-labelling" for example
# and also smooth away hiccups
# from pycallgraph import PyCallGraph
# from pycallgraph.output import GraphvizOutput
# Script entry point: label one or more pickled rolls with the western
# tension metric.  Alternate inputs are kept commented for reference.
if __name__ == '__main__':
    # with PyCallGraph(output=GraphvizOutput(output_file = "BASIS.png")):
    #     do_basis_label('./mid/bach/aof/can1.mrl', dissonance_metric)
    do_basis_label('./mid/moldau_single.mrl', TensionModule.metric.western)
    do_basis_label('./mid/moldau_accomp.mrl', TensionModule.metric.western)
    # do_basis_label('./mid/bach/aof/can1.mrl', TensionModule.metric.western)
    # do_basis_label('./mid/oldyuanxian2.mrl', TensionModule.metric.western)
    # do_basis_label('./mid/mary.mrl')
    # do_basis_label('./mid/ambigious_test.mrl', TensionModule.metric.western) # lowsim 0.3 - 0.35
    # do_basis_label('./mid/ivivi.mrl', TensionModule.metric.western) # lowsim 0.5

# Author's open items, kept as a module-level note string.
"""
TODO: Confidence in labelling - should be used
TODO: Make sure no OTHER candidate surpasses the first before extending
"""
"""
Entirely hopeless; needs restart.
ON NEXT TRY:
1: Interleave note activation times in MusicRoll instead of producing the actual timeseries.
(Timeseries for use with neural model)
2: Bias towards "sensible" shifts from previous basis (circle of fifths distance)
""" | {
"repo_name": "ichaelm/MusicPrediction",
"path": "src/midibasis.py",
"copies": "1",
"size": "9495",
"license": "mit",
"hash": -2509689958420297700,
"line_mean": 30.7591973244,
"line_max": 126,
"alpha_frac": 0.6614007372,
"autogenerated": false,
"ratio": 2.909898866074165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4071299603274165,
"avg_score": null,
"num_lines": null
} |
"""Attribute implementation for _Dispatch classes.
The various listener targets for a particular event class are represented
as attributes, which refer to collections of listeners to be fired off.
These collections can exist at the class level as well as at the instance
level. An event is fired off using code like this::
some_object.dispatch.first_connect(arg1, arg2)
Above, ``some_object.dispatch`` would be an instance of ``_Dispatch`` and
``first_connect`` is typically an instance of ``_ListenerCollection``
if event listeners are present, or ``_EmptyListener`` if none are present.
The attribute mechanics here spend effort trying to ensure listener functions
are available with a minimum of function call overhead, that unnecessary
objects aren't created (i.e. many empty per-instance listener collections),
as well as that everything is garbage collectable when owning references are
lost. Other features such as "propagation" of listener functions across
many ``_Dispatch`` instances, "joining" of multiple ``_Dispatch`` instances,
as well as support for subclass propagation (e.g. events assigned to
``Pool`` vs. ``QueuePool``) are all implemented here.
"""
from __future__ import absolute_import
from .. import util
from . import registry
from . import legacy
from itertools import chain
import weakref
class RefCollection(object):
    """Mixin supplying a lazily-built weak reference to the collection.

    The weakref carries a callback so the event registry is notified
    when the collection is garbage collected.
    """

    @util.memoized_property
    def ref(self):
        collection_ref = weakref.ref(self, registry._collection_gced)
        return collection_ref
class _DispatchDescriptor(RefCollection):
    """Class-level attributes on :class:`._Dispatch` classes.

    Holds per-class listener lists for a single event name, keyed by
    target class in a :class:`weakref.WeakKeyDictionary` so classes can
    be garbage collected.
    """

    def __init__(self, parent_dispatch_cls, fn):
        self.__name__ = fn.__name__
        argspec = util.inspect_getargspec(fn)
        # drop 'self'; the remaining names define the event's signature
        self.arg_names = argspec.args[1:]
        self.has_kw = bool(argspec.keywords)
        # legacy signatures, sorted newest version first
        self.legacy_signatures = list(reversed(
            sorted(
                getattr(fn, '_legacy_signatures', []),
                key=lambda s: s[0]
            )
        ))
        self.__doc__ = fn.__doc__ = legacy._augment_fn_docs(
            self, parent_dispatch_cls, fn)

        self._clslevel = weakref.WeakKeyDictionary()
        self._empty_listeners = weakref.WeakKeyDictionary()

    def _adjust_fn_spec(self, fn, named):
        """Wrap ``fn`` for keyword dispatch and/or legacy signatures."""
        argspec = util.get_callable_argspec(fn, no_self=True)
        if named:
            fn = self._wrap_fn_for_kw(fn)
        fn = legacy._wrap_fn_for_legacy(self, fn, argspec)
        return fn

    def _wrap_fn_for_kw(self, fn):
        """Adapt positional event arguments into keyword args for ``fn``."""
        def wrap_kw(*args, **kw):
            argdict = dict(zip(self.arg_names, args))
            argdict.update(kw)
            return fn(**argdict)
        return wrap_kw

    def _add_clslevel(self, event_key, add_fn):
        """Apply ``add_fn(listener_list, listen_fn)`` across the target
        class and all of its current subclasses, then register the key.

        Shared implementation for :meth:`.insert` and :meth:`.append`,
        which previously duplicated this breadth-first traversal
        verbatim; behavior is unchanged.
        """
        target = event_key.dispatch_target
        assert isinstance(target, type), \
            "Class-level Event targets must be classes."

        stack = [target]
        while stack:
            cls = stack.pop(0)
            stack.extend(cls.__subclasses__())
            if cls is not target and cls not in self._clslevel:
                # first sighting of this subclass: seed it from its
                # bases, which already include the new listener
                self.update_subclass(cls)
            else:
                if cls not in self._clslevel:
                    self._clslevel[cls] = []
                add_fn(self._clslevel[cls], event_key._listen_fn)
        registry._stored_in_collection(event_key, self)

    def insert(self, event_key, propagate):
        """Prepend a listener so it fires before existing class-level
        listeners.  ``propagate`` is accepted for interface compatibility
        and is not used at the class level (as before).
        """
        self._add_clslevel(
            event_key,
            lambda listeners, listen_fn: listeners.insert(0, listen_fn))

    def append(self, event_key, propagate):
        """Append a listener after existing class-level listeners."""
        self._add_clslevel(
            event_key,
            lambda listeners, listen_fn: listeners.append(listen_fn))

    def update_subclass(self, target):
        """Populate ``target``'s listener list from its MRO bases."""
        if target not in self._clslevel:
            self._clslevel[target] = []
        clslevel = self._clslevel[target]
        for cls in target.__mro__[1:]:
            if cls in self._clslevel:
                clslevel.extend([
                    fn for fn
                    in self._clslevel[cls]
                    if fn not in clslevel
                ])

    def remove(self, event_key):
        """Remove a listener from the target class and all subclasses."""
        target = event_key.dispatch_target

        stack = [target]
        while stack:
            cls = stack.pop(0)
            stack.extend(cls.__subclasses__())
            if cls in self._clslevel:
                self._clslevel[cls].remove(event_key.fn)
        registry._removed_from_collection(event_key, self)

    def clear(self):
        """Clear all class level listeners"""

        to_clear = set()
        for dispatcher in self._clslevel.values():
            to_clear.update(dispatcher)
            dispatcher[:] = []
        registry._clear(self, to_clear)

    def for_modify(self, obj):
        """Return an event collection which can be modified.

        For _DispatchDescriptor at the class level of
        a dispatcher, this returns self.

        """
        return self

    def __get__(self, obj, cls):
        if obj is None:
            return self
        elif obj._parent_cls in self._empty_listeners:
            ret = self._empty_listeners[obj._parent_cls]
        else:
            self._empty_listeners[obj._parent_cls] = ret = \
                _EmptyListener(self, obj._parent_cls)
        # assigning it to __dict__ means
        # memoized for fast re-access.  but more memory.
        obj.__dict__[self.__name__] = ret
        return ret
class _HasParentDispatchDescriptor(object):
def _adjust_fn_spec(self, fn, named):
return self.parent._adjust_fn_spec(fn, named)
class _EmptyListener(_HasParentDispatchDescriptor):
    """Serves as a class-level interface to the events
    served by a _DispatchDescriptor, when there are no
    instance-level events present.

    Is replaced by _ListenerCollection when instance-level
    events are added.

    """

    def __init__(self, parent, target_cls):
        if target_cls not in parent._clslevel:
            parent.update_subclass(target_cls)
        self.parent = parent  # the owning _DispatchDescriptor
        self.parent_listeners = parent._clslevel[target_cls]
        self.name = parent.__name__
        # immutable empties: this shared collection never holds
        # instance-level listeners of its own
        self.propagate = frozenset()
        self.listeners = ()

    def for_modify(self, obj):
        """Return an event collection which can be modified.

        For _EmptyListener at the instance level of
        a dispatcher, this generates a new
        _ListenerCollection, applies it to the instance,
        and returns it.

        """
        result = _ListenerCollection(self.parent, obj._parent_cls)
        if obj.__dict__[self.name] is self:
            obj.__dict__[self.name] = result
        return result

    def _needs_modify(self, *args, **kw):
        raise NotImplementedError("need to call for_modify()")

    # every mutating operation is invalid on the shared empty collection
    exec_once = insert = append = remove = clear = _needs_modify

    def __call__(self, *args, **kw):
        """Execute this event (class-level listeners only)."""
        for listener in self.parent_listeners:
            listener(*args, **kw)

    def __len__(self):
        return len(self.parent_listeners)

    def __iter__(self):
        return iter(self.parent_listeners)

    def __bool__(self):
        return bool(self.parent_listeners)

    __nonzero__ = __bool__
class _CompoundListener(_HasParentDispatchDescriptor):
    """Base for collections combining class-level (parent) listeners
    with local ones; class-level listeners always fire first."""

    _exec_once = False

    def exec_once(self, *args, **kw):
        """Execute this event, but only if it has not been
        executed already for this collection."""
        if self._exec_once:
            return
        self(*args, **kw)
        # as in the original, the flag is only set after a
        # successful (non-raising) execution
        self._exec_once = True

    def __call__(self, *args, **kw):
        """Execute this event."""
        for listener in chain(self.parent_listeners, self.listeners):
            listener(*args, **kw)

    def __len__(self):
        return len(self.parent_listeners) + len(self.listeners)

    def __iter__(self):
        return chain(self.parent_listeners, self.listeners)

    def __bool__(self):
        return bool(self.listeners or self.parent_listeners)

    __nonzero__ = __bool__
class _ListenerCollection(RefCollection, _CompoundListener):
    """Instance-level attributes on instances of :class:`._Dispatch`.

    Represents a collection of listeners.

    As of 0.7.9, _ListenerCollection is only first
    created via the _EmptyListener.for_modify() method.

    """

    def __init__(self, parent, target_cls):
        if target_cls not in parent._clslevel:
            parent.update_subclass(target_cls)
        self.parent = parent
        self.parent_listeners = parent._clslevel[target_cls]
        self.name = parent.__name__
        self.listeners = []
        self.propagate = set()

    def for_modify(self, obj):
        """Return an event collection which can be modified.

        For _ListenerCollection at the instance level of
        a dispatcher, this returns self.

        """
        return self

    def _update(self, other, only_propagate=True):
        """Populate from the listeners in another :class:`_Dispatch`
        object."""
        current = self.listeners
        current_set = set(current)
        self.propagate.update(other.propagate)
        # NOTE(review): parentheses reproduce the original's operator
        # precedence, i.e. ``(A and B) or C`` — confirm this is the
        # intended filter rather than ``A and (B or C)``.
        incoming = [
            listener for listener in other.listeners
            if (listener not in current_set and not only_propagate)
            or listener in self.propagate
        ]
        current.extend(incoming)
        to_associate = other.propagate.union(incoming)
        registry._stored_in_collection_multi(self, other, to_associate)

    def insert(self, event_key, propagate):
        if event_key._listen_fn not in self.listeners:
            event_key.prepend_to_list(self, self.listeners)
            if propagate:
                self.propagate.add(event_key._listen_fn)

    def append(self, event_key, propagate):
        if event_key._listen_fn not in self.listeners:
            event_key.append_to_list(self, self.listeners)
            if propagate:
                self.propagate.add(event_key._listen_fn)

    def remove(self, event_key):
        self.listeners.remove(event_key._listen_fn)
        self.propagate.discard(event_key._listen_fn)
        registry._removed_from_collection(event_key, self)

    def clear(self):
        registry._clear(self, self.listeners)
        self.propagate.clear()
        self.listeners[:] = []
class _JoinedDispatchDescriptor(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, cls):
if obj is None:
return self
else:
obj.__dict__[self.name] = ret = _JoinedListener(
obj.parent, self.name,
getattr(obj.local, self.name)
)
return ret
class _JoinedListener(_CompoundListener):
    """Listener collection that joins a local collection with a live
    view of a parent dispatcher's listeners for the same event name."""

    _exec_once = False

    def __init__(self, parent, name, local):
        self.parent = parent
        self.name = name
        self.local = local
        # the local side plays the role of "parent listeners" so the
        # _CompoundListener protocol fires it first
        self.parent_listeners = self.local

    @property
    def listeners(self):
        # always reflect the parent's *current* collection
        return getattr(self.parent, self.name)

    def _adjust_fn_spec(self, fn, named):
        return self.local._adjust_fn_spec(fn, named)

    def for_modify(self, obj):
        # upgrade the local side in place, keeping both aliases in sync
        self.local = self.parent_listeners = self.local.for_modify(obj)
        return self

    def insert(self, event_key, propagate):
        # mutations apply to the local collection only
        self.local.insert(event_key, propagate)

    def append(self, event_key, propagate):
        self.local.append(event_key, propagate)

    def remove(self, event_key):
        self.local.remove(event_key)

    def clear(self):
        raise NotImplementedError()
| {
"repo_name": "mitsuhiko/sqlalchemy",
"path": "lib/sqlalchemy/event/attr.py",
"copies": "2",
"size": "11965",
"license": "mit",
"hash": -8599331259657875000,
"line_mean": 31.7808219178,
"line_max": 77,
"alpha_frac": 0.5982448809,
"autogenerated": false,
"ratio": 4.1938310550297935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5792075935929794,
"avg_score": null,
"num_lines": null
} |
"""Attribute/instance expiration, deferral of attributes, etc."""
from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy.testing.util import gc_collect
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, String, ForeignKey, exc as sa_exc, FetchedValue
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.schema import Column
from sqlalchemy.orm import mapper, relationship, create_session, \
attributes, deferred, exc as orm_exc, defer, undefer,\
strategies, state, lazyload, backref, Session
from sqlalchemy.testing import fixtures
from test.orm import _fixtures
from sqlalchemy.sql import select
class ExpireTest(_fixtures.FixtureTest):
    def test_expire(self):
        """Whole-instance expire: pending scalar and collection changes
        are discarded; attributes reload lazily on next access."""
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(User, users, properties={
            'addresses':relationship(Address, backref='user'),
        })
        mapper(Address, addresses)

        sess = create_session()
        u = sess.query(User).get(7)
        assert len(u.addresses) == 1
        u.name = 'foo'
        del u.addresses[0]
        sess.expire(u)

        assert 'name' not in u.__dict__

        def go():
            assert u.name == 'jack'
        # the expired attribute loads with exactly one SELECT
        self.assert_sql_count(testing.db, go, 1)
        assert 'name' in u.__dict__

        u.name = 'foo'
        sess.flush()
        # change the value in the DB
        users.update(users.c.id==7, values=dict(name='jack')).execute()

        sess.expire(u)
        # object isn't refreshed yet, using dict to bypass trigger
        assert u.__dict__.get('name') != 'jack'
        assert 'name' in attributes.instance_state(u).expired_attributes

        # any query against the mapped class refreshes expired rows
        sess.query(User).all()
        # test that it refreshed
        assert u.__dict__['name'] == 'jack'
        assert 'name' not in attributes.instance_state(u).expired_attributes

        def go():
            assert u.name == 'jack'
        # fully refreshed: no further SQL needed
        self.assert_sql_count(testing.db, go, 0)
    def test_persistence_check(self):
        """expire() on an expunged (non-persistent) object raises."""
        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        s = create_session()
        u = s.query(User).get(7)
        s.expunge_all()
        assert_raises_message(sa_exc.InvalidRequestError,
                              r"is not persistent within this Session",
                              s.expire, u)

    def test_get_refreshes(self):
        """get() on an expired instance emits one refreshing SELECT,
        after which attributes are unexpired."""
        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        s = create_session(autocommit=False)
        u = s.query(User).get(10)
        s.expire_all()

        def go():
            u = s.query(User).get(10)  # get() refreshes
        self.assert_sql_count(testing.db, go, 1)

        def go():
            eq_(u.name, 'chuck')  # attributes unexpired
        self.assert_sql_count(testing.db, go, 0)

        def go():
            u = s.query(User).get(10)  # expire flag reset, so not expired
        self.assert_sql_count(testing.db, go, 0)

    def test_get_on_deleted_expunges(self):
        """get() on an expired row deleted behind the session's back
        returns None and expunges the stale instance."""
        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        s = create_session(autocommit=False)
        u = s.query(User).get(10)

        s.expire_all()
        s.execute(users.delete().where(User.id==10))

        # object is gone, get() returns None, removes u from session
        assert u in s
        assert s.query(User).get(10) is None
        assert u not in s  # and expunges

    def test_refresh_on_deleted_raises(self):
        """Attribute access on an expired instance whose row was deleted
        raises ObjectDeletedError."""
        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        s = create_session(autocommit=False)
        u = s.query(User).get(10)
        s.expire_all()

        # NOTE(review): duplicated expire_all() call — harmless but
        # likely unintended.
        s.expire_all()
        s.execute(users.delete().where(User.id==10))

        # raises ObjectDeletedError
        assert_raises_message(
            sa.orm.exc.ObjectDeletedError,
            "Instance '<User at .*?>' has been "
            "deleted, or its row is otherwise not present.",
            getattr, u, 'name'
        )

    def test_rollback_undoes_expunge_from_deleted(self):
        """rollback() restores an instance that was expunged because its
        row appeared deleted."""
        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        s = create_session(autocommit=False)
        u = s.query(User).get(10)
        s.expire_all()
        s.execute(users.delete().where(User.id==10))

        # do a get()/remove u from session
        assert s.query(User).get(10) is None
        assert u not in s

        s.rollback()

        assert u in s
        # but now its back, rollback has occurred, the
        # _remove_newly_deleted is reverted
        eq_(u.name, 'chuck')
    def test_deferred(self):
        """test that unloaded, deferred attributes aren't included in the
        expiry list."""

        Order, orders = self.classes.Order, self.tables.orders

        mapper(Order, orders, properties={
            'description':deferred(orders.c.description)})

        s = create_session()
        o1 = s.query(Order).first()
        assert 'description' not in o1.__dict__
        s.expire(o1)
        # refreshing the expired scalar must not drag the deferred
        # column along with it
        assert o1.isopen is not None
        assert 'description' not in o1.__dict__
        assert o1.description

    def test_deferred_notfound(self):
        """Loading a deferred attribute for a deleted row raises
        ObjectDeletedError."""
        users, User = self.tables.users, self.classes.User

        mapper(User, users, properties={
            'name':deferred(users.c.name)
        })
        s = create_session(autocommit=False)
        u = s.query(User).get(10)

        assert 'name' not in u.__dict__
        s.execute(users.delete().where(User.id==10))
        assert_raises_message(
            sa.orm.exc.ObjectDeletedError,
            "Instance '<User at .*?>' has been "
            "deleted, or its row is otherwise not present.",
            getattr, u, 'name'
        )

    def test_lazyload_autoflushes(self):
        """Lazy-loading an expired collection autoflushes pending
        changes first, so the reload sees them."""
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(User, users, properties={
            'addresses':relationship(Address,
                                     order_by=addresses.c.email_address)
        })
        mapper(Address, addresses)
        s = create_session(autoflush=True, autocommit=False)
        u = s.query(User).get(8)
        adlist = u.addresses
        eq_(adlist, [
            Address(email_address='ed@bettyboop.com'),
            Address(email_address='ed@lala.com'),
            Address(email_address='ed@wood.com'),
        ])
        a1 = u.addresses[2]
        a1.email_address = 'aaaaa'
        s.expire(u, ['addresses'])
        # the dirty email_address was flushed before the reload, so the
        # new ordering reflects it
        eq_(u.addresses, [
            Address(email_address='aaaaa'),
            Address(email_address='ed@bettyboop.com'),
            Address(email_address='ed@lala.com'),
        ])
    def test_refresh_collection_exception(self):
        """test graceful failure for currently unsupported
        immediate refresh of a collection"""

        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(User, users, properties={
            'addresses':relationship(Address, order_by=addresses.c.email_address)
        })
        mapper(Address, addresses)
        s = create_session(autoflush=True, autocommit=False)
        u = s.query(User).get(8)
        assert_raises_message(sa_exc.InvalidRequestError,
                              "properties specified for refresh",
                              s.refresh, u, ['addresses'])

        # in contrast to a regular query with no columns
        assert_raises_message(sa_exc.InvalidRequestError,
                              "no columns with which to SELECT", s.query().all)

    def test_refresh_cancels_expire(self):
        """An explicit refresh() clears the expired state, so later
        access emits no SQL."""
        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        s = create_session()
        u = s.query(User).get(7)
        s.expire(u)
        s.refresh(u)

        def go():
            u = s.query(User).get(7)
            eq_(u.name, 'jack')
        self.assert_sql_count(testing.db, go, 0)

    def test_expire_doesntload_on_set(self):
        """Assigning to an expired attribute must not trigger a load."""
        User, users = self.classes.User, self.tables.users

        mapper(User, users)

        sess = create_session()
        u = sess.query(User).get(7)

        sess.expire(u, attribute_names=['name'])

        def go():
            u.name = 'somenewname'
        self.assert_sql_count(testing.db, go, 0)
        sess.flush()
        sess.expunge_all()
        assert sess.query(User).get(7).name == 'somenewname'

    def test_no_session(self):
        """Accessing an expired attribute on a detached instance raises
        DetachedInstanceError."""
        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        sess = create_session()
        u = sess.query(User).get(7)

        sess.expire(u, attribute_names=['name'])
        sess.expunge(u)
        assert_raises(orm_exc.DetachedInstanceError, getattr, u, 'name')
    def test_pending_raises(self):
        """Expiring a pending (never-flushed) instance raises."""
        users, User = self.tables.users, self.classes.User

        # this was the opposite in 0.4, but the reasoning there seemed off.
        # expiring a pending instance makes no sense, so should raise
        mapper(User, users)
        sess = create_session()
        u = User(id=15)
        sess.add(u)
        assert_raises(sa_exc.InvalidRequestError, sess.expire, u, ['name'])

    def test_no_instance_key(self):
        User, users = self.classes.User, self.tables.users

        # this tests an artificial condition such that
        # an instance is pending, but has expired attributes.  this
        # is actually part of a larger behavior when postfetch needs to
        # occur during a flush() on an instance that was just inserted
        mapper(User, users)
        sess = create_session()
        u = sess.query(User).get(7)

        sess.expire(u, attribute_names=['name'])
        sess.expunge(u)
        # simulate the pending-with-expired-attributes condition
        attributes.instance_state(u).key = None
        assert 'name' not in u.__dict__
        sess.add(u)
        # the expired attribute can still be loaded via the PK columns
        assert u.name == 'jack'

    def test_no_instance_key_no_pk(self):
        users, User = self.tables.users, self.classes.User

        # same as test_no_instance_key, but the PK columns
        # are absent.  ensure an error is raised.
        mapper(User, users)
        sess = create_session()
        u = sess.query(User).get(7)
        sess.expire(u, attribute_names=['name', 'id'])
        sess.expunge(u)
        attributes.instance_state(u).key = None
        assert 'name' not in u.__dict__
        sess.add(u)
        # with no PK available the load cannot proceed
        assert_raises(sa_exc.InvalidRequestError, getattr, u, 'name')
    def test_expire_preserves_changes(self):
        """test that the expire load operation doesn't revert post-expire changes"""

        Order, orders = self.classes.Order, self.tables.orders

        mapper(Order, orders)
        sess = create_session()
        o = sess.query(Order).get(3)
        sess.expire(o)

        o.description = "order 3 modified"

        def go():
            assert o.isopen == 1
        self.assert_sql_count(testing.db, go, 1)
        # the refresh must not clobber the in-Python modification
        assert o.description == 'order 3 modified'

        del o.description
        assert "description" not in o.__dict__
        sess.expire(o, ['isopen'])
        sess.query(Order).all()
        assert o.isopen == 1
        # the deletion of 'description' survives the partial refresh
        assert "description" not in o.__dict__

        assert o.description is None

        o.isopen=15
        sess.expire(o, ['isopen', 'description'])
        o.description = 'some new description'
        sess.query(Order).all()
        assert o.isopen == 1
        assert o.description == 'some new description'

        sess.expire(o, ['isopen', 'description'])
        sess.query(Order).all()
        del o.isopen

        def go():
            assert o.isopen is None
        self.assert_sql_count(testing.db, go, 0)

        o.isopen=14
        sess.expire(o)
        o.description = 'another new description'
        sess.query(Order).all()
        assert o.isopen == 1
        assert o.description == 'another new description'

    def test_expire_committed(self):
        """test that the committed state of the attribute receives the most recent DB data"""

        orders, Order = self.tables.orders, self.classes.Order

        mapper(Order, orders)
        sess = create_session()
        o = sess.query(Order).get(3)
        sess.expire(o)

        orders.update().execute(description='order 3 modified')
        assert o.isopen == 1
        # the refreshed committed state reflects the out-of-band UPDATE
        assert attributes.instance_state(o).dict['description'] == 'order 3 modified'

        def go():
            sess.flush()
        self.assert_sql_count(testing.db, go, 0)
def test_expire_cascade(self):
"""expire() on a parent cascades along "refresh-expire" to loaded children."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address, cascade="all, refresh-expire")
})
mapper(Address, addresses)
s = create_session()
u = s.query(User).get(8)
assert u.addresses[0].email_address == 'ed@wood.com'
# pending in-memory change on the child is discarded by the cascaded expire
u.addresses[0].email_address = 'someotheraddress'
s.expire(u)
assert u.addresses[0].email_address == 'ed@wood.com'
def test_refresh_cascade(self):
    """refresh() on a parent cascades along "refresh-expire" to loaded children."""
    users, addresses = self.tables.users, self.tables.addresses
    User, Address = self.classes.User, self.classes.Address

    mapper(Address, addresses)
    mapper(User, users, properties={
        'addresses': relationship(Address, cascade="all, refresh-expire")
    })

    session = create_session()
    user = session.query(User).get(8)
    child = user.addresses[0]
    assert child.email_address == 'ed@wood.com'

    # make an in-memory change on the child, then refresh the parent;
    # the cascade should overwrite the pending change on the child too.
    child.email_address = 'someotheraddress'
    session.refresh(user)
    assert child.email_address == 'ed@wood.com'
def test_expire_cascade_pending_orphan(self):
    """expire() with delete-orphan cascade expunges a pending child."""
    self._test_cascade_to_pending(
        'save-update, refresh-expire, delete, delete-orphan', True)
def test_refresh_cascade_pending_orphan(self):
    """refresh() with delete-orphan cascade expunges a pending child."""
    self._test_cascade_to_pending(
        'save-update, refresh-expire, delete, delete-orphan', False)
def test_expire_cascade_pending(self):
    """expire() without delete-orphan leaves a pending child in the session."""
    self._test_cascade_to_pending('save-update, refresh-expire', True)
def test_refresh_cascade_pending(self):
    """refresh() without delete-orphan leaves a pending child in the session."""
    self._test_cascade_to_pending('save-update, refresh-expire', False)
def _test_cascade_to_pending(self, cascade, expire_or_refresh):
"""Shared driver: expire or refresh a parent with a pending child.

cascade -- cascade string applied to the relationship
expire_or_refresh -- True to call Session.expire(), False for refresh()
"""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address, cascade=cascade)
})
mapper(Address, addresses)
s = create_session()
u = s.query(User).get(8)
# a pending (never-flushed) child attached to the loaded parent
a = Address(id=12, email_address='foobar')
u.addresses.append(a)
if expire_or_refresh:
s.expire(u)
else:
s.refresh(u)
# with delete-orphan, the pending orphan is expunged entirely;
# otherwise it stays in the session but drops out of the collection
if "delete-orphan" in cascade:
assert a not in s
else:
assert a in s
assert a not in u.addresses
s.flush()
def test_expired_lazy(self):
"""Accessing expired scalar + lazy-loaded collection emits two separate loads."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address, backref='user'),
})
mapper(Address, addresses)
sess = create_session()
u = sess.query(User).get(7)
sess.expire(u)
assert 'name' not in u.__dict__
assert 'addresses' not in u.__dict__
def go():
assert u.addresses[0].email_address == 'jack@bean.com'
assert u.name == 'jack'
# two loads
self.assert_sql_count(testing.db, go, 2)
assert 'name' in u.__dict__
assert 'addresses' in u.__dict__
def test_expired_eager(self):
"""Per-attribute access of expired attrs loads separately, but a Query
over the class refreshes scalar + joined-eager collection in one statement."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address, backref='user', lazy='joined'),
})
mapper(Address, addresses)
sess = create_session()
u = sess.query(User).get(7)
sess.expire(u)
assert 'name' not in u.__dict__
assert 'addresses' not in u.__dict__
def go():
assert u.addresses[0].email_address == 'jack@bean.com'
assert u.name == 'jack'
# two loads, since relationship() + scalar are
# separate right now on per-attribute load
self.assert_sql_count(testing.db, go, 2)
assert 'name' in u.__dict__
assert 'addresses' in u.__dict__
sess.expire(u, ['name', 'addresses'])
assert 'name' not in u.__dict__
assert 'addresses' not in u.__dict__
def go():
# an explicit query refreshes the expired instance as a side effect
sess.query(User).filter_by(id=7).one()
assert u.addresses[0].email_address == 'jack@bean.com'
assert u.name == 'jack'
# one load, since relationship() + scalar are
# together when eager load used with Query
self.assert_sql_count(testing.db, go, 1)
def test_relationship_changes_preserved(self):
"""A collection mutation made after partial expire survives the scalar refresh."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address, backref='user', lazy='joined'),
})
mapper(Address, addresses)
sess = create_session()
u = sess.query(User).get(8)
sess.expire(u, ['name', 'addresses'])
# load the collection, then mutate it while 'name' is still expired
u.addresses
assert 'name' not in u.__dict__
del u.addresses[1]
# touching 'name' triggers the scalar refresh...
u.name
assert 'name' in u.__dict__
# ...but the collection keeps the deletion (2 of 3 remain)
assert len(u.addresses) == 2
def test_joinedload_props_dontload(self):
"""Unexpiring via a PK-attribute hit does not populate the joined-eager collection."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
# relationships currently have to load separately from scalar instances.
# the use case is: expire "addresses". then access it. lazy load
# fires off to load "addresses", but needs foreign key or primary key
# attributes in order to lazy load; hits those attributes, such as
# below it hits "u.id". "u.id" triggers full unexpire operation,
# joinedloads addresses since lazy='joined'. this is all within lazy load
# which fires unconditionally; so an unnecessary joinedload (or
# lazyload) was issued. would prefer not to complicate lazyloading to
# "figure out" that the operation should be aborted right now.
mapper(User, users, properties={
'addresses':relationship(Address, backref='user', lazy='joined'),
})
mapper(Address, addresses)
sess = create_session()
u = sess.query(User).get(8)
sess.expire(u)
u.id
assert 'addresses' not in u.__dict__
u.addresses
assert 'addresses' in u.__dict__
def test_expire_synonym(self):
"""Expiring an instance also covers access through a synonym of the column."""
User, users = self.classes.User, self.tables.users
mapper(User, users, properties={
'uname': sa.orm.synonym('name')
})
sess = create_session()
u = sess.query(User).get(7)
assert 'name' in u.__dict__
assert u.uname == u.name
sess.expire(u)
assert 'name' not in u.__dict__
# change the row out-of-band; both attribute paths see the new value
users.update(users.c.id==7).execute(name='jack2')
assert u.name == 'jack2'
assert u.uname == 'jack2'
assert 'name' in u.__dict__
# this wont work unless we add API hooks through the attr. system to
# provide "expire" behavior on a synonym
# sess.expire(u, ['uname'])
# users.update(users.c.id==7).execute(name='jack3')
# assert u.uname == 'jack3'
def test_partial_expire(self):
"""Expiring a subset of attributes leaves the others (and their pending changes) intact."""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders)
sess = create_session()
o = sess.query(Order).get(3)
sess.expire(o, attribute_names=['description'])
assert 'id' in o.__dict__
assert 'description' not in o.__dict__
assert attributes.instance_state(o).dict['isopen'] == 1
# out-of-band change; the partial refresh picks it up
orders.update(orders.c.id==3).execute(description='order 3 modified')
def go():
assert o.description == 'order 3 modified'
self.assert_sql_count(testing.db, go, 1)
assert attributes.instance_state(o).dict['description'] == 'order 3 modified'
# a pending change on a NON-expired attribute survives the partial refresh
o.isopen = 5
sess.expire(o, attribute_names=['description'])
assert 'id' in o.__dict__
assert 'description' not in o.__dict__
assert o.__dict__['isopen'] == 5
assert attributes.instance_state(o).committed_state['isopen'] == 1
def go():
assert o.description == 'order 3 modified'
self.assert_sql_count(testing.db, go, 1)
assert o.__dict__['isopen'] == 5
assert attributes.instance_state(o).dict['description'] == 'order 3 modified'
assert attributes.instance_state(o).committed_state['isopen'] == 1
sess.flush()
# expiring several attributes refreshes them all in a single SELECT
sess.expire(o, attribute_names=['id', 'isopen', 'description'])
assert 'id' not in o.__dict__
assert 'isopen' not in o.__dict__
assert 'description' not in o.__dict__
def go():
assert o.description == 'order 3 modified'
assert o.id == 3
assert o.isopen == 5
self.assert_sql_count(testing.db, go, 1)
def test_partial_expire_lazy(self):
"""Hitting an expired lazy collection only lazy-loads it, no full refresh."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address, backref='user'),
})
mapper(Address, addresses)
sess = create_session()
u = sess.query(User).get(8)
sess.expire(u, ['name', 'addresses'])
assert 'name' not in u.__dict__
assert 'addresses' not in u.__dict__
# hit the lazy loader. just does the lazy load,
# doesn't do the overall refresh
def go():
assert u.addresses[0].email_address=='ed@wood.com'
self.assert_sql_count(testing.db, go, 1)
assert 'name' not in u.__dict__
# check that mods to expired lazy-load attributes
# only do the lazy load
sess.expire(u, ['name', 'addresses'])
def go():
u.addresses = [Address(id=10, email_address='foo@bar.com')]
self.assert_sql_count(testing.db, go, 1)
sess.flush()
# flush has occurred, and addresses was modified,
# so the addresses collection got committed and is
# no longer expired
def go():
assert u.addresses[0].email_address=='foo@bar.com'
assert len(u.addresses) == 1
self.assert_sql_count(testing.db, go, 0)
# but the name attribute was never loaded and so
# still loads
def go():
assert u.name == 'ed'
self.assert_sql_count(testing.db, go, 1)
def test_partial_expire_eager(self):
"""Same as the lazy case but with a joined-eager relationship."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address, backref='user', lazy='joined'),
})
mapper(Address, addresses)
sess = create_session()
u = sess.query(User).get(8)
sess.expire(u, ['name', 'addresses'])
assert 'name' not in u.__dict__
assert 'addresses' not in u.__dict__
def go():
assert u.addresses[0].email_address=='ed@wood.com'
self.assert_sql_count(testing.db, go, 1)
# check that mods to expired eager-load attributes
# do the refresh
sess.expire(u, ['name', 'addresses'])
def go():
u.addresses = [Address(id=10, email_address='foo@bar.com')]
self.assert_sql_count(testing.db, go, 1)
sess.flush()
# this should ideally trigger the whole load
# but currently it works like the lazy case
def go():
assert u.addresses[0].email_address=='foo@bar.com'
assert len(u.addresses) == 1
self.assert_sql_count(testing.db, go, 0)
def go():
assert u.name == 'ed'
# scalar attributes have their own load
self.assert_sql_count(testing.db, go, 1)
# ideally, this was already loaded, but we arent
# doing it that way right now
#self.assert_sql_count(testing.db, go, 0)
def test_relationships_load_on_query(self):
"""A Query with joinedload() option repopulates expired scalar and collection."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address, backref='user'),
})
mapper(Address, addresses)
sess = create_session()
u = sess.query(User).get(8)
assert 'name' in u.__dict__
u.addresses
assert 'addresses' in u.__dict__
sess.expire(u, ['name', 'addresses'])
assert 'name' not in u.__dict__
assert 'addresses' not in u.__dict__
# the query result includes this instance, refreshing both attrs
(sess.query(User).options(sa.orm.joinedload('addresses')).
filter_by(id=8).all())
assert 'name' in u.__dict__
assert 'addresses' in u.__dict__
def test_partial_expire_deferred(self):
"""Expired deferred columns load together with other expired attrs in one SELECT.

Exercised twice: once with deferred() configured on the mapper, once
applied per-query via the defer() option.
"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties={
'description': sa.orm.deferred(orders.c.description)
})
sess = create_session()
o = sess.query(Order).get(3)
sess.expire(o, ['description', 'isopen'])
assert 'isopen' not in o.__dict__
assert 'description' not in o.__dict__
# test that expired attribute access refreshes
# the deferred
def go():
assert o.isopen == 1
assert o.description == 'order 3'
self.assert_sql_count(testing.db, go, 1)
sess.expire(o, ['description', 'isopen'])
assert 'isopen' not in o.__dict__
assert 'description' not in o.__dict__
# test that the deferred attribute triggers the full
# reload
def go():
assert o.description == 'order 3'
assert o.isopen == 1
self.assert_sql_count(testing.db, go, 1)
sa.orm.clear_mappers()
mapper(Order, orders)
sess.expunge_all()
# same tests, using deferred at the options level
o = sess.query(Order).options(sa.orm.defer('description')).get(3)
assert 'description' not in o.__dict__
# sanity check
def go():
assert o.description == 'order 3'
self.assert_sql_count(testing.db, go, 1)
assert 'description' in o.__dict__
assert 'isopen' in o.__dict__
sess.expire(o, ['description', 'isopen'])
assert 'isopen' not in o.__dict__
assert 'description' not in o.__dict__
# test that expired attribute access refreshes
# the deferred
def go():
assert o.isopen == 1
assert o.description == 'order 3'
self.assert_sql_count(testing.db, go, 1)
sess.expire(o, ['description', 'isopen'])
assert 'isopen' not in o.__dict__
assert 'description' not in o.__dict__
# test that the deferred attribute triggers the full
# reload
def go():
assert o.description == 'order 3'
assert o.isopen == 1
self.assert_sql_count(testing.db, go, 1)
def test_joinedload_query_refreshes(self):
"""A plain Query over the class repopulates an expired joined-eager collection."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address, backref='user', lazy='joined'),
})
mapper(Address, addresses)
sess = create_session()
u = sess.query(User).get(8)
assert len(u.addresses) == 3
sess.expire(u)
assert 'addresses' not in u.__dict__
sess.query(User).filter_by(id=8).all()
assert 'addresses' in u.__dict__
assert len(u.addresses) == 3
@testing.requires.predictable_gc
def test_expire_all(self):
"""expire_all() lets unreferenced child objects be garbage collected."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address, backref='user', lazy='joined',
order_by=addresses.c.id),
})
mapper(Address, addresses)
sess = create_session()
userlist = sess.query(User).order_by(User.id).all()
eq_(self.static.user_address_result, userlist)
eq_(len(list(sess)), 9)
sess.expire_all()
gc_collect()
eq_(len(list(sess)), 4) # since addresses were gc'ed
# re-querying restores the full contents
userlist = sess.query(User).order_by(User.id).all()
u = userlist[1]
eq_(self.static.user_address_result, userlist)
eq_(len(list(sess)), 9)
def test_state_change_col_to_deferred(self):
"""Behavioral test to verify the current activity of loader callables."""
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session()
# deferred attribute option, gets the LoadDeferredColumns
# callable
u1 = sess.query(User).options(defer(User.name)).first()
assert isinstance(
attributes.instance_state(u1).callables['name'],
strategies.LoadDeferredColumns
)
# expire the attr, it gets the InstanceState callable
sess.expire(u1, ['name'])
assert 'name' in attributes.instance_state(u1).expired_attributes
assert 'name' not in attributes.instance_state(u1).callables
# load it, callable is gone
u1.name
assert 'name' not in attributes.instance_state(u1).expired_attributes
assert 'name' not in attributes.instance_state(u1).callables
# same for expire all
sess.expunge_all()
u1 = sess.query(User).options(defer(User.name)).first()
sess.expire(u1)
assert 'name' in attributes.instance_state(u1).expired_attributes
assert 'name' not in attributes.instance_state(u1).callables
# load over it. everything normal.
sess.query(User).first()
assert 'name' not in attributes.instance_state(u1).expired_attributes
assert 'name' not in attributes.instance_state(u1).callables
sess.expunge_all()
u1 = sess.query(User).first()
# for non present, still expires the same way
del u1.name
sess.expire(u1)
assert 'name' in attributes.instance_state(u1).expired_attributes
assert 'name' not in attributes.instance_state(u1).callables
def test_state_deferred_to_col(self):
"""Behavioral test to verify the current activity of loader callables."""
users, User = self.tables.users, self.classes.User
mapper(User, users, properties={'name': deferred(users.c.name)})
sess = create_session()
u1 = sess.query(User).options(undefer(User.name)).first()
assert 'name' not in attributes.instance_state(u1).callables
# mass expire, the attribute was loaded,
# the attribute gets the callable
sess.expire(u1)
assert 'name' in attributes.instance_state(u1).expired_attributes
assert 'name' not in attributes.instance_state(u1).callables
# load it
u1.name
assert 'name' not in attributes.instance_state(u1).expired_attributes
assert 'name' not in attributes.instance_state(u1).callables
# mass expire, attribute was loaded but then deleted,
# the callable goes away - the state wants to flip
# it back to its "deferred" loader.
sess.expunge_all()
u1 = sess.query(User).options(undefer(User.name)).first()
del u1.name
sess.expire(u1)
assert 'name' not in attributes.instance_state(u1).expired_attributes
assert 'name' not in attributes.instance_state(u1).callables
# single attribute expire, the attribute gets the callable
sess.expunge_all()
u1 = sess.query(User).options(undefer(User.name)).first()
sess.expire(u1, ['name'])
assert 'name' in attributes.instance_state(u1).expired_attributes
assert 'name' not in attributes.instance_state(u1).callables
def test_state_noload_to_lazy(self):
"""Behavioral test to verify the current activity of loader callables."""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(
User, users,
properties={'addresses': relationship(Address, lazy='noload')})
mapper(Address, addresses)
sess = create_session()
# the lazyload() option installs a LoadLazyAttribute callable
u1 = sess.query(User).options(lazyload(User.addresses)).first()
assert isinstance(
attributes.instance_state(u1).callables['addresses'],
strategies.LoadLazyAttribute
)
# expire, it stays
sess.expire(u1)
assert 'addresses' not in attributes.instance_state(u1).expired_attributes
assert isinstance(
attributes.instance_state(u1).callables['addresses'],
strategies.LoadLazyAttribute
)
# load over it. callable goes away.
sess.query(User).first()
assert 'addresses' not in attributes.instance_state(u1).expired_attributes
assert 'addresses' not in attributes.instance_state(u1).callables
sess.expunge_all()
# single-attribute expire leaves the lazy callable in place as well
u1 = sess.query(User).options(lazyload(User.addresses)).first()
sess.expire(u1, ['addresses'])
assert 'addresses' not in attributes.instance_state(u1).expired_attributes
assert isinstance(
attributes.instance_state(u1).callables['addresses'],
strategies.LoadLazyAttribute
)
# load the attr, goes away
u1.addresses
assert 'addresses' not in attributes.instance_state(u1).expired_attributes
assert 'addresses' not in attributes.instance_state(u1).callables
class PolymorphicExpireTest(fixtures.MappedTest):
    """Expiration behavior across a joined-table inheritance hierarchy."""

    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        people = Table('people', metadata,
            Column('person_id', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('name', String(50)),
            Column('type', String(30)),
        )
        engineers = Table('engineers', metadata,
            Column('person_id', Integer, ForeignKey('people.person_id'),
                   primary_key=True),
            Column('status', String(30)),
        )

    @classmethod
    def setup_classes(cls):
        class Person(cls.Basic):
            pass

        class Engineer(Person):
            pass

    @classmethod
    def insert_data(cls):
        people, engineers = cls.tables.people, cls.tables.engineers

        people.insert().execute(
            {'person_id':1, 'name':'person1', 'type':'person'},
            {'person_id':2, 'name':'engineer1', 'type':'engineer'},
            {'person_id':3, 'name':'engineer2', 'type':'engineer'},
        )
        engineers.insert().execute(
            {'person_id':2, 'status':'new engineer'},
            {'person_id':3, 'status':'old engineer'},
        )

    @classmethod
    def setup_mappers(cls):
        Person, people, engineers, Engineer = (cls.classes.Person,
                                               cls.tables.people,
                                               cls.tables.engineers,
                                               cls.classes.Engineer)

        mapper(Person, people, polymorphic_on=people.c.type,
               polymorphic_identity='person')
        mapper(Engineer, engineers, inherits=Person,
               polymorphic_identity='engineer')

    def test_poly_deferred(self):
        """Expired subclass attributes refresh via polymorphic loads."""
        Person, people, Engineer = (self.classes.Person,
                                    self.tables.people,
                                    self.classes.Engineer)

        sess = create_session()
        [p1, e1, e2] = sess.query(Person).order_by(people.c.person_id).all()

        sess.expire(p1)
        sess.expire(e1, ['status'])
        sess.expire(e2)

        for p in [p1, e2]:
            assert 'name' not in p.__dict__
        assert 'name' in e1.__dict__
        assert 'status' not in e2.__dict__
        assert 'status' not in e1.__dict__

        # pending change made while 'status' is expired must survive refresh
        e1.name = 'new engineer name'

        def go():
            sess.query(Person).all()
        self.assert_sql_count(testing.db, go, 1)

        for p in [p1, e1, e2]:
            assert 'name' in p.__dict__
        # the polymorphic query alone does not refresh subclass-table columns
        assert 'status' not in e2.__dict__
        assert 'status' not in e1.__dict__

        def go():
            assert e1.name == 'new engineer name'
            assert e2.name == 'engineer2'
            assert e1.status == 'new engineer'
            assert e2.status == 'old engineer'
        self.assert_sql_count(testing.db, go, 2)
        eq_(Engineer.name.get_history(e1),
            (['new engineer name'], (), ['engineer1']))

    def test_no_instance_key(self):
        """Expired attr on a detached-then-re-added instance still refreshes."""
        Engineer = self.classes.Engineer

        sess = create_session()
        e1 = sess.query(Engineer).get(2)

        sess.expire(e1, attribute_names=['name'])
        sess.expunge(e1)
        attributes.instance_state(e1).key = None
        assert 'name' not in e1.__dict__
        sess.add(e1)
        assert e1.name == 'engineer1'

    # BUG FIX: this method was previously also named ``test_no_instance_key``,
    # which shadowed the method above in the class namespace so the first
    # test never ran.  Renamed so both are collected.
    def test_no_instance_key_pk_absent(self):
        """Like test_no_instance_key, but the PK columns are also expired.

        With no primary key value available, refreshing must raise
        InvalidRequestError instead of loading.
        """
        Engineer = self.classes.Engineer

        # same as test_no_instance_key, but the PK columns
        # are absent. ensure an error is raised.
        sess = create_session()
        e1 = sess.query(Engineer).get(2)

        sess.expire(e1, attribute_names=['name', 'person_id'])
        sess.expunge(e1)
        attributes.instance_state(e1).key = None
        assert 'name' not in e1.__dict__
        sess.add(e1)
        assert_raises(sa_exc.InvalidRequestError, getattr, e1, 'name')
class ExpiredPendingTest(_fixtures.FixtureTest):
"""Interaction of expiration with pending (not-yet-flushed) collection members."""
run_define_tables = 'once'
run_setup_classes = 'once'
run_setup_mappers = None
run_inserts = None
def test_expired_pending(self):
"""Expiring a collection also discards pending backref-attached members."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address, backref='user'),
})
mapper(Address, addresses)
sess = create_session()
a1 = Address(email_address='a1')
sess.add(a1)
sess.flush()
u1 = User(name='u1')
a1.user = u1
sess.flush()
# expire 'addresses'. backrefs
# which attach to u1 will expect to be "pending"
sess.expire(u1, ['addresses'])
# attach an Address. now its "pending"
# in user.addresses
a2 = Address(email_address='a2')
a2.user = u1
# expire u1.addresses again. this expires
# "pending" as well.
sess.expire(u1, ['addresses'])
# insert a new row
sess.execute(addresses.insert(), dict(email_address='a3', user_id=u1.id))
# only two addresses pulled from the DB, no "pending"
assert len(u1.addresses) == 2
sess.flush()
sess.expire_all()
assert len(u1.addresses) == 3
class LifecycleTest(fixtures.MappedTest):
"""Loader-lifecycle behavior for attributes never populated at INSERT time."""
@classmethod
def define_tables(cls, metadata):
Table("data", metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(30)),
)
# 'data' column here carries a server-side FetchedValue
Table("data_fetched", metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(30), FetchedValue()),
)
Table("data_defer", metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('data', String(30)),
Column('data2', String(30)),
)
@classmethod
def setup_classes(cls):
class Data(cls.Comparable):
pass
class DataFetched(cls.Comparable):
pass
class DataDefer(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
mapper(cls.classes.Data, cls.tables.data)
mapper(cls.classes.DataFetched, cls.tables.data_fetched)
mapper(cls.classes.DataDefer, cls.tables.data_defer, properties={
"data": deferred(cls.tables.data_defer.c.data)
})
def test_attr_not_inserted(self):
"""An attribute omitted at INSERT reads as None without emitting SQL."""
Data = self.classes.Data
sess = create_session()
d1 = Data()
sess.add(d1)
sess.flush()
# we didn't insert a value for 'data',
# so its not in dict, but also when we hit it, it isn't
# expired because there's no column default on it or anything like that
assert 'data' not in d1.__dict__
def go():
eq_(d1.data, None)
self.assert_sql_count(
testing.db,
go,
0
)
def test_attr_not_inserted_expired(self):
"""After an explicit expire, the same access does emit a SELECT."""
Data = self.classes.Data
sess = create_session()
d1 = Data()
sess.add(d1)
sess.flush()
assert 'data' not in d1.__dict__
# with an expire, we emit
sess.expire(d1)
def go():
eq_(d1.data, None)
self.assert_sql_count(
testing.db,
go,
1
)
def test_attr_not_inserted_fetched(self):
"""A FetchedValue column is post-fetched with one SELECT on access."""
Data = self.classes.DataFetched
sess = create_session()
d1 = Data()
sess.add(d1)
sess.flush()
assert 'data' not in d1.__dict__
def go():
eq_(d1.data, None)
# this one is marked as "fetch" so we emit SQL
self.assert_sql_count(
testing.db,
go,
1
)
def test_cols_missing_in_load(self):
"""Columns absent from a textual SELECT are implicitly expired."""
Data = self.classes.Data
sess = create_session()
d1 = Data(data='d1')
sess.add(d1)
sess.flush()
sess.close()
sess = create_session()
d1 = sess.query(Data).from_statement(select([Data.id])).first()
# cols not present in the row are implicitly expired
def go():
eq_(d1.data, 'd1')
self.assert_sql_count(
testing.db, go, 1
)
def test_deferred_cols_missing_in_load_state_reset(self):
"""populate_existing() resets pending state on a deferred column."""
Data = self.classes.DataDefer
sess = create_session()
d1 = Data(data='d1')
sess.add(d1)
sess.flush()
sess.close()
sess = create_session()
d1 = sess.query(Data).from_statement(
select([Data.id])).options(undefer(Data.data)).first()
d1.data = 'd2'
# the deferred loader has to clear out any state
# on the col, including that 'd2' here
d1 = sess.query(Data).populate_existing().first()
def go():
eq_(d1.data, 'd1')
self.assert_sql_count(
testing.db, go, 1
)
class RefreshTest(_fixtures.FixtureTest):
"""Session.refresh() behavior: state reversion, lazy/eager interaction."""
def test_refresh(self):
"""refresh() reverts pending scalar and collection changes to DB state."""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses), backref='user')
})
s = create_session()
u = s.query(User).get(7)
u.name = 'foo'
a = Address()
assert sa.orm.object_session(a) is None
u.addresses.append(a)
assert a.email_address is None
assert id(a) in [id(x) for x in u.addresses]
s.refresh(u)
# its refreshed, so not dirty
assert u not in s.dirty
# username is back to the DB
assert u.name == 'jack'
assert id(a) not in [id(x) for x in u.addresses]
u.name = 'foo'
u.addresses.append(a)
# now its dirty
assert u in s.dirty
assert u.name == 'foo'
assert id(a) in [id(x) for x in u.addresses]
s.expire(u)
# get the attribute, it refreshes
assert u.name == 'jack'
assert id(a) not in [id(x) for x in u.addresses]
def test_persistence_check(self):
"""refresh() on an expunged (non-persistent) instance raises."""
users, User = self.tables.users, self.classes.User
mapper(User, users)
s = create_session()
u = s.query(User).get(7)
s.expunge_all()
assert_raises_message(sa_exc.InvalidRequestError, r"is not persistent within this Session", lambda: s.refresh(u))
def test_refresh_expired(self):
"""refresh() repopulates attributes of an expired instance."""
User, users = self.classes.User, self.tables.users
mapper(User, users)
s = create_session()
u = s.query(User).get(7)
s.expire(u)
assert 'name' not in u.__dict__
s.refresh(u)
assert u.name == 'jack'
def test_refresh_with_lazy(self):
"""test that when a lazy loader is set as a trigger on an object's attribute
(at the attribute level, not the class level), a refresh() operation doesn't
fire the lazy loader or create any problems"""
User, Address, addresses, users = (self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users)
s = create_session()
mapper(User, users, properties={'addresses':relationship(mapper(Address, addresses))})
q = s.query(User).options(sa.orm.lazyload('addresses'))
u = q.filter(users.c.id==8).first()
def go():
s.refresh(u)
self.assert_sql_count(testing.db, go, 1)
def test_refresh_with_eager(self):
"""test that a refresh/expire operation loads rows properly and sends correct "isnew" state to eager loaders"""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses), lazy='joined')
})
s = create_session()
u = s.query(User).get(8)
assert len(u.addresses) == 3
s.refresh(u)
assert len(u.addresses) == 3
s = create_session()
u = s.query(User).get(8)
assert len(u.addresses) == 3
s.expire(u)
assert len(u.addresses) == 3
def test_refresh_maintains_deferred_options(self):
"""refresh() must preserve per-instance loader options (load_options/load_path)."""
# testing a behavior that may have changed with
# [ticket:3822]
User, Address, Dingaling = self.classes(
"User", "Address", "Dingaling")
users, addresses, dingalings = self.tables(
"users", "addresses", "dingalings")
mapper(User, users, properties={
'addresses': relationship(Address)
})
mapper(Address, addresses, properties={
'dingalings': relationship(Dingaling)
})
mapper(Dingaling, dingalings)
s = create_session()
q = s.query(User).filter_by(name='fred').options(
sa.orm.lazyload('addresses').joinedload("dingalings"))
u1 = q.one()
# "addresses" is not present on u1, but when u1.addresses
# lazy loads, it should also joinedload dingalings. This is
# present in state.load_options and state.load_path. The
# refresh operation should not reset these attributes.
s.refresh(u1)
def go():
eq_(
u1.addresses,
[Address(
email_address='fred@fred.com',
dingalings=[Dingaling(data="ding 2/5")]
)]
)
self.assert_sql_count(testing.db, go, 1)
def test_refresh2(self):
"""test a hang condition that was occurring on expire/refresh"""
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
s = create_session()
mapper(Address, addresses)
mapper(User, users, properties = dict(addresses=relationship(Address,cascade="all, delete-orphan",lazy='joined')) )
u = User()
u.name='Justin'
a = Address(id=10, email_address='lala')
u.addresses.append(a)
s.add(u)
s.flush()
s.expunge_all()
u = s.query(User).filter(User.name=='Justin').one()
s.expire(u)
assert u.name == 'Justin'
s.refresh(u)
| {
"repo_name": "robin900/sqlalchemy",
"path": "test/orm/test_expire.py",
"copies": "2",
"size": "50809",
"license": "mit",
"hash": 3924879550385398300,
"line_mean": 32.7153284672,
"line_max": 123,
"alpha_frac": 0.5622232282,
"autogenerated": false,
"ratio": 4.016521739130435,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5578744967330436,
"avg_score": null,
"num_lines": null
} |
"""Attribute/instance expiration, deferral of attributes, etc."""
from sqlalchemy.test.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy.test.util import gc_collect
import sqlalchemy as sa
from sqlalchemy.test import testing
from sqlalchemy import Integer, String, ForeignKey, exc as sa_exc
from sqlalchemy.test.schema import Table
from sqlalchemy.test.schema import Column
from sqlalchemy.orm import mapper, relation, create_session, \
attributes, deferred, exc as orm_exc, defer, undefer,\
strategies, state, lazyload
from test.orm import _base, _fixtures
class ExpireTest(_fixtures.FixtureTest):
@testing.resolve_artifact_names
def test_expire(self):
"""Basic expire(): pending changes discarded, attrs reload from the DB."""
mapper(User, users, properties={
'addresses':relation(Address, backref='user'),
})
mapper(Address, addresses)
sess = create_session()
u = sess.query(User).get(7)
assert len(u.addresses) == 1
u.name = 'foo'
del u.addresses[0]
sess.expire(u)
assert 'name' not in u.__dict__
def go():
assert u.name == 'jack'
self.assert_sql_count(testing.db, go, 1)
assert 'name' in u.__dict__
u.name = 'foo'
sess.flush()
# change the value in the DB
users.update(users.c.id==7, values=dict(name='jack')).execute()
sess.expire(u)
# object isnt refreshed yet, using dict to bypass trigger
assert u.__dict__.get('name') != 'jack'
assert 'name' in attributes.instance_state(u).expired_attributes
sess.query(User).all()
# test that it refreshed
assert u.__dict__['name'] == 'jack'
assert 'name' not in attributes.instance_state(u).expired_attributes
def go():
assert u.name == 'jack'
self.assert_sql_count(testing.db, go, 0)
@testing.resolve_artifact_names
def test_persistence_check(self):
"""expire() on an expunged (non-persistent) instance raises."""
mapper(User, users)
s = create_session()
u = s.query(User).get(7)
s.expunge_all()
assert_raises_message(sa_exc.InvalidRequestError,
r"is not persistent within this Session", s.expire, u)
@testing.resolve_artifact_names
def test_get_refreshes(self):
"""get() refreshes an expired instance, and expunges it when the row is gone."""
mapper(User, users)
s = create_session(autocommit=False)
u = s.query(User).get(10)
s.expire_all()
def go():
u = s.query(User).get(10) # get() refreshes
self.assert_sql_count(testing.db, go, 1)
def go():
eq_(u.name, 'chuck') # attributes unexpired
self.assert_sql_count(testing.db, go, 0)
def go():
u = s.query(User).get(10) # expire flag reset, so not expired
self.assert_sql_count(testing.db, go, 0)
s.expire_all()
s.execute(users.delete().where(User.id==10))
# object is gone, get() returns None, removes u from session
assert u in s
assert s.query(User).get(10) is None
assert u not in s # and expunges
# add it back
s.add(u)
# nope, raises ObjectDeletedError
assert_raises(sa.orm.exc.ObjectDeletedError, getattr, u, 'name')
# do a get()/remove u from session again
assert s.query(User).get(10) is None
assert u not in s
s.rollback()
assert u in s
# but now its back, rollback has occured, the _remove_newly_deleted
# is reverted
eq_(u.name, 'chuck')
@testing.resolve_artifact_names
def test_deferred(self):
    """test that unloaded, deferred attributes aren't included in the expiry list."""
    mapper(Order, orders, properties={'description':deferred(orders.c.description)})
    s = create_session()
    o1 = s.query(Order).first()
    # deferred column was never loaded
    assert 'description' not in o1.__dict__
    s.expire(o1)
    # unexpiring via attribute access does not drag the deferred column in
    assert o1.isopen is not None
    assert 'description' not in o1.__dict__
    # accessing it directly still loads via the deferred loader
    assert o1.description
@testing.resolve_artifact_names
def test_lazyload_autoflushes(self):
    """Lazy load of an expired collection autoflushes pending changes first,
    so the reloaded collection reflects the modified email_address ordering."""
    mapper(User, users, properties={
        'addresses':relation(Address, order_by=addresses.c.email_address)
    })
    mapper(Address, addresses)
    s = create_session(autoflush=True, autocommit=False)
    u = s.query(User).get(8)
    adlist = u.addresses
    eq_(adlist, [
        Address(email_address='ed@bettyboop.com'),
        Address(email_address='ed@lala.com'),
        Address(email_address='ed@wood.com'),
    ])
    a1 = u.addresses[2]
    a1.email_address = 'aaaaa'
    s.expire(u, ['addresses'])
    # reload autoflushes the change to a1; 'aaaaa' now sorts first
    eq_(u.addresses, [
        Address(email_address='aaaaa'),
        Address(email_address='ed@bettyboop.com'),
        Address(email_address='ed@lala.com'),
    ])
@testing.resolve_artifact_names
def test_refresh_collection_exception(self):
    """test graceful failure for currently unsupported immediate refresh of a collection"""
    mapper(User, users, properties={
        'addresses':relation(Address, order_by=addresses.c.email_address)
    })
    mapper(Address, addresses)
    s = create_session(autoflush=True, autocommit=False)
    u = s.query(User).get(8)
    # refresh() of a relation attribute is not supported
    assert_raises_message(sa_exc.InvalidRequestError, "properties specified for refresh", s.refresh, u, ['addresses'])
    # in contrast to a regular query with no columns
    assert_raises_message(sa_exc.InvalidRequestError, "no columns with which to SELECT", s.query().all)
@testing.resolve_artifact_names
def test_refresh_cancels_expire(self):
    """refresh() after expire() fully un-expires; a later get() emits no SQL."""
    mapper(User, users)
    s = create_session()
    u = s.query(User).get(7)
    s.expire(u)
    s.refresh(u)
    def go():
        u = s.query(User).get(7)
        eq_(u.name, 'jack')
    self.assert_sql_count(testing.db, go, 0)
@testing.resolve_artifact_names
def test_expire_doesntload_on_set(self):
    """Setting an expired attribute does not trigger a load; the new value
    simply replaces it and flushes normally."""
    mapper(User, users)
    sess = create_session()
    u = sess.query(User).get(7)
    sess.expire(u, attribute_names=['name'])
    def go():
        u.name = 'somenewname'
    self.assert_sql_count(testing.db, go, 0)
    sess.flush()
    sess.expunge_all()
    assert sess.query(User).get(7).name == 'somenewname'
@testing.resolve_artifact_names
def test_no_session(self):
    """Accessing an expired attribute on a detached instance raises
    DetachedInstanceError (no session available to load from)."""
    mapper(User, users)
    sess = create_session()
    u = sess.query(User).get(7)
    sess.expire(u, attribute_names=['name'])
    sess.expunge(u)
    assert_raises(orm_exc.DetachedInstanceError, getattr, u, 'name')
@testing.resolve_artifact_names
def test_pending_raises(self):
    """expire() on a pending (never-flushed) instance raises."""
    # this was the opposite in 0.4, but the reasoning there seemed off.
    # expiring a pending instance makes no sense, so should raise
    mapper(User, users)
    sess = create_session()
    u = User(id=15)
    sess.add(u)
    assert_raises(sa_exc.InvalidRequestError, sess.expire, u, ['name'])
@testing.resolve_artifact_names
def test_no_instance_key(self):
    """An instance with expired attributes but no instance key can still
    load those attributes once re-added to a session."""
    # this tests an artificial condition such that
    # an instance is pending, but has expired attributes. this
    # is actually part of a larger behavior when postfetch needs to
    # occur during a flush() on an instance that was just inserted
    mapper(User, users)
    sess = create_session()
    u = sess.query(User).get(7)
    sess.expire(u, attribute_names=['name'])
    sess.expunge(u)
    # simulate the "no key yet" condition artificially
    attributes.instance_state(u).key = None
    assert 'name' not in u.__dict__
    sess.add(u)
    assert u.name == 'jack'
@testing.resolve_artifact_names
def test_expire_preserves_changes(self):
    """test that the expire load operation doesn't revert post-expire changes"""
    mapper(Order, orders)
    sess = create_session()
    o = sess.query(Order).get(3)
    sess.expire(o)
    # modify after expire; the refresh load must not overwrite it
    o.description = "order 3 modified"
    def go():
        assert o.isopen == 1
    self.assert_sql_count(testing.db, go, 1)
    assert o.description == 'order 3 modified'
    # a post-expire delete is likewise preserved across the refresh
    del o.description
    assert "description" not in o.__dict__
    sess.expire(o, ['isopen'])
    sess.query(Order).all()
    assert o.isopen == 1
    assert "description" not in o.__dict__
    assert o.description is None
    o.isopen=15
    sess.expire(o, ['isopen', 'description'])
    o.description = 'some new description'
    sess.query(Order).all()
    assert o.isopen == 1
    assert o.description == 'some new description'
    sess.expire(o, ['isopen', 'description'])
    sess.query(Order).all()
    # delete of an expired attribute: no load is emitted on access
    del o.isopen
    def go():
        assert o.isopen is None
    self.assert_sql_count(testing.db, go, 0)
    o.isopen=14
    sess.expire(o)
    o.description = 'another new description'
    sess.query(Order).all()
    assert o.isopen == 1
    assert o.description == 'another new description'
@testing.resolve_artifact_names
def test_expire_committed(self):
    """test that the committed state of the attribute receives the most recent DB data"""
    mapper(Order, orders)
    sess = create_session()
    o = sess.query(Order).get(3)
    sess.expire(o)
    # change the row behind the session's back; the whereclause must be a
    # criterion expression, consistent with the other tests in this module
    # (was orders.update(id=3), which passes the criterion as a bogus keyword)
    orders.update(orders.c.id==3).execute(description='order 3 modified')
    # refresh pulls in the new value as the *committed* state...
    assert o.isopen == 1
    assert attributes.instance_state(o).dict['description'] == 'order 3 modified'
    # ...so nothing is considered dirty and flush emits no SQL
    def go():
        sess.flush()
    self.assert_sql_count(testing.db, go, 0)
@testing.resolve_artifact_names
def test_expire_cascade(self):
    """'refresh-expire' cascade: expiring the parent also expires children,
    reverting the child's pending attribute change on reload."""
    mapper(User, users, properties={
        'addresses':relation(Address, cascade="all, refresh-expire")
    })
    mapper(Address, addresses)
    s = create_session()
    u = s.query(User).get(8)
    assert u.addresses[0].email_address == 'ed@wood.com'
    u.addresses[0].email_address = 'someotheraddress'
    s.expire(u)
    # attribute access triggers the refresh of u (and, via cascade, addresses)
    u.name
    # the child's un-flushed change was discarded by the cascaded expire
    # (debug print of instance_state(u).dict removed)
    assert u.addresses[0].email_address == 'ed@wood.com'
@testing.resolve_artifact_names
def test_expired_lazy(self):
    """Unexpiring a fully-expired instance with a lazy relation takes two
    loads: one for the scalar refresh, one for the lazy collection."""
    mapper(User, users, properties={
        'addresses':relation(Address, backref='user'),
    })
    mapper(Address, addresses)
    sess = create_session()
    u = sess.query(User).get(7)
    sess.expire(u)
    assert 'name' not in u.__dict__
    assert 'addresses' not in u.__dict__
    def go():
        assert u.addresses[0].email_address == 'jack@bean.com'
        assert u.name == 'jack'
    # two loads
    self.assert_sql_count(testing.db, go, 2)
    assert 'name' in u.__dict__
    assert 'addresses' in u.__dict__
@testing.resolve_artifact_names
def test_expired_eager(self):
    """Eager-loaded relation on an expired instance: per-attribute access
    still costs two loads, but a Query that loads the row refreshes both
    scalar and collection in a single statement."""
    mapper(User, users, properties={
        'addresses':relation(Address, backref='user', lazy=False),
    })
    mapper(Address, addresses)
    sess = create_session()
    u = sess.query(User).get(7)
    sess.expire(u)
    assert 'name' not in u.__dict__
    assert 'addresses' not in u.__dict__
    def go():
        assert u.addresses[0].email_address == 'jack@bean.com'
        assert u.name == 'jack'
    # two loads, since relation() + scalar are
    # separate right now on per-attribute load
    self.assert_sql_count(testing.db, go, 2)
    assert 'name' in u.__dict__
    assert 'addresses' in u.__dict__
    sess.expire(u, ['name', 'addresses'])
    assert 'name' not in u.__dict__
    assert 'addresses' not in u.__dict__
    def go():
        sess.query(User).filter_by(id=7).one()
        assert u.addresses[0].email_address == 'jack@bean.com'
        assert u.name == 'jack'
    # one load, since relation() + scalar are
    # together when eager load used with Query
    self.assert_sql_count(testing.db, go, 1)
@testing.resolve_artifact_names
def test_relation_changes_preserved(self):
    """Collection mutations made after a partial expire survive the later
    refresh of the remaining expired attributes."""
    mapper(User, users, properties={
        'addresses':relation(Address, backref='user', lazy=False),
    })
    mapper(Address, addresses)
    sess = create_session()
    u = sess.query(User).get(8)
    sess.expire(u, ['name', 'addresses'])
    # load the collection, then mutate it while 'name' is still expired
    u.addresses
    assert 'name' not in u.__dict__
    del u.addresses[1]
    # unexpiring 'name' must not reload/reset the mutated collection
    u.name
    assert 'name' in u.__dict__
    assert len(u.addresses) == 2
@testing.resolve_artifact_names
def test_eagerload_props_dontload(self):
    """Behavioral note: expired eager collections still load separately
    from the scalar unexpire operation."""
    # relations currently have to load separately from scalar instances.
    # the use case is: expire "addresses". then access it. lazy load
    # fires off to load "addresses", but needs foreign key or primary key
    # attributes in order to lazy load; hits those attributes, such as
    # below it hits "u.id". "u.id" triggers full unexpire operation,
    # eagerloads addresses since lazy=False. this is all within lazy load
    # which fires unconditionally; so an unnecessary eagerload (or
    # lazyload) was issued. would prefer not to complicate lazyloading to
    # "figure out" that the operation should be aborted right now.
    mapper(User, users, properties={
        'addresses':relation(Address, backref='user', lazy=False),
    })
    mapper(Address, addresses)
    sess = create_session()
    u = sess.query(User).get(8)
    sess.expire(u)
    u.id
    assert 'addresses' not in u.__dict__
    u.addresses
    assert 'addresses' in u.__dict__
@testing.resolve_artifact_names
def test_expire_synonym(self):
    """Expiring the underlying column also expires access through a synonym."""
    mapper(User, users, properties={
        'uname': sa.orm.synonym('name')
    })
    sess = create_session()
    u = sess.query(User).get(7)
    assert 'name' in u.__dict__
    assert u.uname == u.name
    sess.expire(u)
    assert 'name' not in u.__dict__
    users.update(users.c.id==7).execute(name='jack2')
    # both the column attribute and the synonym see the refreshed value
    assert u.name == 'jack2'
    assert u.uname == 'jack2'
    assert 'name' in u.__dict__
    # this wont work unless we add API hooks through the attr. system to
    # provide "expire" behavior on a synonym
    #    sess.expire(u, ['uname'])
    #    users.update(users.c.id==7).execute(name='jack3')
    #    assert u.uname == 'jack3'
@testing.resolve_artifact_names
def test_partial_expire(self):
    """Expiring a subset of attributes refreshes only those attributes,
    leaving loaded values, pending changes and committed_state intact."""
    mapper(Order, orders)
    sess = create_session()
    o = sess.query(Order).get(3)
    sess.expire(o, attribute_names=['description'])
    assert 'id' in o.__dict__
    assert 'description' not in o.__dict__
    assert attributes.instance_state(o).dict['isopen'] == 1
    orders.update(orders.c.id==3).execute(description='order 3 modified')
    def go():
        assert o.description == 'order 3 modified'
    self.assert_sql_count(testing.db, go, 1)
    assert attributes.instance_state(o).dict['description'] == 'order 3 modified'
    # a pending change on an unexpired attribute survives the partial refresh
    o.isopen = 5
    sess.expire(o, attribute_names=['description'])
    assert 'id' in o.__dict__
    assert 'description' not in o.__dict__
    assert o.__dict__['isopen'] == 5
    assert attributes.instance_state(o).committed_state['isopen'] == 1
    def go():
        assert o.description == 'order 3 modified'
    self.assert_sql_count(testing.db, go, 1)
    assert o.__dict__['isopen'] == 5
    assert attributes.instance_state(o).dict['description'] == 'order 3 modified'
    assert attributes.instance_state(o).committed_state['isopen'] == 1
    sess.flush()
    # expiring several attributes refreshes them all in a single load
    sess.expire(o, attribute_names=['id', 'isopen', 'description'])
    assert 'id' not in o.__dict__
    assert 'isopen' not in o.__dict__
    assert 'description' not in o.__dict__
    def go():
        assert o.description == 'order 3 modified'
        assert o.id == 3
        assert o.isopen == 5
    self.assert_sql_count(testing.db, go, 1)
@testing.resolve_artifact_names
def test_partial_expire_lazy(self):
    """Partial expire with a lazy relation: accessing or replacing the
    collection emits only the lazy load, never a full row refresh."""
    mapper(User, users, properties={
        'addresses':relation(Address, backref='user'),
    })
    mapper(Address, addresses)
    sess = create_session()
    u = sess.query(User).get(8)
    sess.expire(u, ['name', 'addresses'])
    assert 'name' not in u.__dict__
    assert 'addresses' not in u.__dict__
    # hit the lazy loader. just does the lazy load,
    # doesn't do the overall refresh
    def go():
        assert u.addresses[0].email_address=='ed@wood.com'
    self.assert_sql_count(testing.db, go, 1)
    assert 'name' not in u.__dict__
    # check that mods to expired lazy-load attributes
    # only do the lazy load
    sess.expire(u, ['name', 'addresses'])
    def go():
        u.addresses = [Address(id=10, email_address='foo@bar.com')]
    self.assert_sql_count(testing.db, go, 1)
    sess.flush()
    # flush has occurred, and addresses was modified,
    # so the addresses collection got committed and is
    # longer expired
    def go():
        assert u.addresses[0].email_address=='foo@bar.com'
        assert len(u.addresses) == 1
    self.assert_sql_count(testing.db, go, 0)
    # but the name attribute was never loaded and so
    # still loads
    def go():
        assert u.name == 'ed'
    self.assert_sql_count(testing.db, go, 1)
@testing.resolve_artifact_names
def test_partial_expire_eager(self):
    """Partial expire with an eager relation: behaves like the lazy case
    for now (collection access loads separately from scalars)."""
    mapper(User, users, properties={
        'addresses':relation(Address, backref='user', lazy=False),
    })
    mapper(Address, addresses)
    sess = create_session()
    u = sess.query(User).get(8)
    sess.expire(u, ['name', 'addresses'])
    assert 'name' not in u.__dict__
    assert 'addresses' not in u.__dict__
    def go():
        assert u.addresses[0].email_address=='ed@wood.com'
    self.assert_sql_count(testing.db, go, 1)
    # check that mods to expired eager-load attributes
    # do the refresh
    sess.expire(u, ['name', 'addresses'])
    def go():
        u.addresses = [Address(id=10, email_address='foo@bar.com')]
    self.assert_sql_count(testing.db, go, 1)
    sess.flush()
    # this should ideally trigger the whole load
    # but currently it works like the lazy case
    def go():
        assert u.addresses[0].email_address=='foo@bar.com'
        assert len(u.addresses) == 1
    self.assert_sql_count(testing.db, go, 0)
    def go():
        assert u.name == 'ed'
        # scalar attributes have their own load
    self.assert_sql_count(testing.db, go, 1)
    # ideally, this was already loaded, but we aren't
    # doing it that way right now
    #self.assert_sql_count(testing.db, go, 0)
@testing.resolve_artifact_names
def test_relations_load_on_query(self):
    """A Query with an eagerload option refreshes both the expired scalar
    and the expired collection in one pass."""
    mapper(User, users, properties={
        'addresses':relation(Address, backref='user'),
    })
    mapper(Address, addresses)
    sess = create_session()
    u = sess.query(User).get(8)
    assert 'name' in u.__dict__
    u.addresses
    assert 'addresses' in u.__dict__
    sess.expire(u, ['name', 'addresses'])
    assert 'name' not in u.__dict__
    assert 'addresses' not in u.__dict__
    (sess.query(User).options(sa.orm.eagerload('addresses')).
     filter_by(id=8).all())
    assert 'name' in u.__dict__
    assert 'addresses' in u.__dict__
@testing.resolve_artifact_names
def test_partial_expire_deferred(self):
    """Partial expire including a deferred column: one load refreshes both
    the regular and the deferred attribute, whether deferral is declared
    on the mapper or via query options."""
    mapper(Order, orders, properties={
        'description': sa.orm.deferred(orders.c.description)
    })
    sess = create_session()
    o = sess.query(Order).get(3)
    sess.expire(o, ['description', 'isopen'])
    assert 'isopen' not in o.__dict__
    assert 'description' not in o.__dict__
    # test that expired attribute access refreshes
    # the deferred
    def go():
        assert o.isopen == 1
        assert o.description == 'order 3'
    self.assert_sql_count(testing.db, go, 1)
    sess.expire(o, ['description', 'isopen'])
    assert 'isopen' not in o.__dict__
    assert 'description' not in o.__dict__
    # test that the deferred attribute triggers the full
    # reload
    def go():
        assert o.description == 'order 3'
        assert o.isopen == 1
    self.assert_sql_count(testing.db, go, 1)
    sa.orm.clear_mappers()
    mapper(Order, orders)
    sess.expunge_all()
    # same tests, using deferred at the options level
    o = sess.query(Order).options(sa.orm.defer('description')).get(3)
    assert 'description' not in o.__dict__
    # sanity check
    def go():
        assert o.description == 'order 3'
    self.assert_sql_count(testing.db, go, 1)
    assert 'description' in o.__dict__
    assert 'isopen' in o.__dict__
    sess.expire(o, ['description', 'isopen'])
    assert 'isopen' not in o.__dict__
    assert 'description' not in o.__dict__
    # test that expired attribute access refreshes
    # the deferred
    def go():
        assert o.isopen == 1
        assert o.description == 'order 3'
    self.assert_sql_count(testing.db, go, 1)
    sess.expire(o, ['description', 'isopen'])
    assert 'isopen' not in o.__dict__
    assert 'description' not in o.__dict__
    # test that the deferred attribute triggers the full
    # reload
    def go():
        assert o.description == 'order 3'
        assert o.isopen == 1
    self.assert_sql_count(testing.db, go, 1)
@testing.resolve_artifact_names
def test_eagerload_query_refreshes(self):
    """A Query over an eager-loaded mapper re-populates an expired
    collection as part of its normal load."""
    mapper(User, users, properties={
        'addresses':relation(Address, backref='user', lazy=False),
    })
    mapper(Address, addresses)
    sess = create_session()
    u = sess.query(User).get(8)
    assert len(u.addresses) == 3
    sess.expire(u)
    assert 'addresses' not in u.__dict__
    # querying the row (with eager load) restores the expired collection
    # (debug separator print removed)
    sess.query(User).filter_by(id=8).all()
    assert 'addresses' in u.__dict__
    assert len(u.addresses) == 3
@testing.resolve_artifact_names
def test_expire_all(self):
    """expire_all() drops strong references to loaded children (they can
    be garbage collected) and a re-query restores the full graph."""
    mapper(User, users, properties={
        'addresses':relation(Address, backref='user', lazy=False),
    })
    mapper(Address, addresses)
    sess = create_session()
    userlist = sess.query(User).order_by(User.id).all()
    assert self.static.user_address_result == userlist
    assert len(list(sess)) == 9
    sess.expire_all()
    gc_collect()
    assert len(list(sess)) == 4 # since addresses were gc'ed
    userlist = sess.query(User).order_by(User.id).all()
    u = userlist[1]
    eq_(self.static.user_address_result, userlist)
    assert len(list(sess)) == 9
@testing.resolve_artifact_names
def test_state_change_col_to_deferred(self):
    """Behavioral test to verify the current activity of loader callables."""
    mapper(User, users)
    sess = create_session()
    # deferred attribute option, gets the LoadDeferredColumns
    # callable
    u1 = sess.query(User).options(defer(User.name)).first()
    assert isinstance(
        attributes.instance_state(u1).callables['name'],
        strategies.LoadDeferredColumns
    )
    # expire the attr, it gets the InstanceState callable
    sess.expire(u1, ['name'])
    assert isinstance(
        attributes.instance_state(u1).callables['name'],
        state.InstanceState
    )
    # load it, callable is gone
    u1.name
    assert 'name' not in attributes.instance_state(u1).callables
    # same for expire all
    sess.expunge_all()
    u1 = sess.query(User).options(defer(User.name)).first()
    sess.expire(u1)
    assert isinstance(
        attributes.instance_state(u1).callables['name'],
        state.InstanceState
    )
    # load over it. everything normal.
    sess.query(User).first()
    assert 'name' not in attributes.instance_state(u1).callables
    sess.expunge_all()
    u1 = sess.query(User).first()
    # for non present, still expires the same way
    del u1.name
    sess.expire(u1)
    assert 'name' in attributes.instance_state(u1).callables
@testing.resolve_artifact_names
def test_state_deferred_to_col(self):
    """Behavioral test to verify the current activity of loader callables."""
    mapper(User, users, properties={'name':deferred(users.c.name)})
    sess = create_session()
    u1 = sess.query(User).options(undefer(User.name)).first()
    assert 'name' not in attributes.instance_state(u1).callables
    # mass expire, the attribute was loaded,
    # the attribute gets the callable
    sess.expire(u1)
    assert isinstance(
        attributes.instance_state(u1).callables['name'],
        state.InstanceState
    )
    # load it, callable is gone
    u1.name
    assert 'name' not in attributes.instance_state(u1).callables
    # mass expire, attribute was loaded but then deleted,
    # the callable goes away - the state wants to flip
    # it back to its "deferred" loader.
    sess.expunge_all()
    u1 = sess.query(User).options(undefer(User.name)).first()
    del u1.name
    sess.expire(u1)
    assert 'name' not in attributes.instance_state(u1).callables
    # single attribute expire, the attribute gets the callable
    sess.expunge_all()
    u1 = sess.query(User).options(undefer(User.name)).first()
    sess.expire(u1, ['name'])
    assert isinstance(
        attributes.instance_state(u1).callables['name'],
        state.InstanceState
    )
@testing.resolve_artifact_names
def test_state_noload_to_lazy(self):
    """Behavioral test to verify the current activity of loader callables."""
    mapper(User, users, properties={'addresses':relation(Address, lazy=None)})
    mapper(Address, addresses)
    sess = create_session()
    u1 = sess.query(User).options(lazyload(User.addresses)).first()
    assert isinstance(
        attributes.instance_state(u1).callables['addresses'],
        strategies.LoadLazyAttribute
    )
    # expire, it stays
    sess.expire(u1)
    assert isinstance(
        attributes.instance_state(u1).callables['addresses'],
        strategies.LoadLazyAttribute
    )
    # load over it. callable goes away.
    sess.query(User).first()
    assert 'addresses' not in attributes.instance_state(u1).callables
    sess.expunge_all()
    u1 = sess.query(User).options(lazyload(User.addresses)).first()
    sess.expire(u1, ['addresses'])
    assert isinstance(
        attributes.instance_state(u1).callables['addresses'],
        strategies.LoadLazyAttribute
    )
    # load the attr, goes away
    u1.addresses
    assert 'addresses' not in attributes.instance_state(u1).callables
class PolymorphicExpireTest(_base.MappedTest):
    """Expiration behavior across a joined-table inheritance hierarchy."""
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        # base table + engineer subtable, classic joined-table inheritance
        global people, engineers, Person, Engineer
        people = Table('people', metadata,
           Column('person_id', Integer, primary_key=True,
                  test_needs_autoincrement=True),
           Column('name', String(50)),
           Column('type', String(30)))
        engineers = Table('engineers', metadata,
           Column('person_id', Integer, ForeignKey('people.person_id'),
                  primary_key=True),
           Column('status', String(30)),
          )

    @classmethod
    def setup_classes(cls):
        class Person(_base.ComparableEntity):
            pass
        class Engineer(Person):
            pass

    @classmethod
    @testing.resolve_artifact_names
    def insert_data(cls):
        people.insert().execute(
            {'person_id':1, 'name':'person1', 'type':'person'},
            {'person_id':2, 'name':'engineer1', 'type':'engineer'},
            {'person_id':3, 'name':'engineer2', 'type':'engineer'},
        )
        engineers.insert().execute(
            {'person_id':2, 'status':'new engineer'},
            {'person_id':3, 'status':'old engineer'},
        )

    @testing.resolve_artifact_names
    def test_poly_deferred(self):
        """A polymorphic load unexpires base-table columns for all rows;
        subtable columns refresh per-instance afterwards."""
        mapper(Person, people, polymorphic_on=people.c.type, polymorphic_identity='person')
        mapper(Engineer, engineers, inherits=Person, polymorphic_identity='engineer')
        sess = create_session()
        [p1, e1, e2] = sess.query(Person).order_by(people.c.person_id).all()
        sess.expire(p1)
        sess.expire(e1, ['status'])
        sess.expire(e2)
        for p in [p1, e2]:
            assert 'name' not in p.__dict__
        assert 'name' in e1.__dict__
        assert 'status' not in e2.__dict__
        assert 'status' not in e1.__dict__
        e1.name = 'new engineer name'
        def go():
            sess.query(Person).all()
        self.assert_sql_count(testing.db, go, 1)
        for p in [p1, e1, e2]:
            assert 'name' in p.__dict__
        # the subtable column is still expired
        assert 'status' not in e2.__dict__
        assert 'status' not in e1.__dict__
        def go():
            assert e1.name == 'new engineer name'
            assert e2.name == 'engineer2'
            assert e1.status == 'new engineer'
            assert e2.status == 'old engineer'
        # one load per engineer for the subtable column
        self.assert_sql_count(testing.db, go, 2)
        eq_(Engineer.name.get_history(e1), (['new engineer name'],(), ['engineer1']))
class ExpiredPendingTest(_fixtures.FixtureTest):
    """Interaction of expire() with 'pending' backref-attached collection members."""
    run_define_tables = 'once'
    run_setup_classes = 'once'
    run_setup_mappers = None
    run_inserts = None

    @testing.resolve_artifact_names
    def test_expired_pending(self):
        """Expiring a collection also discards its 'pending' (backref-added,
        unflushed) members; they reappear after flush + reload."""
        mapper(User, users, properties={
            'addresses':relation(Address, backref='user'),
            })
        mapper(Address, addresses)
        sess = create_session()
        a1 = Address(email_address='a1')
        sess.add(a1)
        sess.flush()
        u1 = User(name='u1')
        a1.user = u1
        sess.flush()
        # expire 'addresses'.  backrefs
        # which attach to u1 will expect to be "pending"
        sess.expire(u1, ['addresses'])
        # attach an Address.  now its "pending"
        # in user.addresses
        a2 = Address(email_address='a2')
        a2.user = u1
        # expire u1.addresses again.  this expires
        # "pending" as well.
        sess.expire(u1, ['addresses'])
        # insert a new row
        sess.execute(addresses.insert(), dict(email_address='a3', user_id=u1.id))
        # only two addresses pulled from the DB, no "pending"
        assert len(u1.addresses) == 2
        sess.flush()
        sess.expire_all()
        # after the flush, a2 is persisted and all three rows are visible
        assert len(u1.addresses) == 3
class RefreshTest(_fixtures.FixtureTest):
    """Session.refresh() behavior: reverting changes, interaction with
    expire(), lazy/eager loaders, and invalid-state errors."""

    @testing.resolve_artifact_names
    def test_refresh(self):
        """refresh() reverts scalar and collection changes; expire() + access
        does the same lazily."""
        mapper(User, users, properties={
            'addresses':relation(mapper(Address, addresses), backref='user')
        })
        s = create_session()
        u = s.query(User).get(7)
        u.name = 'foo'
        a = Address()
        assert sa.orm.object_session(a) is None
        u.addresses.append(a)
        assert a.email_address is None
        assert id(a) in [id(x) for x in u.addresses]
        s.refresh(u)
        # its refreshed, so not dirty
        assert u not in s.dirty
        # username is back to the DB
        assert u.name == 'jack'
        assert id(a) not in [id(x) for x in u.addresses]
        u.name = 'foo'
        u.addresses.append(a)
        # now its dirty
        assert u in s.dirty
        assert u.name == 'foo'
        assert id(a) in [id(x) for x in u.addresses]
        s.expire(u)
        # get the attribute, it refreshes
        assert u.name == 'jack'
        assert id(a) not in [id(x) for x in u.addresses]

    @testing.resolve_artifact_names
    def test_persistence_check(self):
        """refresh() on an expunged (non-persistent) instance raises."""
        mapper(User, users)
        s = create_session()
        u = s.query(User).get(7)
        s.expunge_all()
        assert_raises_message(sa_exc.InvalidRequestError, r"is not persistent within this Session", lambda: s.refresh(u))

    @testing.resolve_artifact_names
    def test_refresh_expired(self):
        """refresh() eagerly un-expires an expired instance."""
        mapper(User, users)
        s = create_session()
        u = s.query(User).get(7)
        s.expire(u)
        assert 'name' not in u.__dict__
        s.refresh(u)
        assert u.name == 'jack'

    @testing.resolve_artifact_names
    def test_refresh_with_lazy(self):
        """test that when a lazy loader is set as a trigger on an object's attribute
        (at the attribute level, not the class level), a refresh() operation doesn't
        fire the lazy loader or create any problems"""
        s = create_session()
        mapper(User, users, properties={'addresses':relation(mapper(Address, addresses))})
        q = s.query(User).options(sa.orm.lazyload('addresses'))
        u = q.filter(users.c.id==8).first()
        def go():
            s.refresh(u)
        self.assert_sql_count(testing.db, go, 1)

    @testing.resolve_artifact_names
    def test_refresh_with_eager(self):
        """test that a refresh/expire operation loads rows properly and sends correct "isnew" state to eager loaders"""
        mapper(User, users, properties={
            'addresses':relation(mapper(Address, addresses), lazy=False)
        })
        s = create_session()
        u = s.query(User).get(8)
        assert len(u.addresses) == 3
        s.refresh(u)
        assert len(u.addresses) == 3
        s = create_session()
        u = s.query(User).get(8)
        assert len(u.addresses) == 3
        s.expire(u)
        assert len(u.addresses) == 3

    @testing.fails_on('maxdb', 'FIXME: unknown')
    @testing.resolve_artifact_names
    def test_refresh2(self):
        """test a hang condition that was occurring on expire/refresh"""
        s = create_session()
        mapper(Address, addresses)
        mapper(User, users, properties = dict(addresses=relation(Address,cascade="all, delete-orphan",lazy=False)) )
        u = User()
        u.name='Justin'
        a = Address(id=10, email_address='lala')
        u.addresses.append(a)
        s.add(u)
        s.flush()
        s.expunge_all()
        u = s.query(User).filter(User.name=='Justin').one()
        s.expire(u)
        assert u.name == 'Justin'
        s.refresh(u)
| {
"repo_name": "obeattie/sqlalchemy",
"path": "test/orm/test_expire.py",
"copies": "1",
"size": "35601",
"license": "mit",
"hash": 5676430808786989000,
"line_mean": 33.1005747126,
"line_max": 122,
"alpha_frac": 0.5752085616,
"autogenerated": false,
"ratio": 3.936421937195931,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.501163049879593,
"avg_score": null,
"num_lines": null
} |
"""Attribute/instance expiration, deferral of attributes, etc."""
import sqlalchemy as sa
from sqlalchemy import exc as sa_exc
from sqlalchemy import FetchedValue
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import attributes
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import defer
from sqlalchemy.orm import deferred
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import lazyload
from sqlalchemy.orm import make_transient_to_detached
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import strategies
from sqlalchemy.orm import undefer
from sqlalchemy.sql import select
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.assertsql import CountStatements
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
from test.orm import _fixtures
class ExpireTest(_fixtures.FixtureTest):
def test_expire(self):
    """Baseline expire(): attributes are dropped, refreshed lazily (one
    load), and refreshed en masse by a later query."""
    users, Address, addresses, User = (
        self.tables.users,
        self.classes.Address,
        self.tables.addresses,
        self.classes.User,
    )

    mapper(
        User,
        users,
        properties={"addresses": relationship(Address, backref="user")},
    )
    mapper(Address, addresses)
    sess = fixture_session(autoflush=False)
    u = sess.query(User).get(7)
    assert len(u.addresses) == 1
    u.name = "foo"
    del u.addresses[0]
    sess.expire(u)

    assert "name" not in u.__dict__

    def go():
        assert u.name == "jack"

    self.assert_sql_count(testing.db, go, 1)
    assert "name" in u.__dict__

    u.name = "foo"
    sess.flush()
    # change the value in the DB
    sess.execute(users.update(users.c.id == 7, values=dict(name="jack")))

    sess.expire(u)
    # object isn't refreshed yet, using dict to bypass trigger
    assert u.__dict__.get("name") != "jack"

    # the attribute is tracked as expired on the instance state
    assert "name" in attributes.instance_state(u).expired_attributes

    sess.query(User).all()
    # test that it refreshed
    assert u.__dict__["name"] == "jack"

    assert "name" not in attributes.instance_state(u).expired_attributes

    def go():
        assert u.name == "jack"

    self.assert_sql_count(testing.db, go, 0)
def test_expire_autoflush(self):
    """Accessing an expired attribute autoflushes the pending relationship
    change before the refresh load."""
    User, users = self.classes.User, self.tables.users
    Address, addresses = self.classes.Address, self.tables.addresses

    mapper(User, users)
    mapper(Address, addresses, properties={"user": relationship(User)})

    s = fixture_session()

    a1 = s.query(Address).get(2)
    u1 = s.query(User).get(7)
    a1.user = u1

    s.expire(a1, ["user_id"])

    # autoflushes
    eq_(a1.user_id, 7)
def test_persistence_check(self):
    """expire() on a detached (expunged) instance raises InvalidRequestError."""
    users, User = self.tables.users, self.classes.User

    mapper(User, users)
    s = fixture_session()
    u = s.query(User).get(7)
    s.expunge_all()

    assert_raises_message(
        sa_exc.InvalidRequestError,
        r"is not persistent within this Session",
        s.expire,
        u,
    )
def test_get_refreshes(self):
    """get() refreshes an expired instance once; subsequent gets and
    attribute access emit no further SQL."""
    users, User = self.tables.users, self.classes.User

    mapper(User, users)
    s = fixture_session(autocommit=False)
    u = s.query(User).get(10)
    s.expire_all()

    def go():
        s.query(User).get(10)  # get() refreshes

    self.assert_sql_count(testing.db, go, 1)

    def go():
        eq_(u.name, "chuck")  # attributes unexpired

    self.assert_sql_count(testing.db, go, 0)

    def go():
        s.query(User).get(10)  # expire flag reset, so not expired

    self.assert_sql_count(testing.db, go, 0)
def test_get_on_deleted_expunges(self):
    """get() on a row deleted behind the session's back returns None and
    expunges the stale instance."""
    users, User = self.tables.users, self.classes.User

    mapper(User, users)
    s = fixture_session(autocommit=False)
    u = s.query(User).get(10)

    s.expire_all()
    s.execute(users.delete().where(User.id == 10))

    # object is gone, get() returns None, removes u from session
    assert u in s
    assert s.query(User).get(10) is None
    assert u not in s  # and expunges
def test_refresh_on_deleted_raises(self):
    """Accessing an expired attribute whose row was deleted behind the
    session's back raises ObjectDeletedError."""
    users, User = self.tables.users, self.classes.User

    mapper(User, users)
    s = fixture_session(autocommit=False)
    u = s.query(User).get(10)
    # expire everything, then delete the row out from under the session
    # (a duplicated second expire_all() call was removed here)
    s.expire_all()
    s.execute(users.delete().where(User.id == 10))

    # raises ObjectDeletedError
    assert_raises_message(
        sa.orm.exc.ObjectDeletedError,
        "Instance '<User at .*?>' has been "
        "deleted, or its row is otherwise not present.",
        getattr,
        u,
        "name",
    )
def test_rollback_undoes_expunge_from_deleted(self):
    """rollback() restores an instance that was expunged because its row
    appeared deleted, and its attributes load again."""
    users, User = self.tables.users, self.classes.User

    mapper(User, users)
    s = fixture_session(autocommit=False)
    u = s.query(User).get(10)
    s.expire_all()
    s.execute(users.delete().where(User.id == 10))

    # do a get()/remove u from session
    assert s.query(User).get(10) is None
    assert u not in s

    s.rollback()

    assert u in s
    # but now its back, rollback has occurred, the
    # _remove_newly_deleted is reverted
    eq_(u.name, "chuck")
def test_deferred(self):
    """test that unloaded, deferred attributes aren't included in the
    expiry list."""

    Order, orders = self.classes.Order, self.tables.orders

    mapper(
        Order,
        orders,
        properties={"description": deferred(orders.c.description)},
    )

    s = fixture_session()
    o1 = s.query(Order).first()
    assert "description" not in o1.__dict__
    s.expire(o1)

    # the deferred attribute is listed as expired (new in 1.4)
    eq_(
        inspect(o1).expired_attributes,
        {"id", "isopen", "address_id", "user_id", "description"},
    )

    # unexpire by accessing isopen
    assert o1.isopen is not None

    # all expired_attributes are cleared
    eq_(inspect(o1).expired_attributes, set())

    # but description wasn't loaded (new in 1.4)
    assert "description" not in o1.__dict__

    # loads using deferred callable
    assert o1.description
def test_deferred_notfound(self):
    """Loading a deferred column whose row was deleted raises
    ObjectDeletedError."""
    users, User = self.tables.users, self.classes.User
    mapper(User, users, properties={"name": deferred(users.c.name)})
    s = fixture_session(autocommit=False)
    u = s.query(User).get(10)

    assert "name" not in u.__dict__
    s.execute(users.delete().where(User.id == 10))

    assert_raises_message(
        sa.orm.exc.ObjectDeletedError,
        "Instance '<User at .*?>' has been "
        "deleted, or its row is otherwise not present.",
        getattr,
        u,
        "name",
    )
    def test_lazyload_autoflushes(self):
        """A lazy load of an expired collection autoflushes pending
        changes first, so the new ordering reflects them."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    Address, order_by=addresses.c.email_address
                )
            },
        )
        mapper(Address, addresses)
        s = fixture_session(autoflush=True, autocommit=False)
        u = s.query(User).get(8)
        adlist = u.addresses
        eq_(
            adlist,
            [
                Address(email_address="ed@bettyboop.com"),
                Address(email_address="ed@lala.com"),
                Address(email_address="ed@wood.com"),
            ],
        )
        a1 = u.addresses[2]
        a1.email_address = "aaaaa"
        s.expire(u, ["addresses"])
        # the pending change to a1 is flushed by the lazy load,
        # so "aaaaa" now sorts first
        eq_(
            u.addresses,
            [
                Address(email_address="aaaaa"),
                Address(email_address="ed@bettyboop.com"),
                Address(email_address="ed@lala.com"),
            ],
        )
    def test_refresh_collection_exception(self):
        """test graceful failure for currently unsupported
        immediate refresh of a collection"""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    Address, order_by=addresses.c.email_address
                )
            },
        )
        mapper(Address, addresses)
        s = fixture_session(autoflush=True, autocommit=False)
        u = s.query(User).get(8)
        # naming a collection in refresh() is rejected up front
        assert_raises_message(
            sa_exc.InvalidRequestError,
            "properties specified for refresh",
            s.refresh,
            u,
            ["addresses"],
        )
    def test_refresh_cancels_expire(self):
        """refresh() after expire() leaves no pending expiry; a subsequent
        get() emits no SQL."""
        users, User = self.tables.users, self.classes.User
        mapper(User, users)
        s = fixture_session()
        u = s.query(User).get(7)
        s.expire(u)
        s.refresh(u)
        def go():
            u = s.query(User).get(7)
            eq_(u.name, "jack")
        self.assert_sql_count(testing.db, go, 0)
    def test_expire_doesntload_on_set(self):
        """Setting an expired attribute doesn't trigger a load; the new
        value is flushed normally."""
        User, users = self.classes.User, self.tables.users
        mapper(User, users)
        sess = fixture_session(autoflush=False)
        u = sess.query(User).get(7)
        sess.expire(u, attribute_names=["name"])
        def go():
            u.name = "somenewname"
        # assignment alone emits no SQL
        self.assert_sql_count(testing.db, go, 0)
        sess.flush()
        sess.expunge_all()
        assert sess.query(User).get(7).name == "somenewname"
    def test_no_session(self):
        """Accessing an expired attribute on a detached instance raises
        DetachedInstanceError."""
        users, User = self.tables.users, self.classes.User
        mapper(User, users)
        sess = fixture_session()
        u = sess.query(User).get(7)
        sess.expire(u, attribute_names=["name"])
        sess.expunge(u)
        assert_raises(orm_exc.DetachedInstanceError, getattr, u, "name")
    def test_pending_raises(self):
        """expire() on a pending (never-flushed) instance raises."""
        users, User = self.tables.users, self.classes.User
        # this was the opposite in 0.4, but the reasoning there seemed off.
        # expiring a pending instance makes no sense, so should raise
        mapper(User, users)
        sess = fixture_session()
        u = User(id=15)
        sess.add(u)
        assert_raises(sa_exc.InvalidRequestError, sess.expire, u, ["name"])
    def test_no_instance_key(self):
        """A pending instance with expired attributes can still load them
        as long as its PK values are present."""
        User, users = self.classes.User, self.tables.users
        # this tests an artificial condition such that
        # an instance is pending, but has expired attributes. this
        # is actually part of a larger behavior when postfetch needs to
        # occur during a flush() on an instance that was just inserted
        mapper(User, users)
        sess = fixture_session(autoflush=False)
        u = sess.query(User).get(7)
        sess.expire(u, attribute_names=["name"])
        sess.expunge(u)
        # artificially clear the identity key to simulate "pending"
        attributes.instance_state(u).key = None
        assert "name" not in u.__dict__
        sess.add(u)
        assert u.name == "jack"
    def test_no_instance_key_no_pk(self):
        """Like test_no_instance_key, but with expired PK columns the load
        must raise rather than guess the row."""
        users, User = self.tables.users, self.classes.User
        # same as test_no_instance_key, but the PK columns
        # are absent. ensure an error is raised.
        mapper(User, users)
        sess = fixture_session()
        u = sess.query(User).get(7)
        sess.expire(u, attribute_names=["name", "id"])
        sess.expunge(u)
        attributes.instance_state(u).key = None
        assert "name" not in u.__dict__
        sess.add(u)
        assert_raises(sa_exc.InvalidRequestError, getattr, u, "name")
    def test_expire_preserves_changes(self):
        """test that the expire load operation doesn't revert post-expire
        changes"""
        Order, orders = self.classes.Order, self.tables.orders
        mapper(Order, orders)
        sess = fixture_session(autoflush=False)
        o = sess.query(Order).get(3)
        sess.expire(o)
        # modify after expire; the refresh triggered below must not
        # overwrite this pending value
        o.description = "order 3 modified"
        def go():
            assert o.isopen == 1
        self.assert_sql_count(testing.db, go, 1)
        assert o.description == "order 3 modified"
        # deletion of the attribute also survives an expire/refresh cycle
        del o.description
        assert "description" not in o.__dict__
        sess.expire(o, ["isopen"])
        sess.query(Order).all()
        assert o.isopen == 1
        assert "description" not in o.__dict__
        assert o.description is None
        o.isopen = 15
        sess.expire(o, ["isopen", "description"])
        o.description = "some new description"
        sess.query(Order).all()
        assert o.isopen == 1
        assert o.description == "some new description"
        sess.expire(o, ["isopen", "description"])
        sess.query(Order).all()
        # deleting an expired-and-refreshed attribute
        del o.isopen
        def go():
            assert o.isopen is None
        self.assert_sql_count(testing.db, go, 0)
        o.isopen = 14
        sess.expire(o)
        o.description = "another new description"
        sess.query(Order).all()
        assert o.isopen == 1
        assert o.description == "another new description"
    def test_expire_committed(self):
        """test that the committed state of the attribute receives the most
        recent DB data"""
        orders, Order = self.tables.orders, self.classes.Order
        mapper(Order, orders)
        sess = fixture_session(autoflush=False)
        o = sess.query(Order).get(3)
        sess.expire(o)
        # change the row outside the ORM while the object is expired
        sess.execute(orders.update(), dict(description="order 3 modified"))
        assert o.isopen == 1
        assert (
            attributes.instance_state(o).dict["description"]
            == "order 3 modified"
        )
        def go():
            sess.flush()
        # refreshed value is "committed"; flush sees nothing dirty
        self.assert_sql_count(testing.db, go, 0)
    def test_expire_cascade(self):
        """The "refresh-expire" cascade propagates expire() to related
        objects, reverting their pending changes on reload."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    Address, cascade="all, refresh-expire"
                )
            },
        )
        mapper(Address, addresses)
        s = fixture_session(autoflush=False)
        u = s.query(User).get(8)
        assert u.addresses[0].email_address == "ed@wood.com"
        u.addresses[0].email_address = "someotheraddress"
        s.expire(u)
        # the address was expired too; its change is gone after reload
        assert u.addresses[0].email_address == "ed@wood.com"
    def test_refresh_cascade(self):
        """The "refresh-expire" cascade propagates refresh() to related
        objects, reverting their pending changes."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    Address, cascade="all, refresh-expire"
                )
            },
        )
        mapper(Address, addresses)
        s = fixture_session(autoflush=False)
        u = s.query(User).get(8)
        assert u.addresses[0].email_address == "ed@wood.com"
        u.addresses[0].email_address = "someotheraddress"
        s.refresh(u)
        # cascade refreshed the related Address as well
        assert u.addresses[0].email_address == "ed@wood.com"
def test_expire_cascade_pending_orphan(self):
cascade = "save-update, refresh-expire, delete, delete-orphan"
self._test_cascade_to_pending(cascade, True)
def test_refresh_cascade_pending_orphan(self):
cascade = "save-update, refresh-expire, delete, delete-orphan"
self._test_cascade_to_pending(cascade, False)
def test_expire_cascade_pending(self):
cascade = "save-update, refresh-expire"
self._test_cascade_to_pending(cascade, True)
def test_refresh_cascade_pending(self):
cascade = "save-update, refresh-expire"
self._test_cascade_to_pending(cascade, False)
    def _test_cascade_to_pending(self, cascade, expire_or_refresh):
        """Common body: expire/refresh a parent whose collection holds a
        pending child; with delete-orphan the child is evicted entirely,
        otherwise it stays in the session but leaves the collection."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={"addresses": relationship(Address, cascade=cascade)},
        )
        mapper(Address, addresses)
        s = fixture_session(autoflush=False)
        u = s.query(User).get(8)
        a = Address(id=12, email_address="foobar")
        u.addresses.append(a)
        if expire_or_refresh:
            s.expire(u)
        else:
            s.refresh(u)
        if "delete-orphan" in cascade:
            assert a not in s
        else:
            assert a in s
        # the pending append is discarded either way
        assert a not in u.addresses
        s.flush()
    def test_expired_lazy(self):
        """Unexpiring a fully expired instance with a lazy relationship
        takes two statements: one refresh, one lazy load."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={"addresses": relationship(Address, backref="user")},
        )
        mapper(Address, addresses)
        sess = fixture_session()
        u = sess.query(User).get(7)
        sess.expire(u)
        assert "name" not in u.__dict__
        assert "addresses" not in u.__dict__
        def go():
            assert u.addresses[0].email_address == "jack@bean.com"
            assert u.name == "jack"
        # two loads
        self.assert_sql_count(testing.db, go, 2)
        assert "name" in u.__dict__
        assert "addresses" in u.__dict__
    def test_expired_eager(self):
        """With lazy="joined", unexpire runs the joinedload so a single
        statement restores scalar and collection state."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    Address, backref="user", lazy="joined"
                )
            },
        )
        mapper(Address, addresses)
        sess = fixture_session()
        u = sess.query(User).get(7)
        sess.expire(u)
        assert "name" not in u.__dict__
        assert "addresses" not in u.__dict__
        def go():
            assert u.addresses[0].email_address == "jack@bean.com"
            assert u.name == "jack"
        # one load, due to #1763 allows joinedload to
        # take over
        self.assert_sql_count(testing.db, go, 1)
        assert "name" in u.__dict__
        assert "addresses" in u.__dict__
        sess.expire(u, ["name", "addresses"])
        assert "name" not in u.__dict__
        assert "addresses" not in u.__dict__
        def go():
            sess.query(User).filter_by(id=7).one()
            assert u.addresses[0].email_address == "jack@bean.com"
            assert u.name == "jack"
        # one load, since relationship() + scalar are
        # together when eager load used with Query
        self.assert_sql_count(testing.db, go, 1)
    def test_unexpire_eager_dont_overwrite_related(self):
        """Unexpiring a parent via joinedload doesn't clobber pending
        changes on an already-loaded related object."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    Address, backref="user", lazy="joined"
                )
            },
        )
        mapper(Address, addresses)
        sess = fixture_session(autoflush=False)
        u = sess.query(User).get(7)
        a1 = u.addresses[0]
        eq_(a1.email_address, "jack@bean.com")
        sess.expire(u)
        a1.email_address = "foo"
        # accessing u.addresses unexpires u, but a1's change survives
        assert a1 in u.addresses
        eq_(a1.email_address, "foo")
        assert a1 in sess.dirty
    @testing.combinations(
        ("contains,joined",),
        ("contains,contains",),
    )
    def test_unexpire_eager_dont_include_contains_eager(self, case):
        """test #6449
        testing that contains_eager is downgraded to lazyload during
        a refresh, including if additional eager loaders are off the
        contains_eager
        """
        orders, Order, users, Address, addresses, User = (
            self.tables.orders,
            self.classes.Order,
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={"orders": relationship(Order, order_by=orders.c.id)},
        )
        mapper(Address, addresses, properties={"user": relationship(User)})
        mapper(Order, orders)
        sess = fixture_session(autoflush=False)
        with self.sql_execution_asserter(testing.db) as asserter:
            if case == "contains,joined":
                a1 = (
                    sess.query(Address)
                    .join(Address.user)
                    .options(
                        contains_eager(Address.user).joinedload(User.orders)
                    )
                    .filter(Address.id == 1)
                    .one()
                )
            elif case == "contains,contains":
                # legacy query.first() can't be used here because it sets
                # limit 1 without the correct query wrapping. 1.3 has
                # the same problem though it renders differently
                a1 = (
                    sess.query(Address)
                    .join(Address.user)
                    .join(User.orders)
                    .order_by(Order.id)
                    .options(
                        contains_eager(Address.user).contains_eager(
                            User.orders
                        )
                    )
                    .filter(Address.id == 1)
                    .one()
                )
            eq_(
                a1,
                Address(
                    id=1,
                    user=User(
                        id=7, orders=[Order(id=1), Order(id=3), Order(id=5)]
                    ),
                ),
            )
        # ensure load with either contains_eager().joinedload() or
        # contains_eager().contains_eager() worked as expected
        asserter.assert_(CountStatements(1))
        # expire object, reset the session fully and re-add so that
        # the related User / Order objects are not in the identity map,
        # allows SQL count below to be deterministic
        sess.expire(a1)
        sess.close()
        sess.add(a1)
        # assert behavior on unexpire
        with self.sql_execution_asserter(testing.db) as asserter:
            a1.user
            assert "user" in a1.__dict__
            if case == "contains,joined":
                # joinedload took place
                assert "orders" in a1.user.__dict__
            elif case == "contains,contains":
                # contains eager is downgraded to a lazy load
                assert "orders" not in a1.user.__dict__
            eq_(
                a1,
                Address(
                    id=1,
                    user=User(
                        id=7, orders=[Order(id=1), Order(id=3), Order(id=5)]
                    ),
                ),
            )
        if case == "contains,joined":
            # the joinedloader for Address->User works,
            # so we get refresh(Address).lazyload(Address.user).
            # joinedload(User.orders)
            asserter.assert_(CountStatements(2))
        elif case == "contains,contains":
            # both contains_eagers become normal loads so we get
            # refresh(Address).lazyload(Address.user).lazyload(User.orders)
            asserter.assert_(CountStatements(3))
    def test_relationship_changes_preserved(self):
        """Mutations made to an unexpired collection survive the later
        unexpire of a sibling scalar attribute."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    Address, backref="user", lazy="joined"
                )
            },
        )
        mapper(Address, addresses)
        sess = fixture_session(autoflush=False)
        u = sess.query(User).get(8)
        sess.expire(u, ["name", "addresses"])
        u.addresses
        # loading the collection does not also unexpire "name"
        assert "name" not in u.__dict__
        del u.addresses[1]
        u.name
        assert "name" in u.__dict__
        # the removal from the collection was not reverted
        assert len(u.addresses) == 2
    def test_mapper_joinedload_props_load(self):
        """Mapper-level lazy="joined" runs during unexpire, populating the
        collection as a side effect of touching a scalar."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        # changed in #1763, eager loaders are run when we unexpire
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    Address, backref="user", lazy="joined"
                )
            },
        )
        mapper(Address, addresses)
        sess = fixture_session()
        u = sess.query(User).get(8)
        sess.expire(u)
        u.id
        assert "addresses" in u.__dict__
        u.addresses
        assert "addresses" in u.__dict__
    def test_options_joinedload_props_load(self):
        """A query-option joinedload is also re-run during unexpire, same
        as the mapper-level variant."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        # changed in #1763, eager loaders are run when we unexpire
        mapper(
            User,
            users,
            properties={"addresses": relationship(Address, backref="user")},
        )
        mapper(Address, addresses)
        sess = fixture_session()
        u = sess.query(User).options(joinedload(User.addresses)).get(8)
        sess.expire(u)
        u.id
        assert "addresses" in u.__dict__
        u.addresses
        assert "addresses" in u.__dict__
    def test_joinedload_props_load_two(self):
        """Touching the collection first unexpires everything in one
        statement; the scalar loader must not fire again."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    Address, backref="user", lazy="joined"
                )
            },
        )
        mapper(Address, addresses)
        sess = fixture_session()
        u = sess.query(User).get(8)
        sess.expire(u)
        # here, the lazy loader will encounter the attribute already
        # loaded when it goes to get the PK, so the loader itself
        # needs to no longer fire off.
        def go():
            u.addresses
            assert "addresses" in u.__dict__
            assert "id" in u.__dict__
        self.assert_sql_count(testing.db, go, 1)
    def test_expire_synonym(self):
        """Expiring the underlying column also expires access through its
        synonym; both see the refreshed DB value."""
        User, users = self.classes.User, self.tables.users
        mapper(User, users, properties={"uname": sa.orm.synonym("name")})
        sess = fixture_session()
        u = sess.query(User).get(7)
        assert "name" in u.__dict__
        assert u.uname == u.name
        sess.expire(u)
        assert "name" not in u.__dict__
        sess.execute(users.update(users.c.id == 7), dict(name="jack2"))
        assert u.name == "jack2"
        assert u.uname == "jack2"
        assert "name" in u.__dict__
        # this won't work unless we add API hooks through the attr. system to
        # provide "expire" behavior on a synonym
        # sess.expire(u, ['uname'])
        # users.update(users.c.id==7).execute(name='jack3')
        # assert u.uname == 'jack3'
    def test_partial_expire(self):
        """Expiring a subset of attributes refreshes only those, leaving
        local modifications to the others intact."""
        orders, Order = self.tables.orders, self.classes.Order
        mapper(Order, orders)
        sess = fixture_session(autoflush=False)
        o = sess.query(Order).get(3)
        sess.expire(o, attribute_names=["description"])
        assert "id" in o.__dict__
        assert "description" not in o.__dict__
        assert attributes.instance_state(o).dict["isopen"] == 1
        # change the row externally; only the expired attr picks it up
        sess.execute(
            orders.update(orders.c.id == 3),
            dict(description="order 3 modified"),
        )
        def go():
            assert o.description == "order 3 modified"
        self.assert_sql_count(testing.db, go, 1)
        assert (
            attributes.instance_state(o).dict["description"]
            == "order 3 modified"
        )
        o.isopen = 5
        sess.expire(o, attribute_names=["description"])
        assert "id" in o.__dict__
        assert "description" not in o.__dict__
        assert o.__dict__["isopen"] == 5
        assert attributes.instance_state(o).committed_state["isopen"] == 1
        def go():
            assert o.description == "order 3 modified"
        self.assert_sql_count(testing.db, go, 1)
        # the in-flight change to isopen was preserved by the partial refresh
        assert o.__dict__["isopen"] == 5
        assert (
            attributes.instance_state(o).dict["description"]
            == "order 3 modified"
        )
        assert attributes.instance_state(o).committed_state["isopen"] == 1
        sess.flush()
        sess.expire(o, attribute_names=["id", "isopen", "description"])
        assert "id" not in o.__dict__
        assert "isopen" not in o.__dict__
        assert "description" not in o.__dict__
        def go():
            assert o.description == "order 3 modified"
            assert o.id == 3
            assert o.isopen == 5
        # all three expired attrs come back in a single statement
        self.assert_sql_count(testing.db, go, 1)
    def test_partial_expire_lazy(self):
        """Hitting a lazy-loaded collection only performs the lazy load,
        not a full refresh of the other expired attributes."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={"addresses": relationship(Address, backref="user")},
        )
        mapper(Address, addresses)
        sess = fixture_session(autoflush=False)
        u = sess.query(User).get(8)
        sess.expire(u, ["name", "addresses"])
        assert "name" not in u.__dict__
        assert "addresses" not in u.__dict__
        # hit the lazy loader. just does the lazy load,
        # doesn't do the overall refresh
        def go():
            assert u.addresses[0].email_address == "ed@wood.com"
        self.assert_sql_count(testing.db, go, 1)
        assert "name" not in u.__dict__
        # check that mods to expired lazy-load attributes
        # only do the lazy load
        sess.expire(u, ["name", "addresses"])
        def go():
            u.addresses = [Address(id=10, email_address="foo@bar.com")]
        self.assert_sql_count(testing.db, go, 1)
        sess.flush()
        # flush has occurred, and addresses was modified,
        # so the addresses collection got committed and is
        # no longer expired
        def go():
            assert u.addresses[0].email_address == "foo@bar.com"
            assert len(u.addresses) == 1
        self.assert_sql_count(testing.db, go, 0)
        # but the name attribute was never loaded and so
        # still loads
        def go():
            assert u.name == "ed"
        self.assert_sql_count(testing.db, go, 1)
    def test_partial_expire_eager(self):
        """Same as the lazy variant but with lazy="joined"; current
        behavior still acts like the lazy case on collection access."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    Address, backref="user", lazy="joined"
                )
            },
        )
        mapper(Address, addresses)
        sess = fixture_session(autoflush=False)
        u = sess.query(User).get(8)
        sess.expire(u, ["name", "addresses"])
        assert "name" not in u.__dict__
        assert "addresses" not in u.__dict__
        def go():
            assert u.addresses[0].email_address == "ed@wood.com"
        self.assert_sql_count(testing.db, go, 1)
        # check that mods to expired eager-load attributes
        # do the refresh
        sess.expire(u, ["name", "addresses"])
        def go():
            u.addresses = [Address(id=10, email_address="foo@bar.com")]
        self.assert_sql_count(testing.db, go, 1)
        sess.flush()
        # this should ideally trigger the whole load
        # but currently it works like the lazy case
        def go():
            assert u.addresses[0].email_address == "foo@bar.com"
            assert len(u.addresses) == 1
        self.assert_sql_count(testing.db, go, 0)
        def go():
            assert u.name == "ed"
            # scalar attributes have their own load
        self.assert_sql_count(testing.db, go, 1)
        # ideally, this was already loaded, but we aren't
        # doing it that way right now
        # self.assert_sql_count(testing.db, go, 0)
    def test_relationships_load_on_query(self):
        """A query with joinedload over an expired instance repopulates
        both the scalar and the collection."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={"addresses": relationship(Address, backref="user")},
        )
        mapper(Address, addresses)
        sess = fixture_session(autoflush=False)
        u = sess.query(User).get(8)
        assert "name" in u.__dict__
        u.addresses
        assert "addresses" in u.__dict__
        sess.expire(u, ["name", "addresses"])
        assert "name" not in u.__dict__
        assert "addresses" not in u.__dict__
        (
            sess.query(User)
            .options(sa.orm.joinedload("addresses"))
            .filter_by(id=8)
            .all()
        )
        # the query's row populated the expired attributes
        assert "name" in u.__dict__
        assert "addresses" in u.__dict__
    def test_partial_expire_deferred(self):
        """Interaction of partial expire with deferred columns: mapper-level
        deferred keeps its own load; option-level defer is cancelled by the
        expiration and joins the single refresh statement."""
        orders, Order = self.tables.orders, self.classes.Order
        mapper(
            Order,
            orders,
            properties={"description": sa.orm.deferred(orders.c.description)},
        )
        sess = fixture_session(autoflush=False)
        o = sess.query(Order).get(3)
        sess.expire(o, ["description", "isopen"])
        assert "isopen" not in o.__dict__
        assert "description" not in o.__dict__
        # test that expired attribute access does not refresh
        # the deferred
        def go():
            assert o.isopen == 1
            assert o.description == "order 3"
        # requires two statements
        self.assert_sql_count(testing.db, go, 2)
        sess.expire(o, ["description", "isopen"])
        assert "isopen" not in o.__dict__
        assert "description" not in o.__dict__
        # test that the deferred attribute does not trigger the full
        # reload
        def go():
            assert o.description == "order 3"
            assert o.isopen == 1
        self.assert_sql_count(testing.db, go, 2)
        sa.orm.clear_mappers()
        mapper(Order, orders)
        sess.expunge_all()
        # same tests, using deferred at the options level
        o = sess.query(Order).options(sa.orm.defer("description")).get(3)
        assert "description" not in o.__dict__
        # sanity check
        def go():
            assert o.description == "order 3"
        self.assert_sql_count(testing.db, go, 1)
        assert "description" in o.__dict__
        assert "isopen" in o.__dict__
        sess.expire(o, ["description", "isopen"])
        assert "isopen" not in o.__dict__
        assert "description" not in o.__dict__
        # test that expired attribute access refreshes
        # the deferred
        def go():
            assert o.isopen == 1
            assert o.description == "order 3"
        self.assert_sql_count(testing.db, go, 1)
        sess.expire(o, ["description", "isopen"])
        assert "isopen" not in o.__dict__
        assert "description" not in o.__dict__
        # test that the deferred attribute triggers the full
        # reload
        def go():
            assert o.description == "order 3"
            assert o.isopen == 1
        self.assert_sql_count(testing.db, go, 1)
    def test_joinedload_query_refreshes(self):
        """A later query whose rows include the expired instance refreshes
        it, including the joined-eager collection."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    Address, backref="user", lazy="joined"
                )
            },
        )
        mapper(Address, addresses)
        sess = fixture_session(autoflush=False)
        u = sess.query(User).get(8)
        assert len(u.addresses) == 3
        sess.expire(u)
        assert "addresses" not in u.__dict__
        sess.query(User).filter_by(id=8).all()
        assert "addresses" in u.__dict__
        assert len(u.addresses) == 3
    @testing.requires.predictable_gc
    def test_expire_all(self):
        """expire_all() drops strong references held via loaded state so
        unreferenced Address objects can be garbage collected."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    Address,
                    backref="user",
                    lazy="joined",
                    order_by=addresses.c.id,
                )
            },
        )
        mapper(Address, addresses)
        sess = fixture_session(autoflush=False)
        userlist = sess.query(User).order_by(User.id).all()
        eq_(self.static.user_address_result, userlist)
        eq_(len(list(sess)), 9)
        sess.expire_all()
        gc_collect()
        eq_(len(list(sess)), 4)  # since addresses were gc'ed
        userlist = sess.query(User).order_by(User.id).all()
        eq_(self.static.user_address_result, userlist)
        eq_(len(list(sess)), 9)
    def test_state_change_col_to_deferred(self):
        """Behavioral test to verify the current activity of loader
        callables
        """
        users, User = self.tables.users, self.classes.User
        mapper(User, users)
        sess = fixture_session(autoflush=False)
        # deferred attribute option, gets the LoadDeferredColumns
        # callable
        u1 = sess.query(User).options(defer(User.name)).first()
        assert isinstance(
            attributes.instance_state(u1).callables["name"],
            strategies.LoadDeferredColumns,
        )
        # expire the attr, it gets the InstanceState callable
        sess.expire(u1, ["name"])
        assert "name" in attributes.instance_state(u1).expired_attributes
        assert "name" not in attributes.instance_state(u1).callables
        # load it, callable is gone
        u1.name
        assert "name" not in attributes.instance_state(u1).expired_attributes
        assert "name" not in attributes.instance_state(u1).callables
        # same for expire all
        sess.expunge_all()
        u1 = sess.query(User).options(defer(User.name)).first()
        sess.expire(u1)
        assert "name" in attributes.instance_state(u1).expired_attributes
        assert "name" not in attributes.instance_state(u1).callables
        # load over it. everything normal.
        sess.query(User).first()
        assert "name" not in attributes.instance_state(u1).expired_attributes
        assert "name" not in attributes.instance_state(u1).callables
        sess.expunge_all()
        u1 = sess.query(User).first()
        # for non present, still expires the same way
        del u1.name
        sess.expire(u1)
        assert "name" in attributes.instance_state(u1).expired_attributes
        assert "name" not in attributes.instance_state(u1).callables
    def test_state_deferred_to_col(self):
        """Behavioral test to verify the current activity of
        loader callables
        """
        users, User = self.tables.users, self.classes.User
        mapper(User, users, properties={"name": deferred(users.c.name)})
        sess = fixture_session(autoflush=False)
        u1 = sess.query(User).options(undefer(User.name)).first()
        assert "name" not in attributes.instance_state(u1).callables
        # mass expire, the attribute was loaded,
        # the attribute gets the callable
        sess.expire(u1)
        assert "name" in attributes.instance_state(u1).expired_attributes
        assert "name" not in attributes.instance_state(u1).callables
        # load it
        u1.name
        assert "name" not in attributes.instance_state(u1).expired_attributes
        assert "name" not in attributes.instance_state(u1).callables
        # mass expire, attribute was loaded but then deleted,
        # the callable goes away - the state wants to flip
        # it back to its "deferred" loader.
        sess.expunge_all()
        u1 = sess.query(User).options(undefer(User.name)).first()
        del u1.name
        sess.expire(u1)
        assert "name" in attributes.instance_state(u1).expired_attributes
        assert "name" not in attributes.instance_state(u1).callables
        # single attribute expire, the attribute gets the callable
        sess.expunge_all()
        u1 = sess.query(User).options(undefer(User.name)).first()
        sess.expire(u1, ["name"])
        # the expire cancels the undefer
        assert "name" in attributes.instance_state(u1).expired_attributes
        assert "name" not in attributes.instance_state(u1).callables
    def test_state_noload_to_lazy(self):
        """Behavioral test to verify the current activity of
        loader callables
        """
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={"addresses": relationship(Address, lazy="noload")},
        )
        mapper(Address, addresses)
        sess = fixture_session(autoflush=False)
        u1 = sess.query(User).options(lazyload(User.addresses)).first()
        assert isinstance(
            attributes.instance_state(u1).callables["addresses"],
            strategies.LoadLazyAttribute,
        )
        # expire, it goes away from callables as of 1.4 and is considered
        # to be expired
        sess.expire(u1)
        assert "addresses" in attributes.instance_state(u1).expired_attributes
        assert "addresses" not in attributes.instance_state(u1).callables
        # load it
        sess.query(User).first()
        assert (
            "addresses" not in attributes.instance_state(u1).expired_attributes
        )
        assert "addresses" not in attributes.instance_state(u1).callables
        sess.expunge_all()
        u1 = sess.query(User).options(lazyload(User.addresses)).first()
        # single-attribute expire keeps the lazy callable in place
        sess.expire(u1, ["addresses"])
        assert (
            "addresses" not in attributes.instance_state(u1).expired_attributes
        )
        assert isinstance(
            attributes.instance_state(u1).callables["addresses"],
            strategies.LoadLazyAttribute,
        )
        # load the attr, goes away
        u1.addresses
        assert (
            "addresses" not in attributes.instance_state(u1).expired_attributes
        )
        assert "addresses" not in attributes.instance_state(u1).callables
    def test_deferred_expire_w_transient_to_detached(self):
        """An instance made detached via make_transient_to_detached keeps
        its deferred column unloaded after the expired attrs are fetched."""
        orders, Order = self.tables.orders, self.classes.Order
        mapper(
            Order,
            orders,
            properties={"description": deferred(orders.c.description)},
        )
        s = fixture_session()
        item = Order(id=1)
        make_transient_to_detached(item)
        s.add(item)
        # unexpire via attribute access; deferred col stays unloaded
        item.isopen
        assert "description" not in item.__dict__
    def test_deferred_expire_normally(self):
        """A mass expire() followed by unexpire leaves the deferred
        column unloaded."""
        orders, Order = self.tables.orders, self.classes.Order
        mapper(
            Order,
            orders,
            properties={"description": deferred(orders.c.description)},
        )
        s = fixture_session()
        item = s.query(Order).first()
        s.expire(item)
        item.isopen
        assert "description" not in item.__dict__
    def test_deferred_expire_explicit_attrs(self):
        """Even when the deferred column is named explicitly in expire(),
        unexpiring another attribute does not load it."""
        orders, Order = self.tables.orders, self.classes.Order
        mapper(
            Order,
            orders,
            properties={"description": deferred(orders.c.description)},
        )
        s = fixture_session()
        item = s.query(Order).first()
        s.expire(item, ["isopen", "description"])
        item.isopen
        assert "description" not in item.__dict__
class PolymorphicExpireTest(fixtures.MappedTest):
    """Expiration behavior against a joined-table inheritance hierarchy
    (Person base, Engineer subclass)."""
    run_inserts = "once"
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        # base table with a discriminator column "type"
        Table(
            "people",
            metadata,
            Column(
                "person_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("name", String(50)),
            Column("type", String(30)),
        )
        # joined subclass table
        Table(
            "engineers",
            metadata,
            Column(
                "person_id",
                Integer,
                ForeignKey("people.person_id"),
                primary_key=True,
            ),
            Column("status", String(30)),
        )
    @classmethod
    def setup_classes(cls):
        class Person(cls.Basic):
            pass
        class Engineer(Person):
            pass
    @classmethod
    def insert_data(cls, connection):
        people, engineers = cls.tables.people, cls.tables.engineers
        connection.execute(
            people.insert(),
            [
                {"person_id": 1, "name": "person1", "type": "person"},
                {"person_id": 2, "name": "engineer1", "type": "engineer"},
                {"person_id": 3, "name": "engineer2", "type": "engineer"},
            ],
        )
        connection.execute(
            engineers.insert(),
            [
                {"person_id": 2, "status": "new engineer"},
                {"person_id": 3, "status": "old engineer"},
            ],
        )
    @classmethod
    def setup_mappers(cls):
        Person, people, engineers, Engineer = (
            cls.classes.Person,
            cls.tables.people,
            cls.tables.engineers,
            cls.classes.Engineer,
        )
        mapper(
            Person,
            people,
            polymorphic_on=people.c.type,
            polymorphic_identity="person",
        )
        mapper(
            Engineer,
            engineers,
            inherits=Person,
            polymorphic_identity="engineer",
        )
    def test_poly_deferred(self):
        """Unexpiring base-table attributes does not pull subclass-table
        columns; those load separately on access."""
        Person, people, Engineer = (
            self.classes.Person,
            self.tables.people,
            self.classes.Engineer,
        )
        sess = fixture_session(autoflush=False)
        [p1, e1, e2] = sess.query(Person).order_by(people.c.person_id).all()
        sess.expire(p1)
        sess.expire(e1, ["status"])
        sess.expire(e2)
        for p in [p1, e2]:
            assert "name" not in p.__dict__
        assert "name" in e1.__dict__
        assert "status" not in e2.__dict__
        assert "status" not in e1.__dict__
        e1.name = "new engineer name"
        def go():
            sess.query(Person).all()
        self.assert_sql_count(testing.db, go, 1)
        for p in [p1, e1, e2]:
            assert "name" in p.__dict__
        # subclass-table column still unloaded
        assert "status" not in e2.__dict__
        assert "status" not in e1.__dict__
        def go():
            assert e1.name == "new engineer name"
            assert e2.name == "engineer2"
            assert e1.status == "new engineer"
            assert e2.status == "old engineer"
        self.assert_sql_count(testing.db, go, 2)
        # the pending change to e1.name survived the refresh
        eq_(
            Engineer.name.get_history(e1),
            (["new engineer name"], (), ["engineer1"]),
        )
    def test_no_instance_key(self):
        """Same as the flat-mapping variant: expired attrs on a pending
        instance still load when PK values are present."""
        Engineer = self.classes.Engineer
        sess = fixture_session(autoflush=False)
        e1 = sess.query(Engineer).get(2)
        sess.expire(e1, attribute_names=["name"])
        sess.expunge(e1)
        attributes.instance_state(e1).key = None
        assert "name" not in e1.__dict__
        sess.add(e1)
        assert e1.name == "engineer1"
    def test_no_instance_key_pk_absent(self):
        Engineer = self.classes.Engineer
        # same as test_no_instance_key, but the PK columns
        # are absent. ensure an error is raised.
        sess = fixture_session(autoflush=False)
        e1 = sess.query(Engineer).get(2)
        sess.expire(e1, attribute_names=["name", "person_id"])
        sess.expunge(e1)
        attributes.instance_state(e1).key = None
        assert "name" not in e1.__dict__
        sess.add(e1)
        assert_raises(sa_exc.InvalidRequestError, getattr, e1, "name")
class ExpiredPendingTest(_fixtures.FixtureTest):
    """Interaction of expired collections with "pending" members added
    via backref events."""
    run_define_tables = "once"
    run_setup_classes = "once"
    run_setup_mappers = None
    run_inserts = None
    def test_expired_pending(self):
        """Expiring a collection discards its pending members; they are
        re-established only after flush."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={"addresses": relationship(Address, backref="user")},
        )
        mapper(Address, addresses)
        sess = fixture_session(autoflush=False)
        a1 = Address(email_address="a1")
        sess.add(a1)
        sess.flush()
        u1 = User(name="u1")
        a1.user = u1
        sess.flush()
        # expire 'addresses'. backrefs
        # which attach to u1 will expect to be "pending"
        sess.expire(u1, ["addresses"])
        # attach an Address. now its "pending"
        # in user.addresses
        a2 = Address(email_address="a2")
        a2.user = u1
        # expire u1.addresses again. this expires
        # "pending" as well.
        sess.expire(u1, ["addresses"])
        # insert a new row
        sess.execute(
            addresses.insert(), dict(email_address="a3", user_id=u1.id)
        )
        # only two addresses pulled from the DB, no "pending"
        assert len(u1.addresses) == 2
        sess.flush()
        sess.expire_all()
        # after flush, a2 is persistent and the reload sees all three
        assert len(u1.addresses) == 3
class LifecycleTest(fixtures.MappedTest):
    """Test when attribute access after INSERT does or does not emit SQL:
    plain columns, server-generated (FetchedValue) columns, and deferred
    columns each behave differently."""
    @classmethod
    def define_tables(cls, metadata):
        # plain column, no server default
        Table(
            "data",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(30)),
        )
        # column marked as server-generated
        Table(
            "data_fetched",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(30), FetchedValue()),
        )
        # extra column so 'data' can be mapped as deferred
        Table(
            "data_defer",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(30)),
            Column("data2", String(30)),
        )
    @classmethod
    def setup_classes(cls):
        class Data(cls.Comparable):
            pass
        class DataFetched(cls.Comparable):
            pass
        class DataDefer(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        mapper(cls.classes.Data, cls.tables.data)
        mapper(cls.classes.DataFetched, cls.tables.data_fetched)
        mapper(
            cls.classes.DataDefer,
            cls.tables.data_defer,
            properties={"data": deferred(cls.tables.data_defer.c.data)},
        )
    def test_attr_not_inserted(self):
        Data = self.classes.Data
        sess = fixture_session()
        d1 = Data()
        sess.add(d1)
        sess.flush()
        # we didn't insert a value for 'data',
        # so its not in dict, but also when we hit it, it isn't
        # expired because there's no column default on it or anything like that
        assert "data" not in d1.__dict__
        def go():
            eq_(d1.data, None)
        self.assert_sql_count(testing.db, go, 0)
    def test_attr_not_inserted_expired(self):
        Data = self.classes.Data
        sess = fixture_session(autoflush=False)
        d1 = Data()
        sess.add(d1)
        sess.flush()
        assert "data" not in d1.__dict__
        # with an expire, we emit
        sess.expire(d1)
        def go():
            eq_(d1.data, None)
        self.assert_sql_count(testing.db, go, 1)
    def test_attr_not_inserted_fetched(self):
        Data = self.classes.DataFetched
        sess = fixture_session()
        d1 = Data()
        sess.add(d1)
        sess.flush()
        assert "data" not in d1.__dict__
        def go():
            eq_(d1.data, None)
        # this one is marked as "fetch" so we emit SQL
        self.assert_sql_count(testing.db, go, 1)
    def test_cols_missing_in_load(self):
        Data = self.classes.Data
        with Session(testing.db) as sess, sess.begin():
            d1 = Data(data="d1")
            sess.add(d1)
        sess = fixture_session()
        # load only the 'id' column via from_statement
        d1 = sess.query(Data).from_statement(select(Data.id)).first()
        # cols not present in the row are implicitly expired
        def go():
            eq_(d1.data, "d1")
        self.assert_sql_count(testing.db, go, 1)
    def test_deferred_cols_missing_in_load_state_reset(self):
        Data = self.classes.DataDefer
        with Session(testing.db) as sess, sess.begin():
            d1 = Data(data="d1")
            sess.add(d1)
        with Session(testing.db) as sess:
            d1 = (
                sess.query(Data)
                .from_statement(select(Data.id))
                .options(undefer(Data.data))
                .first()
            )
            d1.data = "d2"
        # the deferred loader has to clear out any state
        # on the col, including that 'd2' here
        d1 = sess.query(Data).populate_existing().first()
        def go():
            eq_(d1.data, "d1")
        self.assert_sql_count(testing.db, go, 1)
class RefreshTest(_fixtures.FixtureTest):
    """Tests for Session.refresh(): state rollback of pending changes,
    interaction with expiration, lazy/eager loaders and loader options."""
    def test_refresh(self):
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    mapper(Address, addresses), backref="user"
                )
            },
        )
        s = fixture_session(autoflush=False)
        u = s.query(User).get(7)
        u.name = "foo"
        a = Address()
        assert sa.orm.object_session(a) is None
        u.addresses.append(a)
        assert a.email_address is None
        assert id(a) in [id(x) for x in u.addresses]
        # refresh discards the pending name change and collection append
        s.refresh(u)
        # its refreshed, so not dirty
        assert u not in s.dirty
        # username is back to the DB
        assert u.name == "jack"
        assert id(a) not in [id(x) for x in u.addresses]
        u.name = "foo"
        u.addresses.append(a)
        # now its dirty
        assert u in s.dirty
        assert u.name == "foo"
        assert id(a) in [id(x) for x in u.addresses]
        # expire has the same net effect upon next attribute access
        s.expire(u)
        # get the attribute, it refreshes
        assert u.name == "jack"
        assert id(a) not in [id(x) for x in u.addresses]
    def test_persistence_check(self):
        users, User = self.tables.users, self.classes.User
        mapper(User, users)
        s = fixture_session()
        u = s.query(User).get(7)
        s.expunge_all()
        # refresh of a non-persistent (expunged) instance must raise
        assert_raises_message(
            sa_exc.InvalidRequestError,
            r"is not persistent within this Session",
            lambda: s.refresh(u),
        )
    def test_refresh_autoflush(self):
        User, users = self.classes.User, self.tables.users
        Address, addresses = self.classes.Address, self.tables.addresses
        mapper(User, users)
        mapper(Address, addresses, properties={"user": relationship(User)})
        s = fixture_session()
        a1 = s.query(Address).get(2)
        u1 = s.query(User).get(7)
        a1.user = u1
        # refresh triggers an autoflush, persisting the FK change first
        s.refresh(a1, ["user_id"])
        # autoflushes
        eq_(a1.user_id, 7)
    def test_refresh_expired(self):
        User, users = self.classes.User, self.tables.users
        mapper(User, users)
        s = fixture_session()
        u = s.query(User).get(7)
        s.expire(u)
        assert "name" not in u.__dict__
        # refresh repopulates attributes expired above
        s.refresh(u)
        assert u.name == "jack"
    def test_refresh_with_lazy(self):
        """test that when a lazy loader is set as a trigger on an object's
        attribute (at the attribute level, not the class level), a refresh()
        operation doesn't fire the lazy loader or create any problems"""
        User, Address, addresses, users = (
            self.classes.User,
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
        )
        s = fixture_session()
        mapper(
            User,
            users,
            properties={"addresses": relationship(mapper(Address, addresses))},
        )
        q = s.query(User).options(sa.orm.lazyload("addresses"))
        u = q.filter(users.c.id == 8).first()
        def go():
            s.refresh(u)
        # exactly one SELECT: the refresh itself, no lazy load fired
        self.assert_sql_count(testing.db, go, 1)
    def test_refresh_with_eager(self):
        """test that a refresh/expire operation loads rows properly and sends
        correct "isnew" state to eager loaders"""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    mapper(Address, addresses), lazy="joined"
                )
            },
        )
        s = fixture_session()
        u = s.query(User).get(8)
        assert len(u.addresses) == 3
        s.refresh(u)
        assert len(u.addresses) == 3
        s = fixture_session()
        u = s.query(User).get(8)
        assert len(u.addresses) == 3
        s.expire(u)
        assert len(u.addresses) == 3
    def test_refresh_maintains_deferred_options(self):
        # testing a behavior that may have changed with
        # [ticket:3822]
        User, Address, Dingaling = self.classes("User", "Address", "Dingaling")
        users, addresses, dingalings = self.tables(
            "users", "addresses", "dingalings"
        )
        mapper(User, users, properties={"addresses": relationship(Address)})
        mapper(
            Address,
            addresses,
            properties={"dingalings": relationship(Dingaling)},
        )
        mapper(Dingaling, dingalings)
        s = fixture_session()
        q = (
            s.query(User)
            .filter_by(name="fred")
            .options(sa.orm.lazyload("addresses").joinedload("dingalings"))
        )
        u1 = q.one()
        # "addresses" is not present on u1, but when u1.addresses
        # lazy loads, it should also joinedload dingalings. This is
        # present in state.load_options and state.load_path. The
        # refresh operation should not reset these attributes.
        s.refresh(u1)
        def go():
            eq_(
                u1.addresses,
                [
                    Address(
                        email_address="fred@fred.com",
                        dingalings=[Dingaling(data="ding 2/5")],
                    )
                ],
            )
        self.assert_sql_count(testing.db, go, 1)
    def test_refresh2(self):
        """test a hang condition that was occurring on expire/refresh"""
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        s = fixture_session()
        mapper(Address, addresses)
        mapper(
            User,
            users,
            properties=dict(
                addresses=relationship(
                    Address, cascade="all, delete-orphan", lazy="joined"
                )
            ),
        )
        u = User()
        u.name = "Justin"
        a = Address(id=10, email_address="lala")
        u.addresses.append(a)
        s.add(u)
        s.flush()
        s.expunge_all()
        u = s.query(User).filter(User.name == "Justin").one()
        s.expire(u)
        assert u.name == "Justin"
        # must complete without hanging
        s.refresh(u)
| {
"repo_name": "sqlalchemy/sqlalchemy",
"path": "test/orm/test_expire.py",
"copies": "3",
"size": "62125",
"license": "mit",
"hash": -8323893350973690000,
"line_mean": 28.7391096218,
"line_max": 79,
"alpha_frac": 0.5433239437,
"autogenerated": false,
"ratio": 4.093364960137049,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.613668890383705,
"avg_score": null,
"num_lines": null
} |
""" attribute.py
Provides functions for creating simple properties.
If, inside a class definition, you write:
attribute(foo=1, bar=2)
simple properties named 'foo' and 'bar' are created for this class.
Also, private instance variables '__foo' and '__bar' will be added
to instances of this class.
USAGE:
# assumes attribute.py is on path
from attribute import *
class MyClass(object):
readable(foo=1, bar=2) # or, attribute('r', foo=1, bar=2)
writable(fro=3, boz=4) # or, attribute('w', fro=3, boz=4)
attribute(baz=5)
This is equivalent to the following:
class MyClass(object):
def __init__(self):
self.__foo = 1
self.__bar = 2
self.__fro = 3
self.__boz = 4
self.__baz = 5
def get_foo(self):
return self.__foo
def get_bar(self):
return self.__bar
def set_fro(self, value):
self.__fro = value
def set_boz(self, value):
self.__boz = value
def get_baz(self):
return self.__baz
def set_baz(self, value):
self.__baz = value
def del_baz(self):
del self.__baz
foo = property(fget=get_foo, doc="foo")
bar = property(fget=get_bar, doc="bar")
fro = property(fset=set_fro, doc="fro")
boz = property(fset=set_boz, doc="boz")
baz = property(fget=get_baz, fset=set_baz, fdel=del_baz, doc="baz")
"""
__all__ = ['attribute', 'readable', 'writable']
__version__ = '3.0'
__author__ = 'Sean Ross'
__credits__ = ['Guido van Rossum', 'Garth Kidd']
__created__ = '10/21/02'
import sys
def mangle(classname, attrname):
    """Return *attrname* mangled the way Python mangles private names.

    E.g. ``mangle("MyClass", "foo")`` -> ``"_MyClass__foo"``.
    """
    return "_{0}__{1}".format(classname, attrname)
def class_space(classlevel=3):
    """Return the (name, namespace dict) of the frame *classlevel* levels up.

    Used to grab the class body's namespace while a class statement is
    being executed.
    """
    caller = sys._getframe(classlevel)
    return caller.f_code.co_name, caller.f_locals
# convenience function
def readable(**kwds):
    "returns one read-only property for each (key,value) pair in kwds"
    # delegate with permission 'r'; call depth must match what
    # _attribute's class_space() frame lookup expects
    return _attribute(permission='r', **kwds)
# convenience function
def writable(**kwds):
    "returns one write-only property for each (key,value) pair in kwds"
    # delegate with permission 'w'; call depth must match what
    # _attribute's class_space() frame lookup expects
    return _attribute(permission='w', **kwds)
# needed because of the way class_space is resolved in _attribute
def attribute(permission='rwd', **kwds):
    """returns one property for each (key,value) pair in kwds;
    each property provides the specified level of access(permission):
        'r': readable, 'w':writable, 'd':deletable
    """
    # this wrapper exists so the call depth seen by class_space()
    # inside _attribute is the same as for readable()/writable()
    return _attribute(permission, **kwds)
# based on code by Guido van Rossum, comp.lang.python 2001-07-31
def _attribute(permission='rwd', **kwds):
    """returns one property for each (key,value) pair in kwds;
    each property provides the specified level of access(permission):
        'r': readable, 'w':writable, 'd':deletable

    NOTE: relies on class_space() walking a fixed number of stack
    frames (3) up to the class body being executed; must only be called
    through attribute()/readable()/writable() from inside a class body.
    """
    classname, classdict = class_space()
    def _property(attrname, default):
        # keep the public property name; mangle the backing attribute
        propname, attrname = attrname, mangle(classname, attrname)
        fget, fset, fdel, doc = None, None, None, propname
        if 'r' in permission:
            def fget(self):
                # lazily initialize the instance attribute on first read
                value = default
                try: value = getattr(self, attrname)
                except AttributeError: setattr(self, attrname, default)
                return value
        if 'w' in permission:
            def fset(self, value):
                setattr(self, attrname, value)
        if 'd' in permission:
            def fdel(self):
                try: delattr(self, attrname)
                except AttributeError: pass
                # calling fget can restore this attribute, so remove property
                delattr(self.__class__, propname)
        return property(fget=fget, fset=fset, fdel=fdel, doc=doc)
    # inject one property per keyword directly into the class namespace
    for attrname, default in kwds.items():
        classdict[attrname] = _property(attrname, default)
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/157768_Automating_simple_property/recipe-157768.py",
"copies": "1",
"size": "3957",
"license": "mit",
"hash": -6295531705713396000,
"line_mean": 31.975,
"line_max": 78,
"alpha_frac": 0.6194086429,
"autogenerated": false,
"ratio": 3.670686456400742,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4790095099300742,
"avg_score": null,
"num_lines": null
} |
"""Attributes common to PolyData and Grid Objects."""
import collections
import logging
import warnings
from weakref import proxy
import numpy as np
import vtk
from vtk.util.numpy_support import numpy_to_vtk, vtk_to_numpy
import pyvista
from pyvista.utilities import (FieldAssociation, convert_array, get_array,
is_pyvista_dataset, parse_field_choice, raise_not_matching,
vtk_bit_array_to_char, vtk_id_list_to_array)
from .filters import DataSetFilters
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')
# vector array names
DEFAULT_VECTOR_KEY = '_vectors'
class DataObject(object):
    """Methods common to all wrapped data objects.

    Abstract mixin: concrete subclasses are expected to also inherit a
    VTK data object providing ``GetFieldData``, ``ShallowCopy``,
    ``DeepCopy``, etc.
    """
    def __init__(self, *args, **kwargs):
        """Initialize the data object."""
        # names of field arrays holding boolean data; VTK stores bools as
        # uint8, this list lets us view them back as bool on read
        self._field_bool_array_names = []
    def __new__(cls, *args, **kwargs):
        """Allocate memory for the data object."""
        if cls is DataObject:
            raise TypeError("pyvista.DataObject is an abstract class and may not be instantiated.")
        return object.__new__(cls, *args, **kwargs)
    def shallow_copy(self, to_copy):
        """Shallow copy the given mesh to this mesh."""
        return self.ShallowCopy(to_copy)
    def deep_copy(self, to_copy):
        """Overwrite this mesh with the given mesh as a deep copy."""
        return self.DeepCopy(to_copy)
    def save(self, filename, binary=True):  # pragma: no cover
        """Write this mesh to a file.

        Parameters
        ----------
        filename : str
            Filename of mesh to be written.  File type is inferred from
            the extension of the filename unless overridden with
            ftype.
        binary : bool, optional
            Writes the file as binary when True and ASCII when False.

        Notes
        -----
        Binary files write much faster than ASCII and have a smaller
        file size.
        """
        raise NotImplementedError('{} mesh type does not have a save method.'.format(type(self)))
    def get_data_range(self, arr=None, preference='field'):  # pragma: no cover
        """Get the non-NaN min and max of a named array.

        Parameters
        ----------
        arr : str, np.ndarray, optional
            The name of the array to get the range. If None, the
            active scalar is used
        preference : str, optional
            When scalars is specified, this is the preferred array type
            to search for in the dataset.  Must be either ``'point'``,
            ``'cell'``, or ``'field'``.
        """
        raise NotImplementedError('{} mesh type does not have a `get_data_range` method.'.format(type(self)))
    def _get_attrs(self):  # pragma: no cover
        """Return the representation methods (internal helper)."""
        raise NotImplementedError('Called only by the inherited class')
    def head(self, display=True, html=None):
        """Return the header stats of this dataset.

        If in IPython, this will be formatted to HTML. Otherwise returns a console friendly string.
        """
        # Generate the output
        if html:
            fmt = ""
            # HTML version
            fmt += "\n"
            fmt += "<table>\n"
            fmt += "<tr><th>{}</th><th>Information</th></tr>\n".format(type(self).__name__)
            row = "<tr><td>{}</td><td>{}</td></tr>\n"
            # now make a call on the object to get its attributes as a list of len 2 tuples
            for attr in self._get_attrs():
                try:
                    fmt += row.format(attr[0], attr[2].format(*attr[1]))
                except Exception:
                    # attr[1] is not unpackable; format it as a single value
                    fmt += row.format(attr[0], attr[2].format(attr[1]))
            if hasattr(self, 'n_arrays'):
                fmt += row.format('N Arrays', self.n_arrays)
            fmt += "</table>\n"
            fmt += "\n"
            if display:
                from IPython.display import display, HTML
                display(HTML(fmt))
                return
            return fmt
        # Otherwise return a string that is Python console friendly
        fmt = "{} ({})\n".format(type(self).__name__, hex(id(self)))
        # now make a call on the object to get its attributes as a list of len 2 tuples
        row = " {}:\t{}\n"
        for attr in self._get_attrs():
            try:
                fmt += row.format(attr[0], attr[2].format(*attr[1]))
            except Exception:
                fmt += row.format(attr[0], attr[2].format(attr[1]))
        if hasattr(self, 'n_arrays'):
            fmt += row.format('N Arrays', self.n_arrays)
        return fmt
    def _repr_html_(self):  # pragma: no cover
        """Return a pretty representation for Jupyter notebooks.

        This includes header details and information about all arrays.
        """
        # BUGFIX: was ``raise NotImplemented(...)`` -- NotImplemented is a
        # sentinel value, not an exception, and calling it raises TypeError
        raise NotImplementedError('Called only by the inherited class')
    def copy_meta_from(self, ido):  # pragma: no cover
        """Copy pyvista meta data onto this object from another object."""
        pass  # called only by the inherited class
    def copy(self, deep=True):
        """Return a copy of the object.

        Parameters
        ----------
        deep : bool, optional
            When True makes a full copy of the object.

        Return
        ------
        newobject : same as input
            Deep or shallow copy of the input.
        """
        thistype = type(self)
        newobject = thistype()
        if deep:
            newobject.deep_copy(self)
        else:
            newobject.shallow_copy(self)
        newobject.copy_meta_from(self)
        return newobject
    def _field_array(self, name=None):
        """Return field scalars of a vtk object.

        Parameters
        ----------
        name : str
            Name of field scalars to retrieve.

        Return
        ------
        scalars : np.ndarray
            Numpy array of scalars
        """
        if name is None:
            raise RuntimeError('Must specify an array to fetch.')
        vtkarr = self.GetFieldData().GetAbstractArray(name)
        if vtkarr is None:
            raise AssertionError('({}) is not a valid field array.'.format(name))
        # numpy does not support bit array data types
        if isinstance(vtkarr, vtk.vtkBitArray):
            vtkarr = vtk_bit_array_to_char(vtkarr)
            # BUGFIX: was checking self._point_bool_array_names, which does
            # not exist on DataObject -- field arrays are tracked in
            # _field_bool_array_names
            if name not in self._field_bool_array_names:
                self._field_bool_array_names.append(name)
        array = convert_array(vtkarr)
        if array.dtype == np.uint8 and name in self._field_bool_array_names:
            # ``np.bool`` is a removed alias of the builtin ``bool``
            array = array.view(bool)
        return array
    def _add_field_array(self, scalars, name, deep=True):
        """Add a field array to the mesh.

        Parameters
        ----------
        scalars : numpy.ndarray
            Numpy array of scalars.  Does not have to match number of points or
            numbers of cells.
        name : str
            Name of field scalars to add.
        deep : bool, optional
            Does not copy scalars when False.  A reference to the scalars
            must be kept to avoid a segfault.
        """
        if scalars is None:
            raise TypeError('Empty array unable to be added')
        if not isinstance(scalars, np.ndarray):
            scalars = np.array(scalars)
        # need to track which arrays are boolean as all boolean arrays
        # must be stored as uint8
        if scalars.dtype == bool:
            scalars = scalars.view(np.uint8)
            if name not in self._field_bool_array_names:
                self._field_bool_array_names.append(name)
        if not scalars.flags.c_contiguous:
            scalars = np.ascontiguousarray(scalars)
        vtkarr = convert_array(scalars, deep=deep)
        vtkarr.SetName(name)
        fdata = self.GetFieldData()
        # must remove array if it already exists
        if fdata.HasArray(name):
            fdata.RemoveArray(name)
        fdata.AddArray(vtkarr)
    def _add_field_scalar(self, scalars, name, set_active=False, deep=True):  # pragma: no cover
        """Add a field array.

        DEPRECATED: Please use `_add_field_array` instead.
        """
        warnings.warn('Deprecation Warning: `_add_field_scalar` is now `_add_field_array`', RuntimeWarning)
        # BUGFIX: _add_field_array has no ``set_active`` parameter;
        # forwarding it raised TypeError.  ``set_active`` is accepted here
        # only for backward compatibility and is ignored.
        return self._add_field_array(scalars, name, deep=deep)
    def add_field_array(self, scalars, name, deep=True):
        """Add a field array."""
        self._add_field_array(scalars, name, deep=deep)
    @property
    def field_arrays(self):
        """Return all field arrays."""
        fdata = self.GetFieldData()
        narr = fdata.GetNumberOfArrays()
        # just return if unmodified
        if hasattr(self, '_field_arrays'):
            keys = list(self._field_arrays.keys())
            if narr == len(keys):
                return self._field_arrays
        # dictionary with callbacks
        self._field_arrays = FieldScalarsDict(self)
        for i in range(narr):
            name = fdata.GetArrayName(i)
            if name is None or len(name) < 1:
                # give unnamed arrays a deterministic placeholder name
                name = 'Field Array {}'.format(i)
                fdata.GetAbstractArray(i).SetName(name)
            self._field_arrays[name] = self._field_array(name)
        self._field_arrays.enable_callback()
        return self._field_arrays
    def clear_field_arrays(self):
        """Remove all field arrays."""
        keys = self.field_arrays.keys()
        for key in keys:
            self._remove_array(FieldAssociation.NONE, key)
    @property
    def memory_address(self):
        """Get address of the underlying C++ object in format 'Addr=%p'."""
        return self.GetInformation().GetAddressAsString("")
class Common(DataSetFilters, DataObject):
"""Methods in common to spatially referenced objects."""
# Simply bind pyvista.plotting.plot to the object
plot = pyvista.plot
def __new__(cls, *args, **kwargs):
"""Allocate memory for the common object."""
if cls is Common:
raise TypeError("pyvista.Common is an abstract class and may not be instantiated.")
return object.__new__(cls, *args, **kwargs)
    def __init__(self, *args, **kwargs):
        """Initialize the common object."""
        super(Common, self).__init__()
        # keeps Python-side references to arrays added with deep=False
        self.references = []
        # bool arrays are stored as uint8 in VTK; these lists record which
        # point/cell arrays should be viewed back as bool
        self._point_bool_array_names = []
        self._cell_bool_array_names = []
    @property
    def active_scalars_info(self):
        """Return the active scalar's field and name: [field, name]."""
        # lazily initialize bookkeeping state
        if not hasattr(self, '_active_scalars_info'):
            self._active_scalars_info = [FieldAssociation.POINT, None] # field and name
        if not hasattr(self, '_last_active_scalars_name'):
            self._last_active_scalars_name = None
        field, name = self._active_scalars_info
        # rare error where scalars name isn't a valid scalar
        if name not in self.point_arrays:
            if name not in self.cell_arrays:
                if name in self.field_arrays:
                    raise RuntimeError('Field arrays cannot be made active. '
                                       'Convert to point/cell arrays if possible.')
                else:
                    name = None
        # helper/graphics arrays that should never be auto-activated
        exclude = ['__custom_rgba', 'Normals', 'vtkOriginalPointIds',
                   'TCoords']
        def search_for_array(data):
            # first array name on `data` not in the exclusion list, or None
            arr = None
            for i in range(data.GetNumberOfArrays()):
                name = data.GetArrayName(i)
                if name not in exclude:
                    arr = name
                    break
            return arr
        if name in exclude:
            name = self._last_active_scalars_name
        if name is None:
            if self.n_arrays < 1:
                return field, name
            # find some array in the set field
            parr = search_for_array(self.GetPointData())
            carr = search_for_array(self.GetCellData())
            if parr is not None:
                self._active_scalars_info = [FieldAssociation.POINT, parr]
                self.GetPointData().SetActiveScalars(parr)
            elif carr is not None:
                self._active_scalars_info = [FieldAssociation.CELL, carr]
                self.GetCellData().SetActiveScalars(carr)
        return self._active_scalars_info
    @property
    def active_scalar_info(self):  # pragma: no cover
        """Return the active scalar's field and name.

        DEPRECATED: use `.active_scalars_info` instead
        """
        warnings.warn("DEPRECATED: use `.active_scalars_info` instead")
        return self.active_scalars_info
    @property
    def active_vectors_info(self):
        """Return the active vector's field and name: [field, name]."""
        if not hasattr(self, '_active_vectors_info'):
            # Sometimes, precomputed normals aren't set as active
            if 'Normals' in self.array_names:
                # set_active_vectors populates _active_vectors_info
                self.set_active_vectors('Normals')
            else:
                self._active_vectors_info = [FieldAssociation.POINT, None] # field and name
        _, name = self._active_vectors_info
        # rare error where name isn't a valid array
        if name not in self.point_arrays:
            if name not in self.cell_arrays:
                if name in self.field_arrays:
                    raise RuntimeError('Field arrays cannot be made active. '
                                       'Convert to point/cell array if possible.')
                else:
                    name = None
        return self._active_vectors_info
@property
def active_vectors(self):
"""Return the active vectors array."""
field, name = self.active_vectors_info
if name:
if field is FieldAssociation.POINT:
return self.point_arrays[name]
if field is FieldAssociation.CELL:
return self.cell_arrays[name]
    @property
    def active_vectors_name(self):
        """Return the name of the active vectors array (or ``None``)."""
        return self.active_vectors_info[1]
    @active_vectors_name.setter
    def active_vectors_name(self, name):
        """Set the name of the active vector."""
        # delegates validation/lookup to set_active_vectors
        return self.set_active_vectors(name)
    @property
    def active_scalars_name(self):
        """Return the active scalar's name (or ``None``)."""
        return self.active_scalars_info[1]
    @active_scalars_name.setter
    def active_scalars_name(self, name):
        """Set the name of the active scalar."""
        # delegates validation/lookup to set_active_scalars
        return self.set_active_scalars(name)
    @property
    def active_scalar_name(self):  # pragma: no cover
        """Return the active scalar's name.

        DEPRECATED: use `.active_scalars_name` instead.
        """
        warnings.warn("DEPRECATED: use `.active_scalars_name` instead.")
        return self.active_scalars_name
    @active_scalar_name.setter
    def active_scalar_name(self, name):  # pragma: no cover
        """Set the name of the active scalar.

        DEPRECATED: use `.active_scalars_name` instead.
        """
        warnings.warn("DEPRECATED: use `.active_scalars_name` instead.")
        self.active_scalars_name = name
    @property
    def points(self):
        """Return a pointer to the points as a numpy object.

        Returns ``None`` when the dataset has no points allocated.
        """
        pts = self.GetPoints()
        if pts is None:
            return None
        vtk_data = pts.GetData()
        # arr = vtk_to_numpy(vtk_data)
        # wrap the VTK buffer without copying
        return pyvista.pyvista_ndarray(vtk_data)
    @points.setter
    def points(self, points):
        """Set points without copying."""
        if not isinstance(points, np.ndarray):
            raise TypeError('Points must be a numpy array')
        # False -> do not deep copy the numpy buffer into VTK
        vtk_points = pyvista.vtk_points(points, False)
        pdata = self.GetPoints()
        if not pdata:
            self.SetPoints(vtk_points)
        else:
            # reuse the existing vtkPoints object, just swap its data
            pdata.SetData(vtk_points.GetData())
        # flag the pipeline so downstream filters re-execute
        self.GetPoints().Modified()
        self.Modified()
@property
def arrows(self):
"""Return a glyph representation of the active vector data as arrows.
Arrows will be located at the points of the mesh and
their size will be dependent on the length of the vector.
Their direction will be the "direction" of the vector
Return
------
arrows : pyvista.PolyData
Active scalars represented as arrows.
"""
if self.active_vectors is None:
return
name = self.active_vectors_name
return self.glyph(scale=name, orient=name)
    @property
    def vectors(self):
        """Return active vectors (alias for ``active_vectors``)."""
        return self.active_vectors
@vectors.setter
def vectors(self, array):
"""Set the active vector."""
if array.ndim != 2:
raise AssertionError('vector array must be a 2-dimensional array')
elif array.shape[1] != 3:
raise RuntimeError('vector array must be 3D')
elif array.shape[0] != self.n_points:
raise RuntimeError('Number of vectors be the same as the number of points')
self.point_arrays[DEFAULT_VECTOR_KEY] = array
self.active_vectors_name = DEFAULT_VECTOR_KEY
@property
def t_coords(self):
"""Return the active texture coordinates on the points."""
if self.GetPointData().GetTCoords() is not None:
return vtk_to_numpy(self.GetPointData().GetTCoords())
return None
    @t_coords.setter
    def t_coords(self, t_coords):
        """Set the array to use as the texture coordinates.

        Expects an ``(n_points, 2)`` numpy array.
        """
        if not isinstance(t_coords, np.ndarray):
            raise TypeError('Texture coordinates must be a numpy array')
        if t_coords.ndim != 2:
            raise AssertionError('Texture coordinates must be a 2-dimensional array')
        if t_coords.shape[0] != self.n_points:
            raise AssertionError('Number of texture coordinates ({}) must match number of points ({})'.format(t_coords.shape[0], self.n_points))
        if t_coords.shape[1] != 2:
            raise AssertionError('Texture coordinates must only have 2 components, not ({})'.format(t_coords.shape[1]))
        # if np.min(t_coords) < 0.0 or np.max(t_coords) > 1.0:
        #     warnings.warn('Texture coordinates are typically within (0, 1) range. Textures will repeat on this mesh.', RuntimeWarning)
        # convert the array
        vtkarr = numpy_to_vtk(t_coords)
        vtkarr.SetName('Texture Coordinates')
        self.GetPointData().SetTCoords(vtkarr)
        self.GetPointData().Modified()
        return
@property
def textures(self):
"""Return a dictionary to hold compatible ``vtk.vtkTexture`` objects.
When casting back to a VTK dataset or filtering this dataset, these textures
will not be passed.
"""
if not hasattr(self, '_textures'):
self._textures = {}
return self._textures
def clear_textures(self):
"""Clear the textures from this mesh."""
if hasattr(self, '_textures'):
del self._textures
    def _activate_texture(mesh, name):
        """Grab a texture and update the active texture coordinates.

        This makes sure to not destroy old texture coordinates.

        Parameters
        ----------
        name : str
            The name of the texture and texture coordinates to activate.
            May also be ``True`` (use first texture) or an int index.

        Return
        ------
        vtk.vtkTexture : The active texture
        """
        if name is True or isinstance(name, int):
            keys = list(mesh.textures.keys())
            # Grab the first name available if True
            idx = 0 if not isinstance(name, int) or name is True else name
            if idx > len(keys):  # is this necessary?
                # NOTE(review): idx == len(keys) falls through to the
                # IndexError branch below instead of resetting -- confirm
                # whether `>=` was intended
                idx = 0
            try:
                name = keys[idx]
            except IndexError:
                logging.warning('No textures associated with input mesh.')
                return None
        # Grab the texture object by name
        try:
            texture = mesh.textures[name]
        except KeyError:
            logging.warning('Texture ({}) not associated with this dataset'.format(name))
            texture = None
        else:
            # Be sure to reset the tcoords if present
            # Grab old coordinates
            if name in mesh.array_names:
                old_tcoord = mesh.GetPointData().GetTCoords()
                mesh.GetPointData().SetTCoords(mesh.GetPointData().GetAbstractArray(name))
                mesh.GetPointData().AddArray(old_tcoord)
                mesh.Modified()
        return texture
    def set_active_scalars(self, name, preference='cell'):
        """Find the scalars by name and appropriately sets it as active.

        To deactivate any active scalars, pass ``None`` as the ``name``.
        """
        if name is None:
            self.GetCellData().SetActiveScalars(None)
            self.GetPointData().SetActiveScalars(None)
            return
        # locate the array and which field (point/cell) it belongs to
        _, field = get_array(self, name, preference=preference, info=True)
        # remember the previous active name so auto-activation can fall back
        self._last_active_scalars_name = self.active_scalars_info[1]
        if field == FieldAssociation.POINT:
            self.GetPointData().SetActiveScalars(name)
        elif field == FieldAssociation.CELL:
            self.GetCellData().SetActiveScalars(name)
        else:
            raise RuntimeError('Data field ({}) not useable'.format(field))
        self._active_scalars_info = [field, name]
    def set_active_scalar(self, name, preference='cell'):  # pragma: no cover
        """Find the scalars by name and appropriately sets it as active.

        To deactivate any active scalars, pass ``None`` as the ``name``.
        DEPRECATED: please use `.set_active_scalars` instead.
        """
        warnings.warn("DEPRECATED: please use `.set_active_scalars` instead.")
        return self.set_active_scalars(name, preference=preference)
    def set_active_vectors(self, name, preference='point'):
        """Find the vectors by name and appropriately sets it as active.

        To deactivate any active vectors, pass ``None`` as the ``name``.
        """
        if name is None:
            self.GetCellData().SetActiveVectors(None)
            self.GetPointData().SetActiveVectors(None)
            return
        # locate the array and which field (point/cell) it belongs to
        _, field = get_array(self, name, preference=preference, info=True)
        if field == FieldAssociation.POINT:
            self.GetPointData().SetActiveVectors(name)
        elif field == FieldAssociation.CELL:
            self.GetCellData().SetActiveVectors(name)
        else:
            raise RuntimeError('Data field ({}) not useable'.format(field))
        self._active_vectors_info = [field, name]
    def rename_array(self, old_name, new_name, preference='cell'):
        """Change array name by searching for the array then renaming it."""
        # determine which field (point/cell/field) holds the array
        _, field = get_array(self, old_name, preference=preference, info=True)
        was_active = False
        if self.active_scalars_name == old_name:
            was_active = True
        if field == FieldAssociation.POINT:
            self.point_arrays[new_name] = self.point_arrays.pop(old_name)
        elif field == FieldAssociation.CELL:
            self.cell_arrays[new_name] = self.cell_arrays.pop(old_name)
        elif field == FieldAssociation.NONE:
            self.field_arrays[new_name] = self.field_arrays.pop(old_name)
        else:
            raise RuntimeError('Array not found.')
        if was_active:
            # NOTE(review): `preference` here receives a FieldAssociation
            # enum while other callers pass 'point'/'cell' strings -- verify
            # get_array/parse_field_choice accepts both
            self.set_active_scalars(new_name, preference=field)
    def rename_scalar(self, old_name, new_name, preference='cell'):  # pragma: no cover
        """Change an array name by searching for the array then renaming it.

        DEPRECATED: please use `.rename_array` instead.
        """
        warnings.warn("DEPRECATED: please use `.rename_array` instead.")
        return self.rename_array(old_name, new_name, preference=preference)
    @property
    def active_scalars(self):
        """Return the active scalars as an array (or ``None`` if unset)."""
        field, name = self.active_scalars_info
        if name is None:
            return None
        if field == FieldAssociation.POINT:
            return self._point_array(name)
        elif field == FieldAssociation.CELL:
            return self._cell_array(name)
    @property
    def active_scalar(self):  # pragma: no cover
        """Return the active scalars as an array.

        DEPRECATED: Please use `.active_scalars` instead.
        """
        warnings.warn("DEPRECATED: please use `.active_scalars` instead.")
        return self.active_scalars
def _point_array(self, name=None):
"""Return point scalars of a vtk object.
Parameters
----------
name : str
Name of point scalars to retrieve.
Return
------
scalars : np.ndarray
Numpy array of scalars
"""
if name is None:
# use active scalars array
field, name = self.active_scalars_info
if field != FieldAssociation.POINT or name is None:
raise ValueError('Must specify an array to fetch.')
vtkarr = self.GetPointData().GetAbstractArray(name)
if vtkarr is None:
raise AssertionError('({}) is not a point scalar'.format(name))
# numpy does not support bit array data types
if isinstance(vtkarr, vtk.vtkBitArray):
vtkarr = vtk_bit_array_to_char(vtkarr)
if name not in self._point_bool_array_names:
self._point_bool_array_names.append(name)
array = convert_array(vtkarr)
if array.dtype == np.uint8 and name in self._point_bool_array_names:
array = array.view(np.bool)
return array
def _add_point_array(self, scalars, name, set_active=False, deep=True):
    """Add point scalars to the mesh.

    Parameters
    ----------
    scalars : numpy.ndarray
        Numpy array of scalars.  Must match number of points.

    name : str
        Name of point scalars to add.

    set_active : bool, optional
        Sets the scalars to the active plotting scalars.  Default False.

    deep : bool, optional
        Does not copy scalars when False.  A reference to the scalars
        must be kept to avoid a segfault.
    """
    if scalars is None:
        raise TypeError('Empty array unable to be added')
    if not isinstance(scalars, np.ndarray):
        scalars = np.array(scalars)
    if scalars.shape[0] != self.n_points:
        raise Exception('Number of scalars must match the number of points')

    # need to track which arrays are boolean as all boolean arrays
    # must be stored as uint8 (np.bool alias was removed in NumPy 1.24)
    if scalars.dtype == bool:
        scalars = scalars.view(np.uint8)
        if name not in self._point_bool_array_names:
            self._point_bool_array_names.append(name)

    if not scalars.flags.c_contiguous:
        # VTK requires C-contiguous memory
        scalars = np.ascontiguousarray(scalars)
    vtkarr = convert_array(scalars, deep=deep)
    vtkarr.SetName(name)
    self.GetPointData().AddArray(vtkarr)
    if set_active or self.active_scalars_info[1] is None:
        self.GetPointData().SetActiveScalars(name)
        self._active_scalars_info = [FieldAssociation.POINT, name]
def _add_point_scalar(self, scalars, name, set_active=False, deep=True):  # pragma: no cover
    """Add points array.

    DEPRECATED: Please use `_add_point_array` instead.
    """
    # Deprecated pass-through kept for backward compatibility.
    warnings.warn('Deprecation Warning: `_add_point_scalar` is now `_add_point_array`', RuntimeWarning)
    return self._add_point_array(scalars, name, set_active=set_active, deep=deep)
def get_data_range(self, arr=None, preference='cell'):
    """Get the non-NaN min and max of a named array.

    Parameters
    ----------
    arr : str, np.ndarray, optional
        The name of the array to get the range. If None, the
        active scalars is used.

    preference : str, optional
        When scalars is specified, this is the preferred array type
        to search for in the dataset.  Must be either ``'point'``,
        ``'cell'``, or ``'field'``.
    """
    if arr is None:
        # fall back on the active scalars array
        _, arr = self.active_scalars_info
    if isinstance(arr, str):
        # look the array up by name
        arr = get_array(self, arr, preference=preference)
    # Non-numeric, missing or empty arrays have no meaningful range.
    unusable = (arr is None or arr.size == 0
                or not np.issubdtype(arr.dtype, np.number))
    if unusable:
        return (np.nan, np.nan)
    return np.nanmin(arr), np.nanmax(arr)
def points_to_double(self):
    """Make points double precision."""
    # Only reassign (and hence copy) when a conversion is needed.
    if self.points.dtype != np.double:
        self.points = self.points.astype(np.double)
def rotate_x(self, angle):
    """Rotate mesh about the x-axis.

    Parameters
    ----------
    angle : float
        Angle in degrees to rotate about the x-axis.
    """
    # Rotation is applied in place to this mesh's points.
    axis_rotation(self.points, angle, inplace=True, axis='x')
def rotate_y(self, angle):
    """Rotate mesh about the y-axis.

    Parameters
    ----------
    angle : float
        Angle in degrees to rotate about the y-axis.
    """
    # Rotation is applied in place to this mesh's points.
    axis_rotation(self.points, angle, inplace=True, axis='y')
def rotate_z(self, angle):
    """Rotate mesh about the z-axis.

    Parameters
    ----------
    angle : float
        Angle in degrees to rotate about the z-axis.
    """
    # Rotation is applied in place to this mesh's points.
    axis_rotation(self.points, angle, inplace=True, axis='z')
def translate(self, xyz):
    """Translate the mesh.

    Parameters
    ----------
    xyz : list or np.ndarray
        Length 3 list or array.
    """
    # In-place shift of every point by the given offset vector.
    self.points += np.asarray(xyz)
def transform(self, trans):
    """Compute a transformation in place using a 4x4 transform.

    Parameters
    ----------
    trans : vtk.vtkMatrix4x4, vtk.vtkTransform, or np.ndarray
        Accepts a vtk transformation object or a 4x4 transformation matrix.
    """
    # Normalize the accepted input kinds to a 4x4 array ``t``.
    if isinstance(trans, vtk.vtkMatrix4x4):
        t = pyvista.trans_from_matrix(trans)
    elif isinstance(trans, vtk.vtkTransform):
        t = pyvista.trans_from_matrix(trans.GetMatrix())
    elif isinstance(trans, np.ndarray):
        if trans.ndim != 2 or trans.shape[0] != 4 or trans.shape[1] != 4:
            raise ValueError('Transformation array must be 4x4')
        t = trans
    else:
        raise TypeError('Input transform must be either:\n'
                        '\tvtk.vtkMatrix4x4\n'
                        '\tvtk.vtkTransform\n'
                        '\t4x4 np.ndarray\n')

    # Apply the affine map one output coordinate at a time.
    new_x = (self.points * t[0, :3]).sum(1) + t[0, -1]
    new_y = (self.points * t[1, :3]).sum(1) + t[1, -1]
    new_z = (self.points * t[2, :3]).sum(1) + t[2, -1]

    # overwrite points
    self.points[:, 0] = new_x
    self.points[:, 1] = new_y
    self.points[:, 2] = new_z
def _cell_array(self, name=None):
    """Return the cell scalars of a vtk object.

    Parameters
    ----------
    name : str, optional
        Name of cell scalars to retrieve.  When ``None``, the active
        scalars array is used (it must be cell-associated).

    Return
    ------
    scalars : np.ndarray
        Numpy array of scalars
    """
    if name is None:
        # use active scalars array
        field, name = self.active_scalars_info
        if field != FieldAssociation.CELL:
            raise RuntimeError('Must specify an array to fetch.')

    vtkarr = self.GetCellData().GetAbstractArray(name)
    if vtkarr is None:
        raise AssertionError('({}) is not a cell scalar'.format(name))

    # numpy does not support bit array data types
    if isinstance(vtkarr, vtk.vtkBitArray):
        vtkarr = vtk_bit_array_to_char(vtkarr)
        if name not in self._cell_bool_array_names:
            self._cell_bool_array_names.append(name)

    array = convert_array(vtkarr)
    if array.dtype == np.uint8 and name in self._cell_bool_array_names:
        # reinterpret the uint8 storage as boolean; ``np.bool`` was
        # removed in NumPy 1.24 and was always an alias of builtin bool
        array = array.view(bool)
    return array
def _add_cell_array(self, scalars, name, set_active=False, deep=True):
    """Add cell scalars to the vtk object.

    Parameters
    ----------
    scalars : numpy.ndarray
        Numpy array of scalars.  Must match number of cells.

    name : str
        Name of cell scalars to add.

    set_active : bool, optional
        Sets the scalars to the active plotting scalars.  Default False.

    deep : bool, optional
        Does not copy scalars when False.  A reference to the scalars
        must be kept to avoid a segfault.
    """
    if scalars is None:
        raise TypeError('Empty array unable to be added')
    if not isinstance(scalars, np.ndarray):
        scalars = np.array(scalars)
    if scalars.shape[0] != self.n_cells:
        raise ValueError('Number of scalars must match the number of cells (%d)'
                         % self.n_cells)
    if not scalars.flags.c_contiguous:
        # match _add_point_array: copy into contiguous memory instead of
        # rejecting the input (previous message also misspelled "contiguous")
        scalars = np.ascontiguousarray(scalars)
    # track boolean arrays, which are stored as uint8
    # (np.bool alias was removed in NumPy 1.24)
    if scalars.dtype == bool:
        scalars = scalars.view(np.uint8)
        if name not in self._cell_bool_array_names:
            self._cell_bool_array_names.append(name)

    vtkarr = convert_array(scalars, deep=deep)
    vtkarr.SetName(name)
    self.GetCellData().AddArray(vtkarr)
    if set_active or self.active_scalars_info[1] is None:
        self.GetCellData().SetActiveScalars(name)
        self._active_scalars_info = [FieldAssociation.CELL, name]
def _add_cell_scalar(self, scalars, name, set_active=False, deep=True):  # pragma: no cover
    """Add a cell array.

    DEPRECATED: Please use `_add_cell_array` instead.
    """
    # Deprecated pass-through kept for backward compatibility.
    warnings.warn('Deprecation Warning: `_add_cell_scalar` is now `_add_cell_array`', RuntimeWarning)
    return self._add_cell_array(scalars, name, set_active=set_active, deep=deep)
def copy_meta_from(self, ido):
    """Copy pyvista meta data onto this object from another object."""
    self._active_scalars_info = ido.active_scalars_info
    self._active_vectors_info = ido.active_vectors_info
    if hasattr(ido, '_textures'):
        # duplicate each texture so the two datasets stay independent
        self._textures = {name: tex.copy()
                          for name, tex in ido._textures.items()}
@property
def point_arrays(self):
    """Return the all point arrays."""
    pdata = self.GetPointData()
    narr = pdata.GetNumberOfArrays()

    # Update data if necessary: reuse the cached dict only when the
    # number of arrays and the point count still agree with VTK.
    if hasattr(self, '_point_arrays'):
        keys = list(self._point_arrays.keys())
        if narr == len(keys):
            if keys:
                if self._point_arrays[keys[0]].shape[0] == self.n_points:
                    return self._point_arrays
            else:
                return self._point_arrays

    # dictionary with callbacks that keep the VTK side in sync
    self._point_arrays = PointScalarsDict(self)

    for i in range(narr):
        name = pdata.GetArrayName(i)
        if name is None or len(name) < 1:
            # give unnamed VTK arrays a stable synthetic name
            name = 'Point Array {}'.format(i)
            pdata.GetAbstractArray(i).SetName(name)
        self._point_arrays[name] = self._point_array(name)

    # callbacks are enabled only after the initial population so the
    # fill above does not re-add arrays to VTK
    self._point_arrays.enable_callback()
    return self._point_arrays
def _remove_array(self, field, key):
    """Remove a single array by name from each field (internal helper).

    Parameters
    ----------
    field : str or FieldAssociation
        Which data attribute to remove from (point, cell or field data).

    key : str
        Name of the array to remove.
    """
    field = parse_field_choice(field)
    if field == FieldAssociation.POINT:
        self.GetPointData().RemoveArray(key)
    elif field == FieldAssociation.CELL:
        self.GetCellData().RemoveArray(key)
    elif field == FieldAssociation.NONE:
        self.GetFieldData().RemoveArray(key)
    else:
        # typo fixed: "data fiedl" -> "data field"
        raise NotImplementedError('Not able to remove arrays from the ({}) data field'.format(field))
def clear_point_arrays(self):
    """Remove all point arrays."""
    # Snapshot the names first: removing arrays mutates the underlying
    # data object while we iterate over its keys.
    for key in list(self.point_arrays.keys()):
        self._remove_array(FieldAssociation.POINT, key)
def clear_cell_arrays(self):
    """Remove all cell arrays."""
    # Snapshot the names first: removing arrays mutates the underlying
    # data object while we iterate over its keys.
    for key in list(self.cell_arrays.keys()):
        self._remove_array(FieldAssociation.CELL, key)
def clear_arrays(self):
    """Remove all arrays from point/cell/field data."""
    self.clear_point_arrays()
    self.clear_cell_arrays()
    # clear_field_arrays is defined elsewhere on this class
    self.clear_field_arrays()
@property
def cell_arrays(self):
    """Return the all cell arrays."""
    cdata = self.GetCellData()
    narr = cdata.GetNumberOfArrays()

    # Update data if necessary: reuse the cached dict only when the
    # number of arrays and the cell count still agree with VTK.
    if hasattr(self, '_cell_arrays'):
        keys = list(self._cell_arrays.keys())
        if narr == len(keys):
            if keys:
                if self._cell_arrays[keys[0]].shape[0] == self.n_cells:
                    return self._cell_arrays
            else:
                return self._cell_arrays

    # dictionary with callbacks that keep the VTK side in sync
    self._cell_arrays = CellScalarsDict(self)

    for i in range(narr):
        name = cdata.GetArrayName(i)
        if name is None or len(name) < 1:
            # give unnamed VTK arrays a stable synthetic name
            name = 'Cell Array {}'.format(i)
            cdata.GetAbstractArray(i).SetName(name)
        self._cell_arrays[name] = self._cell_array(name)

    # callbacks are enabled only after the initial population so the
    # fill above does not re-add arrays to VTK
    self._cell_arrays.enable_callback()
    return self._cell_arrays
@property
def n_points(self):
    """Return the number of points in the entire dataset."""
    # Direct pass-through to the VTK data object.
    return self.GetNumberOfPoints()
@property
def n_cells(self):
    """Return the number of cells in the entire dataset."""
    # Direct pass-through to the VTK data object.
    return self.GetNumberOfCells()
@property
def number_of_points(self):  # pragma: no cover
    """Return the number of points.

    Legacy alias of ``n_points``.
    """
    return self.GetNumberOfPoints()
@property
def number_of_cells(self):  # pragma: no cover
    """Return the number of cells.

    Legacy alias of ``n_cells``.
    """
    return self.GetNumberOfCells()
@property
def bounds(self):
    """Return the bounding box of this dataset.

    The form is: (xmin,xmax, ymin,ymax, zmin,zmax).
    """
    # Convert VTK's tuple to a mutable list.
    return list(self.GetBounds())
@property
def length(self):
    """Return the length of the diagonal of the bounding box."""
    return self.GetLength()
@property
def center(self):
    """Return the center of the bounding box."""
    # Convert VTK's tuple to a mutable list.
    return list(self.GetCenter())
@property
def extent(self):
    """Return the range of the bounding box.

    Returns ``None`` for mesh types that do not define ``GetExtent``.
    """
    if hasattr(self, 'GetExtent'):
        return list(self.GetExtent())
@extent.setter
def extent(self, extent):
    """Set the range of the bounding box.

    Raises
    ------
    ValueError
        If ``extent`` does not have exactly 6 values.
    AttributeError
        If this mesh type does not support extents.
    """
    if hasattr(self, 'SetExtent'):
        if len(extent) != 6:
            raise ValueError('Extent must be a vector of 6 values.')
        return self.SetExtent(extent)
    else:
        raise AttributeError('This mesh type does not handle extents.')
@property
def volume(self):
    """Return the mesh volume.

    Return
    ------
    volume : float
        Total volume of the mesh.
    """
    # Sum the per-cell volumes computed by the cell-sizes filter.
    sizes = self.compute_cell_sizes(length=False, area=False, volume=True)
    return np.sum(sizes.cell_arrays['Volume'])
def get_array(self, name, preference='cell', info=False):
    """Search both point, cell and field data for an array."""
    # Delegates to the module-level helper with this dataset bound.
    return get_array(self, name, preference=preference, info=info)
def __getitem__(self, index):
    """Search both point, cell, and field data for an array.

    ``index`` is either an array name, or a ``(name, preference)`` pair.
    """
    # ``collections.Iterable`` was removed in Python 3.10; the ABC lives
    # in ``collections.abc``.
    if isinstance(index, collections.abc.Iterable) and not isinstance(index, str):
        name, preference = index[0], index[1]
    elif isinstance(index, str):
        name = index
        preference = 'cell'
    else:
        raise KeyError('Index ({}) not understood. Index must be a string name or a tuple of string name and string preference.'.format(index))
    return self.get_array(name, preference=preference, info=False)
def _ipython_key_completions_(self):
    """Return array names so IPython can tab-complete ``mesh[...]`` keys."""
    return self.array_names
def __setitem__(self, name, scalars):
    """Add/set an array in the point_arrays, or cell_arrays accordingly.

    It depends on the array's length, or specified mode.
    """
    # First check points - think of case with vertex cells
    # there would be the same number of cells as points but we'd want
    # the data to be on the nodes.
    if scalars is None:
        raise TypeError('Empty array unable to be added.')
    if not isinstance(scalars, np.ndarray):
        scalars = np.array(scalars)
    length = scalars.shape[0]
    if length == self.n_points:
        self.point_arrays[name] = scalars
    elif length == self.n_cells:
        self.cell_arrays[name] = scalars
    else:
        # Field data must be set explicitly as it could be a point of
        # confusion for new users
        raise_not_matching(scalars, self)
@property
def n_arrays(self):
    """Return the number of arrays present in the dataset."""
    # Total across point, cell and field data attributes.
    return (self.GetPointData().GetNumberOfArrays()
            + self.GetCellData().GetNumberOfArrays()
            + self.GetFieldData().GetNumberOfArrays())
@property
def n_scalars(self):  # pragma: no cover
    """Return the number of scalars.

    DEPRECATED: Please use `n_arrays` instead.
    """
    # Deprecated alias kept for backward compatibility.
    warnings.warn('Deprecation Warning: `n_scalars` is now `n_arrays`', RuntimeWarning)
    return self.n_arrays
@property
def array_names(self):
    """Return a list of array names for the dataset.

    This makes sure to put the active scalars' name first in the list.
    """
    names = []
    # collect in point -> cell -> field order
    for data in (self.GetPointData(), self.GetCellData(), self.GetFieldData()):
        for i in range(data.GetNumberOfArrays()):
            names.append(data.GetArrayName(i))
    active = self.active_scalars_name
    if active in names:
        # move the active scalars name to the front
        names.remove(active)
        names.insert(0, active)
    return names
@property
def scalar_names(self):  # pragma: no cover
    """Return the array names.

    DEPRECATED: Please use `array_names` instead.
    """
    # Deprecated alias kept for backward compatibility.
    warnings.warn('Deprecation Warning: `scalar_names` is now `array_names`', RuntimeWarning)
    return self.array_names
def _get_attrs(self):
    """Return the representation methods (internal helper).

    Each entry is a ``(label, value, format-string)`` triple consumed by
    the repr/HTML builders.
    """
    attrs = []
    attrs.append(("N Cells", self.GetNumberOfCells(), "{}"))
    attrs.append(("N Points", self.GetNumberOfPoints(), "{}"))
    bds = self.bounds
    fmt = "{}, {}".format(pyvista.FLOAT_FORMAT, pyvista.FLOAT_FORMAT)
    attrs.append(("X Bounds", (bds[0], bds[1]), fmt))
    attrs.append(("Y Bounds", (bds[2], bds[3]), fmt))
    attrs.append(("Z Bounds", (bds[4], bds[5]), fmt))
    # if self.n_cells <= pyvista.REPR_VOLUME_MAX_CELLS and self.n_cells > 0:
    #     attrs.append(("Volume", (self.volume), pyvista.FLOAT_FORMAT))
    return attrs
def _repr_html_(self):
    """Return a pretty representation for Jupyter notebooks.

    It includes header details and information about all arrays.
    """
    fmt = ""
    if self.n_arrays > 0:
        fmt += "<table>"
        fmt += "<tr><th>Header</th><th>Data Arrays</th></tr>"
        fmt += "<tr><td>"
    # Get the header info
    fmt += self.head(display=False, html=True)
    # Fill out arrays
    if self.n_arrays > 0:
        fmt += "</td><td>"
        fmt += "\n"
        fmt += "<table>\n"
        titles = ["Name", "Field", "Type", "N Comp", "Min", "Max"]
        fmt += "<tr>" + "".join(["<th>{}</th>".format(t) for t in titles]) + "</tr>\n"
        # fixed: the first, hard-coded ``row`` template was dead code and
        # has been removed; the template is derived from ``titles``
        row = "<tr>" + "".join(["<td>{}</td>" for i in range(len(titles))]) + "</tr>\n"

        def format_array(name, arr, field):
            """Format array information for printing (internal helper)."""
            dl, dh = self.get_data_range(arr)
            dl = pyvista.FLOAT_FORMAT.format(dl)
            dh = pyvista.FLOAT_FORMAT.format(dh)
            if name == self.active_scalars_info[1]:
                name = '<b>{}</b>'.format(name)  # bold the active scalars
            if arr.ndim > 1:
                ncomp = arr.shape[1]
            else:
                ncomp = 1
            return row.format(name, field, arr.dtype, ncomp, dl, dh)

        for key, arr in self.point_arrays.items():
            fmt += format_array(key, arr, 'Points')
        for key, arr in self.cell_arrays.items():
            fmt += format_array(key, arr, 'Cells')
        for key, arr in self.field_arrays.items():
            fmt += format_array(key, arr, 'Fields')

        fmt += "</table>\n"
        fmt += "\n"
        fmt += "</td></tr> </table>"
    return fmt
def __repr__(self):
    """Return the object representation."""
    # Delegate to the shared head() formatter (plain text form).
    return self.head(display=False, html=False)
def __str__(self):
    """Return the object string representation."""
    # Same text as __repr__; both delegate to head().
    return self.head(display=False, html=False)
def overwrite(self, mesh):
    """Overwrite this mesh inplace with the new mesh's geometries and data.

    Parameters
    ----------
    mesh : vtk.vtkDataSet
        The overwriting mesh.
    """
    self.deep_copy(mesh)
    # pyvista metadata (active scalars, textures) only exists on
    # pyvista datasets, not raw VTK objects
    if is_pyvista_dataset(mesh):
        self.copy_meta_from(mesh)
def cast_to_unstructured_grid(self):
    """Get a new representation of this object as an :class:`pyvista.UnstructuredGrid`."""
    # vtkAppendFilter with a single input acts as a converter to
    # unstructured grid.
    alg = vtk.vtkAppendFilter()
    alg.AddInputData(self)
    alg.Update()
    return pyvista.filters._get_output(alg)
@property
def quality(self):  # no cover
    """Return cell quality using PyANSYS.

    Computes the minimum scaled jacobian of each cell.
    Cells that have values below 0 are invalid for
    a finite element analysis.

    Note
    ----
    This casts the input to an unstructured grid

    Return
    ------
    cellquality : np.ndarray
        Minimum scaled jacobian of each cell.  Ranges from -1 to 1.

    Notes
    -----
    Requires pyansys to be installed.
    """
    # pyansys is an optional dependency; import lazily and fail with a
    # clear message rather than at module import time
    try:
        import pyansys
    except ImportError:
        raise ImportError('Install pyansys for this function')
    if not isinstance(self, pyvista.UnstructuredGrid):
        dataset = self.cast_to_unstructured_grid()
    else:
        dataset = self
    return pyansys.CellQuality(dataset)
def find_closest_point(self, point, n=1):
    """Find index of closest point in this mesh to the given point.

    If wanting to query many points, use a KDTree with scipy or another
    library as those implementations will be easier to work with.

    See: https://github.com/pyvista/pyvista-support/issues/107

    Parameters
    ----------
    point : iterable(float)
        Length 3 coordinate of the point to query.

    n : int, optional
        If greater than ``1``, returns the indices of the ``n`` closest
        points.

    Return
    ------
    int : the index of the point in this mesh that is closest to the given point.
    """
    # ``collections.Iterable`` was removed in Python 3.10; the ABC lives
    # in ``collections.abc``.
    if not isinstance(point, collections.abc.Iterable) or len(point) != 3:
        raise TypeError("Given point must be a length three iterable.")
    if not isinstance(n, int):
        raise TypeError("`n` must be a positive integer.")
    if n < 1:
        raise ValueError("`n` must be a positive integer.")
    locator = vtk.vtkPointLocator()
    locator.SetDataSet(self)
    locator.BuildLocator()
    if n < 2:
        index = locator.FindClosestPoint(point)
    else:
        id_list = vtk.vtkIdList()
        locator.FindClosestNPoints(n, point, id_list)
        index = vtk_id_list_to_array(id_list)
    return index
class _ScalarsDict(dict):
    """Internal helper for scalars dictionaries.

    Subclasses bind ``remover``/``modifier``/``adder`` to the matching
    point/cell/field data of the owning dataset so that dict mutations
    are mirrored on the VTK object.
    """

    def __init__(self, data):
        """Initialize the scalars dict."""
        # weak proxy: this dict must not keep the owning dataset alive
        self.data = proxy(data)
        dict.__init__(self)
        # callbacks stay off until the dict has been populated
        self.callback_enabled = False
        self.remover = None
        self.modifier = None

    def enable_callback(self):
        """Enable callbacks to be set True."""
        self.callback_enabled = True

    def adder(self, scalars, name, set_active=False, deep=True):  # pragma: no cover
        raise NotImplementedError()

    def pop(self, key):
        """Get and remove an element by key name."""
        # copy before removal so callers keep a valid array after the
        # VTK-side storage is released
        arr = dict.pop(self, key).copy()
        self.remover(key)
        return arr

    def update(self, data):
        """Update this dictionary with the key-value pairs from a given dictionary."""
        if not isinstance(data, (dict, pyvista.Table)):
            raise TypeError('Data to update must be in a dictionary or PyVista Table.')
        for k, v in data.items():
            arr = np.array(v)
            try:
                self[k] = arr
            except TypeError:
                # skip values VTK cannot represent instead of failing
                logging.warning("Values under key ({}) not supported by VTK".format(k))
        return

    def __setitem__(self, key, val):
        """Ensure that data is contiguous."""
        if isinstance(val, (list, tuple)):
            val = np.array(val)
        if self.callback_enabled:
            # mirror the assignment onto the VTK data object
            self.adder(val, key, deep=False)
        dict.__setitem__(self, key, val)
        self.modifier()

    def __delitem__(self, key):
        """Remove item by key name."""
        self.remover(key)
        return dict.__delitem__(self, key)
class CellScalarsDict(_ScalarsDict):
    """Update internal cell data when an array is added or removed from the dictionary."""

    def __init__(self, data):
        """Initialize the cell array dict."""
        _ScalarsDict.__init__(self, data)
        # bind the callbacks to the dataset's cell data
        self.remover = lambda key: self.data._remove_array(FieldAssociation.CELL, key)
        self.modifier = lambda *args: self.data.GetCellData().Modified()

    def adder(self, scalars, name, set_active=False, deep=True):
        """Add a cell array."""
        # note: set_active is intentionally forced to False here
        self.data._add_cell_array(scalars, name, set_active=False, deep=deep)
class PointScalarsDict(_ScalarsDict):
    """Update internal point data when an array is added or removed from the dictionary."""

    def __init__(self, data):
        """Initialize the point array dict."""
        _ScalarsDict.__init__(self, data)
        # bind the callbacks to the dataset's point data
        self.remover = lambda key: self.data._remove_array(FieldAssociation.POINT, key)
        self.modifier = lambda *args: self.data.GetPointData().Modified()

    def adder(self, scalars, name, set_active=False, deep=True):
        """Add a point array."""
        # note: set_active is intentionally forced to False here
        self.data._add_point_array(scalars, name, set_active=False, deep=deep)
class FieldScalarsDict(_ScalarsDict):
    """Update internal field data when an array is added or removed from the dictionary."""

    def __init__(self, data):
        """Initialize the field array dict."""
        _ScalarsDict.__init__(self, data)
        # bind the callbacks to the dataset's field data
        self.remover = lambda key: self.data._remove_array(FieldAssociation.NONE, key)
        self.modifier = lambda *args: self.data.GetFieldData().Modified()

    def adder(self, scalars, name, set_active=False, deep=True):
        """Add a field array."""
        # set_active is accepted for interface parity with the other
        # dicts but is not forwarded (field data has no active scalars)
        self.data._add_field_array(scalars, name, deep=deep)
def axis_rotation(points, angle, inplace=False, deg=True, axis='z'):
    """Rotate points about a coordinate axis.

    Parameters
    ----------
    points : np.ndarray
        ``(n, 3)`` array of points.

    angle : float
        Rotation angle; degrees when ``deg`` is True, radians otherwise.

    inplace : bool, optional
        Overwrite ``points`` when True; otherwise operate on a copy.

    deg : bool, optional
        Interpret ``angle`` as degrees.  Default True.

    axis : str, optional
        Axis of rotation: ``'x'``, ``'y'`` or ``'z'`` (case-insensitive).

    Return
    ------
    np.ndarray or None
        The rotated points, or ``None`` when ``inplace`` is True.

    Raises
    ------
    ValueError
        If ``axis`` is not one of ``'x'``, ``'y'`` or ``'z'``.
    """
    axis = axis.lower()
    # Validate before touching the data.  Was a bare ``Exception``;
    # ValueError is more precise and still caught by existing callers.
    if axis not in ('x', 'y', 'z'):
        raise ValueError('invalid axis. Must be either "x", "y", or "z"')

    # Copy original array if not operating inplace
    if not inplace:
        points = points.copy()

    # Convert angle to radians
    if deg:
        angle *= np.pi / 180

    # hoist the trig evaluations out of the per-axis branches
    c = np.cos(angle)
    s = np.sin(angle)
    if axis == 'x':
        y = points[:, 1] * c - points[:, 2] * s
        z = points[:, 1] * s + points[:, 2] * c
        points[:, 1] = y
        points[:, 2] = z
    elif axis == 'y':
        x = points[:, 0] * c + points[:, 2] * s
        z = - points[:, 0] * s + points[:, 2] * c
        points[:, 0] = x
        points[:, 2] = z
    else:
        x = points[:, 0] * c - points[:, 1] * s
        y = points[:, 0] * s + points[:, 1] * c
        points[:, 0] = x
        points[:, 1] = y

    if not inplace:
        return points
| {
"repo_name": "akaszynski/vtkInterface",
"path": "pyvista/core/common.py",
"copies": "1",
"size": "53338",
"license": "mit",
"hash": -5678833343762723000,
"line_mean": 32.4827369743,
"line_max": 147,
"alpha_frac": 0.5759683528,
"autogenerated": false,
"ratio": 4.211781427668983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5287749780468983,
"avg_score": null,
"num_lines": null
} |
"""Attribute selector plugin.
Oftentimes when testing you will want to select tests based on
criteria rather then simply by filename. For example, you might want
to run all tests except for the slow ones. You can do this with the
Attribute selector plugin by setting attributes on your test methods.
Here is an example:
.. code-block:: python
def test_big_download():
import urllib
# commence slowness...
test_big_download.slow = 1
Once you've assigned an attribute ``slow = 1`` you can exclude that
test and all other tests having the slow attribute by running ::
$ nosetests -a '!slow'
There is also a decorator available for you that will set attributes.
Here's how to set ``slow=1`` like above with the decorator:
.. code-block:: python
from nose.plugins.attrib import attr
@attr('slow')
def test_big_download():
import urllib
# commence slowness...
And here's how to set an attribute with a specific value:
.. code-block:: python
from nose.plugins.attrib import attr
@attr(speed='slow')
def test_big_download():
import urllib
# commence slowness...
This test could be run with ::
$ nosetests -a speed=slow
Below is a reference to the different syntaxes available.
Simple syntax
-------------
Examples of using the ``-a`` and ``--attr`` options:
* ``nosetests -a status=stable``
Only runs tests with attribute "status" having value "stable"
* ``nosetests -a priority=2,status=stable``
Runs tests having both attributes and values
* ``nosetests -a priority=2 -a slow``
Runs tests that match either attribute
* ``nosetests -a tags=http``
If a test's ``tags`` attribute was a list and it contained the value
``http`` then it would be run
* ``nosetests -a slow``
Runs tests with the attribute ``slow`` if its value does not equal False
(False, [], "", etc...)
* ``nosetests -a '!slow'``
Runs tests that do NOT have the attribute ``slow`` or have a ``slow``
attribute that is equal to False
**NOTE**:
if your shell (like bash) interprets '!' as a special character make sure to
put single quotes around it.
Expression Evaluation
---------------------
Examples using the ``-A`` and ``--eval-attr`` options:
* ``nosetests -A "not slow"``
Evaluates the Python expression "not slow" and runs the test if True
* ``nosetests -A "(priority > 5) and not slow"``
Evaluates a complex Python expression and runs the test if True
"""
import logging
import os
import sys
from inspect import isfunction
from nose.plugins.base import Plugin
from nose.util import tolist
log = logging.getLogger('nose.plugins.attrib')
compat_24 = sys.version_info >= (2, 4)
def attr(*args, **kwargs):
    """Decorator that adds attributes to objects
    for use with the Attribute (-a) plugin.
    """
    def wrap(func):
        # positional names become simple truthy flags
        for flag in args:
            setattr(func, flag, 1)
        # keyword arguments carry explicit values
        for key, value in kwargs.items():
            setattr(func, key, value)
        return func
    return wrap
class ContextHelper:
    """Returns default values for dictionary lookups.

    Used as the locals mapping for ``eval`` so that attribute names
    missing from a test simply evaluate as False.
    """

    def __init__(self, obj):
        self.obj = obj

    def __getitem__(self, name):
        # missing names are reported as False, never KeyError
        return self.obj.get(name, False)
class AttributeGetter:
    """Helper for looking up attributes

    First we check the method, and if the attribute is not present,
    we check the method's class.
    """
    # sentinel distinguishing "attribute absent" from a stored None/False
    missing = object()

    def __init__(self, cls, method):
        self.cls = cls
        self.method = method

    def get(self, name, default=None):
        log.debug('Get %s from %s.%s', name, self.cls, self.method)
        val = self.method.__dict__.get(name, self.missing)
        if val is self.missing:
            # fall back to the class attribute of the same name
            log.debug('No attribute %s in method, getting from class',
                      name)
            val = getattr(self.cls, name, default)
            log.debug('Class attribute %s value: %s', name, val)
        return val
class AttributeSelector(Plugin):
    """Selects test cases to be run based on their attributes.
    """

    def __init__(self):
        Plugin.__init__(self)
        # list of attribute groups; populated in configure()
        self.attribs = []

    def options(self, parser, env):
        """Register command line options"""
        parser.add_option("-a", "--attr",
                          dest="attr", action="append",
                          default=env.get('NOSE_ATTR'),
                          metavar="ATTR",
                          help="Run only tests that have attributes "
                          "specified by ATTR [NOSE_ATTR]")
        # disable in < 2.4: eval can't take needed args
        if compat_24:
            parser.add_option("-A", "--eval-attr",
                              dest="eval_attr", metavar="EXPR", action="append",
                              default=env.get('NOSE_EVAL_ATTR'),
                              help="Run only tests for whose attributes "
                              "the Python expression EXPR evaluates "
                              "to True [NOSE_EVAL_ATTR]")

    def configure(self, options, config):
        """Configure the plugin and system, based on selected options.

        attr and eval_attr may each be lists.

        self.attribs will be a list of lists of tuples. In that list, each
        list is a group of attributes, all of which must match for the rule to
        match.
        """
        self.attribs = []
        # handle python eval-expression parameter
        if compat_24 and options.eval_attr:
            eval_attr = tolist(options.eval_attr)
            for attr in eval_attr:
                # "<python expression>"
                # -> eval(expr) in attribute context must be True
                def eval_in_context(expr, attribs):
                    return eval(expr, None, ContextHelper(attribs))
                self.attribs.append([(attr, eval_in_context)])
        # attribute requirements are a comma separated list of
        # 'key=value' pairs
        if options.attr:
            std_attr = tolist(options.attr)
            for attr in std_attr:
                # all attributes within an attribute group must match
                attr_group = []
                for attrib in attr.strip().split(","):
                    # don't die on trailing comma
                    if not attrib:
                        continue
                    items = attrib.split("=", 1)
                    if len(items) > 1:
                        # "name=value"
                        # -> 'str(obj.name) == value' must be True
                        key, value = items
                    else:
                        key = items[0]
                        if key[0] == "!":
                            # "!name"
                            # 'bool(obj.name)' must be False
                            key = key[1:]
                            value = False
                        else:
                            # "name"
                            # -> 'bool(obj.name)' must be True
                            value = True
                    attr_group.append((key, value))
                self.attribs.append(attr_group)
        if self.attribs:
            self.enabled = True

    def validateAttrib(self, attribs):
        # TODO: is there a need for case-sensitive value comparison?
        # within each group, all must match for the group to match
        # if any group matches, then the attribute set as a whole
        # has matched
        # NOTE(review): the local name ``any`` shadows the builtin within
        # this method; kept as-is to preserve the original byte-for-byte.
        any = False
        for group in self.attribs:
            match = True
            for key, value in group:
                obj_value = attribs.get(key)
                if callable(value):
                    # -A expressions are stored as callables
                    if not value(key, attribs):
                        match = False
                        break
                elif value is True:
                    # value must exist and be True
                    if not bool(obj_value):
                        match = False
                        break
                elif value is False:
                    # value must not exist or be False
                    if bool(obj_value):
                        match = False
                        break
                elif type(obj_value) in (list, tuple):
                    # value must be found in the list attribute
                    if not str(value).lower() in [str(x).lower()
                                                  for x in obj_value]:
                        match = False
                        break
                else:
                    # value must match, convert to string and compare
                    if (value != obj_value
                        and str(value).lower() != str(obj_value).lower()):
                        match = False
                        break
            any = any or match
        if any:
            # not True because we don't want to FORCE the selection of the
            # item, only say that it is acceptable
            return None
        return False

    def wantClass(self, cls):
        """Accept the class if the class or any method is wanted.
        """
        cls_attr = cls.__dict__
        if self.validateAttrib(cls_attr) is not False:
            return None
        # Methods in __dict__.values() are functions, oddly enough.
        methods = filter(isfunction, cls_attr.values())
        wanted = filter(lambda m: m is not False,
                        map(self.wantFunction, methods))
        # NOTE(review): under Python 3, filter() returns a lazy iterator
        # that is always truthy even when empty — this truth test only
        # behaves as intended on Python 2 (this file ships in a py2 egg).
        if wanted:
            return None
        return False

    def wantFunction(self, function):
        """Accept the function if its attributes match.
        """
        return self.validateAttrib(function.__dict__)

    def wantMethod(self, method):
        """Accept the method if its attributes match.
        """
        # NOTE(review): ``im_class`` exists only on Python 2 bound methods.
        attribs = AttributeGetter(method.im_class, method)
        return self.validateAttrib(attribs)
| {
"repo_name": "yongshengwang/hue",
"path": "build/env/lib/python2.7/site-packages/nose-0.11.3-py2.7.egg/nose/plugins/attrib.py",
"copies": "4",
"size": "9857",
"license": "apache-2.0",
"hash": -5700206111975440000,
"line_mean": 33.2256944444,
"line_max": 80,
"alpha_frac": 0.5426600386,
"autogenerated": false,
"ratio": 4.597481343283582,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7140141381883582,
"avg_score": null,
"num_lines": null
} |
"""Attributes for auto-completion."""
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
from beancount.core.data import Entries
from beancount.core.data import Transaction
from beancount.core.getters import get_active_years as getters_get_active_years
from beancount.core.getters import get_all_links
from beancount.core.getters import get_all_tags
from fava.core.module_base import FavaModule
from fava.util.date import FiscalYearEnd
from fava.util.ranking import ExponentialDecayRanker
if TYPE_CHECKING:
from fava.core import FavaLedger
def get_active_years(entries: Entries, fye: FiscalYearEnd) -> List[str]:
    """Returns active years, with support for fiscal years.

    Args:
        entries: Beancount entries
        fye: fiscal year end

    Returns:
        A reverse sorted list of years or fiscal years that occur in the
        entries.
    """
    if fye == (12, 31):
        # Plain calendar years: delegate to Beancount's own helper.
        return sorted(
            map(str, getters_get_active_years(entries)), reverse=True
        )
    fiscal_years = set()
    for entry in entries:
        date = entry.date
        # Dates past the fiscal year end roll into the next fiscal year.
        after_year_end = (date.month, date.day) > (fye.month, fye.day)
        fiscal_years.add(date.year + 1 if after_year_end else date.year)
    return [f"FY{year}" for year in sorted(fiscal_years, reverse=True)]
class AttributesModule(FavaModule):
    """Some attributes of the ledger (mostly for auto-completion)."""

    def __init__(self, ledger: "FavaLedger") -> None:
        super().__init__(ledger)
        self.accounts: List[str] = []
        self.currencies: List[str] = []
        self.payees: List[str] = []
        self.links: List[str] = []
        self.tags: List[str] = []
        self.years: List[str] = []

    def load_file(self) -> None:
        # Recompute all completion lists from the freshly loaded ledger.
        all_entries = self.ledger.all_entries
        self.links = get_all_links(all_entries)
        self.tags = get_all_tags(all_entries)
        self.years = get_active_years(
            all_entries, self.ledger.fava_options["fiscal-year-end"]
        )

        # Rankers weight recent usage more heavily (exponential decay),
        # so frequently/recently used names are suggested first.
        account_ranker = ExponentialDecayRanker(
            sorted(self.ledger.accounts.keys())
        )
        currency_ranker = ExponentialDecayRanker()
        payee_ranker = ExponentialDecayRanker()

        transactions = self.ledger.all_entries_by_type.Transaction
        for txn in transactions:
            if txn.payee:
                payee_ranker.update(txn.payee, txn.date)
            for posting in txn.postings:
                account_ranker.update(posting.account, txn.date)
                currency_ranker.update(
                    posting.units.currency, txn.date  # type: ignore
                )
                if posting.cost and posting.cost.currency is not None:
                    currency_ranker.update(posting.cost.currency, txn.date)

        self.accounts = account_ranker.sort()
        self.currencies = currency_ranker.sort()
        self.payees = payee_ranker.sort()

    def payee_accounts(self, payee: str) -> List[str]:
        """Rank accounts for the given payee."""
        account_ranker = ExponentialDecayRanker(self.accounts)
        transactions = self.ledger.all_entries_by_type.Transaction
        for txn in transactions:
            if txn.payee == payee:
                for posting in txn.postings:
                    account_ranker.update(posting.account, txn.date)
        return account_ranker.sort()

    def payee_transaction(self, payee: str) -> Optional[Transaction]:
        """The last transaction for the given payee."""
        transactions = self.ledger.all_entries_by_type.Transaction
        # entries are date-ordered, so scan backwards for the latest match
        for txn in reversed(transactions):
            if txn.payee == payee:
                return txn
        return None
| {
"repo_name": "beancount/fava",
"path": "src/fava/core/attributes.py",
"copies": "2",
"size": "3759",
"license": "mit",
"hash": -1591602126895865600,
"line_mean": 34.4622641509,
"line_max": 79,
"alpha_frac": 0.628358606,
"autogenerated": false,
"ratio": 3.7627627627627627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5391121368762762,
"avg_score": null,
"num_lines": null
} |
"""Attributes for auto-completion."""
from typing import List
from beancount.core import getters
from beancount.core.data import Entries
from beancount.core.data import Transaction
from fava.core.module_base import FavaModule
from fava.util.date import FiscalYearEnd
from fava.util.ranking import ExponentialDecayRanker
def get_active_years(entries: Entries, fye: FiscalYearEnd) -> List[str]:
    """Return the (fiscal) years spanned by the given entries.

    Args:
        entries: Beancount entries
        fye: fiscal year end

    Returns:
        A reverse sorted list of years or fiscal years that occur in the
        entries.
    """
    if fye == (12, 31):
        # Calendar-year case: use Beancount's helper directly.
        return sorted(
            (str(year) for year in getters.get_active_years(entries)),
            reverse=True,
        )
    end = (fye.month, fye.day)
    years = set()
    for entry in entries:
        entry_date = entry.date
        # Dates past the fiscal year end roll into the following year.
        if (entry_date.month, entry_date.day) > end:
            years.add(entry_date.year + 1)
        else:
            years.add(entry_date.year)
    return [f"FY{year}" for year in sorted(years, reverse=True)]
class AttributesModule(FavaModule):
    """Some attributes of the ledger (mostly for auto-completion)."""

    def __init__(self, ledger) -> None:
        super().__init__(ledger)
        # Populated from the ledger by load_file().
        self.accounts: List[str] = []
        self.currencies: List[str] = []
        self.payees: List[str] = []
        self.links: List[str] = []
        self.tags: List[str] = []
        self.years: List[str] = []

    def load_file(self) -> None:
        """Gather links, tags, years and ranked completion lists."""
        entries = self.ledger.all_entries
        self.links = getters.get_all_links(entries)
        self.tags = getters.get_all_tags(entries)
        self.years = get_active_years(
            entries, self.ledger.fava_options["fiscal-year-end"]
        )
        account_ranker = ExponentialDecayRanker(
            sorted(self.ledger.accounts.keys())
        )
        currency_ranker = ExponentialDecayRanker()
        payee_ranker = ExponentialDecayRanker()
        for txn in self.ledger.all_entries_by_type[Transaction]:
            date = txn.date
            if txn.payee:
                payee_ranker.update(txn.payee, date)
            for posting in txn.postings:
                account_ranker.update(posting.account, date)
                currency_ranker.update(posting.units.currency, date)
                if posting.cost:
                    currency_ranker.update(posting.cost.currency, date)
        self.accounts = account_ranker.sort()
        self.currencies = currency_ranker.sort()
        self.payees = payee_ranker.sort()

    def payee_accounts(self, payee: str) -> List[str]:
        """Rank accounts for the given payee."""
        ranker = ExponentialDecayRanker(self.accounts)
        for txn in self.ledger.all_entries_by_type[Transaction]:
            if txn.payee != payee:
                continue
            for posting in txn.postings:
                ranker.update(posting.account, txn.date)
        return ranker.sort()

    def payee_transaction(self, payee):
        """The last transaction for the given payee."""
        transactions = self.ledger.all_entries_by_type[Transaction]
        return next(
            (txn for txn in reversed(transactions) if txn.payee == payee),
            None,
        )
| {
"repo_name": "aumayr/beancount-web",
"path": "src/fava/core/attributes.py",
"copies": "1",
"size": "3383",
"license": "mit",
"hash": -5034807808477338000,
"line_mean": 33.8762886598,
"line_max": 75,
"alpha_frac": 0.6180904523,
"autogenerated": false,
"ratio": 3.767260579064588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4885351031364588,
"avg_score": null,
"num_lines": null
} |
"""Defines SQLAlchemy's system of class instrumentation..
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
SQLA's instrumentation system is completely customizable, in which
case an understanding of the general mechanics of this module is helpful.
An example of full customization is in /examples/custom_attributes.
"""
import operator
from operator import attrgetter, itemgetter
import types
import weakref
from sqlalchemy import util
from sqlalchemy.orm import interfaces, collections, exc
import sqlalchemy.exceptions as sa_exc
# lazy imports
_entity_info = None
identity_equal = None
state = None
PASSIVE_NO_RESULT = util.symbol('PASSIVE_NO_RESULT')
ATTR_WAS_SET = util.symbol('ATTR_WAS_SET')
NO_VALUE = util.symbol('NO_VALUE')
NEVER_SET = util.symbol('NEVER_SET')
# "passive" get settings
# TODO: the True/False values need to be factored out
# of the rest of ORM code
# don't fire off any callables, and don't initialize the attribute to
# an empty value
PASSIVE_NO_INITIALIZE = True #util.symbol('PASSIVE_NO_INITIALIZE')
# don't fire off any callables, but if no callables present
# then initialize to an empty value/collection
# this is used by backrefs.
PASSIVE_NO_FETCH = util.symbol('PASSIVE_NO_FETCH')
# fire callables/initialize as needed
PASSIVE_OFF = False #util.symbol('PASSIVE_OFF')
INSTRUMENTATION_MANAGER = '__sa_instrumentation_manager__'
"""Attribute, elects custom instrumentation when present on a mapped class.
Allows a class to specify a slightly or wildly different technique for
tracking changes made to mapped attributes and collections.
Only one instrumentation implementation is allowed in a given object
inheritance hierarchy.
The value of this attribute must be a callable and will be passed a class
object. The callable must return one of:
- An instance of an interfaces.InstrumentationManager or subclass
- An object implementing all or some of InstrumentationManager (TODO)
- A dictionary of callables, implementing all or some of the above (TODO)
- An instance of a ClassManager or subclass
interfaces.InstrumentationManager is public API and will remain stable
between releases. ClassManager is not public and no guarantees are made
about stability. Caveat emptor.
This attribute is consulted by the default SQLAlchemy instrumentation
resolution code. If custom finders are installed in the global
instrumentation_finders list, they may or may not choose to honor this
attribute.
"""
instrumentation_finders = []
"""An extensible sequence of instrumentation implementation finding callables.
Finders callables will be passed a class object. If None is returned, the
next finder in the sequence is consulted. Otherwise the return must be an
instrumentation factory that follows the same guidelines as
INSTRUMENTATION_MANAGER.
By default, the only finder is find_native_user_instrumentation_hook, which
searches for INSTRUMENTATION_MANAGER. If all finders return None, standard
ClassManager instrumentation is used.
"""
class QueryableAttribute(interfaces.PropComparator):
    """Base descriptor usable in class-level SQL expressions.

    Pairs an attribute ``impl`` with a ``comparator`` that receives
    class-level compare/math events.
    """

    def __init__(self, key, impl=None, comparator=None, parententity=None):
        """Construct an InstrumentedAttribute.

        comparator
          a sql.Comparator to which class-level compare/math events will be sent
        """
        self.key = key
        self.impl = impl
        self.comparator = comparator
        self.parententity = parententity

    def get_history(self, instance, **kwargs):
        """Return the change History of this attribute on *instance*."""
        state = instance_state(instance)
        dict_ = instance_dict(instance)
        return self.impl.get_history(state, dict_, **kwargs)

    def __selectable__(self):
        # TODO: conditionally attach this method based on clause_element ?
        return self

    def __clause_element__(self):
        return self.comparator.__clause_element__()

    def label(self, name):
        """Label the underlying clause element."""
        return self.__clause_element__().label(name)

    def operate(self, op, *other, **kwargs):
        """Forward operator invocations to the comparator."""
        return op(self.comparator, *other, **kwargs)

    def reverse_operate(self, op, other, **kwargs):
        """Forward reversed operator invocations to the comparator."""
        return op(other, self.comparator, **kwargs)

    def hasparent(self, state, optimistic=False):
        return self.impl.hasparent(state, optimistic=optimistic)

    def __getattr__(self, key):
        # Unknown attributes fall through to the comparator.
        try:
            return getattr(self.comparator, key)
        except AttributeError:
            self_name = type(self).__name__
            comparator_name = type(self.comparator).__name__
            raise AttributeError(
                'Neither %r object nor %r object has an attribute %r' % (
                    self_name,
                    comparator_name,
                    key)
            )

    def __str__(self):
        return "%s.%s" % (repr(self.parententity), self.property.key)

    @property
    def property(self):
        return self.comparator.property
class InstrumentedAttribute(QueryableAttribute):
    """Public-facing descriptor, placed in the mapped class dictionary."""

    def __set__(self, instance, value):
        state = instance_state(instance)
        self.impl.set(state, instance_dict(instance), value, None)

    def __delete__(self, instance):
        state = instance_state(instance)
        self.impl.delete(state, instance_dict(instance))

    def __get__(self, instance, owner):
        # Class-level access hands back the descriptor itself, for use in
        # query expressions.
        if instance is None:
            return self
        return self.impl.get(instance_state(instance), instance_dict(instance))
class _ProxyImpl(object):
accepts_scalar_loader = False
expire_missing = True
def __init__(self, key):
self.key = key
def proxied_attribute_factory(descriptor):
    """Create an InstrumentedAttribute / user descriptor hybrid.

    Returns a new InstrumentedAttribute type that delegates descriptor
    behavior and getattr() to the given descriptor.
    """
    class Proxy(InstrumentedAttribute):
        """A combination of InstrumentedAttribute and a regular descriptor."""
        def __init__(self, key, descriptor, comparator, parententity):
            self.key = key
            # maintain ProxiedAttribute.user_prop compatibility.
            self.descriptor = self.user_prop = descriptor
            self._comparator = comparator
            self._parententity = parententity
            # do-nothing impl; the wrapped descriptor drives actual access.
            self.impl = _ProxyImpl(key)
        @util.memoized_property
        def comparator(self):
            # The comparator may be given as a factory; resolve it once and
            # cache via memoized_property.
            if util.callable(self._comparator):
                self._comparator = self._comparator()
            return self._comparator
        def __get__(self, instance, owner):
            """Delegate __get__ to the original descriptor."""
            if instance is None:
                # NOTE(review): the class-level __get__ result is discarded
                # and the Proxy itself returned, so class access yields a
                # queryable attribute -- confirm the side-effect call on the
                # wrapped descriptor is intentional.
                descriptor.__get__(instance, owner)
                return self
            return descriptor.__get__(instance, owner)
        def __set__(self, instance, value):
            """Delegate __set__ to the original descriptor."""
            return descriptor.__set__(instance, value)
        def __delete__(self, instance):
            """Delegate __delete__ to the original descriptor."""
            return descriptor.__delete__(instance)
        def __getattr__(self, attribute):
            """Delegate __getattr__ to the original descriptor and/or comparator."""
            try:
                return getattr(descriptor, attribute)
            except AttributeError:
                try:
                    return getattr(self._comparator, attribute)
                except AttributeError:
                    raise AttributeError(
                    'Neither %r object nor %r object has an attribute %r' % (
                        type(descriptor).__name__,
                        type(self._comparator).__name__,
                        attribute)
                    )
    Proxy.__name__ = type(descriptor).__name__ + 'Proxy'
    # Copy proxied special methods (__len__, __iter__, ...) from the
    # descriptor's type onto the Proxy class.
    util.monkeypatch_proxied_specials(Proxy, type(descriptor),
                                      name='descriptor',
                                      from_instance=descriptor)
    return Proxy
class AttributeImpl(object):
    """internal implementation for instrumented attributes."""
    def __init__(self, class_, key,
                 callable_, trackparent=False, extension=None,
                 compare_function=None, active_history=False,
                 parent_token=None, expire_missing=True,
                 **kwargs):
        """Construct an AttributeImpl.

        class_
          associated class

        key
          string name of the attribute

        callable_
          optional function which generates a callable based on a parent
          instance, which produces the "default" values for a scalar or
          collection attribute when it's first accessed, if not present
          already.

        trackparent
          if True, attempt to track if an instance has a parent attached
          to it via this attribute.

        extension
          a single or list of AttributeExtension object(s) which will
          receive set/delete/append/remove/etc. events.

        compare_function
          a function that compares two values which are normally
          assignable to this attribute.

        active_history
          indicates that get_history() should always return the "old" value,
          even if it means executing a lazy callable upon attribute change.

        parent_token
          Usually references the MapperProperty, used as a key for
          the hasparent() function to identify an "owning" attribute.
          Allows multiple AttributeImpls to all match a single
          owner attribute.

        expire_missing
          if False, don't add an "expiry" callable to this attribute
          during state.expire_attributes(None), if no value is present
          for this key.
        """
        self.class_ = class_
        self.key = key
        self.callable_ = callable_
        self.trackparent = trackparent
        # The impl serves as its own parent token unless one (usually the
        # MapperProperty) was supplied.
        self.parent_token = parent_token or self
        if compare_function is None:
            self.is_equal = operator.eq
        else:
            self.is_equal = compare_function
        self.extensions = util.to_list(extension or [])
        # Any extension that needs the "old" value forces active history on.
        for e in self.extensions:
            if e.active_history:
                active_history = True
                break
        self.active_history = active_history
        self.expire_missing = expire_missing
    def hasparent(self, state, optimistic=False):
        """Return the boolean value of a `hasparent` flag attached to
        the given state.

        The `optimistic` flag determines what the default return value
        should be if no `hasparent` flag can be located.

        As this function is used to determine if an instance is an
        *orphan*, instances that were loaded from storage should be
        assumed to not be orphans, until a True/False value for this
        flag is set.

        An instance attribute that is loaded by a callable function
        will also not have a `hasparent` flag.
        """
        return state.parents.get(id(self.parent_token), optimistic)
    def sethasparent(self, state, value):
        """Set a boolean flag on the given item corresponding to
        whether or not it is attached to a parent object via the
        attribute represented by this ``InstrumentedAttribute``.
        """
        state.parents[id(self.parent_token)] = value
    def set_callable(self, state, callable_):
        """Set a callable function for this attribute on the given object.

        This callable will be executed when the attribute is next
        accessed, and is assumed to construct part of the instances
        previously stored state.  When its value or values are loaded,
        they will be established as part of the instance's *committed
        state*.  While *trackparent* information will be assembled for
        these instances, attribute-level event handlers will not be
        fired.

        The callable overrides the class level callable set in the
        ``InstrumentedAttribute`` constructor.
        """
        state.callables[self.key] = callable_
    def get_history(self, state, dict_, passive=PASSIVE_OFF):
        # Subclass responsibility: each impl defines its own History logic.
        raise NotImplementedError()
    def _get_callable(self, state):
        # Per-state callable (set via set_callable) wins over the
        # class-level one.
        if self.key in state.callables:
            return state.callables[self.key]
        elif self.callable_ is not None:
            return self.callable_(state)
        else:
            return None
    def initialize(self, state, dict_):
        """Initialize the given state's attribute with an empty value."""
        dict_[self.key] = None
        return None
    def get(self, state, dict_, passive=PASSIVE_OFF):
        """Retrieve a value from the given object.

        If a callable is assembled on this object's attribute, and
        passive is False, the callable will be executed and the
        resulting value will be set as the new value for this attribute.
        """
        try:
            return dict_[self.key]
        except KeyError:
            # if no history, check for lazy callables, etc.
            if state.committed_state.get(self.key, NEVER_SET) is NEVER_SET:
                if passive is PASSIVE_NO_INITIALIZE:
                    return PASSIVE_NO_RESULT
                callable_ = self._get_callable(state)
                if callable_ is not None:
                    #if passive is not PASSIVE_OFF:
                    #    return PASSIVE_NO_RESULT
                    value = callable_(passive=passive)
                    if value is PASSIVE_NO_RESULT:
                        return value
                    elif value is not ATTR_WAS_SET:
                        return self.set_committed_value(state, dict_, value)
                    else:
                        # ATTR_WAS_SET: the callable populated dict_ itself;
                        # re-read (recursing if it somehow did not).
                        if self.key not in dict_:
                            return self.get(state, dict_, passive=passive)
                        return dict_[self.key]
            # Return a new, empty value
            return self.initialize(state, dict_)
    def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
        # Scalar default: an append is just a set.
        self.set(state, dict_, value, initiator, passive=passive)
    def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
        # Scalar default: a remove clears the value to None.
        self.set(state, dict_, None, initiator, passive=passive)
    def set(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
        raise NotImplementedError()
    def get_committed_value(self, state, dict_, passive=PASSIVE_OFF):
        """return the unchanged value of this attribute"""
        if self.key in state.committed_state:
            # NO_VALUE marks "was unset at commit time" -- report as None.
            if state.committed_state[self.key] is NO_VALUE:
                return None
            else:
                return state.committed_state.get(self.key)
        else:
            return self.get(state, dict_, passive=passive)
    def set_committed_value(self, state, dict_, value):
        """set an attribute value on the given instance and 'commit' it."""
        state.commit(dict_, [self.key])
        # the value is no longer lazy-loadable; drop any pending callable.
        state.callables.pop(self.key, None)
        state.dict[self.key] = value
        return value
class ScalarAttributeImpl(AttributeImpl):
    """represents a scalar value-holding InstrumentedAttribute."""

    accepts_scalar_loader = True
    uses_objects = False

    def delete(self, state, dict_):
        """Remove the attribute's value, firing remove/modified events.

        Raises KeyError if no value is present in ``dict_``.
        """
        # TODO: catch key errors, convert to attributeerror?
        if self.active_history:
            old = self.get(state, dict_)
        else:
            old = dict_.get(self.key, NO_VALUE)
        if self.extensions:
            self.fire_remove_event(state, dict_, old, None)
        state.modified_event(dict_, self, False, old)
        del dict_[self.key]

    def get_history(self, state, dict_, passive=PASSIVE_OFF):
        """History based on the current dict value; never fires callables."""
        return History.from_attribute(
            self, state, dict_.get(self.key, NO_VALUE))

    def set(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
        """Set ``value`` on the state, firing replace/modified events.

        ``initiator`` guards against re-entry in circular event chains.
        """
        if initiator is self:
            return
        if self.active_history:
            old = self.get(state, dict_)
        else:
            old = dict_.get(self.key, NO_VALUE)
        if self.extensions:
            value = self.fire_replace_event(state, dict_, value, old, initiator)
        state.modified_event(dict_, self, False, old)
        dict_[self.key] = value

    def fire_replace_event(self, state, dict_, value, previous, initiator):
        """Run extension ``set`` hooks; each may replace the value."""
        for ext in self.extensions:
            value = ext.set(state, value, previous, initiator or self)
        return value

    def fire_remove_event(self, state, dict_, value, initiator):
        """Run extension ``remove`` hooks for the outgoing value."""
        for ext in self.extensions:
            ext.remove(state, value, initiator or self)

    @property
    def type(self):
        # Bug fix: the original computed this expression but never returned
        # it, so the property always evaluated to None.
        return self.property.columns[0].type
class MutableScalarAttributeImpl(ScalarAttributeImpl):
    """represents a scalar value-holding InstrumentedAttribute, which can detect
    changes within the value itself.
    """
    uses_objects = False
    def __init__(self, class_, key, callable_,
                 class_manager, copy_function=None,
                 compare_function=None, **kwargs):
        # NOTE(review): super() is anchored at ScalarAttributeImpl, so this
        # resolves to AttributeImpl.__init__; equivalent today since
        # ScalarAttributeImpl defines no __init__ of its own -- confirm.
        super(ScalarAttributeImpl, self).__init__(
            class_,
            key,
            callable_,
            compare_function=compare_function,
            **kwargs)
        # register this key so the owning manager knows it holds a mutable
        # value that must be change-checked.
        class_manager.mutable_attributes.add(key)
        if copy_function is None:
            raise sa_exc.ArgumentError(
                "MutableScalarAttributeImpl requires a copy function")
        self.copy = copy_function
    def get_history(self, state, dict_, passive=PASSIVE_OFF):
        # With an empty dict (e.g. expired instance), fall back to the
        # committed value so in-place mutations can still be detected.
        if not dict_:
            v = state.committed_state.get(self.key, NO_VALUE)
        else:
            v = dict_.get(self.key, NO_VALUE)
        return History.from_attribute(
            self, state, v)
    def check_mutable_modified(self, state, dict_):
        # True if the value differs from its committed copy (changed
        # in place).
        added, \
        unchanged, \
        deleted = self.get_history(state, dict_, passive=PASSIVE_NO_INITIALIZE)
        return bool(added or deleted)
    def get(self, state, dict_, passive=PASSIVE_OFF):
        # Cache the value in state.mutable_dict so later copies can be
        # compared against it for in-place changes.
        if self.key not in state.mutable_dict:
            ret = ScalarAttributeImpl.get(self, state, dict_, passive=passive)
            if ret is not PASSIVE_NO_RESULT:
                state.mutable_dict[self.key] = ret
            return ret
        else:
            return state.mutable_dict[self.key]
    def delete(self, state, dict_):
        ScalarAttributeImpl.delete(self, state, dict_)
        # keep the mutable-tracking cache in sync with the deletion.
        state.mutable_dict.pop(self.key)
    def set(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
        if initiator is self:
            return
        if self.extensions:
            old = self.get(state, dict_)
            value = self.fire_replace_event(state, dict_, value, old, initiator)
        state.modified_event(dict_, self, True, NEVER_SET)
        dict_[self.key] = value
        state.mutable_dict[self.key] = value
class ScalarObjectAttributeImpl(ScalarAttributeImpl):
    """represents a scalar-holding InstrumentedAttribute,
    where the target object is also instrumented.

    Adds events to delete/set operations.
    """
    accepts_scalar_loader = False
    uses_objects = True
    def __init__(self, class_, key, callable_,
                 trackparent=False, extension=None, copy_function=None,
                 compare_function=None, **kwargs):
        super(ScalarObjectAttributeImpl, self).__init__(
            class_,
            key,
            callable_,
            trackparent=trackparent,
            extension=extension,
            compare_function=compare_function,
            **kwargs)
        if compare_function is None:
            # object-valued attributes compare by identity, not equality.
            self.is_equal = identity_equal
    def delete(self, state, dict_):
        old = self.get(state, dict_)
        self.fire_remove_event(state, dict_, old, self)
        del dict_[self.key]
    def get_history(self, state, dict_, passive=PASSIVE_OFF):
        if self.key in dict_:
            return History.from_attribute(self, state, dict_[self.key])
        else:
            # not loaded; a passive get may return PASSIVE_NO_RESULT, in
            # which case report no history at all.
            current = self.get(state, dict_, passive=passive)
            if current is PASSIVE_NO_RESULT:
                return HISTORY_BLANK
            else:
                return History.from_attribute(self, state, current)
    def set(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
        """Set a value on the given InstanceState.

        `initiator` is the ``InstrumentedAttribute`` that initiated the
        ``set()`` operation and is used to control the depth of a circular
        setter operation.
        """
        if initiator is self:
            return
        if self.active_history:
            old = self.get(state, dict_)
        else:
            # avoid triggering a load just to capture the old value.
            old = self.get(state, dict_, passive=PASSIVE_NO_FETCH)
        value = self.fire_replace_event(state, dict_, value, old, initiator)
        dict_[self.key] = value
    def fire_remove_event(self, state, dict_, value, initiator):
        # clear the "hasparent" flag before notifying extensions.
        if self.trackparent and value is not None:
            self.sethasparent(instance_state(value), False)
        for ext in self.extensions:
            ext.remove(state, value, initiator or self)
        state.modified_event(dict_, self, False, value)
    def fire_replace_event(self, state, dict_, value, previous, initiator):
        # detach the previous value's parent flag (if it was a real,
        # loaded value distinct from the new one)...
        if self.trackparent:
            if (previous is not value and
                previous is not None and
                previous is not PASSIVE_NO_RESULT):
                self.sethasparent(instance_state(previous), False)
        for ext in self.extensions:
            value = ext.set(state, value, previous, initiator or self)
        state.modified_event(dict_, self, False, previous)
        # ...and attach it to the new value.
        if self.trackparent:
            if value is not None:
                self.sethasparent(instance_state(value), True)
        return value
class CollectionAttributeImpl(AttributeImpl):
    """A collection-holding attribute that instruments changes in membership.

    Only handles collections of instrumented objects.

    InstrumentedCollectionAttribute holds an arbitrary, user-specified
    container object (defaulting to a list) and brokers access to the
    CollectionAdapter, a "view" onto that object that presents consistent
    bag semantics to the orm layer independent of the user data implementation.
    """
    accepts_scalar_loader = False
    uses_objects = True
    def __init__(self, class_, key, callable_,
                 typecallable=None, trackparent=False, extension=None,
                 copy_function=None, compare_function=None, **kwargs):
        super(CollectionAttributeImpl, self).__init__(
            class_,
            key,
            callable_,
            trackparent=trackparent,
            extension=extension,
            compare_function=compare_function,
            **kwargs)
        if copy_function is None:
            copy_function = self.__copy
        self.copy = copy_function
        # factory producing the user-visible collection container.
        self.collection_factory = typecallable
    def __copy(self, item):
        # default copy: snapshot members via the collection adapter.
        return [y for y in list(collections.collection_adapter(item))]
    def get_history(self, state, dict_, passive=PASSIVE_OFF):
        current = self.get(state, dict_, passive=passive)
        if current is PASSIVE_NO_RESULT:
            return HISTORY_BLANK
        else:
            return History.from_attribute(self, state, current)
    def fire_append_event(self, state, dict_, value, initiator):
        for ext in self.extensions:
            value = ext.append(state, value, initiator or self)
        state.modified_event(dict_, self, True, NEVER_SET, passive=PASSIVE_NO_INITIALIZE)
        # appended child now has this attribute as a parent.
        if self.trackparent and value is not None:
            self.sethasparent(instance_state(value), True)
        return value
    def fire_pre_remove_event(self, state, dict_, initiator):
        state.modified_event(dict_, self, True, NEVER_SET, passive=PASSIVE_NO_INITIALIZE)
    def fire_remove_event(self, state, dict_, value, initiator):
        # removed child no longer has this attribute as a parent.
        if self.trackparent and value is not None:
            self.sethasparent(instance_state(value), False)
        for ext in self.extensions:
            ext.remove(state, value, initiator or self)
        state.modified_event(dict_, self, True, NEVER_SET, passive=PASSIVE_NO_INITIALIZE)
    def delete(self, state, dict_):
        if self.key not in dict_:
            return
        state.modified_event(dict_, self, True, NEVER_SET)
        # empty the collection with events so backrefs etc. fire.
        collection = self.get_collection(state, state.dict)
        collection.clear_with_event()
        # TODO: catch key errors, convert to attributeerror?
        del dict_[self.key]
    def initialize(self, state, dict_):
        """Initialize this attribute with an empty collection."""
        _, user_data = self._initialize_collection(state)
        dict_[self.key] = user_data
        return user_data
    def _initialize_collection(self, state):
        # returns a (CollectionAdapter, user-visible collection) pair.
        return state.manager.initialize_collection(
            self.key, state, self.collection_factory)
    def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
        if initiator is self:
            return
        collection = self.get_collection(state, dict_, passive=passive)
        if collection is PASSIVE_NO_RESULT:
            # collection not loaded: record the append as pending state.
            value = self.fire_append_event(state, dict_, value, initiator)
            assert self.key not in dict_, "Collection was loaded during event handling."
            state.get_pending(self.key).append(value)
        else:
            collection.append_with_event(value, initiator)
    def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
        if initiator is self:
            return
        collection = self.get_collection(state, state.dict, passive=passive)
        if collection is PASSIVE_NO_RESULT:
            # collection not loaded: record the removal as pending state.
            self.fire_remove_event(state, dict_, value, initiator)
            assert self.key not in dict_, "Collection was loaded during event handling."
            state.get_pending(self.key).remove(value)
        else:
            collection.remove_with_event(value, initiator)
    def set(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
        """Set a value on the given object.

        `initiator` is the ``InstrumentedAttribute`` that initiated the
        ``set()`` operation and is used to control the depth of a circular
        setter operation.
        """
        if initiator is self:
            return
        self._set_iterable(
            state, dict_, value,
            lambda adapter, i: adapter.adapt_like_to_iterable(i))
    def _set_iterable(self, state, dict_, iterable, adapter=None):
        """Set a collection value from an iterable of state-bearers.

        ``adapter`` is an optional callable invoked with a CollectionAdapter
        and the iterable.  Should return an iterable of state-bearing
        instances suitable for appending via a CollectionAdapter.  Can be used
        for, e.g., adapting an incoming dictionary into an iterator of values
        rather than keys.
        """
        # pulling a new collection first so that an adaptation exception does
        # not trigger a lazy load of the old collection.
        new_collection, user_data = self._initialize_collection(state)
        if adapter:
            new_values = list(adapter(new_collection, iterable))
        else:
            new_values = list(iterable)
        old = self.get(state, dict_)
        # ignore re-assignment of the current collection, as happens
        # implicitly with in-place operators (foo.collection |= other)
        if old is iterable:
            return
        state.modified_event(dict_, self, True, old)
        old_collection = self.get_collection(state, dict_, old)
        dict_[self.key] = user_data
        # migrate membership: fires remove events for dropped members and
        # append events for new ones.
        collections.bulk_replace(new_values, old_collection, new_collection)
        old_collection.unlink(old)
    def set_committed_value(self, state, dict_, value):
        """Set an attribute value on the given instance and 'commit' it."""
        collection, user_data = self._initialize_collection(state)
        if value:
            # populate without firing events; this is a load, not a change.
            for item in value:
                collection.append_without_event(item)
        state.callables.pop(self.key, None)
        state.dict[self.key] = user_data
        state.commit(dict_, [self.key])
        if self.key in state.pending:
            # pending items exist.  issue a modified event,
            # add/remove new items.
            state.modified_event(dict_, self, True, user_data)
            pending = state.pending.pop(self.key)
            added = pending.added_items
            removed = pending.deleted_items
            for item in added:
                collection.append_without_event(item)
            for item in removed:
                collection.remove_without_event(item)
        return user_data
    def get_collection(self, state, dict_, user_data=None, passive=PASSIVE_OFF):
        """Retrieve the CollectionAdapter associated with the given state.

        Creates a new CollectionAdapter if one does not exist.
        """
        if user_data is None:
            user_data = self.get(state, dict_, passive=passive)
            if user_data is PASSIVE_NO_RESULT:
                return user_data
        return getattr(user_data, '_sa_adapter')
class GenericBackrefExtension(interfaces.AttributeExtension):
    """An extension which synchronizes a two-way relationship.

    A typical two-way relationship is a parent object containing a list of
    child objects, where each child object references the parent.  The other
    pattern is two objects which contain scalar references to each other.
    """
    active_history = False
    def __init__(self, key):
        # key of the complementary attribute on the related class.
        self.key = key
    def set(self, state, child, oldchild, initiator):
        if oldchild is child:
            return child
        if oldchild is not None and oldchild is not PASSIVE_NO_RESULT:
            # With lazy=None, there's no guarantee that the full collection is
            # present when updating via a backref.
            old_state, old_dict = instance_state(oldchild), instance_dict(oldchild)
            impl = old_state.get_impl(self.key)
            try:
                impl.remove(old_state,
                            old_dict,
                            state.obj(),
                            initiator, passive=PASSIVE_NO_FETCH)
            except (ValueError, KeyError, IndexError):
                # best-effort removal: the reverse side may not contain us.
                pass
        if child is not None:
            child_state, child_dict = instance_state(child), instance_dict(child)
            child_state.get_impl(self.key).append(
                child_state,
                child_dict,
                state.obj(),
                initiator, passive=PASSIVE_NO_FETCH)
        return child
    def append(self, state, child, initiator):
        # mirror the append onto the reverse attribute of the child.
        child_state, child_dict = instance_state(child), instance_dict(child)
        child_state.get_impl(self.key).append(
            child_state,
            child_dict,
            state.obj(),
            initiator, passive=PASSIVE_NO_FETCH)
        return child
    def remove(self, state, child, initiator):
        # mirror the removal onto the reverse attribute of the child.
        if child is not None:
            child_state, child_dict = instance_state(child), instance_dict(child)
            child_state.get_impl(self.key).remove(
                child_state,
                child_dict,
                state.obj(),
                initiator, passive=PASSIVE_NO_FETCH)
class Events(object):
    """Per-class registry of instance lifecycle listeners.

    Each event name (``on_init``, ``on_load``, ...) is an attribute holding
    either the empty tuple (no listeners yet) or a mutable list of callables.
    """

    def __init__(self):
        self.original_init = object.__init__
        self.on_init = ()
        self.on_init_failure = ()
        self.on_load = ()
        self.on_resurrect = ()

    def run(self, event, *args, **kwargs):
        """Invoke every listener registered for *event*."""
        listeners = getattr(self, event)
        for listener in listeners:
            listener(*args, **kwargs)

    def add_listener(self, event, listener):
        """Register *listener* for *event*.

        Not thread safe (historically deemed acceptable here).
        """
        bucket = getattr(self, event)
        if bucket != ():
            bucket.append(listener)
        else:
            # First listener: swap the immutable placeholder for a list.
            setattr(self, event, [listener])

    def remove_listener(self, event, listener):
        """Remove a previously registered listener for *event*."""
        getattr(self, event).remove(listener)
class ClassManager(dict):
    """tracks state information at the class level.

    Maps attribute keys to InstrumentedAttribute descriptors for one
    mapped class.  Also installs/uninstalls the instrumented ``__init__``
    and the per-instance state attribute.
    """

    # attribute names installed on the managed class and its instances
    MANAGER_ATTR = '_sa_class_manager'
    STATE_ATTR = '_sa_instance_state'

    event_registry_factory = Events
    deferred_scalar_loader = None

    def __init__(self, class_):
        self.class_ = class_
        self.factory = None  # where we came from, for inheritance bookkeeping
        self.info = {}
        self.mapper = None
        self.new_init = None
        self.mutable_attributes = set()
        self.local_attrs = {}
        self.originals = {}
        # inherit instrumented attributes from already-managed bases
        for base in class_.__mro__[-2:0:-1]:  # reverse, skipping 1st and last
            if not isinstance(base, type):
                continue
            cls_state = manager_of_class(base)
            if cls_state:
                self.update(cls_state)
        self.events = self.event_registry_factory()
        self.manage()
        self._instrument_init()

    def _configure_create_arguments(self,
                                    _source=None,
                                    deferred_scalar_loader=None):
        """Accept extra **kw arguments passed to create_manager_for_cls.

        The current contract of ClassManager and other managers is that they
        take a single "cls" argument in their constructor (as per
        test/orm/instrumentation.py InstrumentationCollisionTest).  This
        is to provide consistency with the current API of "class manager"
        callables and such which may return various ClassManager and
        ClassManager-like instances.  So create_manager_for_cls sends
        in ClassManager-specific arguments via this method once the
        non-proxied ClassManager is available.
        """
        if _source:
            deferred_scalar_loader = _source.deferred_scalar_loader
        if deferred_scalar_loader:
            self.deferred_scalar_loader = deferred_scalar_loader

    def _subclass_manager(self, cls):
        """Create a new ClassManager for a subclass of this ClassManager's class.

        This is called automatically when attributes are instrumented so that
        the attributes can be propagated to subclasses against their own
        class-local manager, without the need for mappers etc. to have already
        pre-configured managers for the full class hierarchy.   Mappers
        can post-configure the auto-generated ClassManager when needed.
        """
        manager = manager_of_class(cls)
        if manager is None:
            manager = _create_manager_for_cls(cls, _source=self)
        return manager

    def _instrument_init(self):
        # TODO: self.class_.__init__ is often the already-instrumented
        # __init__ from an instrumented superclass.  We still need to make
        # our own wrapper, but it would
        # be nice to wrap the original __init__ and not our existing wrapper
        # of such, since this adds method overhead.
        self.events.original_init = self.class_.__init__
        self.new_init = _generate_init(self.class_, self)
        self.install_member('__init__', self.new_init)

    def _uninstrument_init(self):
        # restore the pre-instrumentation __init__, if one was installed
        if self.new_init:
            self.uninstall_member('__init__')
            self.new_init = None

    def _create_instance_state(self, instance):
        # mutable scalars need the heavier state class that scans them
        if self.mutable_attributes:
            return state.MutableAttrInstanceState(instance, self)
        else:
            return state.InstanceState(instance, self)

    def manage(self):
        """Mark this instance as the manager for its class."""
        setattr(self.class_, self.MANAGER_ATTR, self)

    def dispose(self):
        """Disassociate this manager from its class."""
        delattr(self.class_, self.MANAGER_ATTR)

    def manager_getter(self):
        # callable mapping a class to its manager
        return attrgetter(self.MANAGER_ATTR)

    def instrument_attribute(self, key, inst, propagated=False):
        """Install descriptor *inst* under *key* and cascade to subclasses."""
        if propagated:
            if key in self.local_attrs:
                return  # don't override local attr with inherited attr
        else:
            self.local_attrs[key] = inst
            self.install_descriptor(key, inst)
        self[key] = inst
        for cls in self.class_.__subclasses__():
            manager = self._subclass_manager(cls)
            manager.instrument_attribute(key, inst, True)

    def post_configure_attribute(self, key):
        # hook for subclasses/adapters; no-op here
        pass

    def uninstrument_attribute(self, key, propagated=False):
        """Remove the descriptor for *key* and cascade to subclasses."""
        if key not in self:
            return
        if propagated:
            if key in self.local_attrs:
                return  # don't get rid of local attr
        else:
            del self.local_attrs[key]
            self.uninstall_descriptor(key)
        del self[key]
        if key in self.mutable_attributes:
            self.mutable_attributes.remove(key)
        for cls in self.class_.__subclasses__():
            manager = self._subclass_manager(cls)
            manager.uninstrument_attribute(key, True)

    def unregister(self):
        """remove all instrumentation established by this ClassManager."""
        self._uninstrument_init()
        self.mapper = self.events = None
        self.info.clear()
        for key in list(self):
            if key in self.local_attrs:
                self.uninstrument_attribute(key)

    def install_descriptor(self, key, inst):
        if key in (self.STATE_ATTR, self.MANAGER_ATTR):
            raise KeyError("%r: requested attribute name conflicts with "
                           "instrumentation attribute of the same name." % key)
        setattr(self.class_, key, inst)

    def uninstall_descriptor(self, key):
        delattr(self.class_, key)

    def install_member(self, key, implementation):
        """Replace a class member, remembering the original for restore."""
        if key in (self.STATE_ATTR, self.MANAGER_ATTR):
            raise KeyError("%r: requested attribute name conflicts with "
                           "instrumentation attribute of the same name." % key)
        # setdefault: only the first (pre-instrumentation) original is kept
        self.originals.setdefault(key, getattr(self.class_, key, None))
        setattr(self.class_, key, implementation)

    def uninstall_member(self, key):
        original = self.originals.pop(key, None)
        if original is not None:
            setattr(self.class_, key, original)

    def instrument_collection_class(self, key, collection_class):
        return collections.prepare_instrumentation(collection_class)

    def initialize_collection(self, key, state, factory):
        """Create the user collection plus its CollectionAdapter."""
        user_data = factory()
        adapter = collections.CollectionAdapter(
            self.get_impl(key), state, user_data)
        return adapter, user_data

    def is_instrumented(self, key, search=False):
        # search=True also consults attributes inherited from base managers
        if search:
            return key in self
        else:
            return key in self.local_attrs

    def get_impl(self, key):
        return self[key].impl

    @property
    def attributes(self):
        # all InstrumentedAttribute descriptors known to this manager
        return self.itervalues()

    ## InstanceState management

    def new_instance(self, state=None):
        # bypass __init__ entirely; attach state directly
        instance = self.class_.__new__(self.class_)
        setattr(instance, self.STATE_ATTR, state or self._create_instance_state(instance))
        return instance

    def setup_instance(self, instance, state=None):
        setattr(instance, self.STATE_ATTR, state or self._create_instance_state(instance))

    def teardown_instance(self, instance):
        delattr(instance, self.STATE_ATTR)

    def _new_state_if_none(self, instance):
        """Install a default InstanceState if none is present.

        A private convenience method used by the __init__ decorator.
        """
        if hasattr(instance, self.STATE_ATTR):
            return False
        else:
            state = self._create_instance_state(instance)
            setattr(instance, self.STATE_ATTR, state)
            return state

    def state_getter(self):
        """Return a (instance) -> InstanceState callable.

        "state getter" callables should raise either KeyError or
        AttributeError if no InstanceState could be found for the
        instance.
        """
        return attrgetter(self.STATE_ATTR)

    def dict_getter(self):
        return attrgetter('__dict__')

    def has_state(self, instance):
        return hasattr(instance, self.STATE_ATTR)

    def has_parent(self, state, key, optimistic=False):
        """Return the hasparent flag for *state* via attribute *key*."""
        return self.get_impl(key).hasparent(state, optimistic=optimistic)

    def __nonzero__(self):
        """All ClassManagers are non-zero regardless of attribute state."""
        return True

    def __repr__(self):
        return '<%s of %r at %x>' % (
            self.__class__.__name__, self.class_, id(self))
class _ClassInstrumentationAdapter(ClassManager):
    """Adapts a user-defined InstrumentationManager to a ClassManager.

    Every overridden hook simply delegates to the user-supplied manager
    (``self._adapted``), falling back to ClassManager semantics where the
    user object provides no hook.
    """

    def __init__(self, class_, override, **kw):
        self._adapted = override
        # cache the user-provided state/dict accessors before base init
        self._get_state = self._adapted.state_getter(class_)
        self._get_dict = self._adapted.dict_getter(class_)
        ClassManager.__init__(self, class_, **kw)

    def manage(self):
        self._adapted.manage(self.class_, self)

    def dispose(self):
        self._adapted.dispose(self.class_)

    def manager_getter(self):
        return self._adapted.manager_getter(self.class_)

    def instrument_attribute(self, key, inst, propagated=False):
        ClassManager.instrument_attribute(self, key, inst, propagated)
        # only notify the adapted manager for locally-established attributes
        if not propagated:
            self._adapted.instrument_attribute(self.class_, key, inst)

    def post_configure_attribute(self, key):
        self._adapted.post_configure_attribute(self.class_, key, self[key])

    def install_descriptor(self, key, inst):
        self._adapted.install_descriptor(self.class_, key, inst)

    def uninstall_descriptor(self, key):
        self._adapted.uninstall_descriptor(self.class_, key)

    def install_member(self, key, implementation):
        self._adapted.install_member(self.class_, key, implementation)

    def uninstall_member(self, key):
        self._adapted.uninstall_member(self.class_, key)

    def instrument_collection_class(self, key, collection_class):
        return self._adapted.instrument_collection_class(
            self.class_, key, collection_class)

    def initialize_collection(self, key, state, factory):
        # the adapted manager may or may not implement this hook
        delegate = getattr(self._adapted, 'initialize_collection', None)
        if delegate:
            return delegate(key, state, factory)
        else:
            return ClassManager.initialize_collection(self, key, state, factory)

    def new_instance(self, state=None):
        instance = self.class_.__new__(self.class_)
        self.setup_instance(instance, state)
        return instance

    def _new_state_if_none(self, instance):
        """Install a default InstanceState if none is present.

        A private convenience method used by the __init__ decorator.
        """
        if self.has_state(instance):
            return False
        else:
            return self.setup_instance(instance)

    def setup_instance(self, instance, state=None):
        self._adapted.initialize_instance_dict(self.class_, instance)
        if state is None:
            state = self._create_instance_state(instance)
        # the given instance is assumed to have no state
        self._adapted.install_state(self.class_, instance, state)
        return state

    def teardown_instance(self, instance):
        self._adapted.remove_state(self.class_, instance)

    def has_state(self, instance):
        try:
            state = self._get_state(instance)
        except exc.NO_STATE:
            return False
        else:
            return True

    def state_getter(self):
        return self._get_state

    def dict_getter(self):
        return self._get_dict
class History(tuple):
    """A 3-tuple of (added, unchanged, deleted) attribute values.

    Each member is an iterable sequence; the blank marker uses None.
    """

    __slots__ = ()

    # positional accessors over the underlying tuple
    added = property(itemgetter(0))
    unchanged = property(itemgetter(1))
    deleted = property(itemgetter(2))

    def __new__(cls, added, unchanged, deleted):
        return tuple.__new__(cls, (added, unchanged, deleted))

    def __nonzero__(self):
        # Py2 truth test: anything other than the blank marker is truthy
        return self != HISTORY_BLANK

    def sum(self):
        """All values: added + unchanged + deleted."""
        return (self.added or []) + (self.unchanged or []) + (self.deleted or [])

    def non_deleted(self):
        """Values still present: added + unchanged."""
        return (self.added or []) + (self.unchanged or [])

    def non_added(self):
        """Values previously present: unchanged + deleted."""
        return (self.unchanged or []) + (self.deleted or [])

    def has_changes(self):
        """True when anything was added or deleted."""
        return bool(self.added or self.deleted)

    def as_state(self):
        """Return a copy with each value replaced by its InstanceState.

        Values that are None or the PASSIVE_NO_RESULT marker map to None.
        """
        def to_state(value):
            # and/or form kept deliberately from the original expression
            return (value is not None and value is not PASSIVE_NO_RESULT) \
                and instance_state(value) or None
        return History(
            [to_state(v) for v in self.added],
            [to_state(v) for v in self.unchanged],
            [to_state(v) for v in self.deleted],
        )

    @classmethod
    def from_attribute(cls, attribute, state, current):
        """Build a History for *attribute* given its *current* value."""
        committed = state.committed_state.get(attribute.key, NEVER_SET)
        if hasattr(attribute, 'get_collection'):
            # collection-valued attribute: compare membership by identity
            current = attribute.get_collection(state, state.dict, current)
            if committed is NO_VALUE:
                return cls(list(current), (), ())
            elif committed is NEVER_SET:
                return cls((), list(current), ())
            else:
                current_ids = util.IdentitySet(current)
                committed_ids = util.IdentitySet(committed)
                # ensure duplicates are maintained
                return cls(
                    [item for item in current if item not in committed_ids],
                    [item for item in current if item in committed_ids],
                    [item for item in committed if item not in current_ids]
                )
        else:
            if current is NO_VALUE:
                if (committed is not None and
                        committed is not NEVER_SET and
                        committed is not NO_VALUE):
                    removed = [committed]
                else:
                    removed = ()
                return cls((), (), removed)
            elif committed is NO_VALUE:
                return cls([current], (), ())
            elif (committed is NEVER_SET or
                    attribute.is_equal(current, committed) is True):
                # dont let ClauseElement expressions here trip things up
                return cls((), [current], ())
            else:
                removed = [committed] if committed is not None else ()
                return cls([current], (), removed)

HISTORY_BLANK = History(None, None, None)
def get_history(obj, key, **kwargs):
    """Return a History record for the given object and attribute key.

    obj is an instrumented object instance.  An InstanceState
    is accepted directly for backwards compatibility but
    this usage is deprecated.
    """
    state = instance_state(obj)
    return get_state_history(state, key, **kwargs)
def get_state_history(state, key, **kwargs):
    """Return a History record for *key* directly from an InstanceState."""
    return state.get_history(key, **kwargs)
def has_parent(cls, obj, key, optimistic=False):
    """Return whether *obj* is known to have a parent via attribute *key*.

    Delegates to the class manager's has_parent; *optimistic* is the
    answer used when no hasparent flag has been recorded.
    """
    manager = manager_of_class(cls)
    state = instance_state(obj)
    return manager.has_parent(state, key, optimistic)
def register_class(class_, **kw):
    """Register class instrumentation.

    Returns the existing or newly created class manager.
    """
    existing = manager_of_class(class_)
    if existing is not None:
        return existing
    return _create_manager_for_cls(class_, **kw)
def unregister_class(class_):
    """Unregister class instrumentation."""
    # delegates to the module-level registry singleton
    instrumentation_registry.unregister(class_)
def register_attribute(class_, key, **kw):
    """Install the class-level descriptor for *key*, and, unless a proxy
    property was given, its attribute implementation as well.

    Descriptor-only kwargs are popped here; everything remaining in
    **kw is forwarded to register_attribute_impl.
    """
    proxy_property = kw.pop('proxy_property', None)
    comparator = kw.pop('comparator', None)
    parententity = kw.pop('parententity', None)
    register_descriptor(class_, key, proxy_property, comparator, parententity)
    if not proxy_property:
        register_attribute_impl(class_, key, **kw)
def register_attribute_impl(class_, key,
                            uselist=False, callable_=None,
                            useobject=False, mutable_scalars=False,
                            impl_class=None, **kw):
    """Create and install the AttributeImpl for *key* on *class_*'s manager.

    The impl class is chosen from the flags in priority order: explicit
    *impl_class*, collection (*uselist*), scalar object reference
    (*useobject*), mutable scalar (*mutable_scalars*), plain scalar
    otherwise.
    """
    manager = manager_of_class(class_)
    if uselist:
        # instrument the collection class (defaulting to list) first
        factory = kw.pop('typecallable', None)
        typecallable = manager.instrument_collection_class(
            key, factory or list)
    else:
        typecallable = kw.pop('typecallable', None)
    if impl_class:
        impl = impl_class(class_, key, typecallable, **kw)
    elif uselist:
        impl = CollectionAttributeImpl(class_, key, callable_,
                                       typecallable=typecallable, **kw)
    elif useobject:
        impl = ScalarObjectAttributeImpl(class_, key, callable_, **kw)
    elif mutable_scalars:
        impl = MutableScalarAttributeImpl(class_, key, callable_,
                                          class_manager=manager, **kw)
    else:
        impl = ScalarAttributeImpl(class_, key, callable_, **kw)
    manager[key].impl = impl
    manager.post_configure_attribute(key)
def register_descriptor(class_, key, proxy_property=None, comparator=None, parententity=None, property_=None):
    """Build and install the class-level descriptor for *key* on *class_*."""
    manager = manager_of_class(class_)
    if not proxy_property:
        descriptor = InstrumentedAttribute(
            key, comparator=comparator, parententity=parententity)
    else:
        # wrap the user-supplied descriptor in an InstrumentedAttribute hybrid
        hybrid_type = proxied_attribute_factory(proxy_property)
        descriptor = hybrid_type(key, proxy_property, comparator, parententity)
    manager.instrument_attribute(key, descriptor)
def unregister_attribute(class_, key):
    """Remove the instrumented attribute *key* from *class_*."""
    manager_of_class(class_).uninstrument_attribute(key)
def init_collection(obj, key):
    """Initialize a collection attribute and return the collection adapter.

    This function is used to provide direct access to collection internals
    for a previously unloaded attribute.  e.g.::

        collection_adapter = init_collection(someobject, 'elements')
        for elem in values:
            collection_adapter.append_without_event(elem)

    For an easier way to do the above, see :func:`~sqlalchemy.orm.attributes.set_committed_value`.

    obj is an instrumented object instance.  An InstanceState
    is accepted directly for backwards compatibility but
    this usage is deprecated.
    """
    state = instance_state(obj)
    dict_ = state.dict
    return init_state_collection(state, dict_, key)
def init_state_collection(state, dict_, key):
    """Initialize a collection attribute and return the collection adapter."""
    impl = state.get_impl(key)
    collection = impl.initialize(state, dict_)
    return impl.get_collection(state, dict_, collection)
def set_committed_value(instance, key, value):
    """Set the value of an attribute with no history events.

    Cancels any previous history present.  The value should be
    a scalar value for scalar-holding attributes, or
    an iterable for any collection-holding attribute.

    This is the same underlying method used when a lazy loader
    fires off and loads additional data from the database.
    In particular, this method can be used by application code
    which has loaded additional attributes or collections through
    separate queries, which can then be attached to an instance
    as though it were part of its original loaded state.
    """
    state, dict_ = instance_state(instance), instance_dict(instance)
    state.get_impl(key).set_committed_value(state, dict_, value)
def set_attribute(instance, key, value):
    """Set the value of an attribute, firing history events.

    Works regardless of instrumentation applied directly to the class;
    no descriptors are required.  Custom attribute management schemes
    use this to establish attribute state as understood by SQLAlchemy.
    """
    state = instance_state(instance)
    dict_ = instance_dict(instance)
    state.get_impl(key).set(state, dict_, value, None)
def get_attribute(instance, key):
    """Get the value of an attribute, firing any callables required.

    Works regardless of instrumentation applied directly to the class;
    no descriptors are required.  Custom attribute management schemes
    use this to read attribute state as understood by SQLAlchemy.
    """
    state = instance_state(instance)
    dict_ = instance_dict(instance)
    return state.get_impl(key).get(state, dict_)
def del_attribute(instance, key):
    """Delete the value of an attribute, firing history events.

    Works regardless of instrumentation applied directly to the class;
    no descriptors are required.  Custom attribute management schemes
    use this to establish attribute state as understood by SQLAlchemy.
    """
    state = instance_state(instance)
    dict_ = instance_dict(instance)
    state.get_impl(key).delete(state, dict_)
def is_instrumented(instance, key):
    """Return True if the given attribute on the given instance is
    instrumented by the attributes package.

    Works regardless of instrumentation applied directly to the class;
    no descriptors are required.
    """
    manager = manager_of_class(instance.__class__)
    return manager.is_instrumented(key, search=True)
class InstrumentationRegistry(object):
    """Private instrumentation registration singleton.

    All classes are routed through this registry
    when first instrumented, however the InstrumentationRegistry
    is not actually needed unless custom ClassManagers are in use.
    """

    # per-class finder callables; weak refs so classes can be collected
    _manager_finders = weakref.WeakKeyDictionary()
    _state_finders = util.WeakIdentityMapping()
    _dict_finders = util.WeakIdentityMapping()
    # flips to True once any non-default ClassManager has been seen
    _extended = False

    def create_manager_for_cls(self, class_, **kw):
        """Create and register a manager for *class_*, honoring finders."""
        assert class_ is not None
        assert manager_of_class(class_) is None
        # consult user-registered finders first; default to ClassManager
        for finder in instrumentation_finders:
            factory = finder(class_)
            if factory is not None:
                break
        else:
            factory = ClassManager
        # a hierarchy may only use one instrumentation implementation
        existing_factories = self._collect_management_factories_for(class_).\
            difference([factory])
        if existing_factories:
            raise TypeError(
                "multiple instrumentation implementations specified "
                "in %s inheritance hierarchy: %r" % (
                    class_.__name__, list(existing_factories)))
        manager = factory(class_)
        if not isinstance(manager, ClassManager):
            manager = _ClassInstrumentationAdapter(class_, manager)
        if factory != ClassManager and not self._extended:
            # somebody invoked a custom ClassManager.
            # reinstall global "getter" functions with the more
            # expensive ones.
            self._extended = True
            _install_lookup_strategy(self)
        manager._configure_create_arguments(**kw)
        manager.factory = factory
        self._manager_finders[class_] = manager.manager_getter()
        self._state_finders[class_] = manager.state_getter()
        self._dict_finders[class_] = manager.dict_getter()
        return manager

    def _collect_management_factories_for(self, cls):
        """Return a collection of factories in play or specified for a hierarchy.

        Traverses the entire inheritance graph of a cls and returns a collection
        of instrumentation factories for those classes.  Factories are extracted
        from active ClassManagers, if available, otherwise
        instrumentation_finders is consulted.
        """
        hierarchy = util.class_hierarchy(cls)
        factories = set()
        for member in hierarchy:
            manager = manager_of_class(member)
            if manager is not None:
                factories.add(manager.factory)
            else:
                for finder in instrumentation_finders:
                    factory = finder(member)
                    if factory is not None:
                        break
                else:
                    factory = None
                factories.add(factory)
        factories.discard(None)
        return factories

    def manager_of_class(self, cls):
        # this is only called when alternate instrumentation has been established
        if cls is None:
            return None
        try:
            finder = self._manager_finders[cls]
        except KeyError:
            return None
        else:
            return finder(cls)

    def state_of(self, instance):
        # this is only called when alternate instrumentation has been established
        if instance is None:
            raise AttributeError("None has no persistent state.")
        try:
            return self._state_finders[instance.__class__](instance)
        except KeyError:
            raise AttributeError("%r is not instrumented" % instance.__class__)

    def dict_of(self, instance):
        # this is only called when alternate instrumentation has been established
        if instance is None:
            raise AttributeError("None has no persistent state.")
        try:
            return self._dict_finders[instance.__class__](instance)
        except KeyError:
            raise AttributeError("%r is not instrumented" % instance.__class__)

    def unregister(self, class_):
        """Tear down a class's instrumentation and drop its finders."""
        if class_ in self._manager_finders:
            manager = self.manager_of_class(class_)
            manager.unregister()
            manager.dispose()
            del self._manager_finders[class_]
            del self._state_finders[class_]
            del self._dict_finders[class_]
        if ClassManager.MANAGER_ATTR in class_.__dict__:
            delattr(class_, ClassManager.MANAGER_ATTR)

# module-wide singleton; all registration funnels through it
instrumentation_registry = InstrumentationRegistry()
def _install_lookup_strategy(implementation):
    """Replace global class/object management functions
    with either faster or more comprehensive implementations,
    based on whether or not extended class instrumentation
    has been detected.

    This function is called only by InstrumentationRegistry()
    and unit tests specific to this behavior.
    """
    global instance_state, instance_dict, manager_of_class
    if implementation is util.symbol('native'):
        # fast path: plain attribute lookups on the instrumented objects
        instance_state = attrgetter(ClassManager.STATE_ATTR)
        instance_dict = attrgetter("__dict__")
        def manager_of_class(cls):
            return cls.__dict__.get(ClassManager.MANAGER_ATTR, None)
    else:
        # extended path: route lookups through the registry's finder tables
        instance_state = instrumentation_registry.state_of
        instance_dict = instrumentation_registry.dict_of
        manager_of_class = instrumentation_registry.manager_of_class

# module-level alias, bound once the registry singleton exists
_create_manager_for_cls = instrumentation_registry.create_manager_for_cls

# Install default "lookup" strategies.  These are basically
# very fast attrgetters for key attributes.
# When a custom ClassManager is installed, more expensive per-class
# strategies are copied over these.
_install_lookup_strategy(util.symbol('native'))
def find_native_user_instrumentation_hook(cls):
    """Find user-specified instrumentation management for a class."""
    # INSTRUMENTATION_MANAGER is the well-known class-attribute name under
    # which users may attach a custom instrumentation manager.
    return getattr(cls, INSTRUMENTATION_MANAGER, None)

# consult the user hook before falling back to the default ClassManager
instrumentation_finders.append(find_native_user_instrumentation_hook)
def _generate_init(class_, class_manager):
    """Build an __init__ decorator that triggers ClassManager events."""
    # TODO: we should use the ClassManager's notion of the
    # original '__init__' method, once ClassManager is fixed
    # to always reference that.
    original__init__ = class_.__init__
    assert original__init__

    # Go through some effort here and don't change the user's __init__
    # calling signature.
    # FIXME: need to juggle local names to avoid constructor argument
    # clashes.
    func_body = """\
def __init__(%(apply_pos)s):
    new_state = class_manager._new_state_if_none(%(self_arg)s)
    if new_state:
        return new_state.initialize_instance(%(apply_kw)s)
    else:
        return original__init__(%(apply_kw)s)
"""
    # format_argspec_init yields apply_pos/self_arg/apply_kw fragments that
    # mirror the original constructor's exact signature
    func_vars = util.format_argspec_init(original__init__, grouped=False)
    func_text = func_body % func_vars
    # Py3K
    #func_defaults = getattr(original__init__, '__defaults__', None)
    # Py2K
    func = getattr(original__init__, 'im_func', original__init__)
    func_defaults = getattr(func, 'func_defaults', None)
    # end Py2K
    # compile the wrapper in an env that closes over class_manager and
    # original__init__ (both are in locals() here)
    env = locals().copy()
    exec func_text in env
    __init__ = env['__init__']
    __init__.__doc__ = original__init__.__doc__
    if func_defaults:
        __init__.func_defaults = func_defaults
    return __init__
| {
"repo_name": "obeattie/sqlalchemy",
"path": "lib/sqlalchemy/orm/attributes.py",
"copies": "1",
"size": "62613",
"license": "mit",
"hash": -5276984382939762000,
"line_mean": 35.7016412661,
"line_max": 110,
"alpha_frac": 0.6108156453,
"autogenerated": false,
"ratio": 4.519815202483216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009589073311753543,
"num_lines": 1706
} |
import operator, weakref
from itertools import chain
import UserDict
from sqlalchemy import util
from sqlalchemy.orm import interfaces, collections
from sqlalchemy.orm.util import identity_equal
from sqlalchemy import exceptions
# Sentinel symbols used throughout this module to distinguish special
# attribute-value conditions from ordinary values.
PASSIVE_NORESULT = util.symbol('PASSIVE_NORESULT')  # passive get found no loaded value
ATTR_WAS_SET = util.symbol('ATTR_WAS_SET')  # deferred callable set the attr itself
NO_VALUE = util.symbol('NO_VALUE')  # attribute has no value at all
NEVER_SET = util.symbol('NEVER_SET')  # attribute was never set nor committed
class InstrumentedAttribute(interfaces.PropComparator):
    """public-facing instrumented attribute, placed in the
    class dictionary.

    A data descriptor: attribute access on instances routes through
    ``self.impl`` (the AttributeImpl), while class-level access returns
    the descriptor itself for expression construction.
    """

    def __init__(self, impl, comparator=None):
        """Construct an InstrumentedAttribute.

        comparator
          a sql.Comparator to which class-level compare/math events will be sent
        """
        self.impl = impl
        self.comparator = comparator

    def __set__(self, instance, value):
        self.impl.set(instance._state, value, None)

    def __delete__(self, instance):
        self.impl.delete(instance._state)

    def __get__(self, instance, owner):
        # class-level access returns the descriptor for query expressions
        if instance is None:
            return self
        return self.impl.get(instance._state)

    def get_history(self, instance, **kwargs):
        return self.impl.get_history(instance._state, **kwargs)

    def clause_element(self):
        return self.comparator.clause_element()

    def expression_element(self):
        return self.comparator.expression_element()

    def operate(self, op, *other, **kwargs):
        # comparison/math operators are forwarded to the comparator
        return op(self.comparator, *other, **kwargs)

    def reverse_operate(self, op, other, **kwargs):
        return op(other, self.comparator, **kwargs)

    def hasparent(self, instance, optimistic=False):
        return self.impl.hasparent(instance._state, optimistic=optimistic)

    def _property(self):
        # local import avoids a circular dependency with orm.mapper
        from sqlalchemy.orm.mapper import class_mapper
        return class_mapper(self.impl.class_).get_property(self.impl.key)
    property = property(_property, doc="the MapperProperty object associated with this attribute")
class ProxiedAttribute(InstrumentedAttribute):
    """Adds InstrumentedAttribute class-level behavior to a regular descriptor.

    Obsoleted by proxied_attribute_factory.
    """

    class ProxyImpl(object):
        # minimal stand-in impl; carries only the attribute key
        accepts_scalar_loader = False

        def __init__(self, key):
            self.key = key

    def __init__(self, key, user_prop, comparator=None):
        self.user_prop = user_prop
        self._comparator = comparator
        self.key = key
        self.impl = ProxiedAttribute.ProxyImpl(key)

    def comparator(self):
        # a callable comparator is resolved lazily on first access
        if callable(self._comparator):
            self._comparator = self._comparator()
        return self._comparator
    comparator = property(comparator)

    def __get__(self, instance, owner):
        if instance is None:
            # class-level access: invoke the wrapped descriptor first
            # (result discarded — presumably to let it raise; TODO confirm),
            # then return this proxy for expression use
            self.user_prop.__get__(instance, owner)
            return self
        return self.user_prop.__get__(instance, owner)

    def __set__(self, instance, value):
        return self.user_prop.__set__(instance, value)

    def __delete__(self, instance):
        return self.user_prop.__delete__(instance)
def proxied_attribute_factory(descriptor):
    """Create an InstrumentedAttribute / user descriptor hybrid.

    Returns a new InstrumentedAttribute type that delegates descriptor
    behavior and getattr() to the given descriptor.
    """

    class ProxyImpl(object):
        # minimal stand-in impl; carries only the attribute key
        accepts_scalar_loader = False

        def __init__(self, key):
            self.key = key

    class Proxy(InstrumentedAttribute):
        """A combination of InstrumentedAttribute and a regular descriptor."""

        def __init__(self, key, descriptor, comparator):
            self.key = key
            # maintain ProxiedAttribute.user_prop compatibility.
            self.descriptor = self.user_prop = descriptor
            self._comparator = comparator
            self.impl = ProxyImpl(key)

        def comparator(self):
            # a callable comparator is resolved lazily on first access
            if callable(self._comparator):
                self._comparator = self._comparator()
            return self._comparator
        comparator = property(comparator)

        def __get__(self, instance, owner):
            """Delegate __get__ to the original descriptor."""
            if instance is None:
                # class-level access: invoke the wrapped descriptor
                # (result discarded), then return the proxy itself
                descriptor.__get__(instance, owner)
                return self
            return descriptor.__get__(instance, owner)

        def __set__(self, instance, value):
            """Delegate __set__ to the original descriptor."""
            return descriptor.__set__(instance, value)

        def __delete__(self, instance):
            """Delegate __delete__ to the original descriptor."""
            return descriptor.__delete__(instance)

        def __getattr__(self, attribute):
            """Delegate __getattr__ to the original descriptor."""
            return getattr(descriptor, attribute)

    Proxy.__name__ = type(descriptor).__name__ + 'Proxy'
    # copy special methods of the wrapped descriptor type onto Proxy
    util.monkeypatch_proxied_specials(Proxy, type(descriptor),
                                      name='descriptor',
                                      from_instance=descriptor)
    return Proxy
class AttributeImpl(object):
    """internal implementation for instrumented attributes."""

    def __init__(self, class_, key, callable_, trackparent=False, extension=None, compare_function=None, **kwargs):
        """Construct an AttributeImpl.

        class_
          the class to be instrumented.

        key
          string name of the attribute

        callable_
          optional function which generates a callable based on a parent
          instance, which produces the "default" values for a scalar or
          collection attribute when it's first accessed, if not present
          already.

        trackparent
          if True, attempt to track if an instance has a parent attached
          to it via this attribute.

        extension
          an AttributeExtension object which will receive
          set/delete/append/remove/etc. events.

        compare_function
          a function that compares two values which are normally
          assignable to this attribute.
        """
        self.class_ = class_
        self.key = key
        self.callable_ = callable_
        self.trackparent = trackparent
        if compare_function is None:
            self.is_equal = operator.eq
        else:
            self.is_equal = compare_function
        self.extensions = util.to_list(extension or [])

    def hasparent(self, state, optimistic=False):
        """Return the boolean value of a `hasparent` flag attached to the given item.

        The `optimistic` flag determines what the default return value
        should be if no `hasparent` flag can be located.

        As this function is used to determine if an instance is an
        *orphan*, instances that were loaded from storage should be
        assumed to not be orphans, until a True/False value for this
        flag is set.

        An instance attribute that is loaded by a callable function
        will also not have a `hasparent` flag.
        """
        return state.parents.get(id(self), optimistic)

    def sethasparent(self, state, value):
        """Set a boolean flag on the given item corresponding to
        whether or not it is attached to a parent object via the
        attribute represented by this ``InstrumentedAttribute``.
        """
        state.parents[id(self)] = value

    def set_callable(self, state, callable_):
        """Set a callable function for this attribute on the given object.

        This callable will be executed when the attribute is next
        accessed, and is assumed to construct part of the instances
        previously stored state.  When its value or values are loaded,
        they will be established as part of the instance's *committed
        state*.  While *trackparent* information will be assembled for
        these instances, attribute-level event handlers will not be
        fired.

        The callable overrides the class level callable set in the
        ``InstrumentedAttribute`` constructor.
        """
        if callable_ is None:
            # clearing the callable establishes an empty value instead
            self.initialize(state)
        else:
            state.callables[self.key] = callable_

    def get_history(self, state, passive=False):
        # abstract; concrete impls build a (added, unchanged, deleted) record
        raise NotImplementedError()

    def _get_callable(self, state):
        # per-instance callable wins over the class-level callable factory
        if self.key in state.callables:
            return state.callables[self.key]
        elif self.callable_ is not None:
            return self.callable_(state.obj())
        else:
            return None

    def initialize(self, state):
        """Initialize this attribute on the given object instance with an empty value."""
        state.dict[self.key] = None
        return None

    def get(self, state, passive=False):
        """Retrieve a value from the given object.

        If a callable is assembled on this object's attribute, and
        passive is False, the callable will be executed and the
        resulting value will be set as the new value for this attribute.
        """
        try:
            return state.dict[self.key]
        except KeyError:
            # if no history, check for lazy callables, etc.
            if self.key not in state.committed_state:
                callable_ = self._get_callable(state)
                if callable_ is not None:
                    if passive:
                        # caller asked not to trigger a load
                        return PASSIVE_NORESULT
                    value = callable_()
                    if value is not ATTR_WAS_SET:
                        return self.set_committed_value(state, value)
                    else:
                        # the callable populated the attr itself; re-read it
                        if self.key not in state.dict:
                            return self.get(state, passive=passive)
                        return state.dict[self.key]
            # Return a new, empty value
            return self.initialize(state)

    def append(self, state, value, initiator, passive=False):
        # scalar default: appending degenerates to a plain set
        self.set(state, value, initiator)

    def remove(self, state, value, initiator, passive=False):
        # scalar default: removal degenerates to setting None
        self.set(state, None, initiator)

    def set(self, state, value, initiator):
        raise NotImplementedError()

    def get_committed_value(self, state):
        """return the unchanged value of this attribute"""
        if self.key in state.committed_state:
            if state.committed_state[self.key] is NO_VALUE:
                return None
            else:
                return state.committed_state.get(self.key)
        else:
            return self.get(state)

    def set_committed_value(self, state, value):
        """set an attribute value on the given instance and 'commit' it."""
        state.commit_attr(self, value)
        return value
class ScalarAttributeImpl(AttributeImpl):
    """represents a scalar value-holding InstrumentedAttribute."""

    accepts_scalar_loader = True

    def delete(self, state):
        """Remove this attribute's value, snapshotting the old value first."""
        if self.key not in state.committed_state:
            # record the pre-delete value so history/rollback can see it
            state.committed_state[self.key] = state.dict.get(self.key, NO_VALUE)
        # TODO: catch key errors, convert to attributeerror?
        del state.dict[self.key]
        state.modified = True

    def get_history(self, state, passive=False):
        """Return (added, unchanged, deleted) history for this attribute."""
        return _create_history(self, state, state.dict.get(self.key, NO_VALUE))

    def set(self, state, value, initiator):
        """Set the attribute's value, snapshotting the prior value.

        `initiator` guards against re-entrant event chains: a set
        initiated by this impl itself is a no-op.
        """
        if initiator is self:
            return
        if self.key not in state.committed_state:
            state.committed_state[self.key] = state.dict.get(self.key, NO_VALUE)
        state.dict[self.key] = value
        state.modified = True

    def type(self):
        # BUG FIX: previously this expression's result was discarded and
        # the property always returned None; return the column's type.
        return self.property.columns[0].type
    type = property(type)
class MutableScalarAttributeImpl(ScalarAttributeImpl):
    """represents a scalar value-holding InstrumentedAttribute, which can detect
    changes within the value itself.
    """

    def __init__(self, class_, key, callable_, copy_function=None, compare_function=None, **kwargs):
        """Construct the impl.

        ``copy_function`` is required: it snapshots values so that later
        in-place mutations can be detected by comparing against the copy.
        """
        # CONSISTENCY FIX: name this class in super() as the sibling impls
        # do (was ``super(ScalarAttributeImpl, self)``); both resolve to
        # AttributeImpl.__init__ since ScalarAttributeImpl defines none.
        super(MutableScalarAttributeImpl, self).__init__(
            class_, key, callable_, compare_function=compare_function, **kwargs)
        class_._class_state.has_mutable_scalars = True
        if copy_function is None:
            raise exceptions.ArgumentError("MutableScalarAttributeImpl requires a copy function")
        self.copy = copy_function

    def get_history(self, state, passive=False):
        """Return (added, unchanged, deleted) computed against the copied snapshot."""
        return _create_history(self, state, state.dict.get(self.key, NO_VALUE))

    def commit_to_state(self, state, value):
        # store a *copy*, so in-place mutation of the live value is detectable
        state.committed_state[self.key] = self.copy(value)

    def check_mutable_modified(self, state):
        """Compare the current value to its snapshot; mark state modified on change."""
        (added, unchanged, deleted) = self.get_history(state, passive=True)
        if added or deleted:
            state.modified = True
            return True
        else:
            return False

    def set(self, state, value, initiator):
        """Set the value, snapshotting the prior value via the copy function."""
        if initiator is self:
            # re-entrant event chain; stop recursion
            return
        if self.key not in state.committed_state:
            if self.key in state.dict:
                state.committed_state[self.key] = self.copy(state.dict[self.key])
            else:
                state.committed_state[self.key] = NO_VALUE
        state.dict[self.key] = value
        state.modified = True
class ScalarObjectAttributeImpl(ScalarAttributeImpl):
    """represents a scalar-holding InstrumentedAttribute, where the target object is also instrumented.
    Adds events to delete/set operations.
    """
    # object references cannot be bulk-loaded as plain scalars
    accepts_scalar_loader = False
    def __init__(self, class_, key, callable_, trackparent=False, extension=None, copy_function=None, compare_function=None, **kwargs):
        super(ScalarObjectAttributeImpl, self).__init__(class_, key,
            callable_, trackparent=trackparent, extension=extension,
            compare_function=compare_function, **kwargs)
        if compare_function is None:
            # object references compare by identity, not equality
            self.is_equal = identity_equal
    def delete(self, state):
        # fetch the old value first so the remove event can reference it
        old = self.get(state)
        # TODO: catch key errors, convert to attributeerror?
        del state.dict[self.key]
        self.fire_remove_event(state, old, self)
    def get_history(self, state, passive=False):
        """Return (added, unchanged, deleted); (None, None, None) if a passive load was refused."""
        if self.key in state.dict:
            return _create_history(self, state, state.dict[self.key])
        else:
            current = self.get(state, passive=passive)
            if current is PASSIVE_NORESULT:
                return (None, None, None)
            else:
                return _create_history(self, state, current)
    def set(self, state, value, initiator):
        """Set a value on the given InstanceState.
        `initiator` is the ``InstrumentedAttribute`` that initiated the
        ``set()`` operation and is used to control the depth of a circular
        setter operation.
        """
        if initiator is self:
            return
        if value is not None and not hasattr(value, '_state'):
            raise TypeError("Can not assign %s instance to %s's %r attribute, "
                            "a mapped instance was expected." % (
                type(value).__name__, type(state.obj()).__name__, self.key))
        # TODO: add options to allow the get() to be passive
        old = self.get(state)
        state.dict[self.key] = value
        self.fire_replace_event(state, value, old, initiator)
    def fire_remove_event(self, state, value, initiator):
        # snapshot the removed value as committed state, flag modified
        if self.key not in state.committed_state:
            state.committed_state[self.key] = value
        state.modified = True
        if self.trackparent and value is not None:
            # the removed child no longer has this object as a parent
            self.sethasparent(value._state, False)
        instance = state.obj()
        for ext in self.extensions:
            ext.remove(instance, value, initiator or self)
    def fire_replace_event(self, state, value, previous, initiator):
        # snapshot the replaced value as committed state, flag modified
        if self.key not in state.committed_state:
            state.committed_state[self.key] = previous
        state.modified = True
        if self.trackparent:
            if value is not None:
                self.sethasparent(value._state, True)
            # only clear the old child's parent flag if it's a different object
            if previous is not value and previous is not None:
                self.sethasparent(previous._state, False)
        instance = state.obj()
        for ext in self.extensions:
            ext.set(instance, value, previous, initiator or self)
class CollectionAttributeImpl(AttributeImpl):
    """A collection-holding attribute that instruments changes in membership.
    Only handles collections of instrumented objects.
    InstrumentedCollectionAttribute holds an arbitrary, user-specified
    container object (defaulting to a list) and brokers access to the
    CollectionAdapter, a "view" onto that object that presents consistent
    bag semantics to the orm layer independent of the user data implementation.
    """
    accepts_scalar_loader = False
    def __init__(self, class_, key, callable_, typecallable=None, trackparent=False, extension=None, copy_function=None, compare_function=None, **kwargs):
        super(CollectionAttributeImpl, self).__init__(class_,
            key, callable_, trackparent=trackparent, extension=extension,
            compare_function=compare_function, **kwargs)
        if copy_function is None:
            # default copy snapshots the logical membership of the collection
            copy_function = self.__copy
        self.copy = copy_function
        if typecallable is None:
            typecallable = list
        # factory producing new instrumented user collections
        self.collection_factory = \
            collections._prepare_instrumentation(typecallable)
        # may be removed in 0.5:
        self.collection_interface = \
            util.duck_type_collection(self.collection_factory())
    def __copy(self, item):
        # snapshot members via the CollectionAdapter "view" of the container
        return [y for y in list(collections.collection_adapter(item))]
    def get_history(self, state, passive=False):
        """Return (added, unchanged, deleted); (None, None, None) if a passive load was refused."""
        current = self.get(state, passive=passive)
        if current is PASSIVE_NORESULT:
            return (None, None, None)
        else:
            return _create_history(self, state, current)
    def fire_append_event(self, state, value, initiator):
        # snapshot the collection before its first change, flag modified
        if self.key not in state.committed_state and self.key in state.dict:
            state.committed_state[self.key] = self.copy(state.dict[self.key])
        state.modified = True
        if self.trackparent and value is not None:
            self.sethasparent(value._state, True)
        instance = state.obj()
        for ext in self.extensions:
            ext.append(instance, value, initiator or self)
    def fire_pre_remove_event(self, state, initiator):
        # snapshot only; actual removal events fire afterwards
        if self.key not in state.committed_state and self.key in state.dict:
            state.committed_state[self.key] = self.copy(state.dict[self.key])
    def fire_remove_event(self, state, value, initiator):
        if self.key not in state.committed_state and self.key in state.dict:
            state.committed_state[self.key] = self.copy(state.dict[self.key])
        state.modified = True
        if self.trackparent and value is not None:
            self.sethasparent(value._state, False)
        instance = state.obj()
        for ext in self.extensions:
            ext.remove(instance, value, initiator or self)
    def delete(self, state):
        if self.key not in state.dict:
            return
        state.modified = True
        collection = self.get_collection(state)
        # emit remove events for every member before dropping the container
        collection.clear_with_event()
        # TODO: catch key errors, convert to attributeerror?
        del state.dict[self.key]
    def initialize(self, state):
        """Initialize this attribute on the given object instance with an empty collection."""
        _, user_data = self._build_collection(state)
        state.dict[self.key] = user_data
        return user_data
    def append(self, state, value, initiator, passive=False):
        if initiator is self:
            return
        collection = self.get_collection(state, passive=passive)
        if collection is PASSIVE_NORESULT:
            # collection not loaded; buffer the change and fire the event now
            state.get_pending(self.key).append(value)
            self.fire_append_event(state, value, initiator)
        else:
            collection.append_with_event(value, initiator)
    def remove(self, state, value, initiator, passive=False):
        if initiator is self:
            return
        collection = self.get_collection(state, passive=passive)
        if collection is PASSIVE_NORESULT:
            # collection not loaded; buffer the change and fire the event now
            state.get_pending(self.key).remove(value)
            self.fire_remove_event(state, value, initiator)
        else:
            collection.remove_with_event(value, initiator)
    def set(self, state, value, initiator):
        """Set a value on the given object.
        `initiator` is the ``InstrumentedAttribute`` that initiated the
        ``set()`` operation and is used to control the depth of a circular
        setter operation.
        """
        if initiator is self:
            return
        self._set_iterable(
            state, value,
            lambda adapter, i: adapter.adapt_like_to_iterable(i))
    def _set_iterable(self, state, iterable, adapter=None):
        """Set a collection value from an iterable of state-bearers.
        ``adapter`` is an optional callable invoked with a CollectionAdapter
        and the iterable.  Should return an iterable of state-bearing
        instances suitable for appending via a CollectionAdapter.  Can be used
        for, e.g., adapting an incoming dictionary into an iterator of values
        rather than keys.
        """
        # pulling a new collection first so that an adaptation exception does
        # not trigger a lazy load of the old collection.
        new_collection, user_data = self._build_collection(state)
        if adapter:
            new_values = list(adapter(new_collection, iterable))
        else:
            new_values = list(iterable)
        old = self.get(state)
        # ignore re-assignment of the current collection, as happens
        # implicitly with in-place operators (foo.collection |= other)
        if old is iterable:
            return
        if self.key not in state.committed_state:
            state.committed_state[self.key] = self.copy(old)
        old_collection = self.get_collection(state, old)
        state.dict[self.key] = user_data
        state.modified = True
        # move membership, firing append/remove events for the difference
        collections.bulk_replace(new_values, old_collection, new_collection)
        old_collection.unlink(old)
    def set_committed_value(self, state, value):
        """Set an attribute value on the given instance and 'commit' it.
        Loads the existing collection from lazy callables in all cases.
        """
        collection, user_data = self._build_collection(state)
        if value:
            for item in value:
                collection.append_without_event(item)
        # the value is now loaded; drop any lazy loader for this key
        state.callables.pop(self.key, None)
        state.dict[self.key] = user_data
        if self.key in state.pending:
            # pending items. commit loaded data, add/remove new data
            state.committed_state[self.key] = list(value or [])
            added = state.pending[self.key].added_items
            removed = state.pending[self.key].deleted_items
            for item in added:
                collection.append_without_event(item)
            for item in removed:
                collection.remove_without_event(item)
            del state.pending[self.key]
        elif self.key in state.committed_state:
            # no pending items. remove committed state if any.
            # (this can occur with an expired attribute)
            del state.committed_state[self.key]
        return user_data
    def _build_collection(self, state):
        """build a new, blank collection and return it wrapped in a CollectionAdapter."""
        user_data = self.collection_factory()
        collection = collections.CollectionAdapter(self, state, user_data)
        return collection, user_data
    def get_collection(self, state, user_data=None, passive=False):
        """retrieve the CollectionAdapter associated with the given state.
        Creates a new CollectionAdapter if one does not exist.
        """
        if user_data is None:
            user_data = self.get(state, passive=passive)
            if user_data is PASSIVE_NORESULT:
                # propagate the refusal marker to the caller
                return user_data
        try:
            return getattr(user_data, '_sa_adapter')
        except AttributeError:
            # TODO: this codepath never occurs, and this
            # except/initialize should be removed
            collections.CollectionAdapter(self, state, user_data)
            return getattr(user_data, '_sa_adapter')
class GenericBackrefExtension(interfaces.AttributeExtension):
    """An extension which synchronizes a two-way relationship.
    A typical two-way relationship is a parent object containing a
    list of child objects, where each child object references the
    parent.  The other common case is two objects which contain scalar
    references to each other.
    """
    def __init__(self, key):
        # name of the complementary attribute on the other class
        self.key = key
    def set(self, instance, child, oldchild, initiator):
        if oldchild is child:
            return
        if oldchild is not None:
            # With lazy=None, there's no guarantee that the full collection is
            # present when updating via a backref.
            impl = getattr(oldchild.__class__, self.key).impl
            try:
                impl.remove(oldchild._state, instance, initiator, passive=True)
            except (ValueError, KeyError, IndexError):
                # deliberate best-effort: the item may be absent from a
                # partially-loaded collection
                pass
        if child is not None:
            getattr(child.__class__, self.key).impl.append(child._state, instance, initiator, passive=True)
    def append(self, instance, child, initiator):
        # reflect the append onto the complementary attribute of the child
        getattr(child.__class__, self.key).impl.append(child._state, instance, initiator, passive=True)
    def remove(self, instance, child, initiator):
        if child is not None:
            getattr(child.__class__, self.key).impl.remove(child._state, instance, initiator, passive=True)
class ClassState(object):
    """Per-class bookkeeping for instrumented attributes."""
    def __init__(self):
        # registry of mappers and of InstrumentedAttributes for the class
        self.mappers = {}
        self.attrs = {}
        # set True when a mutable-scalar attribute impl is installed
        self.has_mutable_scalars = False
import sets
# shared immutable empty set, used as the cheap initial value of
# InstanceState.expired_attributes.
# NOTE(review): the ``sets`` module is deprecated (removed in Python 3);
# kept here presumably for Python 2.3 compatibility -- confirm before porting.
_empty_set = sets.ImmutableSet()
class InstanceState(object):
    """tracks state information at the instance level."""
    def __init__(self, obj):
        self.class_ = obj.__class__
        # weak reference to the instance; __cleanup fires on collection
        self.obj = weakref.ref(obj, self.__cleanup)
        # shared reference to the instance's own __dict__
        self.dict = obj.__dict__
        # per-key snapshots of pre-change values
        self.committed_state = {}
        self.modified = False
        # per-key loader callables (lazy loads, expiry loaders)
        self.callables = {}
        # hasparent flags, keyed by id() of the attribute impl
        self.parents = {}
        # PendingCollection buffers for not-yet-loaded collections
        self.pending = {}
        self.appenders = {}
        # weakref to the owning identity map (set by WeakInstanceDict)
        self.instance_dict = None
        self.runid = None
        self.expired_attributes = _empty_set
    def __cleanup(self, ref):
        # tiptoe around Python GC unpredictableness
        instance_dict = self.instance_dict
        if instance_dict is None:
            return
        instance_dict = instance_dict()
        if instance_dict is None or instance_dict._mutex is None:
            return
        # the mutexing here is based on the assumption that gc.collect()
        # may be firing off cleanup handlers in a different thread than that
        # which is normally operating upon the instance dict.
        instance_dict._mutex.acquire()
        try:
            try:
                self.__resurrect(instance_dict)
            except:
                # catch app cleanup exceptions.  no other way around this
                # without warnings being produced
                pass
        finally:
            instance_dict._mutex.release()
    def _check_resurrect(self, instance_dict):
        # return the live instance, or rebuild one if it had pending changes
        instance_dict._mutex.acquire()
        try:
            return self.obj() or self.__resurrect(instance_dict)
        finally:
            instance_dict._mutex.release()
    def get_pending(self, key):
        # lazily create the pending-change buffer for an unloaded collection
        if key not in self.pending:
            self.pending[key] = PendingCollection()
        return self.pending[key]
    def is_modified(self):
        """Return True if this state has uncommitted changes, including
        in-place mutations of mutable-scalar values."""
        if self.modified:
            return True
        elif self.class_._class_state.has_mutable_scalars:
            # for/else: the else runs only when no attribute reported a change
            for attr in _managed_attributes(self.class_):
                if hasattr(attr.impl, 'check_mutable_modified') and attr.impl.check_mutable_modified(self):
                    return True
            else:
                return False
        else:
            return False
    def __resurrect(self, instance_dict):
        # called when the instance was garbage collected: either rebuild it
        # (it had unsaved changes) or remove it from the identity map
        if self.is_modified():
            # store strong ref'ed version of the object; will revert
            # to weakref when changes are persisted
            obj = new_instance(self.class_, state=self)
            self.obj = weakref.ref(obj, self.__cleanup)
            self._strong_obj = obj
            obj.__dict__.update(self.dict)
            self.dict = obj.__dict__
            return obj
        else:
            del instance_dict[self.dict['_instance_key']]
            return None
    def __getstate__(self):
        # pickle support: persist the instance itself plus change-tracking data
        return {'committed_state':self.committed_state, 'pending':self.pending, 'parents':self.parents, 'modified':self.modified, 'instance':self.obj(), 'expired_attributes':self.expired_attributes, 'callables':self.callables}
    def __setstate__(self, state):
        self.committed_state = state['committed_state']
        self.parents = state['parents']
        self.pending = state['pending']
        self.modified = state['modified']
        # note: no __cleanup callback is re-established here
        self.obj = weakref.ref(state['instance'])
        self.class_ = self.obj().__class__
        self.dict = self.obj().__dict__
        self.callables = state['callables']
        self.runid = None
        self.appenders = {}
        self.expired_attributes = state['expired_attributes']
    def initialize(self, key):
        # delegate to the attribute impl's empty-value initializer
        getattr(self.class_, key).impl.initialize(self)
    def set_callable(self, key, callable_):
        # install a loader for key, discarding any currently loaded value
        self.dict.pop(key, None)
        self.callables[key] = callable_
    def __call__(self):
        """__call__ allows the InstanceState to act as a deferred
        callable for loading expired attributes, which is also
        serializable.
        """
        instance = self.obj()
        unmodified = self.unmodified
        # load only scalar-loadable, expired, unmodified attributes in one pass
        self.class_._class_state.deferred_scalar_loader(instance, [
            attr.impl.key for attr in _managed_attributes(self.class_) if
                attr.impl.accepts_scalar_loader and
                attr.impl.key in self.expired_attributes and
                attr.impl.key in unmodified
            ])
        for k in self.expired_attributes:
            self.callables.pop(k, None)
        self.expired_attributes.clear()
        return ATTR_WAS_SET
    def unmodified(self):
        """a set of keys which have no uncommitted changes"""
        return util.Set([
            attr.impl.key for attr in _managed_attributes(self.class_) if
            attr.impl.key not in self.committed_state
            and (not hasattr(attr.impl, 'commit_to_state') or not attr.impl.check_mutable_modified(self))
        ])
    unmodified = property(unmodified)
    def expire_attributes(self, attribute_names):
        """Expire the named attributes (all of them if None), installing
        this state itself as the deferred loader for scalar-loadables."""
        # promote the shared immutable empty set to a private mutable one
        self.expired_attributes = util.Set(self.expired_attributes)
        if attribute_names is None:
            for attr in _managed_attributes(self.class_):
                self.dict.pop(attr.impl.key, None)
                self.expired_attributes.add(attr.impl.key)
                if attr.impl.accepts_scalar_loader:
                    self.callables[attr.impl.key] = self
            self.committed_state = {}
        else:
            for key in attribute_names:
                self.dict.pop(key, None)
                self.committed_state.pop(key, None)
                self.expired_attributes.add(key)
                if getattr(self.class_, key).impl.accepts_scalar_loader:
                    self.callables[key] = self
    def reset(self, key):
        """remove the given attribute and any callables associated with it."""
        self.dict.pop(key, None)
        self.callables.pop(key, None)
    def commit_attr(self, attr, value):
        """set the value of an attribute and mark it 'committed'."""
        if hasattr(attr, 'commit_to_state'):
            # mutable scalar: store a copied snapshot as the committed value
            attr.commit_to_state(self, value)
        else:
            self.committed_state.pop(attr.key, None)
        self.dict[attr.key] = value
        self.pending.pop(attr.key, None)
        self.appenders.pop(attr.key, None)
        # we have a value so we can also unexpire it
        self.callables.pop(attr.key, None)
        if attr.key in self.expired_attributes:
            self.expired_attributes.remove(attr.key)
    def commit(self, keys):
        """commit all attributes named in the given list of key names.
        This is used by a partial-attribute load operation to mark committed those attributes
        which were refreshed from the database.
        Attributes marked as "expired" can potentially remain "expired" after this step
        if a value was not populated in state.dict.
        """
        if self.class_._class_state.has_mutable_scalars:
            for key in keys:
                attr = getattr(self.class_, key).impl
                if hasattr(attr, 'commit_to_state') and attr.key in self.dict:
                    attr.commit_to_state(self, self.dict[attr.key])
                else:
                    self.committed_state.pop(attr.key, None)
                self.pending.pop(key, None)
                self.appenders.pop(key, None)
        else:
            for key in keys:
                self.committed_state.pop(key, None)
                self.pending.pop(key, None)
                self.appenders.pop(key, None)
        # unexpire attributes which have loaded
        for key in self.expired_attributes.intersection(keys):
            if key in self.dict:
                self.expired_attributes.remove(key)
                self.callables.pop(key, None)
    def commit_all(self):
        """commit all attributes unconditionally.
        This is used after a flush() or a regular instance load or refresh operation
        to mark committed all populated attributes.
        Attributes marked as "expired" can potentially remain "expired" after this step
        if a value was not populated in state.dict.
        """
        self.committed_state = {}
        self.modified = False
        self.pending = {}
        self.appenders = {}
        # unexpire attributes which have loaded
        for key in list(self.expired_attributes):
            if key in self.dict:
                self.expired_attributes.remove(key)
                self.callables.pop(key, None)
        if self.class_._class_state.has_mutable_scalars:
            for attr in _managed_attributes(self.class_):
                if hasattr(attr.impl, 'commit_to_state') and attr.impl.key in self.dict:
                    attr.impl.commit_to_state(self, self.dict[attr.impl.key])
        # remove strong ref
        self._strong_obj = None
class WeakInstanceDict(UserDict.UserDict):
    """similar to WeakValueDictionary, but wired towards 'state' objects."""
    def __init__(self, *args, **kw):
        # weakref to self, handed to each InstanceState as instance_dict
        self._wr = weakref.ref(self)
        # RLock because the mutex is used by a cleanup handler, which can be
        # called at any time (including within an already mutexed block)
        self._mutex = util.threading.RLock()
        UserDict.UserDict.__init__(self, *args, **kw)
    def __getitem__(self, key):
        state = self.data[key]
        o = state.obj()
        if o is None:
            # instance was garbage collected; attempt resurrection
            o = state._check_resurrect(self)
        if o is None:
            raise KeyError, key
        return o
    def __contains__(self, key):
        try:
            state = self.data[key]
            o = state.obj()
            if o is None:
                o = state._check_resurrect(self)
        except KeyError:
            return False
        return o is not None
    def has_key(self, key):
        return key in self
    def __repr__(self):
        return "<InstanceDict at %s>" % id(self)
    def __setitem__(self, key, value):
        if key in self.data:
            # detach the state currently occupying this key, under the mutex
            self._mutex.acquire()
            try:
                if key in self.data:
                    self.data[key].instance_dict = None
            finally:
                self._mutex.release()
        # store the state, not the instance, and point the state back at us
        self.data[key] = value._state
        value._state.instance_dict = self._wr
    def __delitem__(self, key):
        state = self.data[key]
        state.instance_dict = None
        del self.data[key]
    def get(self, key, default=None):
        try:
            state = self.data[key]
        except KeyError:
            return default
        else:
            o = state.obj()
            if o is None:
                # stale weakref: instance was garbage collected; treat as missing
                return default
            else:
                return o
    def items(self):
        # only (key, instance) pairs whose instance is still alive
        L = []
        for key, state in self.data.items():
            o = state.obj()
            if o is not None:
                L.append((key, o))
        return L
    def iteritems(self):
        # note: yields the instance's _instance_key, not the dict key
        for state in self.data.itervalues():
            value = state.obj()
            if value is not None:
                yield value._instance_key, value
    def iterkeys(self):
        return self.data.iterkeys()
    def __iter__(self):
        return self.data.iterkeys()
    def __len__(self):
        # counts only live instances, so may be < len(self.data)
        return len(self.values())
    def itervalues(self):
        for state in self.data.itervalues():
            instance = state.obj()
            if instance is not None:
                yield instance
    def values(self):
        L = []
        for state in self.data.values():
            o = state.obj()
            if o is not None:
                L.append(o)
        return L
    # mutation helpers below are unsupported: states must be attached and
    # detached individually through __setitem__/__delitem__
    def popitem(self):
        raise NotImplementedError()
    def pop(self, key, *args):
        raise NotImplementedError()
    def setdefault(self, key, default=None):
        raise NotImplementedError()
    def update(self, dict=None, **kwargs):
        raise NotImplementedError()
    def copy(self):
        raise NotImplementedError()
    def all_states(self):
        # the raw InstanceStates, including those whose instance has died
        return self.data.values()
class StrongInstanceDict(dict):
    """An identity map holding hard references to its instances."""
    def all_states(self):
        """Return the InstanceState of every instance in the dict."""
        return [instance._state for instance in self.values()]
def _create_history(attr, state, current):
    """Compute an (added, unchanged, deleted) triple for an attribute.

    ``current`` is the present value; the baseline is the committed
    snapshot, where NEVER_SET means "no snapshot recorded" and NO_VALUE
    means "snapshotted as having no value".
    """
    original = state.committed_state.get(attr.key, NEVER_SET)
    if hasattr(attr, 'get_collection'):
        # collection attribute: diff membership via identity sets
        current = attr.get_collection(state, current)
        if original is NO_VALUE:
            return (list(current), [], [])
        elif original is NEVER_SET:
            return ([], list(current), [])
        else:
            collection = util.OrderedIdentitySet(current)
            s = util.OrderedIdentitySet(original)
            return (list(collection.difference(s)), list(collection.intersection(s)), list(s.difference(collection)))
    else:
        # scalar attribute: compare single values against the snapshot
        if current is NO_VALUE:
            if original not in [None, NEVER_SET, NO_VALUE]:
                deleted = [original]
            else:
                deleted = []
            return ([], [], deleted)
        elif original is NO_VALUE:
            return ([current], [], [])
        elif original is NEVER_SET or attr.is_equal(current, original) is True: # dont let ClauseElement expressions here trip things up
            return ([], [current], [])
        else:
            if original is not None:
                deleted = [original]
            else:
                deleted = []
            return ([current], [], deleted)
class PendingCollection(object):
    """stores items appended and removed from a collection that has not been loaded yet.
    When the collection is loaded, the changes present in PendingCollection are applied
    to produce the final result.
    """
    def __init__(self):
        self.deleted_items = util.IdentitySet()
        self.added_items = util.OrderedIdentitySet()
    def append(self, value):
        # an append cancels out a prior buffered removal of the same item
        if value in self.deleted_items:
            self.deleted_items.remove(value)
        self.added_items.add(value)
    def remove(self, value):
        # a removal cancels out a prior buffered append of the same item
        if value in self.added_items:
            self.added_items.remove(value)
        self.deleted_items.add(value)
def _managed_attributes(class_):
"""return all InstrumentedAttributes associated with the given class_ and its superclasses."""
return chain(*[cl._class_state.attrs.values() for cl in class_.__mro__[:-1] if hasattr(cl, '_class_state')])
def get_history(state, key, **kwargs):
    """Return the history triple for the named attribute on the given state."""
    attribute = getattr(state.class_, key)
    return attribute.impl.get_history(state, **kwargs)
def get_as_list(state, key, passive=False):
    """return an InstanceState attribute as a list,
    regardless of it being a scalar or collection-based
    attribute.
    returns None if passive=True and the getter returns
    PASSIVE_NORESULT.
    """
    impl = getattr(state.class_, key).impl
    value = impl.get(state, passive=passive)
    if value is PASSIVE_NORESULT:
        # a passive get declined to trigger a load
        return None
    if hasattr(impl, 'get_collection'):
        # collection attribute: return the adapter view
        return impl.get_collection(state, value, passive=passive)
    if isinstance(value, list):
        return value
    return [value]
def has_parent(class_, instance, key, optimistic=False):
    """Return whether *instance* is flagged as having a parent via the named attribute."""
    attribute = getattr(class_, key)
    return attribute.impl.hasparent(instance._state, optimistic=optimistic)
def _create_prop(class_, key, uselist, callable_, typecallable, useobject, mutable_scalars, impl_class, **kwargs):
    """Select and construct the AttributeImpl variant for an attribute.

    Precedence: explicit impl_class, then collection (uselist), then
    instrumented-object scalar (useobject), then mutable scalar, then
    plain scalar.
    """
    if impl_class:
        # NOTE(review): impl_class receives typecallable in the position
        # where the others take callable_ -- presumably its signature
        # differs; confirm against impl_class providers.
        return impl_class(class_, key, typecallable, **kwargs)
    if uselist:
        return CollectionAttributeImpl(class_, key, callable_, typecallable, **kwargs)
    if useobject:
        return ScalarObjectAttributeImpl(class_, key, callable_, **kwargs)
    if mutable_scalars:
        return MutableScalarAttributeImpl(class_, key, callable_, **kwargs)
    return ScalarAttributeImpl(class_, key, callable_, **kwargs)
def manage(instance):
    """initialize an InstanceState on the given instance, if not already present."""
    if hasattr(instance, '_state'):
        return
    instance._state = InstanceState(instance)
def new_instance(class_, state=None):
    """create a new instance of class_ without its __init__() method being called.
    Also initializes an InstanceState on the new instance (or attaches the
    given one).
    """
    instance = class_.__new__(class_)
    # attach the supplied state; fall back to a fresh InstanceState
    instance._state = state or InstanceState(instance)
    return instance
def _init_class_state(class_):
if not '_class_state' in class_.__dict__:
class_._class_state = ClassState()
def register_class(class_, extra_init=None, on_exception=None, deferred_scalar_loader=None):
    """Instrument *class_* by wrapping its ``__init__`` so every new
    instance receives an InstanceState before user construction runs.

    ``extra_init`` is invoked after state setup; ``on_exception`` is
    invoked (and the exception re-raised) if construction fails.
    """
    _init_class_state(class_)
    class_._class_state.deferred_scalar_loader = deferred_scalar_loader
    oldinit = None
    doinit = False
    def init(instance, *args, **kwargs):
        # ensure an InstanceState exists before any user code runs
        if not hasattr(instance, '_state'):
            instance._state = InstanceState(instance)
        if extra_init:
            extra_init(class_, oldinit, instance, args, kwargs)
        try:
            if doinit:
                oldinit(instance, *args, **kwargs)
            elif args or kwargs:
                # simulate error message raised by object(), but don't copy
                # the text verbatim
                raise TypeError("default constructor for object() takes no parameters")
        except:
            if on_exception:
                on_exception(class_, oldinit, instance, args, kwargs)
            raise
    # override oldinit
    oldinit = class_.__init__
    if oldinit is None or not hasattr(oldinit, '_oldinit'):
        init._oldinit = oldinit
        class_.__init__ = init
    # if oldinit is already one of our 'init' methods, replace it
    elif hasattr(oldinit, '_oldinit'):
        init._oldinit = oldinit._oldinit
        # BUG FIX: was ``class_.__init = init``, which set an attribute
        # literally named '__init' (private name mangling only applies
        # inside class bodies) and left the stale wrapper as __init__.
        class_.__init__ = init
        oldinit = oldinit._oldinit
    if oldinit is not None:
        doinit = oldinit is not object.__init__
        try:
            init.__name__ = oldinit.__name__
            init.__doc__ = oldinit.__doc__
        except:
            # cant set __name__ in py 2.3 !
            pass
def unregister_class(class_):
    """Undo the instrumentation applied by register_class/register_attribute."""
    # restore the original __init__, or remove the wrapper entirely
    if hasattr(class_, '__init__') and hasattr(class_.__init__, '_oldinit'):
        if class_.__init__._oldinit is not None:
            class_.__init__ = class_.__init__._oldinit
        else:
            delattr(class_, '__init__')
    # strip installed InstrumentedAttributes and the class-level state
    if '_class_state' in class_.__dict__:
        _class_state = class_.__dict__['_class_state']
        for key, attr in _class_state.attrs.iteritems():
            if key in class_.__dict__:
                delattr(class_, attr.impl.key)
        delattr(class_, '_class_state')
def register_attribute(class_, key, uselist, useobject, callable_=None, proxy_property=None, mutable_scalars=False, impl_class=None, **kwargs):
    """Install an InstrumentedAttribute (or proxy) named *key* on *class_*."""
    _init_class_state(class_)
    typecallable = kwargs.pop('typecallable', None)
    # an already-instrumented attribute is not a usable collection factory
    if isinstance(typecallable, InstrumentedAttribute):
        typecallable = None
    comparator = kwargs.pop('comparator', None)
    if key in class_.__dict__ and isinstance(class_.__dict__[key], InstrumentedAttribute):
        # this currently only occurs if two primary mappers are made for the same class.
        # TODO: possibly have InstrumentedAttribute check "entity_name" when searching for impl.
        # raise an error if two attrs attached simultaneously otherwise
        return
    if proxy_property:
        # wrap a user-defined property rather than building an impl
        proxy_type = proxied_attribute_factory(proxy_property)
        inst = proxy_type(key, proxy_property, comparator)
    else:
        inst = InstrumentedAttribute(_create_prop(class_, key, uselist, callable_, useobject=useobject,
            typecallable=typecallable, mutable_scalars=mutable_scalars, impl_class=impl_class, **kwargs), comparator=comparator)
    setattr(class_, key, inst)
    class_._class_state.attrs[key] = inst
def unregister_attribute(class_, key):
    """Remove the instrumented attribute *key* from *class_*, if registered."""
    class_state = class_._class_state
    if key in class_state.attrs:
        del class_state.attrs[key]
        delattr(class_, key)
def init_collection(instance, key):
    """Initialize a collection attribute and return the collection adapter."""
    impl = getattr(instance.__class__, key).impl
    state = instance._state
    # build the empty user collection, then hand back its adapter view
    return impl.get_collection(state, impl.initialize(state))
| {
"repo_name": "carlgao/lenga",
"path": "images/lenny64-peon/usr/share/python-support/python-sqlalchemy/sqlalchemy/orm/attributes.py",
"copies": "1",
"size": "46232",
"license": "mit",
"hash": 548088894391894660,
"line_mean": 34.7003861004,
"line_max": 226,
"alpha_frac": 0.6129304378,
"autogenerated": false,
"ratio": 4.309470544369873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017133762486991444,
"num_lines": 1295
} |
import weakref, threading, operator
from itertools import chain
import UserDict
from sqlalchemy import util
from sqlalchemy.orm import interfaces, collections
from sqlalchemy.orm.util import identity_equal
from sqlalchemy import exceptions
# Unique sentinel objects, always compared by identity (``is``):
PASSIVE_NORESULT = object()  # a passive get declined to trigger a load
ATTR_WAS_SET = object()      # a loader callable populated the attribute itself
NO_VALUE = object()          # committed-state marker: attribute had no value
NEVER_SET = object()         # history marker: no committed state was recorded
class InstrumentedAttribute(interfaces.PropComparator):
    """public-facing instrumented attribute, placed in the
    class dictionary.
    """
    def __init__(self, impl, comparator=None):
        """Construct an InstrumentedAttribute.
        comparator
          a sql.Comparator to which class-level compare/math events will be sent
        """
        self.impl = impl
        self.comparator = comparator
    def __set__(self, instance, value):
        # descriptor protocol: route assignment through the attribute impl
        self.impl.set(instance._state, value, None)
    def __delete__(self, instance):
        self.impl.delete(instance._state)
    def __get__(self, instance, owner):
        if instance is None:
            # class-level access returns the attribute object itself
            return self
        return self.impl.get(instance._state)
    def get_history(self, instance, **kwargs):
        return self.impl.get_history(instance._state, **kwargs)
    def clause_element(self):
        return self.comparator.clause_element()
    def expression_element(self):
        return self.comparator.expression_element()
    def operate(self, op, *other, **kwargs):
        # class-level comparisons/math are delegated to the comparator
        return op(self.comparator, *other, **kwargs)
    def reverse_operate(self, op, other, **kwargs):
        return op(other, self.comparator, **kwargs)
    def hasparent(self, instance, optimistic=False):
        return self.impl.hasparent(instance._state, optimistic=optimistic)
    def _property(self):
        # local import avoids a circular dependency with mapper
        from sqlalchemy.orm.mapper import class_mapper
        return class_mapper(self.impl.class_).get_property(self.impl.key)
    # note: shadows the builtin ``property`` within this class body only
    property = property(_property, doc="the MapperProperty object associated with this attribute")
class ProxiedAttribute(InstrumentedAttribute):
    """a 'proxy' attribute which adds InstrumentedAttribute
    class-level behavior to any user-defined class property.
    """
    class ProxyImpl(object):
        # minimal stand-in impl: carries only the key, loads nothing
        accepts_scalar_loader = False
        def __init__(self, key):
            self.key = key
    def __init__(self, key, user_prop, comparator=None):
        self.user_prop = user_prop
        self._comparator = comparator
        self.key = key
        self.impl = ProxiedAttribute.ProxyImpl(key)
    def comparator(self):
        # the comparator may be given as a factory; resolve it lazily, once
        if callable(self._comparator):
            self._comparator = self._comparator()
        return self._comparator
    comparator = property(comparator)
    def __get__(self, instance, owner):
        if instance is None:
            # class-level access: invoke the wrapped property first
            # (presumably so it may raise or register itself -- its return
            # value is deliberately discarded), then return this attribute
            self.user_prop.__get__(instance, owner)
            return self
        return self.user_prop.__get__(instance, owner)
    def __set__(self, instance, value):
        return self.user_prop.__set__(instance, value)
    def __delete__(self, instance):
        return self.user_prop.__delete__(instance)
class AttributeImpl(object):
    """internal implementation for instrumented attributes."""

    def __init__(self, class_, key, callable_, trackparent=False, extension=None, compare_function=None, **kwargs):
        """Construct an AttributeImpl.

        class_
          the class to be instrumented.

        key
          string name of the attribute

        callable_
          optional function which generates a callable based on a parent
          instance, which produces the "default" values for a scalar or
          collection attribute when it's first accessed, if not present
          already.

        trackparent
          if True, attempt to track if an instance has a parent attached
          to it via this attribute.

        extension
          an AttributeExtension object which will receive
          set/delete/append/remove/etc. events.

        compare_function
          a function that compares two values which are normally
          assignable to this attribute.
        """
        self.class_ = class_
        self.key = key
        self.callable_ = callable_
        self.trackparent = trackparent
        # default comparison is plain equality unless overridden
        if compare_function is None:
            self.is_equal = operator.eq
        else:
            self.is_equal = compare_function
        # normalize: a single extension, a list of them, or None all become a list
        self.extensions = util.to_list(extension or [])

    def hasparent(self, state, optimistic=False):
        """Return the boolean value of a `hasparent` flag attached to the given item.

        The `optimistic` flag determines what the default return value
        should be if no `hasparent` flag can be located.

        As this function is used to determine if an instance is an
        *orphan*, instances that were loaded from storage should be
        assumed to not be orphans, until a True/False value for this
        flag is set.

        An instance attribute that is loaded by a callable function
        will also not have a `hasparent` flag.
        """
        # flags are keyed by id(self) so each attribute tracks its own parent flag
        return state.parents.get(id(self), optimistic)

    def sethasparent(self, state, value):
        """Set a boolean flag on the given item corresponding to
        whether or not it is attached to a parent object via the
        attribute represented by this ``InstrumentedAttribute``.
        """
        state.parents[id(self)] = value

    def set_callable(self, state, callable_):
        """Set a callable function for this attribute on the given object.

        This callable will be executed when the attribute is next
        accessed, and is assumed to construct part of the instances
        previously stored state.  When its value or values are loaded,
        they will be established as part of the instance's *committed
        state*.  While *trackparent* information will be assembled for
        these instances, attribute-level event handlers will not be
        fired.

        The callable overrides the class level callable set in the
        ``InstrumentedAttribute`` constructor.
        """
        # a None callable means "no lazy loader": establish an empty value now
        if callable_ is None:
            self.initialize(state)
        else:
            state.callables[self.key] = callable_

    def get_history(self, state, passive=False):
        # subclasses produce the (added, unchanged, deleted) triple
        raise NotImplementedError()

    def _get_callable(self, state):
        # per-instance callables take precedence over the class-level one
        if self.key in state.callables:
            return state.callables[self.key]
        elif self.callable_ is not None:
            return self.callable_(state.obj())
        else:
            return None

    def initialize(self, state):
        """Initialize this attribute on the given object instance with an empty value."""
        state.dict[self.key] = None
        return None

    def get(self, state, passive=False):
        """Retrieve a value from the given object.

        If a callable is assembled on this object's attribute, and
        passive is False, the callable will be executed and the
        resulting value will be set as the new value for this attribute.
        """
        try:
            return state.dict[self.key]
        except KeyError:
            # if no history, check for lazy callables, etc.
            if self.key not in state.committed_state:
                callable_ = self._get_callable(state)
                if callable_ is not None:
                    if passive:
                        # caller asked not to trigger a load
                        return PASSIVE_NORESULT
                    value = callable_()
                    if value is not ATTR_WAS_SET:
                        return self.set_committed_value(state, value)
                    else:
                        # the callable installed the value itself
                        if self.key not in state.dict:
                            return self.get(state, passive=passive)
                        return state.dict[self.key]
            # Return a new, empty value
            return self.initialize(state)

    def append(self, state, value, initiator, passive=False):
        # scalar default: "append" degenerates to plain assignment
        self.set(state, value, initiator)

    def remove(self, state, value, initiator, passive=False):
        # scalar default: "remove" degenerates to assigning None
        self.set(state, None, initiator)

    def set(self, state, value, initiator):
        raise NotImplementedError()

    def get_committed_value(self, state):
        """return the unchanged value of this attribute"""
        if self.key in state.committed_state:
            return state.committed_state.get(self.key)
        else:
            # no pending change recorded; the current value is the committed one
            return self.get(state)

    def set_committed_value(self, state, value):
        """set an attribute value on the given instance and 'commit' it."""
        state.commit_attr(self, value)
        # remove per-instance callable, if any
        state.callables.pop(self.key, None)
        state.dict[self.key] = value
        return value
class ScalarAttributeImpl(AttributeImpl):
    """represents a scalar value-holding InstrumentedAttribute."""

    accepts_scalar_loader = True

    def delete(self, state):
        # snapshot the pre-delete value once so history/rollback can see it
        if self.key not in state.committed_state:
            state.committed_state[self.key] = state.dict.get(self.key, NO_VALUE)
        # TODO: catch key errors, convert to attributeerror?
        del state.dict[self.key]
        state.modified = True

    def get_history(self, state, passive=False):
        return _create_history(self, state, state.dict.get(self.key, NO_VALUE))

    def set(self, state, value, initiator):
        # `initiator` guards against infinite recursion in backref chains
        if initiator is self:
            return
        if self.key not in state.committed_state:
            state.committed_state[self.key] = state.dict.get(self.key, NO_VALUE)
        state.dict[self.key] = value
        state.modified = True

    def type(self):
        # BUG FIX: the expression was evaluated but never returned, so the
        # `type` property always produced None.
        return self.property.columns[0].type
    type = property(type)
class MutableScalarAttributeImpl(ScalarAttributeImpl):
    """represents a scalar value-holding InstrumentedAttribute, which can detect
    changes within the value itself.
    """
    def __init__(self, class_, key, callable_, copy_function=None, compare_function=None, **kwargs):
        # FIX: was super(ScalarAttributeImpl, self), which skips this class's
        # own base in the MRO.  Behaviorally identical today only because
        # ScalarAttributeImpl defines no __init__; the corrected form is robust.
        super(MutableScalarAttributeImpl, self).__init__(class_, key, callable_, compare_function=compare_function, **kwargs)
        class_._class_state.has_mutable_scalars = True
        # a copy function is mandatory: committed state must snapshot a copy of
        # the mutable value, not an alias to it
        if copy_function is None:
            raise exceptions.ArgumentError("MutableScalarAttributeImpl requires a copy function")
        self.copy = copy_function

    def get_history(self, state, passive=False):
        return _create_history(self, state, state.dict.get(self.key, NO_VALUE))

    def commit_to_state(self, state, value):
        # store a *copy* so later in-place mutation of the live value is detectable
        state.committed_state[self.key] = self.copy(value)

    def check_mutable_modified(self, state):
        # mark the state modified if history shows any net change
        (added, unchanged, deleted) = self.get_history(state, passive=True)
        if added or deleted:
            state.modified = True
            return True
        else:
            return False

    def set(self, state, value, initiator):
        if initiator is self:
            return
        if self.key not in state.committed_state:
            if self.key in state.dict:
                # snapshot a copy of the current (mutable) value
                state.committed_state[self.key] = self.copy(state.dict[self.key])
            else:
                state.committed_state[self.key] = NO_VALUE
        state.dict[self.key] = value
        state.modified = True
class ScalarObjectAttributeImpl(ScalarAttributeImpl):
    """represents a scalar-holding InstrumentedAttribute, where the target object is also instrumented.

    Adds events to delete/set operations.
    """
    accepts_scalar_loader = False

    def __init__(self, class_, key, callable_, trackparent=False, extension=None, copy_function=None, compare_function=None, **kwargs):
        super(ScalarObjectAttributeImpl, self).__init__(class_, key,
            callable_, trackparent=trackparent, extension=extension,
            compare_function=compare_function, **kwargs)
        # object references compare by identity, not value equality
        if compare_function is None:
            self.is_equal = identity_equal

    def delete(self, state):
        old = self.get(state)
        # TODO: catch key errors, convert to attributeerror?
        del state.dict[self.key]
        self.fire_remove_event(state, old, self)

    def get_history(self, state, passive=False):
        # prefer the in-dict value to avoid triggering a lazy load
        if self.key in state.dict:
            return _create_history(self, state, state.dict[self.key])
        else:
            current = self.get(state, passive=passive)
            if current is PASSIVE_NORESULT:
                return (None, None, None)
            else:
                return _create_history(self, state, current)

    def set(self, state, value, initiator):
        """Set a value on the given InstanceState.

        `initiator` is the ``InstrumentedAttribute`` that initiated the
        ``set()`` operation and is used to control the depth of a circular
        setter operation.
        """
        if initiator is self:
            return
        # TODO: add options to allow the get() to be passive
        old = self.get(state)
        state.dict[self.key] = value
        self.fire_replace_event(state, value, old, initiator)

    def fire_remove_event(self, state, value, initiator):
        # snapshot the outgoing value once so history reflects pre-change state
        if self.key not in state.committed_state:
            state.committed_state[self.key] = value
        state.modified = True
        if self.trackparent and value is not None:
            self.sethasparent(value._state, False)
        instance = state.obj()
        for ext in self.extensions:
            ext.remove(instance, value, initiator or self)

    def fire_replace_event(self, state, value, previous, initiator):
        if self.key not in state.committed_state:
            state.committed_state[self.key] = previous
        state.modified = True
        if self.trackparent:
            # update parent flags on both the new and the displaced object
            if value is not None:
                self.sethasparent(value._state, True)
            if previous is not value and previous is not None:
                self.sethasparent(previous._state, False)
        instance = state.obj()
        for ext in self.extensions:
            ext.set(instance, value, previous, initiator or self)
class CollectionAttributeImpl(AttributeImpl):
    """A collection-holding attribute that instruments changes in membership.

    Only handles collections of instrumented objects.

    InstrumentedCollectionAttribute holds an arbitrary, user-specified
    container object (defaulting to a list) and brokers access to the
    CollectionAdapter, a "view" onto that object that presents consistent
    bag semantics to the orm layer independent of the user data implementation.
    """
    accepts_scalar_loader = False

    def __init__(self, class_, key, callable_, typecallable=None, trackparent=False, extension=None, copy_function=None, compare_function=None, **kwargs):
        super(CollectionAttributeImpl, self).__init__(class_,
            key, callable_, trackparent=trackparent, extension=extension,
            compare_function=compare_function, **kwargs)
        if copy_function is None:
            copy_function = self.__copy
        self.copy = copy_function
        if typecallable is None:
            typecallable = list
        self.collection_factory = \
            collections._prepare_instrumentation(typecallable)
        # may be removed in 0.5:
        self.collection_interface = \
            util.duck_type_collection(self.collection_factory())

    def __copy(self, item):
        # default copy: snapshot the members as a plain list
        return [y for y in list(collections.collection_adapter(item))]

    # NOTE: a second, earlier definition of get_history (which short-circuited
    # on ``self.key in state.dict``) was dead code -- shadowed by this
    # definition in the class body -- and has been removed.
    def get_history(self, state, passive=False):
        current = self.get(state, passive=passive)
        if current is PASSIVE_NORESULT:
            return (None, None, None)
        else:
            return _create_history(self, state, current)

    def fire_append_event(self, state, value, initiator):
        # snapshot a copy of the collection the first time it changes
        if self.key not in state.committed_state and self.key in state.dict:
            state.committed_state[self.key] = self.copy(state.dict[self.key])
        state.modified = True
        if self.trackparent and value is not None:
            self.sethasparent(value._state, True)
        instance = state.obj()
        for ext in self.extensions:
            ext.append(instance, value, initiator or self)

    def fire_pre_remove_event(self, state, initiator):
        if self.key not in state.committed_state and self.key in state.dict:
            state.committed_state[self.key] = self.copy(state.dict[self.key])

    def fire_remove_event(self, state, value, initiator):
        if self.key not in state.committed_state and self.key in state.dict:
            state.committed_state[self.key] = self.copy(state.dict[self.key])
        state.modified = True
        if self.trackparent and value is not None:
            self.sethasparent(value._state, False)
        instance = state.obj()
        for ext in self.extensions:
            ext.remove(instance, value, initiator or self)

    def delete(self, state):
        if self.key not in state.dict:
            return
        state.modified = True
        collection = self.get_collection(state)
        collection.clear_with_event()
        # TODO: catch key errors, convert to attributeerror?
        del state.dict[self.key]

    def initialize(self, state):
        """Initialize this attribute on the given object instance with an empty collection."""
        _, user_data = self._build_collection(state)
        state.dict[self.key] = user_data
        return user_data

    def append(self, state, value, initiator, passive=False):
        if initiator is self:
            return
        collection = self.get_collection(state, passive=passive)
        if collection is PASSIVE_NORESULT:
            # collection not loaded; queue the change for later reconciliation
            state.get_pending(self.key).append(value)
            self.fire_append_event(state, value, initiator)
        else:
            collection.append_with_event(value, initiator)

    def remove(self, state, value, initiator, passive=False):
        if initiator is self:
            return
        collection = self.get_collection(state, passive=passive)
        if collection is PASSIVE_NORESULT:
            state.get_pending(self.key).remove(value)
            self.fire_remove_event(state, value, initiator)
        else:
            collection.remove_with_event(value, initiator)

    def set(self, state, value, initiator):
        """Set a value on the given object.

        `initiator` is the ``InstrumentedAttribute`` that initiated the
        ``set()`` operation and is used to control the depth of a circular
        setter operation.
        """
        if initiator is self:
            return
        # we need a CollectionAdapter to adapt the incoming value to an
        # assignable iterable.  pulling a new collection first so that
        # an adaptation exception does not trigger a lazy load of the
        # old collection.
        new_collection, user_data = self._build_collection(state)
        new_values = list(new_collection.adapt_like_to_iterable(value))
        old = self.get(state)
        # ignore re-assignment of the current collection, as happens
        # implicitly with in-place operators (foo.collection |= other)
        if old is value:
            return
        if self.key not in state.committed_state:
            state.committed_state[self.key] = self.copy(old)
        old_collection = self.get_collection(state, old)
        # partition the new membership by identity against the old
        idset = util.IdentitySet
        constants = idset(old_collection or []).intersection(new_values or [])
        additions = idset(new_values or []).difference(constants)
        removals = idset(old_collection or []).difference(constants)
        for member in new_values or ():
            if member in additions:
                new_collection.append_with_event(member)
            elif member in constants:
                new_collection.append_without_event(member)
        state.dict[self.key] = user_data
        state.modified = True
        # mark all the orphaned elements as detached from the parent
        if old_collection:
            for member in removals:
                old_collection.remove_with_event(member)
            old_collection.unlink(old)

    def set_committed_value(self, state, value):
        """Set an attribute value on the given instance and 'commit' it.

        Loads the existing collection from lazy callables in all cases.
        """
        collection, user_data = self._build_collection(state)
        if value:
            for item in value:
                collection.append_without_event(item)
        state.callables.pop(self.key, None)
        state.dict[self.key] = user_data
        if self.key in state.pending:
            # pending items.  commit loaded data, add/remove new data
            state.committed_state[self.key] = list(value or [])
            added = state.pending[self.key].added_items
            removed = state.pending[self.key].deleted_items
            for item in added:
                collection.append_without_event(item)
            for item in removed:
                collection.remove_without_event(item)
            del state.pending[self.key]
        elif self.key in state.committed_state:
            # no pending items.  remove committed state if any.
            # (this can occur with an expired attribute)
            del state.committed_state[self.key]
        return user_data

    def _build_collection(self, state):
        """build a new, blank collection and return it wrapped in a CollectionAdapter."""
        user_data = self.collection_factory()
        collection = collections.CollectionAdapter(self, state, user_data)
        return collection, user_data

    def get_collection(self, state, user_data=None, passive=False):
        """retrieve the CollectionAdapter associated with the given state.

        Creates a new CollectionAdapter if one does not exist.
        """
        if user_data is None:
            user_data = self.get(state, passive=passive)
            if user_data is PASSIVE_NORESULT:
                return user_data
        try:
            return getattr(user_data, '_sa_adapter')
        except AttributeError:
            # TODO: this codepath never occurs, and this
            # except/initialize should be removed
            collections.CollectionAdapter(self, state, user_data)
            return getattr(user_data, '_sa_adapter')
class GenericBackrefExtension(interfaces.AttributeExtension):
    """An extension which synchronizes a two-way relationship.

    A typical two-way relationship is a parent object containing a
    list of child objects, where each child object references the
    parent.  The other are two objects which contain scalar references
    to each other.
    """
    def __init__(self, key):
        # key of the *reverse* attribute on the related class
        self.key = key

    def set(self, instance, child, oldchild, initiator):
        if oldchild is child:
            return
        if oldchild is not None:
            # With lazy=None, there's no guarantee that the full collection is
            # present when updating via a backref.
            impl = getattr(oldchild.__class__, self.key).impl
            try:
                impl.remove(oldchild._state, instance, initiator, passive=True)
            except (ValueError, KeyError, IndexError):
                pass
        if child is not None:
            getattr(child.__class__, self.key).impl.append(child._state, instance, initiator, passive=True)

    def append(self, instance, child, initiator):
        # mirror the append onto the child's reverse attribute
        getattr(child.__class__, self.key).impl.append(child._state, instance, initiator, passive=True)

    def remove(self, instance, child, initiator):
        if child is not None:
            getattr(child.__class__, self.key).impl.remove(child._state, instance, initiator, passive=True)
class ClassState(object):
    """Per-class bookkeeping for attribute instrumentation."""

    def __init__(self):
        # entity-name -> mapper registrations for this class
        self.mappers = {}
        # attribute key -> installed InstrumentedAttribute
        self.attrs = {}
        # flipped on when a mutable-scalar attribute is installed
        self.has_mutable_scalars = False
class InstanceState(object):
    """tracks state information at the instance level."""

    def __init__(self, obj):
        self.class_ = obj.__class__
        # weak ref to the instance; __cleanup fires when it is collected
        self.obj = weakref.ref(obj, self.__cleanup)
        # alias of the instance __dict__; attribute values live there
        self.dict = obj.__dict__
        # key -> pre-change snapshot, used to build history
        self.committed_state = {}
        self.modified = False
        # key -> per-instance lazy loader callables
        self.callables = {}
        self.parents = {}
        self.pending = {}
        self.appenders = {}
        # weak ref (wrapped) back to the owning identity map, if any
        self.instance_dict = None
        self.runid = None

    def __cleanup(self, ref):
        # tiptoe around Python GC unpredictableness
        instance_dict = self.instance_dict
        if instance_dict is None:
            return
        instance_dict = instance_dict()
        if instance_dict is None or instance_dict._mutex is None:
            return
        # the mutexing here is based on the assumption that gc.collect()
        # may be firing off cleanup handlers in a different thread than that
        # which is normally operating upon the instance dict.
        instance_dict._mutex.acquire()
        try:
            try:
                self.__resurrect(instance_dict)
            except:
                # catch app cleanup exceptions.  no other way around this
                # without warnings being produced
                pass
        finally:
            instance_dict._mutex.release()

    def _check_resurrect(self, instance_dict):
        # return the live instance, resurrecting it under the mutex if needed
        instance_dict._mutex.acquire()
        try:
            return self.obj() or self.__resurrect(instance_dict)
        finally:
            instance_dict._mutex.release()

    def get_pending(self, key):
        # lazily create the PendingCollection for an unloaded collection
        if key not in self.pending:
            self.pending[key] = PendingCollection()
        return self.pending[key]

    def is_modified(self):
        if self.modified:
            return True
        elif self.class_._class_state.has_mutable_scalars:
            # note: the ``else`` below belongs to the ``for`` -- it runs only
            # when no attribute reported an in-place mutation
            for attr in _managed_attributes(self.class_):
                if hasattr(attr.impl, 'check_mutable_modified') and attr.impl.check_mutable_modified(self):
                    return True
            else:
                return False
        else:
            return False

    def __resurrect(self, instance_dict):
        if self.is_modified():
            # store strong ref'ed version of the object; will revert
            # to weakref when changes are persisted
            obj = new_instance(self.class_, state=self)
            self.obj = weakref.ref(obj, self.__cleanup)
            self._strong_obj = obj
            obj.__dict__.update(self.dict)
            self.dict = obj.__dict__
            return obj
        else:
            # unmodified instance: drop it from the identity map entirely
            del instance_dict[self.dict['_instance_key']]
            return None

    def __getstate__(self):
        return {'committed_state':self.committed_state, 'pending':self.pending, 'parents':self.parents, 'modified':self.modified, 'instance':self.obj(), 'expired_attributes':getattr(self, 'expired_attributes', None), 'callables':self.callables}

    def __setstate__(self, state):
        self.committed_state = state['committed_state']
        self.parents = state['parents']
        self.pending = state['pending']
        self.modified = state['modified']
        # no cleanup callback here; the unpickled state is not in an identity map
        self.obj = weakref.ref(state['instance'])
        self.class_ = self.obj().__class__
        self.dict = self.obj().__dict__
        self.callables = state['callables']
        self.runid = None
        self.appenders = {}
        if state['expired_attributes'] is not None:
            self.expire_attributes(state['expired_attributes'])

    def initialize(self, key):
        getattr(self.class_, key).impl.initialize(self)

    def set_callable(self, key, callable_):
        # installing a loader invalidates any currently-present value
        self.dict.pop(key, None)
        self.callables[key] = callable_

    def __call__(self):
        """__call__ allows the InstanceState to act as a deferred
        callable for loading expired attributes, which is also
        serializable.
        """
        instance = self.obj()
        unmodified = self.unmodified
        # load every expired, unmodified, scalar-loadable attribute in one pass
        self.class_._class_state.deferred_scalar_loader(instance, [
            attr.impl.key for attr in _managed_attributes(self.class_) if
                attr.impl.accepts_scalar_loader and
                attr.impl.key in self.expired_attributes and
                attr.impl.key in unmodified
            ])
        for k in self.expired_attributes:
            self.callables.pop(k, None)
        self.expired_attributes.clear()
        return ATTR_WAS_SET

    def unmodified(self):
        """a set of keys which have no uncommitted changes"""
        return util.Set([
            attr.impl.key for attr in _managed_attributes(self.class_) if
            attr.impl.key not in self.committed_state
            and (not hasattr(attr.impl, 'commit_to_state') or not attr.impl.check_mutable_modified(self))
        ])
    unmodified = property(unmodified)

    def expire_attributes(self, attribute_names):
        if not hasattr(self, 'expired_attributes'):
            self.expired_attributes = util.Set()
        if attribute_names is None:
            # None means: expire everything
            for attr in _managed_attributes(self.class_):
                self.dict.pop(attr.impl.key, None)
                self.expired_attributes.add(attr.impl.key)
                if attr.impl.accepts_scalar_loader:
                    # this state itself acts as the deferred loader (see __call__)
                    self.callables[attr.impl.key] = self
            self.committed_state = {}
        else:
            for key in attribute_names:
                self.dict.pop(key, None)
                self.committed_state.pop(key, None)
                self.expired_attributes.add(key)
                if getattr(self.class_, key).impl.accepts_scalar_loader:
                    self.callables[key] = self

    def reset(self, key):
        """remove the given attribute and any callables associated with it."""
        self.dict.pop(key, None)
        self.callables.pop(key, None)

    def commit_attr(self, attr, value):
        # mutable attrs snapshot a copy; others simply clear pending records
        if hasattr(attr, 'commit_to_state'):
            attr.commit_to_state(self, value)
        else:
            self.committed_state.pop(attr.key, None)
        self.pending.pop(attr.key, None)
        self.appenders.pop(attr.key, None)

    def commit(self, keys):
        """commit all attributes named in the given list of key names.

        This is used by a partial-attribute load operation to mark committed those attributes
        which were refreshed from the database.
        """
        if self.class_._class_state.has_mutable_scalars:
            for key in keys:
                attr = getattr(self.class_, key).impl
                if hasattr(attr, 'commit_to_state') and attr.key in self.dict:
                    attr.commit_to_state(self, self.dict[attr.key])
                else:
                    self.committed_state.pop(attr.key, None)
                self.pending.pop(key, None)
                self.appenders.pop(key, None)
        else:
            for key in keys:
                self.committed_state.pop(key, None)
                self.pending.pop(key, None)
                self.appenders.pop(key, None)

    def commit_all(self):
        """commit all attributes unconditionally.

        This is used after a flush() or a regular instance load or refresh operation
        to mark committed all populated attributes.
        """
        self.committed_state = {}
        self.modified = False
        self.pending = {}
        self.appenders = {}
        if self.class_._class_state.has_mutable_scalars:
            for attr in _managed_attributes(self.class_):
                if hasattr(attr.impl, 'commit_to_state') and attr.impl.key in self.dict:
                    attr.impl.commit_to_state(self, self.dict[attr.impl.key])
        # remove strong ref
        self._strong_obj = None
class WeakInstanceDict(UserDict.UserDict):
    """similar to WeakValueDictionary, but wired towards 'state' objects."""

    def __init__(self, *args, **kw):
        # weak self-reference handed to stored states (see __setitem__)
        self._wr = weakref.ref(self)
        # RLock because the mutex is used by a cleanup
        # handler, which can be called at any time (including within an already mutexed block)
        self._mutex = threading.RLock()
        UserDict.UserDict.__init__(self, *args, **kw)

    def __getitem__(self, key):
        state = self.data[key]
        o = state.obj()
        if o is None:
            # dead weakref: give the state a chance to resurrect the instance
            o = state._check_resurrect(self)
        if o is None:
            raise KeyError, key
        return o

    def __contains__(self, key):
        try:
            state = self.data[key]
            o = state.obj()
            if o is None:
                o = state._check_resurrect(self)
        except KeyError:
            return False
        return o is not None

    def has_key(self, key):
        return key in self

    def __repr__(self):
        return "<InstanceDict at %s>" % id(self)

    def __setitem__(self, key, value):
        if key in self.data:
            # detach the displaced state under the mutex so a concurrent
            # GC cleanup does not see a half-updated entry
            self._mutex.acquire()
            try:
                if key in self.data:
                    self.data[key].instance_dict = None
            finally:
                self._mutex.release()
        self.data[key] = value._state
        value._state.instance_dict = self._wr

    def __delitem__(self, key):
        state = self.data[key]
        state.instance_dict = None
        del self.data[key]

    def get(self, key, default=None):
        try:
            state = self.data[key]
        except KeyError:
            return default
        else:
            o = state.obj()
            if o is None:
                # dead weakref; unlike __getitem__, no resurrection is attempted
                return default
            else:
                return o

    def items(self):
        L = []
        for key, state in self.data.items():
            o = state.obj()
            if o is not None:
                L.append((key, o))
        return L

    def iteritems(self):
        # NOTE(review): yields the instance's _instance_key rather than the
        # stored dict key -- assumed to always be equal; verify.
        for state in self.data.itervalues():
            value = state.obj()
            if value is not None:
                yield value._instance_key, value

    def iterkeys(self):
        return self.data.iterkeys()

    def __iter__(self):
        return self.data.iterkeys()

    def __len__(self):
        # counts only entries whose weakref is still alive
        return len(self.values())

    def itervalues(self):
        for state in self.data.itervalues():
            instance = state.obj()
            if instance is not None:
                yield instance

    def values(self):
        L = []
        for state in self.data.values():
            o = state.obj()
            if o is not None:
                L.append(o)
        return L

    def popitem(self):
        raise NotImplementedError()

    def pop(self, key, *args):
        raise NotImplementedError()

    def setdefault(self, key, default=None):
        raise NotImplementedError()

    def update(self, dict=None, **kwargs):
        raise NotImplementedError()

    def copy(self):
        raise NotImplementedError()

    def all_states(self):
        # raw InstanceState objects, including those with dead weakrefs
        return self.data.values()
class StrongInstanceDict(dict):
    """identity map holding strong references; mirrors WeakInstanceDict.all_states()."""

    def all_states(self):
        states = []
        for instance in self.values():
            states.append(instance._state)
        return states
def _create_history(attr, state, current):
    """Build the (added, unchanged, deleted) triple for `attr` on `state`.

    `current` is the attribute's present value; the baseline is the snapshot
    in state.committed_state (NEVER_SET when no snapshot was recorded).
    """
    original = state.committed_state.get(attr.key, NEVER_SET)
    if hasattr(attr, 'get_collection'):
        # collection-valued attribute: compare memberships by identity
        current = attr.get_collection(state, current)
        if original is NO_VALUE:
            return (list(current), [], [])
        elif original is NEVER_SET:
            return ([], list(current), [])
        else:
            collection = util.OrderedIdentitySet(current)
            s = util.OrderedIdentitySet(original)
            return (list(collection.difference(s)), list(collection.intersection(s)), list(s.difference(collection)))
    else:
        # scalar-valued attribute
        if current is NO_VALUE:
            if original not in [None, NEVER_SET, NO_VALUE]:
                deleted = [original]
            else:
                deleted = []
            return ([], [], deleted)
        elif original is NO_VALUE:
            return ([current], [], [])
        elif original is NEVER_SET or attr.is_equal(current, original) is True: # dont let ClauseElement expressions here trip things up
            return ([], [current], [])
        else:
            if original is not None:
                deleted = [original]
            else:
                deleted = []
            return ([current], [], deleted)
class PendingCollection(object):
    """stores items appended and removed from a collection that has not been loaded yet.

    When the collection is loaded, the changes present in PendingCollection are applied
    to produce the final result.
    """
    def __init__(self):
        self.deleted_items = util.IdentitySet()
        self.added_items = util.OrderedIdentitySet()

    def append(self, value):
        # an append cancels a prior pending removal of the same item
        if value in self.deleted_items:
            self.deleted_items.remove(value)
        self.added_items.add(value)

    def remove(self, value):
        # a removal cancels a prior pending append of the same item
        if value in self.added_items:
            self.added_items.remove(value)
        self.deleted_items.add(value)
def _managed_attributes(class_):
"""return all InstrumentedAttributes associated with the given class_ and its superclasses."""
return chain(*[cl._class_state.attrs.values() for cl in class_.__mro__[:-1] if hasattr(cl, '_class_state')])
def get_history(state, key, **kwargs):
    """Return the change history for attribute `key` on the given state."""
    attribute = getattr(state.class_, key)
    return attribute.impl.get_history(state, **kwargs)
def get_as_list(state, key, passive=False):
    """return an InstanceState attribute as a list,
    regardless of it being a scalar or collection-based
    attribute.

    returns None if passive=True and the getter returns
    PASSIVE_NORESULT.
    """
    impl = getattr(state.class_, key).impl
    value = impl.get(state, passive=passive)
    if value is PASSIVE_NORESULT:
        return None
    if hasattr(impl, 'get_collection'):
        return impl.get_collection(state, value)
    if isinstance(value, list):
        return value
    return [value]
def has_parent(class_, instance, key, optimistic=False):
    """Return the `hasparent` flag of `instance` for attribute `key` on class_."""
    impl = getattr(class_, key).impl
    return impl.hasparent(instance._state, optimistic=optimistic)
def _create_prop(class_, key, uselist, callable_, typecallable, useobject, mutable_scalars, **kwargs):
    """Select and construct the appropriate AttributeImpl flavor for `key`."""
    if kwargs.pop('dynamic', False):
        # imported locally to avoid a circular import at module load time
        from sqlalchemy.orm import dynamic
        return dynamic.DynamicAttributeImpl(class_, key, typecallable, **kwargs)
    if uselist:
        return CollectionAttributeImpl(class_, key, callable_, typecallable, **kwargs)
    if useobject:
        return ScalarObjectAttributeImpl(class_, key, callable_, **kwargs)
    if mutable_scalars:
        return MutableScalarAttributeImpl(class_, key, callable_, **kwargs)
    return ScalarAttributeImpl(class_, key, callable_, **kwargs)
def manage(instance):
    """initialize an InstanceState on the given instance."""
    if hasattr(instance, '_state'):
        return
    instance._state = InstanceState(instance)
def new_instance(class_, state=None):
    """create a new instance of class_ without its __init__() method being called.

    Also initializes an InstanceState on the new instance.
    """
    instance = class_.__new__(class_)
    if state:
        instance._state = state
    else:
        instance._state = InstanceState(instance)
    return instance
def _init_class_state(class_):
    """Ensure class_ carries its own ClassState (not one inherited from a base)."""
    if '_class_state' not in class_.__dict__:
        class_._class_state = ClassState()
def register_class(class_, extra_init=None, on_exception=None, deferred_scalar_loader=None):
    """Instrument class_ with a wrapped __init__ that creates an InstanceState.

    extra_init / on_exception are optional hooks invoked around the original
    constructor; deferred_scalar_loader is stored on the class state for
    expired-attribute loading.
    """
    # do a sweep first, this also helps some attribute extensions
    # (like associationproxy) become aware of themselves at the
    # class level
    for key in dir(class_):
        getattr(class_, key, None)
    _init_class_state(class_)
    class_._class_state.deferred_scalar_loader = deferred_scalar_loader
    # rebound below; the closure inside init() reads the final values
    oldinit = None
    doinit = False
    def init(instance, *args, **kwargs):
        if not hasattr(instance, '_state'):
            instance._state = InstanceState(instance)
        if extra_init:
            extra_init(class_, oldinit, instance, args, kwargs)
        try:
            if doinit:
                oldinit(instance, *args, **kwargs)
            elif args or kwargs:
                # simulate error message raised by object(), but don't copy
                # the text verbatim
                raise TypeError("default constructor for object() takes no parameters")
        except:
            if on_exception:
                on_exception(class_, oldinit, instance, args, kwargs)
            raise
    # override oldinit
    oldinit = class_.__init__
    if oldinit is None or not hasattr(oldinit, '_oldinit'):
        init._oldinit = oldinit
        class_.__init__ = init
    # if oldinit is already one of our 'init' methods, replace it
    elif hasattr(oldinit, '_oldinit'):
        init._oldinit = oldinit._oldinit
        # BUG FIX: was ``class_.__init = init`` (missing trailing underscores),
        # which left the previous wrapper installed and stored the new one
        # under a bogus ``__init`` attribute.
        class_.__init__ = init
        oldinit = oldinit._oldinit
    if oldinit is not None:
        # object.__init__ takes no meaningful args; skip calling it
        doinit = oldinit is not object.__init__
        try:
            init.__name__ = oldinit.__name__
            init.__doc__ = oldinit.__doc__
        except:
            # cant set __name__ in py 2.3 !
            pass
def unregister_class(class_):
    """Reverse the effects of register_class()/register_attribute()."""
    # restore the original __init__ captured by register_class()
    if hasattr(class_, '__init__') and hasattr(class_.__init__, '_oldinit'):
        if class_.__init__._oldinit is not None:
            class_.__init__ = class_.__init__._oldinit
        else:
            delattr(class_, '__init__')
    if '_class_state' in class_.__dict__:
        _class_state = class_.__dict__['_class_state']
        for key, attr in _class_state.attrs.iteritems():
            # NOTE(review): guards on `key` but deletes `attr.impl.key` --
            # assumed to always be the same string; confirm.
            if key in class_.__dict__:
                delattr(class_, attr.impl.key)
        delattr(class_, '_class_state')
def register_attribute(class_, key, uselist, useobject, callable_=None, proxy_property=None, mutable_scalars=False, **kwargs):
    """Install an InstrumentedAttribute named `key` on class_ and record it in the class state."""
    _init_class_state(class_)
    typecallable = kwargs.pop('typecallable', None)
    if isinstance(typecallable, InstrumentedAttribute):
        typecallable = None
    comparator = kwargs.pop('comparator', None)
    if key in class_.__dict__ and isinstance(class_.__dict__[key], InstrumentedAttribute):
        # this currently only occurs if two primary mappers are made for the same class.
        # TODO: possibly have InstrumentedAttribute check "entity_name" when searching for impl.
        # raise an error if two attrs attached simultaneously otherwise
        return
    if proxy_property:
        inst = ProxiedAttribute(key, proxy_property, comparator=comparator)
    else:
        inst = InstrumentedAttribute(_create_prop(class_, key, uselist, callable_, useobject=useobject,
            typecallable=typecallable, mutable_scalars=mutable_scalars, **kwargs), comparator=comparator)
    setattr(class_, key, inst)
    class_._class_state.attrs[key] = inst
def unregister_attribute(class_, key):
    """Remove the instrumented attribute `key` from class_, if it is registered."""
    class_state = class_._class_state
    if key not in class_state.attrs:
        return
    del class_state.attrs[key]
    delattr(class_, key)
def init_collection(instance, key):
    """Initialize the collection attribute `key` on `instance` and return
    its collection adapter."""
    state = instance._state
    impl = getattr(instance.__class__, key).impl
    fresh_data = impl.initialize(state)
    return impl.get_collection(state, fresh_data)
| {
"repo_name": "santisiri/popego",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.3-py2.5.egg/sqlalchemy/orm/attributes.py",
"copies": "1",
"size": "43456",
"license": "bsd-3-clause",
"hash": -1148292408318795000,
"line_mean": 34.5613747954,
"line_max": 244,
"alpha_frac": 0.613701215,
"autogenerated": false,
"ratio": 4.284334023464458,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5398035238464458,
"avg_score": null,
"num_lines": null
} |
import weakref, threading
import UserDict
from sqlalchemy import util
from sqlalchemy.orm import interfaces, collections
from sqlalchemy.orm.mapper import class_mapper
from sqlalchemy import exceptions
# Sentinel objects, compared by identity; guaranteed never equal to user data.
PASSIVE_NORESULT = object()  # returned by get(passive=True) when a loader would have to fire
ATTR_WAS_SET = object()  # returned by a loader callable that populated the attribute itself
NO_VALUE = object()  # "no value present", distinct from a legitimate None value
class InstrumentedAttribute(interfaces.PropComparator):
    """public-facing instrumented attribute.

    A data descriptor placed on a mapped class.  Instance-level access is
    delegated to ``self.impl`` (an AttributeImpl), while class-level access
    returns the descriptor itself so it can participate in query
    expressions via the comparator.
    """
    def __init__(self, impl, comparator=None):
        """Construct an InstrumentedAttribute.
        comparator
          a sql.Comparator to which class-level compare/math events will be sent
        """
        self.impl = impl
        self.comparator = comparator
    def __set__(self, obj, value):
        self.impl.set(obj._state, value, None)
    def __delete__(self, obj):
        self.impl.delete(obj._state)
    def __get__(self, obj, owner):
        # class-level access (obj is None) returns the descriptor itself
        if obj is None:
            return self
        return self.impl.get(obj._state)
    def get_history(self, obj, **kwargs):
        return self.impl.get_history(obj._state, **kwargs)
    def clause_element(self):
        return self.comparator.clause_element()
    def expression_element(self):
        return self.comparator.expression_element()
    def operate(self, op, *other, **kwargs):
        # comparison/math operators are routed to the comparator
        return op(self.comparator, *other, **kwargs)
    def reverse_operate(self, op, other, **kwargs):
        return op(other, self.comparator, **kwargs)
    def hasparent(self, instance, optimistic=False):
        return self.impl.hasparent(instance._state, optimistic=optimistic)
    property = property(lambda s: class_mapper(s.impl.class_).get_property(s.impl.key),
                        doc="the MapperProperty object associated with this attribute")
class AttributeImpl(object):
    """internal implementation for instrumented attributes."""
    def __init__(self, class_, manager, key, callable_, trackparent=False, extension=None, compare_function=None, mutable_scalars=False, **kwargs):
        """Construct an AttributeImpl.
        class_
          the class to be instrumented.
        manager
          AttributeManager managing this class
        key
          string name of the attribute
        callable_
          optional function which generates a callable based on a parent
          instance, which produces the "default" values for a scalar or
          collection attribute when it's first accessed, if not present already.
        trackparent
          if True, attempt to track if an instance has a parent attached to it
          via this attribute
        extension
          an AttributeExtension object which will receive
          set/delete/append/remove/etc. events
        compare_function
          a function that compares two values which are normally assignable to this
          attribute
        mutable_scalars
          if True, the values which are normally assignable to this attribute can mutate,
          and need to be compared against a copy of their original contents in order to
          detect changes on the parent instance
        """
        self.class_ = class_
        self.manager = manager
        self.key = key
        self.callable_ = callable_
        self.trackparent = trackparent
        self.mutable_scalars = mutable_scalars
        if mutable_scalars:
            # class-level flag consulted by AttributeManager._is_modified
            class_._sa_has_mutable_scalars = True
        self.copy = None
        if compare_function is None:
            self.is_equal = lambda x,y: x == y
        else:
            self.is_equal = compare_function
        self.extensions = util.to_list(extension or [])
    def commit_to_state(self, state, value=NO_VALUE):
        """commit the object's current state to its 'committed' state."""
        # default: pull the current value out of the instance dict, if present
        if value is NO_VALUE:
            if self.key in state.dict:
                value = state.dict[self.key]
        if value is not NO_VALUE:
            # store a copy so later in-place mutations can be detected
            state.committed_state[self.key] = self.copy(value)
    def hasparent(self, state, optimistic=False):
        """Return the boolean value of a `hasparent` flag attached to the given item.
        The `optimistic` flag determines what the default return value
        should be if no `hasparent` flag can be located.
        As this function is used to determine if an instance is an
        *orphan*, instances that were loaded from storage should be
        assumed to not be orphans, until a True/False value for this
        flag is set.
        An instance attribute that is loaded by a callable function
        will also not have a `hasparent` flag.
        """
        # keyed by id(self) so each attribute tracks its own flag
        return state.parents.get(id(self), optimistic)
    def sethasparent(self, state, value):
        """Set a boolean flag on the given item corresponding to
        whether or not it is attached to a parent object via the
        attribute represented by this ``InstrumentedAttribute``.
        """
        state.parents[id(self)] = value
    def get_history(self, state, passive=False):
        # returns None (not an empty history) when a passive get could not
        # produce a value without firing a loader
        current = self.get(state, passive=passive)
        if current is PASSIVE_NORESULT:
            return None
        return AttributeHistory(self, state, current, passive=passive)
    def set_callable(self, state, callable_, clear=False):
        """Set a callable function for this attribute on the given object.
        This callable will be executed when the attribute is next
        accessed, and is assumed to construct part of the instances
        previously stored state. When its value or values are loaded,
        they will be established as part of the instance's *committed
        state*. While *trackparent* information will be assembled for
        these instances, attribute-level event handlers will not be
        fired.
        The callable overrides the class level callable set in the
        ``InstrumentedAttribute` constructor.
        """
        if clear:
            self.clear(state)
        if callable_ is None:
            self.initialize(state)
        else:
            state.callables[self] = callable_
    def _get_callable(self, state):
        # a per-instance callable takes precedence over the class-level one
        if self in state.callables:
            return state.callables[self]
        elif self.callable_ is not None:
            return self.callable_(state.obj())
        else:
            return None
    def reset(self, state):
        """Remove any per-instance callable functions corresponding to
        this ``InstrumentedAttribute``'s attribute from the given
        object, and remove this ``InstrumentedAttribute``'s attribute
        from the given object's dictionary.
        """
        try:
            del state.callables[self]
        except KeyError:
            pass
        self.clear(state)
    def clear(self, state):
        """Remove this ``InstrumentedAttribute``'s attribute from the given object's dictionary.
        Subsequent calls to ``getattr(obj, key)`` will raise an
        ``AttributeError`` by default.
        """
        try:
            del state.dict[self.key]
        except KeyError:
            pass
    def check_mutable_modified(self, state):
        # overridden by ScalarAttributeImpl when mutable_scalars is enabled
        return False
    def initialize(self, state):
        """Initialize this attribute on the given object instance with an empty value."""
        state.dict[self.key] = None
        return None
    def get(self, state, passive=False):
        """Retrieve a value from the given object.
        If a callable is assembled on this object's attribute, and
        passive is False, the callable will be executed and the
        resulting value will be set as the new value for this attribute.
        """
        try:
            return state.dict[self.key]
        except KeyError:
            # if an instance-wide "trigger" was set, call that
            # and start again
            if state.trigger:
                state.call_trigger()
                return self.get(state, passive=passive)
            callable_ = self._get_callable(state)
            if callable_ is not None:
                if passive:
                    # a load would be required; signal the caller instead
                    return PASSIVE_NORESULT
                value = callable_()
                if value is not ATTR_WAS_SET:
                    return self.set_committed_value(state, value)
                else:
                    # the callable populated state.dict itself
                    return state.dict[self.key]
            else:
                # Return a new, empty value
                return self.initialize(state)
    def append(self, state, value, initiator):
        # scalar semantics by default: append degrades to set()
        self.set(state, value, initiator)
    def remove(self, state, value, initiator):
        # scalar semantics by default: remove degrades to set(None)
        self.set(state, None, initiator)
    def set(self, state, value, initiator):
        raise NotImplementedError()
    def set_committed_value(self, state, value):
        """set an attribute value on the given instance and 'commit' it.
        this indicates that the given value is the "persisted" value,
        and history will be logged only if a newly set value is not
        equal to this value.
        this is typically used by deferred/lazy attribute loaders
        to set object attributes after the initial load.
        """
        if state.committed_state is not None:
            self.commit_to_state(state, value)
        # remove per-instance callable, if any
        state.callables.pop(self, None)
        state.dict[self.key] = value
        return value
    def set_raw_value(self, state, value):
        # write directly into the instance dict; no events, no history
        state.dict[self.key] = value
        return value
    def fire_append_event(self, state, value, initiator):
        state.modified = True
        if self.trackparent and value is not None:
            self.sethasparent(value._state, True)
        obj = state.obj()
        # notify extensions (e.g. backrefs); initiator guards against
        # infinite recursion across a two-way relationship
        for ext in self.extensions:
            ext.append(obj, value, initiator or self)
    def fire_remove_event(self, state, value, initiator):
        state.modified = True
        if self.trackparent and value is not None:
            self.sethasparent(value._state, False)
        obj = state.obj()
        for ext in self.extensions:
            ext.remove(obj, value, initiator or self)
    def fire_replace_event(self, state, value, previous, initiator):
        state.modified = True
        if self.trackparent:
            # new value gains a parent, replaced value loses it
            if value is not None:
                self.sethasparent(value._state, True)
            if previous is not None:
                self.sethasparent(previous._state, False)
        obj = state.obj()
        for ext in self.extensions:
            ext.set(obj, value, previous, initiator or self)
class ScalarAttributeImpl(AttributeImpl):
    """represents a scalar-holding InstrumentedAttribute."""
    def __init__(self, class_, manager, key, callable_, trackparent=False, extension=None, copy_function=None, compare_function=None, mutable_scalars=False, **kwargs):
        super(ScalarAttributeImpl, self).__init__(class_, manager, key,
            callable_, trackparent=trackparent, extension=extension,
            compare_function=compare_function, mutable_scalars=mutable_scalars, **kwargs)
        if copy_function is None:
            copy_function = self.__copy
        self.copy = copy_function
    def __copy(self, item):
        # scalar values are assumed to be immutable unless a copy function
        # is passed
        return item
    def delete(self, state):
        old = self.get(state)
        del state.dict[self.key]
        self.fire_remove_event(state, old, self)
    def check_mutable_modified(self, state):
        # for mutable scalars, compare the current value against its
        # committed copy to detect in-place mutation
        if self.mutable_scalars:
            h = self.get_history(state, passive=True)
            if h is not None and h.is_modified():
                state.modified = True
                return True
            else:
                return False
        else:
            return False
    def set(self, state, value, initiator):
        """Set a value on the given object.
        `initiator` is the ``InstrumentedAttribute`` that initiated the
        ``set()` operation and is used to control the depth of a circular
        setter operation.
        """
        if initiator is self:
            return
        # if an instance-wide "trigger" was set, call that
        if state.trigger:
            state.call_trigger()
        old = self.get(state)
        state.dict[self.key] = value
        self.fire_replace_event(state, value, old, initiator)
    # NOTE(review): relies on a ``property`` attribute which is not defined
    # on AttributeImpl in this module (InstrumentedAttribute defines one) --
    # confirm how this is reached before depending on it.
    type = property(lambda self: self.property.columns[0].type)
class CollectionAttributeImpl(AttributeImpl):
    """A collection-holding attribute that instruments changes in membership.
    InstrumentedCollectionAttribute holds an arbitrary, user-specified
    container object (defaulting to a list) and brokers access to the
    CollectionAdapter, a "view" onto that object that presents consistent
    bag semantics to the orm layer independent of the user data implementation.
    """
    def __init__(self, class_, manager, key, callable_, typecallable=None, trackparent=False, extension=None, copy_function=None, compare_function=None, **kwargs):
        super(CollectionAttributeImpl, self).__init__(class_, manager,
            key, callable_, trackparent=trackparent, extension=extension,
            compare_function=compare_function, **kwargs)
        if copy_function is None:
            copy_function = self.__copy
        self.copy = copy_function
        if typecallable is None:
            typecallable = list
        # factory producing new instrumented user-level containers
        self.collection_factory = \
            collections._prepare_instrumentation(typecallable)
        # duck-typed interface (list/set/dict-like) of the factory's product
        self.collection_interface = \
            util.duck_type_collection(self.collection_factory())
    def __copy(self, item):
        # snapshot the collection contents as a plain list
        return [y for y in list(collections.collection_adapter(item))]
    def delete(self, state):
        if self.key not in state.dict:
            return
        state.modified = True
        # empty the collection (firing remove events) before dropping it
        collection = self.get_collection(state)
        collection.clear_with_event()
        del state.dict[self.key]
    def initialize(self, state):
        """Initialize this attribute on the given object instance with an empty collection."""
        _, user_data = self._build_collection(state)
        state.dict[self.key] = user_data
        return user_data
    def append(self, state, value, initiator):
        # guard against re-entry from a circular backref chain
        if initiator is self:
            return
        collection = self.get_collection(state)
        collection.append_with_event(value, initiator)
    def remove(self, state, value, initiator):
        if initiator is self:
            return
        collection = self.get_collection(state)
        collection.remove_with_event(value, initiator)
    def set(self, state, value, initiator):
        """Set a value on the given object.
        `initiator` is the ``InstrumentedAttribute`` that initiated the
        ``set()` operation and is used to control the depth of a circular
        setter operation.
        """
        if initiator is self:
            return
        # reject values whose duck-type doesn't match this collection;
        # note that assigning None is rejected here as well
        setting_type = util.duck_type_collection(value)
        if value is None or setting_type != self.collection_interface:
            raise exceptions.ArgumentError(
                "Incompatible collection type on assignment: %s is not %s-like" %
                (type(value).__name__, self.collection_interface.__name__))
        if hasattr(value, '_sa_adapter'):
            # already instrumented: copy its members out via the adapter
            value = list(getattr(value, '_sa_adapter'))
        elif setting_type == dict:
            value = value.values()
        # if an instance-wide "trigger" was set, call that
        if state.trigger:
            state.call_trigger()
        old = self.get(state)
        old_collection = self.get_collection(state, old)
        new_collection, user_data = self._build_collection(state)
        self._load_collection(state, value or [], emit_events=True,
                              collection=new_collection)
        state.dict[self.key] = user_data
        state.modified = True
        # mark all the old elements as detached from the parent
        if old_collection:
            old_collection.clear_with_event()
            old_collection.unlink(old)
    def set_committed_value(self, state, value):
        """Set an attribute value on the given instance and 'commit' it."""
        # load without events: this is the "persisted" value arriving
        collection, user_data = self._build_collection(state)
        self._load_collection(state, value or [], emit_events=False,
                              collection=collection)
        value = user_data
        if state.committed_state is not None:
            self.commit_to_state(state, value)
        # remove per-instance callable, if any
        state.callables.pop(self, None)
        state.dict[self.key] = value
        return value
    def _build_collection(self, state):
        # returns (adapter, raw user-level container)
        user_data = self.collection_factory()
        collection = collections.CollectionAdapter(self, state, user_data)
        return collection, user_data
    def _load_collection(self, state, values, emit_events=True, collection=None):
        collection = collection or self.get_collection(state)
        if values is None:
            return
        elif emit_events:
            for item in values:
                collection.append_with_event(item)
        else:
            for item in values:
                collection.append_without_event(item)
    def get_collection(self, state, user_data=None):
        if user_data is None:
            user_data = self.get(state)
        try:
            return getattr(user_data, '_sa_adapter')
        except AttributeError:
            # wrap a bare container on the fly; the adapter attaches itself
            # to the container as '_sa_adapter'
            collections.CollectionAdapter(self, state, user_data)
            return getattr(user_data, '_sa_adapter')
class GenericBackrefExtension(interfaces.AttributeExtension):
    """An extension which synchronizes a two-way relationship.
    A typical two-way relationship is a parent object containing a
    list of child objects, where each child object references the
    parent. The other are two objects which contain scalar references
    to each other.
    """
    def __init__(self, key):
        # name of the complementary attribute on the other side
        self.key = key
    def set(self, obj, child, oldchild, initiator):
        if oldchild is child:
            return
        if oldchild is not None:
            # With lazy=None, there's no guarantee that the full collection is
            # present when updating via a backref.
            impl = getattr(oldchild.__class__, self.key).impl
            try:
                impl.remove(oldchild._state, obj, initiator)
            except (ValueError, KeyError, IndexError):
                pass
        if child is not None:
            getattr(child.__class__, self.key).impl.append(child._state, obj, initiator)
    def append(self, obj, child, initiator):
        # mirror the append onto the child's side; initiator passthrough
        # prevents the other side from echoing the event back
        getattr(child.__class__, self.key).impl.append(child._state, obj, initiator)
    def remove(self, obj, child, initiator):
        getattr(child.__class__, self.key).impl.remove(child._state, obj, initiator)
class InstanceState(object):
    """tracks state information at the instance level."""
    # __slots__ keeps these per-instance records compact; one InstanceState
    # exists per mapped instance
    __slots__ = 'class_', 'obj', 'dict', 'committed_state', 'modified', 'trigger', 'callables', 'parents', 'instance_dict', '_strong_obj'
    def __init__(self, obj):
        self.class_ = obj.__class__
        # weak reference; __cleanup runs when the instance is collected
        self.obj = weakref.ref(obj, self.__cleanup)
        self.dict = obj.__dict__
        self.committed_state = None
        self.modified = False
        self.trigger = None
        self.callables = {}
        self.parents = {}
        self.instance_dict = None
    def __cleanup(self, ref):
        if self.instance_dict is None or self.instance_dict() is None:
            return
        instance_dict = self.instance_dict()
        # the mutexing here is based on the assumption that gc.collect()
        # may be firing off cleanup handlers in a different thread than that
        # which is normally operating upon the instance dict.
        instance_dict._mutex.acquire()
        try:
            # if instance_dict de-refed us, or it called our
            # _resurrect, return
            if self.instance_dict is None or self.instance_dict() is None or self.obj() is not None:
                return
            self.__resurrect(instance_dict)
        finally:
            instance_dict._mutex.release()
    def _check_resurrect(self, instance_dict):
        instance_dict._mutex.acquire()
        try:
            return self.obj() or self.__resurrect(instance_dict)
        finally:
            instance_dict._mutex.release()
    def __resurrect(self, instance_dict):
        if self.modified or self.class_._sa_attribute_manager._is_modified(self):
            # store strong ref'ed version of the object; will revert
            # to weakref when changes are persisted
            obj = self.class_._sa_attribute_manager.new_instance(self.class_, state=self)
            self.obj = weakref.ref(obj, self.__cleanup)
            self._strong_obj = obj
            obj.__dict__.update(self.dict)
            self.dict = obj.__dict__
            return obj
        else:
            # unmodified: drop the entry from the identity map entirely
            del instance_dict[self.dict['_instance_key']]
            return None
    def __getstate__(self):
        # pickle support: persist the live instance alongside its history
        return {'committed_state':self.committed_state, 'parents':self.parents, 'modified':self.modified, 'instance':self.obj()}
    def __setstate__(self, state):
        self.committed_state = state['committed_state']
        self.parents = state['parents']
        self.modified = state['modified']
        self.obj = weakref.ref(state['instance'])
        self.class_ = self.obj().__class__
        self.dict = self.obj().__dict__
        self.callables = {}
        self.trigger = None
    def call_trigger(self):
        # clear the trigger *before* invoking it so re-entrant attribute
        # access does not fire it again
        trig = self.trigger
        self.trigger = None
        trig()
    def commit(self, manager, obj):
        self.committed_state = {}
        self.modified = False
        for attr in manager.managed_attributes(obj.__class__):
            attr.impl.commit_to_state(self)
        # remove strong ref
        self._strong_obj = None
    def rollback(self, manager, obj):
        if not self.committed_state:
            manager._clear(obj)
        else:
            for attr in manager.managed_attributes(obj.__class__):
                if attr.impl.key in self.committed_state:
                    if not hasattr(attr.impl, 'get_collection'):
                        obj.__dict__[attr.impl.key] = self.committed_state[attr.impl.key]
                    else:
                        # restore collection contents without firing events
                        collection = attr.impl.get_collection(self)
                        collection.clear_without_event()
                        for item in self.committed_state[attr.impl.key]:
                            collection.append_without_event(item)
                else:
                    if attr.impl.key in self.dict:
                        del self.dict[attr.impl.key]
class InstanceDict(UserDict.UserDict):
    """similar to WeakValueDictionary, but wired towards 'state' objects.

    Values are stored as InstanceState objects; lookups dereference the
    state's weakref and may resurrect a modified-but-collected instance.
    NOTE: this module targets Python 2.x (see the ``raise KeyError, key``
    statement below).
    """
    def __init__(self, *args, **kw):
        self._wr = weakref.ref(self)
        # RLock because the mutex is used by a cleanup
        # handler, which can be called at any time (including within an already mutexed block)
        self._mutex = threading.RLock()
        UserDict.UserDict.__init__(self, *args, **kw)
    def __getitem__(self, key):
        state = self.data[key]
        # resurrection may revive a collected-but-modified instance
        o = state.obj() or state._check_resurrect(self)
        if o is None:
            raise KeyError, key
        return o
    def __contains__(self, key):
        try:
            state = self.data[key]
            o = state.obj() or state._check_resurrect(self)
        except KeyError:
            return False
        return o is not None
    def has_key(self, key):
        return key in self
    def __repr__(self):
        return "<InstanceDict at %s>" % id(self)
    def __setitem__(self, key, value):
        if key in self.data:
            # detach the state currently occupying this key
            self._mutex.acquire()
            try:
                if key in self.data:
                    self.data[key].instance_dict = None
            finally:
                self._mutex.release()
        self.data[key] = value._state
        value._state.instance_dict = self._wr
    def __delitem__(self, key):
        state = self.data[key]
        state.instance_dict = None
        del self.data[key]
    def get(self, key, default=None):
        try:
            state = self.data[key]
        except KeyError:
            return default
        else:
            o = state.obj()
            if o is None:
                # This should only happen when the weakref has died;
                # treat the entry as missing
                return default
            else:
                return o
    def items(self):
        # only live (non-collected) instances are reported
        L = []
        for key, state in self.data.items():
            o = state.obj()
            if o is not None:
                L.append((key, o))
        return L
    def iteritems(self):
        for state in self.data.itervalues():
            value = state.obj()
            if value is not None:
                yield value._instance_key, value
    def iterkeys(self):
        return self.data.iterkeys()
    def __iter__(self):
        return self.data.iterkeys()
    def __len__(self):
        # counts only live instances, hence values() rather than data
        return len(self.values())
    def itervalues(self):
        for state in self.data.itervalues():
            obj = state.obj()
            if obj is not None:
                yield obj
    def values(self):
        L = []
        for state in self.data.values():
            o = state.obj()
            if o is not None:
                L.append(o)
        return L
    def popitem(self):
        raise NotImplementedError()
    def pop(self, key, *args):
        raise NotImplementedError()
    def setdefault(self, key, default=None):
        raise NotImplementedError()
    def update(self, dict=None, **kwargs):
        raise NotImplementedError()
    def copy(self):
        raise NotImplementedError()
class AttributeHistory(object):
    """Calculate the *history* of a particular attribute on a
    particular instance.

    Splits the attribute's value(s) into added / unchanged / deleted
    relative to the committed state snapshot.
    """
    def __init__(self, attr, state, current, passive=False):
        self.attr = attr
        # get the "original" value. if a lazy load was fired when we got
        # the 'current' value, this "original" was also populated just
        # now as well (therefore we have to get it second)
        if state.committed_state:
            original = state.committed_state.get(attr.key, None)
        else:
            original = None
        if hasattr(attr, 'get_collection'):
            # collection attribute: diff current membership against the
            # committed snapshot
            self._current = current
            s = util.Set(original or [])
            self._added_items = []
            self._unchanged_items = []
            self._deleted_items = []
            if current:
                collection = attr.get_collection(state, current)
                for a in collection:
                    if a in s:
                        self._unchanged_items.append(a)
                    else:
                        self._added_items.append(a)
            for a in s:
                if a not in self._unchanged_items:
                    self._deleted_items.append(a)
        else:
            # scalar attribute: at most one added and one deleted value
            self._current = [current]
            if attr.is_equal(current, original) is True:
                self._unchanged_items = [current]
                self._added_items = []
                self._deleted_items = []
            else:
                self._added_items = [current]
                if original is not None:
                    self._deleted_items = [original]
                else:
                    self._deleted_items = []
                self._unchanged_items = []
    def __iter__(self):
        # iterates the *current* value(s), not the history buckets
        return iter(self._current)
    def is_modified(self):
        return len(self._deleted_items) > 0 or len(self._added_items) > 0
    def added_items(self):
        return self._added_items
    def unchanged_items(self):
        return self._unchanged_items
    def deleted_items(self):
        return self._deleted_items
class AttributeManager(object):
    """Allow the instrumentation of object attributes.

    Maintains two weak-keyed caches of InstrumentedAttribute objects per
    class (inherited and class-local) and provides the class/instance
    level services used by the ORM: class and attribute registration,
    commit/rollback of attribute state, history retrieval and trigger
    management.
    """
    def __init__(self):
        # will cache attributes, indexed by class objects
        self._inherited_attribute_cache = weakref.WeakKeyDictionary()
        self._noninherited_attribute_cache = weakref.WeakKeyDictionary()
    def clear_attribute_cache(self):
        # bug fix: previously cleared ``self._attribute_cache``, an
        # attribute that is never created, so every call raised
        # AttributeError.  Clear both real caches instead.
        self._inherited_attribute_cache.clear()
        self._noninherited_attribute_cache.clear()
    def rollback(self, *obj):
        """Retrieve the committed history for each object in the given
        list, and rolls back the attributes each instance to their
        original value.
        """
        for o in obj:
            o._state.rollback(self, o)
    def _clear(self, obj):
        # strip all managed attribute values from the instance dict
        for attr in self.managed_attributes(obj.__class__):
            try:
                del obj.__dict__[attr.impl.key]
            except KeyError:
                pass
    def commit(self, *obj):
        """Establish the "committed state" for each object in the given list."""
        for o in obj:
            o._state.commit(self, o)
    def managed_attributes(self, class_):
        """Return a list of all ``InstrumentedAttribute`` objects
        associated with the given class, including inherited ones.
        """
        try:
            # TODO: move this collection onto the class itself?
            return self._inherited_attribute_cache[class_]
        except KeyError:
            if not isinstance(class_, type):
                raise TypeError(repr(class_) + " is not a type")
            inherited = [v for v in [getattr(class_, key, None) for key in dir(class_)] if isinstance(v, InstrumentedAttribute)]
            self._inherited_attribute_cache[class_] = inherited
            return inherited
    def noninherited_managed_attributes(self, class_):
        """Return the ``InstrumentedAttribute`` objects defined directly
        on the given class (not inherited from bases).
        """
        try:
            # TODO: move this collection onto the class itself?
            return self._noninherited_attribute_cache[class_]
        except KeyError:
            if not isinstance(class_, type):
                raise TypeError(repr(class_) + " is not a type")
            noninherited = [v for v in [getattr(class_, key, None) for key in list(class_.__dict__)] if isinstance(v, InstrumentedAttribute)]
            self._noninherited_attribute_cache[class_] = noninherited
            return noninherited
    def is_modified(self, obj):
        return self._is_modified(obj._state)
    def _is_modified(self, state):
        # fast path: explicit modification flag; otherwise scan mutable
        # scalar attributes for in-place changes
        if state.modified:
            return True
        elif getattr(state.class_, '_sa_has_mutable_scalars', False):
            for attr in self.managed_attributes(state.class_):
                if getattr(attr.impl, 'mutable_scalars', False) and attr.impl.check_mutable_modified(state):
                    return True
            else:
                return False
        else:
            return False
    def get_history(self, obj, key, **kwargs):
        """Return a new ``AttributeHistory`` object for the given
        attribute on the given object.
        """
        return getattr(obj.__class__, key).impl.get_history(obj._state, **kwargs)
    def get_as_list(self, obj, key, passive=False):
        """Return an attribute of the given name from the given object.
        If the attribute is a scalar, return it as a single-item list,
        otherwise return a collection based attribute.
        If the attribute's value is to be produced by an unexecuted
        callable, the callable will only be executed if the given
        `passive` flag is False.
        """
        attr = getattr(obj.__class__, key).impl
        state = obj._state
        x = attr.get(state, passive=passive)
        if x is PASSIVE_NORESULT:
            return []
        elif hasattr(attr, 'get_collection'):
            return list(attr.get_collection(state, x))
        elif isinstance(x, list):
            return x
        else:
            return [x]
    def trigger_history(self, obj, callable):
        """Clear all managed object attributes and places the given
        `callable` as an attribute-wide *trigger*, which will execute
        upon the next attribute access, after which the trigger is
        removed.
        """
        s = obj._state
        self._clear(obj)
        s.committed_state = None
        s.trigger = callable
    def untrigger_history(self, obj):
        """Remove a trigger function set by trigger_history.
        Does not restore the previous state of the object.
        """
        obj._state.trigger = None
    def has_trigger(self, obj):
        """Return True if the given object has a trigger function set
        by ``trigger_history()``.
        """
        return obj._state.trigger is not None
    def reset_instance_attribute(self, obj, key):
        """Remove any per-instance callable functions corresponding to
        given attribute `key` from the given object, and remove this
        attribute from the given object's dictionary.
        """
        attr = getattr(obj.__class__, key)
        attr.impl.reset(obj._state)
    def is_class_managed(self, class_, key):
        """Return True if the given `key` corresponds to an
        instrumented property on the given class.
        """
        return hasattr(class_, key) and isinstance(getattr(class_, key), InstrumentedAttribute)
    def has_parent(self, class_, obj, key, optimistic=False):
        return getattr(class_, key).impl.hasparent(obj._state, optimistic=optimistic)
    def init_instance_attribute(self, obj, key, callable_=None, clear=False):
        """Initialize an attribute on an instance to either a blank
        value, cancelling out any class- or instance-level callables
        that were present, or if a `callable` is supplied set the
        callable to be invoked when the attribute is next accessed.
        """
        getattr(obj.__class__, key).impl.set_callable(obj._state, callable_, clear=clear)
    def _create_prop(self, class_, key, uselist, callable_, typecallable, **kwargs):
        """Create a scalar property object, defaulting to
        ``InstrumentedAttribute``, which will communicate change
        events back to this ``AttributeManager``.
        """
        if kwargs.pop('dynamic', False):
            from sqlalchemy.orm import dynamic
            return dynamic.DynamicAttributeImpl(class_, self, key, typecallable, **kwargs)
        elif uselist:
            return CollectionAttributeImpl(class_, self, key,
                                           callable_,
                                           typecallable,
                                           **kwargs)
        else:
            return ScalarAttributeImpl(class_, self, key, callable_,
                                       **kwargs)
    def manage(self, obj):
        # attach an InstanceState if the instance doesn't have one yet
        if not hasattr(obj, '_state'):
            obj._state = InstanceState(obj)
    def new_instance(self, class_, state=None):
        """create a new instance of class_ without its __init__() method being called."""
        s = class_.__new__(class_)
        if state:
            s._state = state
        else:
            s._state = InstanceState(s)
        return s
    def register_class(self, class_, extra_init=None, on_exception=None):
        """decorate the constructor of the given class to establish attribute
        management on new instances."""
        # do a sweep first, this also helps some attribute extensions
        # (like associationproxy) become aware of themselves at the
        # class level
        self.unregister_class(class_)
        oldinit = None
        doinit = False
        class_._sa_attribute_manager = self
        def init(instance, *args, **kwargs):
            instance._state = InstanceState(instance)
            if extra_init:
                extra_init(class_, oldinit, instance, args, kwargs)
            if doinit:
                try:
                    oldinit(instance, *args, **kwargs)
                except:
                    if on_exception:
                        on_exception(class_, oldinit, instance, args, kwargs)
                    raise
        # override oldinit
        oldinit = class_.__init__
        if oldinit is None or not hasattr(oldinit, '_oldinit'):
            init._oldinit = oldinit
            class_.__init__ = init
        # if oldinit is already one of our 'init' methods, replace it
        elif hasattr(oldinit, '_oldinit'):
            init._oldinit = oldinit._oldinit
            # bug fix: this previously read ``class_.__init = init``, which
            # name-mangles to ``class_._AttributeManager__init`` inside this
            # class body and therefore never replaced the constructor.
            class_.__init__ = init
            oldinit = oldinit._oldinit
        if oldinit is not None:
            doinit = oldinit is not object.__init__
            try:
                init.__name__ = oldinit.__name__
                init.__doc__ = oldinit.__doc__
            except:
                # cant set __name__ in py 2.3 !
                pass
    def unregister_class(self, class_):
        """Remove attribute instrumentation from the given class."""
        if hasattr(class_, '__init__') and hasattr(class_.__init__, '_oldinit'):
            if class_.__init__._oldinit is not None:
                class_.__init__ = class_.__init__._oldinit
            else:
                delattr(class_, '__init__')
        for attr in self.noninherited_managed_attributes(class_):
            delattr(class_, attr.impl.key)
        self._inherited_attribute_cache.pop(class_, None)
        self._noninherited_attribute_cache.pop(class_, None)
    def register_attribute(self, class_, key, uselist, callable_=None, **kwargs):
        """Register an attribute at the class level to be instrumented
        for all instances of the class.
        """
        # first invalidate the cache for the given class
        # (will be reconstituted as needed, while getting managed attributes)
        self._inherited_attribute_cache.pop(class_, None)
        self._noninherited_attribute_cache.pop(class_, None)
        typecallable = kwargs.pop('typecallable', None)
        if isinstance(typecallable, InstrumentedAttribute):
            typecallable = None
        comparator = kwargs.pop('comparator', None)
        setattr(class_, key, InstrumentedAttribute(self._create_prop(class_, key, uselist, callable_,
                typecallable=typecallable, **kwargs), comparator=comparator))
    def set_raw_value(self, instance, key, value):
        getattr(instance.__class__, key).impl.set_raw_value(instance._state, value)
    def set_committed_value(self, instance, key, value):
        getattr(instance.__class__, key).impl.set_committed_value(instance._state, value)
    def init_collection(self, instance, key):
        """Initialize a collection attribute and return the collection adapter."""
        attr = getattr(instance.__class__, key).impl
        state = instance._state
        user_data = attr.initialize(state)
        return attr.get_collection(state, user_data)
| {
"repo_name": "santisiri/popego",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.0-py2.5.egg/sqlalchemy/orm/attributes.py",
"copies": "1",
"size": "39285",
"license": "bsd-3-clause",
"hash": 5839364163224691000,
"line_mean": 35.0082493126,
"line_max": 167,
"alpha_frac": 0.5847779051,
"autogenerated": false,
"ratio": 4.517594296228151,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007564350654939339,
"num_lines": 1091
} |
# Attribute --> Stat bonus/penalty mapping methods
def mgt_to_atk(mgt):
    """Map a might score to an attack die expression."""
    # Ordered (upper bound, die) pairs; the first bound >= mgt wins.
    tiers = (
        (4, "1d2"), (8, "1d4"), (10, "1d6"), (12, "1d8"),
        (14, "1d10"), (16, "1d12"), (18, "1d6+1d8"), (20, "2d8"),
        (22, "3d6"), (25, "1d20"),
    )
    for bound, die in tiers:
        if mgt <= bound:
            return die
    return "2d12"
def mgt_to_carry_strength(mgt):
    """Map a might score to a carrying-strength modifier.

    Scores below 10 yield 0; otherwise the bonus grows by 1 per 3 full
    points above 7.  Floor division (``//``) preserves the Python 2
    integer-division semantics this module was written for.
    """
    if mgt < 10:
        return 0
    else:
        # NOTE(review): the negated return looks suspicious -- higher might
        # yields a more negative value.  Confirm the intended sign.
        diff = mgt - 7
        bonus = diff // 3
        return -bonus
def mgt_con_to_def(MGT, CON):
    """Map combined Might + Constitution to defense dice."""
    total = MGT + CON
    # (exclusive upper bound, dice) tiers in ascending order.
    for limit, dice in ((10, "1d4"), (20, "1d6"), (30, "1d8"),
                        (40, "1d12"), (50, "3d6")):
        if total < limit:
            return dice
    # Totals of 50 and above.
    return "2d12"
def dex_to_move(DEX):
    """Movement rate derived from Dexterity: one point per three DEX."""
    return DEX / 3
def dex_to_evade(DEX):
    """Evade modifier from Dexterity, relative to the average score of 10."""
    offset = DEX - 10
    if offset >= 3:
        return offset / 3
    if offset >= 0:
        return 0
    if offset >= -5:
        return -1
    return -2
def dex_to_hit(DEX):
    """To-hit modifier from Dexterity, relative to the average score of 10."""
    offset = DEX - 10
    if offset >= 3:
        return offset / 3
    if offset >= 0:
        return 0
    if offset >= -5:
        return -1
    return -2
def dex_to_accuracy(dex):
    """Accuracy modifier from Dexterity, relative to the average score of 10."""
    offset = dex - 10
    if offset >= 3:
        return offset / 3
    if offset >= 0:
        return 0
    if offset >= -5:
        return -1
    return -2
def art_to_evade(art):
    """Evade modifier from the Art score: one point per seven ART."""
    return art / 7
def art_to_hit(art):
    """To-hit modifier from the Art score."""
    if art <= 5:
        return -1
    if art <= 10:
        return 0
    return (art - 6) / 4
def art_to_spell_fail(ART):
    """Spell-failure chance, inversely proportional to Art."""
    return 25 / ART
def art_to_casting_speed(ART):
    """Casting speed: one point per three ART."""
    return ART / 3
def int_to_spell_fail(INT):
    """Spell-failure chance, inversely proportional to Intelligence."""
    return 25 / INT
def int_to_craft(INT):
    """Craft bonus: half the Intelligence score."""
    return INT / 2
def dex_art_to_craft(DEX, ART):
    """Craft bonus from combined Dexterity and Art: one point per five."""
    return (DEX + ART) / 5
def div_to_magic_attack(DIV):
    """Map a Divinity score to magic attack dice (same tiers as mgt_to_atk)."""
    tiers = (
        (4, "1d2"), (8, "1d4"), (10, "1d6"), (12, "1d8"), (14, "1d10"),
        (16, "1d12"), (18, "1d6+1d8"), (20, "2d8"), (22, "3d6"), (25, "1d20"),
    )
    for limit, dice in tiers:
        if DIV <= limit:
            return dice
    # Anything above 25.
    return "2d12"
def div_art_to_magic_defense(DIV, ART):
    """Map combined Divinity + Art to magic defense dice."""
    total = DIV + ART
    for limit, dice in ((10, "1d4"), (20, "1d6"), (30, "1d8"),
                        (40, "1d12"), (50, "3d6")):
        if total < limit:
            return dice
    # Totals of 50 and above.
    return "2d12"
def con_to_maxHP(con):
    """Max-HP modifier: below-average CON subtracts the deficit, above-average
    adds double the surplus."""
    delta = con - 10
    return delta if delta < 0 else delta * 2
def con_to_resistance(CON):
    """Resistance modifier from Constitution."""
    if CON <= 3:
        return -2
    if CON <= 7:
        return -1
    if CON <= 10:
        return 0
    return (CON - 8) / 3
| {
"repo_name": "mouseroot/Otrera",
"path": "Code/attMappings.py",
"copies": "1",
"size": "2527",
"license": "apache-2.0",
"hash": 5631716055785649000,
"line_mean": 14.0416666667,
"line_max": 50,
"alpha_frac": 0.6086268302,
"autogenerated": false,
"ratio": 2.178448275862069,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.32870751060620695,
"avg_score": null,
"num_lines": null
} |
# Attribute TIMEX2 TIMEX3
#---------------------------------
# ID None tid ('t' + ID)
# value val value
# mod mod mod
# type set type
# (only if type=set)
# freq None freq
# quant None quant
# comment comment comment
# temporal_function None temporalFunction
# role None functionInDocument
# begin_timex None beginPoint (ID of begin_timex)
# end_timex None endPoint (ID of end_timex)
# context None anchorTimeID (ID of context)
class Timex(object):
    """A temporal expression."""

    def __init__(self, type=None, value=None, id=None):
        """Initialise a timex object with some optional values."""
        self.type = type
        self.value = value
        self.id = id
        # All remaining annotation fields start out unset.
        for attr in ('mod', 'freq', 'quant', 'comment', 'document_role',
                     'begin_timex', 'end_timex', 'context'):
            setattr(self, attr, None)
        self.temporal_function = False
        self.non_consuming = False
def add_timex_ids(ts):
    """
    Goes through all TIMEXs and adds IDs to the timexes, if they don't exist
    already. Each ID is an integer, and is guaranteed to be unique in this set
    of timexes.
    """
    # IDs already in use (may include None for unassigned timexes).
    used = set(t.id for t in ts)
    candidate = 1
    for timex in ts:
        if timex.id is not None:
            continue
        # Advance past any ID that is already taken.
        while candidate in used:
            candidate += 1
        timex.id = candidate
        used.add(candidate)
"repo_name": "jo-fu/TimeLineCurator",
"path": "ternip/timex.py",
"copies": "3",
"size": "1837",
"license": "mit",
"hash": -3481613535321094700,
"line_mean": 30.8392857143,
"line_max": 78,
"alpha_frac": 0.5209580838,
"autogenerated": false,
"ratio": 3.984815618221258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6005773702021258,
"avg_score": null,
"num_lines": null
} |
"""Attribute validatior"""
import flask
import ggrc.models
import ggrc.access_control
from ggrc import db
from ggrc.utils import benchmark
from ggrc.models.reflection import AttributeInfo
class AttributeValidator(object):
    """Adds methods needed for attribute name validation."""
    # pylint: disable=too-few-public-methods

    # NOTE(review): these classmethods rely on caches — ``cls._reserved_names``
    # (assumed to be declared as a class-level dict elsewhere in the project —
    # TODO confirm) and request-scoped ``flask.g`` attributes.

    @classmethod
    def _get_reserved_names(cls, definition_type):
        """Get a list of all attribute names in all objects.

        On first call this function computes all possible names that can be used by
        any model and stores them in a static frozen set. All later calls just get
        this set.

        Returns:
          frozen set containing all reserved attribute names for the current
          object.
        """
        # pylint: disable=protected-access
        # The _inflector is a false positive in our app.
        with benchmark("Generate a list of all reserved attribute names"):
            if not cls._reserved_names.get(definition_type):
                # Map both the table-singular and model-singular spelling of
                # each model name to the model class, so either form of
                # `definition_type` resolves.
                definition_map = {model._inflector.table_singular: model
                                  for model in ggrc.models.all_models.all_models}
                definition_map.update({model._inflector.model_singular: model
                                       for model in ggrc.models.all_models.all_models})
                definition_model = definition_map.get(definition_type)
                if not definition_model:
                    raise ValueError("Invalid definition type")
                aliases = AttributeInfo.gather_aliases(definition_model)
                # Aliases are either plain strings or dicts with a
                # "display_name" entry; normalise both to lower-case names.
                cls._reserved_names[definition_type] = frozenset(
                    (value["display_name"] if isinstance(
                        value, dict) else value).lower()
                    for value in aliases.values() if value
                )
            return cls._reserved_names[definition_type]

    @classmethod
    def _get_global_cad_names(cls, definition_type):
        """Get names of global cad for a given object.

        Returns a mapping of lower-cased custom attribute definition titles
        to their ids, cached on ``flask.g`` for the current request.
        """
        cad = ggrc.models.custom_attribute_definition.CustomAttributeDefinition
        definition_types = [definition_type]
        # Assessment templates also reserve the assessment CAD names.
        if definition_type == "assessment_template":
            definition_types.append("assessment")
        # NOTE(review): the per-request cache key ignores definition_type, so
        # a second call in the same request with a different type would return
        # stale data — confirm callers use only one type per request.
        if not getattr(flask.g, "global_cad_names", set()):
            query = db.session.query(cad.title, cad.id).filter(
                cad.definition_type.in_(definition_types),
                # definition_id is NULL for global (non-local) definitions.
                cad.definition_id.is_(None)
            )
            flask.g.global_cad_names = {name.lower(): id_ for name, id_ in query}
        return flask.g.global_cad_names

    @classmethod
    def _get_custom_roles(cls, definition_type):
        """Get all access control role names for the given object type.

        Returns a mapping of lower-cased role names to their ids, cached on
        ``flask.g`` for the current request (same caveat as above: the cache
        ignores definition_type).
        """
        if not getattr(flask.g, "global_role_names", set()):
            role = ggrc.access_control.role
            query = db.session.query(
                role.AccessControlRole.name,
                role.AccessControlRole.id).filter(
                role.AccessControlRole.object_type == definition_type
            )
            flask.g.global_role_names = {name.lower(): id_ for name, id_ in query}
        return flask.g.global_role_names
| {
"repo_name": "AleksNeStu/ggrc-core",
"path": "src/ggrc/models/mixins/attributevalidator.py",
"copies": "1",
"size": "2906",
"license": "apache-2.0",
"hash": 2971662961646752000,
"line_mean": 38.2702702703,
"line_max": 79,
"alpha_frac": 0.6754989677,
"autogenerated": false,
"ratio": 3.9972489683631363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5172747936063136,
"avg_score": null,
"num_lines": null
} |
"""Attribute-value matrix."""
from matcher import Matcher
from pymachine.machine import Machine
from pyparsing import *
import logging
class AVM(object):
    """Attribute-value matrix.

    Stores named attribute slots, each with a type matcher, a requirement
    level, a default value and a current value, plus an optional boolean
    expression (over attribute names) that decides when the matrix is
    "satisfied".
    """
    # Indices into each per-attribute data list.
    TYPE, REQUIRED, DEFAULT, VALUE = xrange(4)
    # Requirement levels: required / optional / must NOT be filled.
    RREQ, ROPT, RNEG = xrange(1, -2, -1)

    def __init__(self, name):
        self.name = name
        self.__data = {}  # {key: [type, required, default_value, value]}
        self.bool_expr = None  # parsed satisfaction expression (pyparsing)
        self.bool_str = None   # raw satisfaction expression string

    def add_attribute(self, key, datatype, required=ROPT, default_value=None):
        """
        Adds a new attribute to the "matrix".
        @param required can take three values:
                        RREQ: required,
                        ROPT: optional,
                        RNEG: must not to be filled.
        """
        if required not in [AVM.RREQ, AVM.ROPT, AVM.RNEG]:
            raise ValueError("required must be one of RREQ, ROPT, RNEG, not " +
                             repr(required))
        if not isinstance(datatype, Matcher):
            # BUG FIX: the original concatenated a str with a type object,
            # which raises TypeError instead of the intended ValueError.
            raise ValueError("datatype must be a Matcher, not " +
                             repr(type(datatype)))
        self.__data[key] = [datatype, required, default_value, default_value]

    def printname(self):
        return self.name

    def set_satisfaction(self, bool_str):
        """Install a boolean expression over attribute names and the literals
        True/False; it overrides the default satisfaction check."""
        self.bool_str = bool_str
        boolOperand = Word(alphas + '_') | oneOf("True False")
        self.bool_expr = operatorPrecedence(boolOperand,
            [
                ("not", 1, opAssoc.RIGHT, self.notop),
                ("or", 2, opAssoc.LEFT, self.orop),
                ("and", 2, opAssoc.LEFT, self.andop),
            ])

    def satisfied(self):
        """Returns @c True, if all required arguments are filled in."""
        if self.bool_expr is not None:
            # A custom satisfaction expression takes precedence.
            return self.bool_expr.parseString(self.bool_str)[0]
        for value in self.__data.values():
            if ((value[AVM.REQUIRED] == AVM.RREQ and
                 value[AVM.VALUE] is None) or
                    (value[AVM.REQUIRED] == AVM.RNEG and
                     value[AVM.VALUE] is not None)):
                return False
        return True

    def get_attribute(self, key):
        """Returns the whole attribute data tuple for @p key."""
        return self.__data[key]

    def get_field(self, key, field):
        """
        Returns the specified field from the data tuple for @p key. The valid
        values for @p field are @c TYPE, @c REQUIRED, @c DEFAULT and @c VALUE.
        """
        return self.__data[key][field]

    def get_dict(self):
        """Returns the attribute-value dictionary in a Python dict."""
        ret = dict((k, v[AVM.VALUE]) for k, v in self.__data.iteritems())
        ret['__NAME__'] = self.name
        return ret

    def get_basic_dict(self):
        """
        Returns the attribute-value dictionary in a Python dict, all Machine
        values replaced by their printnames.
        """
        ret = self.get_dict()
        for k, v in ret.iteritems():
            if isinstance(v, Machine):
                ret[k] = unicode(v)
        return ret

    def clear(self):
        """Resets every attribute's current value back to its default."""
        for key in self.__data.keys():
            datatype, required, default_value, _ = self.__data[key]
            self.__data[key] = [datatype, required, default_value,
                                default_value]

    def __getitem__(self, key):
        """Gets the current value of an attribute."""
        return self.__data[key][AVM.VALUE]

    def __setitem__(self, key, value):
        """Sets the current value of an attribute."""
        self.__data[key][AVM.VALUE] = value

    def __iter__(self):
        """Iterates through the attribute keys."""
        return self.__data.__iter__()

    def __unicode__(self):
        return u'{' + u', '.join(u'{0}: {1}'.format(key, self[key])
                                 for key in self) + u'}'

    # -------------------- bool functions for satisfied() --------------------
    def _operand_truth(self, operand):
        """Truth value of one parsed boolean operand: the literal strings
        'True'/'False' map to their boolean meaning, any other string is an
        attribute name that is true when filled in, and non-strings are
        converted with bool()."""
        if isinstance(operand, basestring):
            if operand in ('True', 'False'):
                # BUG FIX: the original used bool(operand) here, which is
                # True for BOTH literals, since any non-empty string is
                # truthy -- 'False' must evaluate to False.
                return operand == 'True'
            return self[operand] is not None
        return bool(operand)

    def andop(self, t):
        # Operands are at even positions; operators at odd ones.
        return all(self._operand_truth(a) for a in t[0][0::2])

    def orop(self, t):
        return any(self._operand_truth(a) for a in t[0][0::2])

    def notop(self, t):
        return not self._operand_truth(t[0][1])
| {
"repo_name": "Eszti/pymachine",
"path": "src/pymachine/avm.py",
"copies": "3",
"size": "4928",
"license": "mit",
"hash": 502940020530876500,
"line_mean": 31.8533333333,
"line_max": 91,
"alpha_frac": 0.5073051948,
"autogenerated": false,
"ratio": 4.059308072487644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6066613267287644,
"avg_score": null,
"num_lines": null
} |
# attribution https://thihara.github.io/Django-Req-Parsing/
import time
from random import randint
import logging
from django.http import HttpResponseBadRequest
from django.utils.deprecation import MiddlewareMixin
from django.http import HttpResponse
from gwells.settings.base import get_env_variable
logger = logging.getLogger(__name__)
class GWellsRequestParsingMiddleware(MiddlewareMixin):
    """Emulates HTTP method overriding via a hidden ``_method`` form field."""

    def get(self, request, **kwargs):
        """GET override: nothing to remap."""
        pass

    def post(self, request, **kwargs):
        """POST override: nothing to remap."""
        pass

    def put(self, request, **kwargs):
        """Expose the form payload of an overridden PUT as ``request.PUT``."""
        request.PUT = request.POST

    def delete(self, request, **kwargs):
        """DELETE override: nothing to remap."""
        pass

    def process_request(self, request, **kwargs):
        """Dispatch on the ``_method`` form field when one is present;
        unsupported values yield a 500 response."""
        _method = request.POST.get('_method')
        if not _method:
            return
        handlers = {
            'GET': self.get,
            'PUT': self.put,
            'POST': self.post,
            'DELETE': self.delete,
        }
        handler = handlers.get(_method.upper())
        if handler is None:
            message = 'Unsupported _method: ' + _method
            return HttpResponse(message, status=500)
        handler(request)
| {
"repo_name": "bcgov/gwells",
"path": "app/backend/gwells/middleware.py",
"copies": "1",
"size": "1200",
"license": "apache-2.0",
"hash": 395920648164291260,
"line_mean": 26.2727272727,
"line_max": 59,
"alpha_frac": 0.6091666667,
"autogenerated": false,
"ratio": 4.270462633451957,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5379629300151957,
"avg_score": null,
"num_lines": null
} |
import pdb
class ENTSfeatures():
    """Maps a pair of proteins (Entrez IDs) to an ENTS feature vector built
    from domain-domain interaction data and subcellular-localisation SVM
    predictions. Use ``features[frozenset([entrez1, entrez2])]``."""

    def __init__(self, subcell_info, odds_dict, proteins, domine_interactions,
                 protein_domain_dict, protein_gene_dict, entreztoensembl,
                 domain_cols, svm_subcell_cols, svm_detail_cols):
        """Object to return a feature vector for a given protein pair in
        Entrez format.

        For information about how the inputs are generated see the
        "Inspecting ENTS code" notebook in the opencast-bio repository.
        """
        # Store all required data.
        self.subcell_info = subcell_info
        self.odds_dict = odds_dict
        self.proteins = proteins
        self.domine_interactions = domine_interactions
        self.protein_domain_dict = protein_domain_dict
        self.protein_gene_dict = protein_gene_dict
        self.entreztoensembl = entreztoensembl
        self.domain_cols = domain_cols
        self.svm_subcell_cols = svm_subcell_cols
        self.svm_detail_cols = svm_detail_cols

    def getfeaturevector(self, protein1, protein2):
        """Return the feature vector (domain features followed by subcellular
        features) for a pair of Ensembl protein IDs, or None when either
        protein cannot be resolved in the data."""
        keys = self.subcell_info.keys()
        # Configure svm columns for organism: drop any column that this
        # organism's prediction data does not provide.
        gene = keys[0]
        delete_svm_pred_cols = [x for x in self.svm_subcell_cols
                                if x not in self.subcell_info[gene]['predictions'].keys()]
        delete_svm_detail_cols = [x for x in self.svm_detail_cols
                                  if x not in self.subcell_info[gene]['svm_info'].keys()]
        # BUG FIX: the original referenced the bare names svm_subcell_cols /
        # svm_detail_cols here, a guaranteed NameError; the instance
        # attributes are clearly what was meant.
        for col in delete_svm_pred_cols:
            self.svm_subcell_cols.remove(col)
        for col in delete_svm_detail_cols:
            self.svm_detail_cols.remove(col)
        # Domain-based features; on a missing key, fall back to matching the
        # protein ID against version-suffixed keys (e.g. "ENSP000....1").
        try:
            domains = self.makeDomainString(self.protein_domain_dict[protein1],
                                            self.protein_domain_dict[protein2])
        except KeyError:
            # Narrowed from a bare ``except:``; only a failed lookup should
            # trigger the prefix-matching fallback.
            key1 = [x for x in self.protein_domain_dict.keys()
                    if protein1.startswith(x.split('.')[0])]
            key2 = [x for x in self.protein_domain_dict.keys()
                    if protein2.startswith(x.split('.')[0])]
            if len(key1) == 0 or len(key2) == 0:
                return None
            domains = self.makeDomainString(self.protein_domain_dict[key1[0]],
                                            self.protein_domain_dict[key2[0]])
        # Subcellular-localisation features, with the same fallback strategy.
        try:
            subcells = self.makeSubcellularDict(protein1, protein2)
        except KeyError:
            key1 = [x for x in self.protein_domain_dict.keys()
                    if protein1.startswith(x.split('.')[0])]
            key2 = [x for x in self.protein_domain_dict.keys()
                    if protein2.startswith(x.split('.')[0])]
            if len(key1) == 0 or len(key2) == 0:
                return None
            try:
                subcells = self.makeSubcellularDict(key1[0], key2[0])
            except Exception:
                # Just in case the fallback keys still cannot be resolved.
                return None
        return domains + subcells

    def makeSubcellularDict(self, protein1, protein2):
        """Return the concatenated subcellular SVM feature values for the
        genes coding both proteins (protein1's columns, then protein2's)."""
        gene1 = self.protein_gene_dict[protein1]
        gene2 = self.protein_gene_dict[protein2]
        svm_line1 = [self.subcell_info[gene1]['predictions'][k]
                     for k in self.svm_subcell_cols]
        svm_line2 = [self.subcell_info[gene2]['predictions'][k]
                     for k in self.svm_subcell_cols]
        svm_line1 += [self.subcell_info[gene1]['svm_info'][k]
                      for k in self.svm_detail_cols]
        svm_line2 += [self.subcell_info[gene2]['svm_info'][k]
                      for k in self.svm_detail_cols]
        return svm_line1 + svm_line2

    def getTwoListCombos(self, list1, list2):
        """Return every unique pairing between elements of the two lists;
        each pair is a sorted two-element list."""
        returnList = []
        for i in xrange(len(list1)):
            for j in xrange(len(list2)):
                pair = sorted([list1[i], list2[j]])
                if pair not in returnList:
                    returnList.append(pair)
        return returnList

    def makeDomainString(self, domains1, domains2):
        """Compute the domain-interaction part of the feature vector from the
        two proteins' domain lists, ordered by self.domain_cols."""
        domain_dict = {}
        # All potential domain-domain pairings between the two proteins.
        if len(domains1) > 0 and len(domains2) > 0:
            potential_domain_pairs = self.getTwoListCombos(domains1, domains2)
        else:
            potential_domain_pairs = []
        # DOMINE features: number of known interacting pairs and the highest
        # confidence class observed ('0' < 'LC' < 'MC' < 'HC').
        domine_pairs = list(set(tuple(sorted(x)) for x in potential_domain_pairs
                                if tuple(sorted(x)) in self.domine_interactions))
        domain_dict['n_domine_pairs'] = len(domine_pairs)
        domain_dict['highest_domine_conf'] = '0'
        for pair in domine_pairs:
            if self.domine_interactions[pair] == 'HC':
                domain_dict['highest_domine_conf'] = 'HC'
            elif (self.domine_interactions[pair] == 'MC' and
                    domain_dict['highest_domine_conf'] != 'HC'):
                domain_dict['highest_domine_conf'] = 'MC'
            elif (self.domine_interactions[pair] == 'LC' and
                    domain_dict['highest_domine_conf'] not in ['HC', 'MC']):
                domain_dict['highest_domine_conf'] = 'LC'
        # Odds-ratio features over all potential pairs.
        domain_dict['lowest_odds'] = 0.
        domain_dict['not_observed'] = 0
        domain_dict['not_observed_frac'] = 1.
        domain_dict['sum_odds'] = 0.
        domain_dict['highest_odds'] = 0.
        domain_dict['n_odds_pairs'] = 0
        for pair in potential_domain_pairs:
            pair = tuple(sorted(pair))
            # If the pair has a recorded odds ratio, update the aggregates.
            if pair in self.odds_dict:
                domain_dict['sum_odds'] += self.odds_dict[pair]
                if self.odds_dict[pair] > domain_dict['highest_odds']:
                    domain_dict['highest_odds'] = self.odds_dict[pair]
                domain_dict['n_odds_pairs'] += 1
                if self.odds_dict[pair] < domain_dict['lowest_odds']:
                    domain_dict['lowest_odds'] = self.odds_dict[pair]
            else:
                domain_dict['not_observed'] += 1
        if domain_dict['n_odds_pairs'] + domain_dict['not_observed'] > 0:
            domain_dict['not_observed_frac'] = (
                float(domain_dict['not_observed']) /
                (domain_dict['n_odds_pairs'] + domain_dict['not_observed']))
        return [domain_dict[k] for k in self.domain_cols]

    def highestdomineconf1ofk(self, fvector):
        """Replaces the string encoding of highest_domine_conf (element 7 of
        the vector) with a 1-of-k encoding."""
        onehots = {'0': [1, 0, 0, 0], 'LC': [0, 1, 0, 0],
                   'MC': [0, 0, 1, 0], 'HC': [0, 0, 0, 1]}
        encoding = onehots.get(fvector[7])
        if encoding is not None:
            fvector = fvector[:7] + encoding + fvector[8:]
        return fvector

    def __getitem__(self, key):
        """Where key is a protein pair, returns a feature vector.

        The pair should be a frozenset of Entrez IDs; a singleton set is
        treated as a self-interaction.
        """
        pair = list(key)
        if len(pair) == 1:
            pair = pair * 2
        # Convert both Entrez IDs to an Ensembl protein we have data for.
        for p in self.entreztoensembl[pair[0]]:
            if p in self.proteins:
                protein1 = p
                break
        else:
            raise KeyError("Unknown protein")
        for p in self.entreztoensembl[pair[1]]:
            if p in self.proteins:
                protein2 = p
                break
        else:
            raise KeyError("Unknown protein")
        # Retrieve and post-process the feature vector.
        fvector = self.getfeaturevector(protein1, protein2)
        if fvector is None:
            raise KeyError("No feature vector found for pair {0}".format(pair))
        # Fix the string encoding of highest_domine_conf.
        fvector = self.highestdomineconf1ofk(fvector)
        return fvector
| {
"repo_name": "gngdb/opencast-bio",
"path": "ocbio/ents.py",
"copies": "1",
"size": "8267",
"license": "mit",
"hash": 7866148477981778000,
"line_mean": 48.502994012,
"line_max": 143,
"alpha_frac": 0.5676787226,
"autogenerated": false,
"ratio": 3.5803378085751407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9597995635802357,
"avg_score": 0.010004179074557019,
"num_lines": 167
} |
# attrz, (C) 2015 Harshavardhana.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
# Use the README as the long description shown on PyPI. (`readme` rather
# than `file`, to avoid shadowing the Python 2 builtin of that name.)
with open('README.rst') as readme:
    long_description = readme.read()

version = '0.1.1'

setup(
    name='attrz',
    version=version,
    description='attrz in Python, getfattr/setfattr cross platform implementations',
    author='Harshavardhana',
    author_email='harsha@harshavardhana.net',
    install_requires=['xattr'],
    url='https://github.com/harshavardhana/attrz.git',
    license='Apache License 2.0',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    scripts=['getfattr.py', 'setfattr.py'],
    long_description=long_description,
    package_data={'': ['LICENSE', 'README.rst']},
    include_package_data=True,
)
| {
"repo_name": "harshavardhana/attrz",
"path": "setup.py",
"copies": "1",
"size": "1832",
"license": "apache-2.0",
"hash": 5674238181315570000,
"line_mean": 36.387755102,
"line_max": 84,
"alpha_frac": 0.673580786,
"autogenerated": false,
"ratio": 3.991285403050109,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00024009603841536613,
"num_lines": 49
} |
"""A TVTK interactor scene widget. This class only uses TVTK and
Traits. It does not even use any Pyface widgets. This is nice when
you want to create a raw TVTK window but with some nice funcationality
that is similar to the widgets. It is also the base class for the
toolkit specific classes since it implements the core
functionality. See the class docs for more details.
"""
# Author: Prabhu Ramachandran <prabhu@enthought.com>
# Copyright (c) 2007-2013, Enthought, Inc.
# License: BSD Style.
import os.path
from apptools.persistence import state_pickler
from tvtk.api import tvtk
from tvtk import messenger
from tvtk.tvtk_base import vtk_color_trait
from tvtk.common import configure_input
from traits.api import HasPrivateTraits, HasTraits, Any, Int, \
Property, Instance, Event, Range, Bool, Trait, Str
from tvtk.pyface import light_manager
VTK_VER = tvtk.Version().vtk_version
######################################################################
# `TVTKScene` class.
######################################################################
class TVTKScene(HasPrivateTraits):
"""A TVTK interactor scene widget.
This widget uses a RenderWindowInteractor and therefore supports
interaction with VTK widgets. The widget uses TVTK. The widget
also supports the following:
- Save the scene to a bunch of common (and not so common) image
formats.
- save the rendered scene to the clipboard.
- adding/removing lists/tuples of actors
- setting the view to useful predefined views (just like in
MayaVi).
- If one passes `stereo=1` to the constructor, stereo rendering is
enabled. By default this is disabled. Changing the stereo trait
has no effect during runtime.
- One can disable rendering by setting `disable_render` to True.
"""
# The version of this class. Used for persistence.
__version__ = 0
###########################################################################
# Traits.
###########################################################################
# Turn on/off stereo rendering. This is set on initialization and
# has no effect once the widget is realized.
stereo = Bool(False)
# Perform line smoothing for all renderered lines. This produces
# much nicer looking lines but renders slower. This setting works
# only when called before the first render.
line_smoothing = Bool(False)
# Perform point smoothing for all renderered points. This
# produces much nicer looking points but renders slower. This
# setting works only when called before the first render.
point_smoothing = Bool(False)
# Perform polygon smoothing (anti-aliasing) for all rendered
# polygons. This produces much nicer looking points but renders
# slower. This setting works only when called before the first
# render.
polygon_smoothing = Bool(False)
# Enable parallel projection. This trait is synchronized with
# that of the camera.
parallel_projection = Bool(False, desc='if the camera uses parallel projection')
# Disable rendering.
disable_render = Bool(False, desc='if rendering is to be disabled')
# Enable off-screen rendering. This allows a user to render the
# scene to an image without the need to have the window active.
# For example, the application can be minimized and the saved
# scene should be generated correctly. This is handy for batch
# scripts and the like. This works under Win32. Under Mac OS X
# and Linux it requires a recent VTK version (later than Oct 2005
# and ideally later than March 2006) to work correctly.
off_screen_rendering = Bool(False, desc='if off-screen rendering is enabled')
# The background color of the window. This is really a shadow
# trait of the renderer's background. Delegation does not seem to
# work nicely for this.
background = Trait(vtk_color_trait((0.5, 0.5, 0.5)),
desc='the background color of the window')
# The default foreground color of any actors. This basically
# saves the preference and actors will listen to changes --
# the scene itself does not use this.
foreground = Trait(vtk_color_trait((1.0, 1.0, 1.0)),
desc='the default foreground color of actors')
# The magnification to use when generating images from the render
# window.
magnification = Range(1, 2048, 1,
desc='the magnification used when the screen is saved to an image')
# Specifies the number of frames to use for anti-aliasing when
# saving a scene. This basically increases
# `self.render_window.aa_frames` in order to produce anti-aliased
# figures when a scene is saved to an image. It then restores the
# `aa_frames` in order to get interactive rendering rates.
anti_aliasing_frames = Range(0, 20, 8, desc='number of frames to use for anti-aliasing when saving a scene')
# Default JPEG quality.
jpeg_quality = Range(10, 100, 95, desc='the quality of the JPEG image to produce')
# Default JPEG progressive setting.
jpeg_progressive = Bool(True, desc='if the generated JPEG should be progressive')
# The light manager.
light_manager = Instance(light_manager.LightManager, record=True)
# Is the scene busy or not.
busy = Property(Bool, record=False)
########################################
# Events
# Lifecycle events: there are no opening/opened events since the
# control is actually created in __init__.
# The control is going to be closed.
closing = Event(record=False)
# The control has been closed.
closed = Event(record=False)
# Event fired when an actor is added to the scene.
actor_added = Event(record=False)
# Event fired when any actor is removed from the scene.
actor_removed = Event(record=False)
########################################
# Properties.
# The interactor used by the scene.
interactor = Property(Instance(tvtk.GenericRenderWindowInteractor))
# The render_window.
render_window = Property(Instance(tvtk.RenderWindow))
# The renderer.
renderer = Property(Instance(tvtk.Renderer))
# The camera.
camera = Property(Instance(tvtk.Camera))
# The control to mimic the Widget behavior.
control = Any
########################################
# Private traits.
# A recorder for script recording.
recorder = Instance(HasTraits, record=False, transient=True)
# Cached last camera state.
_last_camera_state = Any(transient=True)
_camera_observer_id = Int(transient=True)
_script_id = Str(transient=True)
# The renderer instance.
_renderer = Instance(tvtk.Renderer)
_renwin = Instance(tvtk.RenderWindow)
_interactor = Instance(tvtk.RenderWindowInteractor)
_camera = Instance(tvtk.Camera)
_busy_count = Int(0)
###########################################################################
# 'object' interface.
###########################################################################
def __init__(self, parent=None, **traits):
    """ Initializes the object.

    `parent` is the toolkit-specific parent widget; extra keyword
    arguments are applied as trait values by the HasTraits constructor.
    """
    # Base class constructor.
    super(TVTKScene, self).__init__(**traits)
    # Used to set the view of the scene.
    self._def_pos = 1
    # Create the toolkit-specific control (implemented by subclasses).
    self.control = self._create_control(parent)
    # Sync trait values with the freshly created render window.
    self._renwin.update_traits()
def __get_pure_state__(self):
    """Allows us to pickle the scene: returns the instance dict minus
    every attribute that holds a live VTK/UI object or transient
    bookkeeping, plus the camera (restored explicitly on unpickle)."""
    # These attributes are not picklable (VTK objects, listeners, etc.).
    transient = ('control', '_renwin', '_interactor', '_camera',
                 '_busy_count', '__sync_trait__', 'recorder',
                 '_last_camera_state', '_camera_observer_id',
                 '_script_id', '__traits_listener__')
    state = dict((key, value) for key, value in self.__dict__.items()
                 if key not in transient)
    # Additionally pickle the camera.
    state['camera'] = self.camera
    return state
def __getstate__(self):
    # Delegate to apptools' state_pickler, which knows how to serialize
    # the TVTK-related state (via __get_pure_state__).
    return state_pickler.dumps(self)

def __setstate__(self, str_state):
    # This method is unnecessary since this object will almost
    # never be pickled by itself and only via an object that
    # contains it, therefore __init__ will be called when the
    # scene is constructed. However, setstate is defined just for
    # completeness.
    state_pickler.set_state(self, state_pickler.loads_state(str_state))
###########################################################################
# 'event' interface.
###########################################################################
def _closed_fired(self):
    # Once the scene is closed, drop references that would otherwise keep
    # the lights and the interactor (and their VTK resources) alive.
    self.light_manager = None
    self._interactor = None
###########################################################################
# 'Scene' interface.
###########################################################################
def render(self):
    """ Force the scene to be rendered. Nothing is done if the
    `disable_render` trait is set to True."""
    if self.disable_render:
        return
    self._renwin.render()
def add_actors(self, actors):
    """ Adds a single actor or a tuple or list of actors to the
    renderer. The zoom is reset when these are the first props added
    to an empty scene."""
    renderer = self._renderer
    # Reset the zoom only if the scene previously had no actors/volumes.
    scene_was_empty = (len(renderer.actors) == 0 and
                       len(renderer.volumes) == 0)
    if hasattr(actors, '__iter__'):
        for one_actor in actors:
            renderer.add_actor(one_actor)
    else:
        renderer.add_actor(actors)
    self.actor_added = actors
    if scene_was_empty:
        self.reset_zoom()
    else:
        self.render()
def remove_actors(self, actors):
    """ Removes a single actor or a tuple or list of actors from
    the renderer."""
    renderer = self._renderer
    if hasattr(actors, '__iter__'):
        for one_actor in actors:
            renderer.remove_actor(one_actor)
    else:
        renderer.remove_actor(actors)
    self.actor_removed = actors
    self.render()
# Conevenience methods.
add_actor = add_actors
remove_actor = remove_actors
def add_widgets(self, widgets, enabled=True):
    """Adds a single 3D widget or a sequence of widgets to the renderer.
    If `enabled` is True each widget is also enabled once it is added."""
    # Normalize a single widget to a one-element sequence.
    if not hasattr(widgets, '__iter__'):
        widgets = [widgets]
    interactor = self._interactor
    for one_widget in widgets:
        one_widget.interactor = interactor
        one_widget.enabled = enabled
    self.render()
def remove_widgets(self, widgets):
    """Removes a single 3D widget or a sequence of widgets from the
    renderer."""
    # Normalize a single widget to a one-element sequence.
    if not hasattr(widgets, '__iter__'):
        widgets = [widgets]
    for one_widget in widgets:
        # Only detach widgets that are actually attached to an interactor.
        if one_widget.interactor is None:
            continue
        one_widget.enabled = False
        one_widget.interactor = None
    self.render()
def close(self):
    """Close the scene cleanly. This ensures that the scene is
    shutdown cleanly. This should be called if you are getting
    async errors when closing a scene from a UI. This is based on
    the observations of Charl Botha here:

    http://public.kitware.com/pipermail/vtkusers/2008-May/095291.html

    Note: the teardown order below is deliberate; reordering it can
    reintroduce the async-close errors described in that thread.
    """
    # Return if we are already closed.
    if self._renwin is None:
        return
    # Fire the "closing" event.
    self.closing = True
    # Disable any renders through traits listener callbacks.
    self.disable_render = True
    # Remove sync trait listeners (mirrors the syncs set up at creation).
    self.sync_trait('background', self._renderer, remove=True)
    self.sync_trait('parallel_projection', self.camera, remove=True)
    self.sync_trait('off_screen_rendering', self._renwin, remove=True)
    # Remove all the renderer's props.
    self._renderer.remove_all_view_props()
    # Set the renderwindow to release all resources and the OpenGL
    # context.
    self._renwin.finalize()
    # Disconnect the interactor from the renderwindow.
    self._interactor.render_window = None
    # Remove the reference to the render window.
    del self._renwin
    # Fire the "closed" event.
    self.closed = True
def x_plus_view(self):
"""View scene down the +X axis. """
self._update_view(self._def_pos, 0, 0, 0, 0, 1)
self._record_methods('x_plus_view()')
def x_minus_view(self):
"""View scene down the -X axis. """
self._update_view(-self._def_pos, 0, 0, 0, 0, 1)
self._record_methods('x_minus_view()')
def z_plus_view(self):
"""View scene down the +Z axis. """
self._update_view(0, 0, self._def_pos, 0, 1, 0)
self._record_methods('z_plus_view()')
def z_minus_view(self):
"""View scene down the -Z axis. """
self._update_view(0, 0, -self._def_pos, 0, 1, 0)
self._record_methods('z_minus_view()')
def y_plus_view(self):
"""View scene down the +Y axis. """
self._update_view(0, self._def_pos, 0, 1, 0, 0)
self._record_methods('y_plus_view()')
def y_minus_view(self):
"""View scene down the -Y axis. """
self._update_view(0, -self._def_pos, 0, 1, 0, 0)
self._record_methods('y_minus_view()')
def isometric_view(self):
"""Set the view to an iso-metric view. """
self._update_view(self._def_pos, self._def_pos, self._def_pos,
0, 0, 1)
self._record_methods('isometric_view()')
    def reset_zoom(self):
        """Reset the camera so everything in the scene fits in view,
        then redraw and record the call."""
        self._renderer.reset_camera()
        self.render()
        self._record_methods('reset_zoom()')
def save(self, file_name, size=None, **kw_args):
"""Saves rendered scene to one of several image formats
depending on the specified extension of the filename.
If an additional size (2-tuple) argument is passed the window
is resized to the specified size in order to produce a
suitably sized output image. Please note that when the window
is resized, the window may be obscured by other widgets and
the camera zoom is not reset which is likely to produce an
image that does not reflect what is seen on screen.
Any extra keyword arguments are passed along to the respective
image format's save method.
"""
ext = os.path.splitext(file_name)[1]
meth_map = {'.ps': 'ps', '.bmp': 'bmp', '.tiff': 'tiff',
'.png': 'png', '.jpg': 'jpg', '.jpeg': 'jpg',
'.iv': 'iv', '.wrl': 'vrml', '.vrml':'vrml',
'.oogl': 'oogl', '.rib': 'rib', '.obj': 'wavefront',
'.eps': 'gl2ps', '.pdf':'gl2ps', '.tex': 'gl2ps',
'.x3d': 'x3d', '.pov': 'povray'}
if ext.lower() not in meth_map.keys():
raise ValueError, \
'Unable to find suitable image type for given file extension.'
meth = getattr(self, 'save_' + meth_map[ext])
if size is not None:
orig_size = self.get_size()
self.set_size(size)
meth(file_name, **kw_args)
self.set_size(orig_size)
self._record_methods('save(%r, %r)'%(file_name, size))
else:
meth(file_name, **kw_args)
self._record_methods('save(%r)'%(file_name))
def save_ps(self, file_name):
"""Saves the rendered scene to a rasterized PostScript image.
For vector graphics use the save_gl2ps method."""
if len(file_name) != 0:
w2if = tvtk.WindowToImageFilter(read_front_buffer=
not self.off_screen_rendering)
w2if.magnification = self.magnification
self._lift()
w2if.input = self._renwin
ex = tvtk.PostScriptWriter()
ex.file_name = file_name
configure_input(ex, w2if)
self._exporter_write(ex)
def save_bmp(self, file_name):
"""Save to a BMP image file."""
if len(file_name) != 0:
w2if = tvtk.WindowToImageFilter(read_front_buffer=
not self.off_screen_rendering)
w2if.magnification = self.magnification
self._lift()
w2if.input = self._renwin
ex = tvtk.BMPWriter()
ex.file_name = file_name
configure_input(ex, w2if)
self._exporter_write(ex)
def save_tiff(self, file_name):
"""Save to a TIFF image file."""
if len(file_name) != 0:
w2if = tvtk.WindowToImageFilter(read_front_buffer=
not self.off_screen_rendering)
w2if.magnification = self.magnification
self._lift()
w2if.input = self._renwin
ex = tvtk.TIFFWriter()
ex.file_name = file_name
configure_input(ex, w2if)
self._exporter_write(ex)
def save_png(self, file_name):
"""Save to a PNG image file."""
if len(file_name) != 0:
w2if = tvtk.WindowToImageFilter(read_front_buffer=
not self.off_screen_rendering)
w2if.magnification = self.magnification
self._lift()
w2if.input = self._renwin
ex = tvtk.PNGWriter()
ex.file_name = file_name
configure_input(ex, w2if)
self._exporter_write(ex)
def save_jpg(self, file_name, quality=None, progressive=None):
"""Arguments: file_name if passed will be used, quality is the
quality of the JPEG(10-100) are valid, the progressive
arguments toggles progressive jpegs."""
if len(file_name) != 0:
if not quality and not progressive:
quality, progressive = self.jpeg_quality, self.jpeg_progressive
w2if = tvtk.WindowToImageFilter(read_front_buffer=
not self.off_screen_rendering)
w2if.magnification = self.magnification
self._lift()
w2if.input = self._renwin
ex = tvtk.JPEGWriter()
ex.quality = quality
ex.progressive = progressive
ex.file_name = file_name
configure_input(ex, w2if)
self._exporter_write(ex)
def save_iv(self, file_name):
"""Save to an OpenInventor file."""
if len(file_name) != 0:
ex = tvtk.IVExporter()
self._lift()
ex.input = self._renwin
ex.file_name = file_name
self._exporter_write(ex)
def save_vrml(self, file_name):
"""Save to a VRML file."""
if len(file_name) != 0:
ex = tvtk.VRMLExporter()
self._lift()
ex.input = self._renwin
ex.file_name = file_name
self._exporter_write(ex)
def save_oogl(self, file_name):
"""Saves the scene to a Geomview OOGL file. Requires VTK 4 to
work."""
if len(file_name) != 0:
ex = tvtk.OOGLExporter()
self._lift()
ex.input = self._renwin
ex.file_name = file_name
self._exporter_write(ex)
def save_rib(self, file_name, bg=0, resolution=None, resfactor=1.0):
"""Save scene to a RenderMan RIB file.
Keyword Arguments:
file_name -- File name to save to.
bg -- Optional background option. If 0 then no background is
saved. If non-None then a background is saved. If left alone
(defaults to None) it will result in a pop-up window asking
for yes/no.
resolution -- Specify the resolution of the generated image in
the form of a tuple (nx, ny).
resfactor -- The resolution factor which scales the resolution.
"""
if resolution is None:
# get present window size
Nx, Ny = self.render_window.size
else:
try:
Nx, Ny = resolution
except TypeError:
raise TypeError, \
"Resolution (%s) should be a sequence with two elements"%resolution
if len(file_name) == 0:
return
f_pref = os.path.splitext(file_name)[0]
ex = tvtk.RIBExporter()
ex.size = int(resfactor*Nx), int(resfactor*Ny)
ex.file_prefix = f_pref
ex.texture_prefix = f_pref + "_tex"
self._lift()
ex.render_window = self._renwin
ex.background = bg
if VTK_VER[:3] in ['4.2', '4.4']:
# The vtkRIBExporter is broken in respect to VTK light
# types. Therefore we need to convert all lights into
# scene lights before the save and later convert them
# back.
########################################
# Internal functions
def x3to4(x):
# convert 3-vector to 4-vector (w=1 -> point in space)
return (x[0], x[1], x[2], 1.0 )
def x4to3(x):
# convert 4-vector to 3-vector
return (x[0], x[1], x[2])
def cameralight_transform(light, xform, light_type):
# transform light by 4x4 matrix xform
origin = x3to4(light.position)
focus = x3to4(light.focal_point)
neworigin = xform.multiply_point(origin)
newfocus = xform.multiply_point(focus)
light.position = x4to3(neworigin)
light.focal_point = x4to3(newfocus)
light.light_type = light_type
########################################
save_lights_type=[]
for light in self.light_manager.lights:
save_lights_type.append(light.source.light_type)
# Convert lights to scene lights.
cam = self.camera
xform = tvtk.Matrix4x4()
xform.deep_copy(cam.camera_light_transform_matrix)
for light in self.light_manager.lights:
cameralight_transform(light.source, xform, "scene_light")
# Write the RIB file.
self._exporter_write(ex)
# Now re-convert lights to camera lights.
xform.invert()
for i,light in enumerate(self.light_manager.lights):
cameralight_transform(light.source, xform, save_lights_type[i])
# Change the camera position. Otherwise VTK would render
# one broken frame after the export.
cam.roll(0.5)
cam.roll(-0.5)
else:
self._exporter_write(ex)
def save_wavefront(self, file_name):
"""Save scene to a Wavefront OBJ file. Two files are
generated. One with a .obj extension and another with a .mtl
extension which contains the material proerties.
Keyword Arguments:
file_name -- File name to save to
"""
if len(file_name) != 0:
ex = tvtk.OBJExporter()
self._lift()
ex.input = self._renwin
f_pref = os.path.splitext(file_name)[0]
ex.file_prefix = f_pref
self._exporter_write(ex)
def save_gl2ps(self, file_name, exp=None):
"""Save scene to a vector PostScript/EPS/PDF/TeX file using
GL2PS. If you choose to use a TeX file then note that only
the text output is saved to the file. You will need to save
the graphics separately.
Keyword Arguments:
file_name -- File name to save to.
exp -- Optionally configured vtkGL2PSExporter object.
Defaults to None and this will use the default settings with
the output file type chosen based on the extention of the file
name.
"""
# Make sure the exporter is available.
if not hasattr(tvtk, 'GL2PSExporter'):
msg = "Saving as a vector PS/EPS/PDF/TeX file using GL2PS is "\
"either not supported by your version of VTK or "\
"you have not configured VTK to work with GL2PS -- read "\
"the documentation for the vtkGL2PSExporter class."
print msg
return
if len(file_name) != 0:
f_prefix, f_ext = os.path.splitext(file_name)
ex = None
if exp:
ex = exp
if not isinstance(exp, tvtk.GL2PSExporter):
msg = "Need a vtkGL2PSExporter you passed a "\
"%s"%exp.__class__.__name__
raise TypeError, msg
ex.file_prefix = f_prefix
else:
ex = tvtk.GL2PSExporter()
# defaults
ex.file_prefix = f_prefix
if f_ext == ".ps":
ex.file_format = 'ps'
elif f_ext == ".tex":
ex.file_format = 'tex'
elif f_ext == ".pdf":
ex.file_format = 'pdf'
else:
ex.file_format = 'eps'
ex.sort = 'bsp'
ex.compress = 1
ex.edit_traits(kind='livemodal')
self._lift()
ex.render_window = self._renwin
if ex.write3d_props_as_raster_image:
self._exporter_write(ex)
else:
ex.write()
def save_x3d(self, file_name):
"""Save scene to an X3D file (http://www.web3d.org/x3d/).
Keyword Arguments:
file_name -- File name to save to.
"""
# Make sure the exporter is available.
if not hasattr(tvtk, 'X3DExporter'):
msg = "Saving as a X3D file does not appear to be "\
"supported by your version of VTK."
print msg
return
if len(file_name) != 0:
ex = tvtk.X3DExporter()
ex.input = self._renwin
ex.file_name = file_name
ex.update()
ex.write()
def save_povray(self, file_name):
"""Save scene to a POVRAY (Persistance of Vision Raytracer),
file (http://www.povray.org).
Keyword Arguments:
file_name -- File name to save to.
"""
# Make sure the exporter is available.
if not hasattr(tvtk, 'POVExporter'):
msg = "Saving as a POVRAY file does not appear to be "\
"supported by your version of VTK."
print msg
return
if len(file_name) != 0:
ex = tvtk.POVExporter()
ex.input = self._renwin
if hasattr(ex, 'file_name'):
ex.file_name = file_name
else:
ex.file_prefix = os.path.splitext(file_name)[0]
ex.update()
ex.write()
    def get_size(self):
        """Return the size of the render window (as reported by the
        interactor)."""
        return self._interactor.size
    def set_size(self, size):
        """Set the size of the window.

        Both the interactor and the render window are resized so the
        two stay consistent."""
        self._interactor.size = size
        self._renwin.size = size
###########################################################################
# Properties.
###########################################################################
    def _get_interactor(self):
        """Property getter: the scene's tvtk RenderWindowInteractor."""
        return self._interactor
    def _get_render_window(self):
        """Property getter: the scene's render window."""
        return self._renwin
    def _get_renderer(self):
        """Property getter: the scene's renderer."""
        return self._renderer
    def _get_camera(self):
        """Property getter: the renderer's active camera."""
        return self._renderer.active_camera
    def _get_busy(self):
        # Property getter: True while at least one nested operation has
        # set `busy` (see `_set_busy` for the counting scheme).
        return self._busy_count > 0
def _set_busy(self, value):
"""The `busy` trait is either `True` or `False`. However,
this could be problematic since we could have two methods
`foo` and `bar that both set `scene.busy = True`. As soon as
`bar` is done it sets `busy` back to `False`. This is wrong
since the UI is still busy as `foo` is not done yet. We
therefore store the number of busy calls and either increment
it or decrement it and change the state back to `False` only
when the count is zero.
"""
bc = self._busy_count
if value:
bc += 1
else:
bc -= 1
bc = max(0, bc)
self._busy_count = bc
if bc == 1:
self.trait_property_changed('busy', False, True)
if bc == 0:
self.trait_property_changed('busy', True, False)
###########################################################################
# Non-public interface.
###########################################################################
    def _create_control(self, parent):
        """ Create the toolkit-specific control that represents the widget. """
        # Create the renderwindow.
        renwin = self._renwin = tvtk.RenderWindow()
        # If we are doing offscreen rendering we set the window size to
        # (1,1) so the window does not appear at all
        if self.off_screen_rendering:
            renwin.size = (1,1)
        renwin.set(point_smoothing=self.point_smoothing,
                   line_smoothing=self.line_smoothing,
                   polygon_smoothing=self.polygon_smoothing)
        # Create a renderer and add it to the renderwindow
        self._renderer = tvtk.Renderer()
        renwin.add_renderer(self._renderer)
        self._interactor = tvtk.RenderWindowInteractor(render_window=renwin)
        # Save a reference to our camera so it is not GC'd -- needed for
        # the sync_traits to work.
        self._camera = self.camera
        # Sync various traits between this object and the underlying
        # VTK objects, and re-render whenever a visual trait changes.
        # (These listeners are removed again in `close`.)
        self._renderer.background = self.background
        self.sync_trait('background', self._renderer)
        self._renderer.on_trait_change(self.render, 'background')
        self._camera.parallel_projection = self.parallel_projection
        self.sync_trait('parallel_projection', self._camera)
        renwin.off_screen_rendering = self.off_screen_rendering
        self.sync_trait('off_screen_rendering', self._renwin)
        self.render_window.on_trait_change(self.render, 'off_screen_rendering')
        self.render_window.on_trait_change(self.render, 'stereo_render')
        self.render_window.on_trait_change(self.render, 'stereo_type')
        self.camera.on_trait_change(self.render, 'parallel_projection')
        self._interactor.initialize()
        self._interactor.render()
        self.light_manager = light_manager.LightManager(self)
        if self.off_screen_rendering:
            # We want the default size to be the normal (300, 300).
            # Setting the size now should not resize the window if
            # offscreen is working properly in VTK.
            renwin.size = (300, 300)
        return self._interactor
def _lift(self):
"""Lift the window to the top. Useful when saving screen to an
image."""
return
def _exporter_write(self, ex):
"""Abstracts the exporter's write method."""
# Bumps up the anti-aliasing frames when the image is saved so
# that the saved picture looks nicer.
rw = self.render_window
aa_frames = rw.aa_frames
rw.aa_frames = self.anti_aliasing_frames
rw.render()
ex.update()
ex.write()
# Set the frames back to original setting.
rw.aa_frames = aa_frames
rw.render()
def _update_view(self, x, y, z, vx, vy, vz):
"""Used internally to set the view."""
camera = self.camera
camera.focal_point = 0.0, 0.0, 0.0
camera.position = x, y, z
camera.view_up = vx, vy, vz
self._renderer.reset_camera()
self.render()
def _disable_render_changed(self, val):
if not val and self._renwin is not None:
self.render()
def _record_methods(self, calls):
"""A method to record a simple method called on self. We need a
more powerful and less intrusive way like decorators to do this.
Note that calls can be a string with new lines in which case we
interpret this as multiple calls.
"""
r = self.recorder
if r is not None:
sid = self._script_id
for call in calls.split('\n'):
r.record('%s.%s'%(sid, call))
def _record_camera_position(self, vtk_obj=None, event=None):
"""Callback to record the camera position."""
r = self.recorder
if r is not None:
state = self._get_camera_state()
lcs = self._last_camera_state
if state != lcs:
self._last_camera_state = state
sid = self._script_id
for key, value in state:
r.record('%s.camera.%s = %r'%(sid, key, value))
r.record('%s.camera.compute_view_plane_normal()'%sid)
r.record('%s.render()'%sid)
def _get_camera_state(self):
c = self.camera
state = []
state.append(('position', list(c.position)))
state.append(('focal_point', list(c.focal_point)))
state.append(('view_angle', c.view_angle))
state.append(('view_up', list(c.view_up)))
state.append(('clipping_range', list(c.clipping_range)))
return state
    def _recorder_changed(self, r):
        """Trait handler for `recorder`.

        When a recorder is set we add an event handler so we can record
        the change to the camera position after each interaction; when
        it is cleared the observer is removed again.
        """
        iren = self._interactor
        if r is not None:
            self._script_id = r.get_script_id(self)
            # Route VTK's EndInteractionEvent through the messenger so
            # the bound callback below receives it.
            id = iren.add_observer('EndInteractionEvent',
                                   messenger.send)
            self._camera_observer_id = id
            i_vtk = tvtk.to_vtk(iren)
            messenger.connect(i_vtk, 'EndInteractionEvent',
                              self._record_camera_position)
        else:
            self._script_id = ''
            # NOTE(review): assumes a recorder was previously set so
            # `_camera_observer_id` exists -- confirm against callers.
            iren.remove_observer(self._camera_observer_id)
            i_vtk = tvtk.to_vtk(iren)
            messenger.disconnect(i_vtk, 'EndInteractionEvent',
                                 self._record_camera_position)
######################################################################
# `TVTKWindow` class.
######################################################################
class TVTKWindow(HasTraits):
    """A basic TVTK window class that can be used in the MayaVi engine
    for visualization without envisage/pyface etc. Technically we
    could just have used the `TVTKScene` class but we want to support
    the closing and activated events since they are used to notify the
    MayaVi engine if the window is closed or activated. In this case
    we do nothing but honour the interface.
    """
    # Event fired when the window is being closed.
    closing = Event
    # Event fired when the window is activated.
    activated = Event
    def __init__(self, **traits):
        """All the keyword arguments are passed on to the `TVTKScene`
        instance created."""
        self.scene = TVTKScene(**traits)
| {
"repo_name": "liulion/mayavi",
"path": "tvtk/pyface/tvtk_scene.py",
"copies": "2",
"size": "35476",
"license": "bsd-3-clause",
"hash": -1374724449480720000,
"line_mean": 37.0235798499,
"line_max": 112,
"alpha_frac": 0.5597868982,
"autogenerated": false,
"ratio": 4.122719349215572,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008515282687224295,
"num_lines": 933
} |
"""A tvtk pipeline browser.
An abstract `TreeGenerator` class defines the interface of a tree
generator. This class is responsible for generating the list of
children. Often tvtk object's children are collections of various
objects, some sequences and some simple objects. In order to provide
a unified interface to all children, all of these objects are wrapped
using the `CompositeIterable` which presents all children as a single
iterable.
`SimpleTreeGenerator` does not do extensive analysis of the passed
object in order to compute the children. `FullTreeGenerator` however
uses the algorithm that MayaVi-1.x uses and therefore generates a
large number of objects.
The `PipelineBrowser` class presents the view of the pipeline as a
tree. It allows one to specify the TreeGenerator instance. The
`TreeEditor` from the traits package is used to represent the view. A
`TVTKLeafNode` defines a node that has no children. A
`TVTKBranchNode` is a node that has children. The nodes basically
wrap up the tvtk object and present an interface suitable for the
TreeEditor.
TODO:
* When a node is selected, the actor involved could be highlighted.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import re
# Enthought library imports.
from traits.api import HasTraits, Property, Any, Instance, \
Trait, List, Str, Dict, Python
from traitsui.api import \
TreeEditor, TreeNodeObject, ObjectTreeNode, View, Item, Group
from traitsui.menu import Menu, Action
from tvtk.api import tvtk
from tvtk import messenger
from tvtk.tvtk_base import TVTKBase
from tvtk.tvtk_base_handler import TVTKBaseHandler
from tvtk.common import camel2enthought
######################################################################
# Utility functions.
######################################################################
def is_iterable(x):
    """Return True if `x` exposes an ``__iter__`` attribute."""
    try:
        x.__iter__
    except AttributeError:
        return False
    return True
def get_icon(object_name):
    """Return an appropriate icon image name for `object_name`.

    The name is lower-cased and matched against known suffixes; the
    longest matching suffix wins.  Returns the empty string when no
    icon is appropriate.
    """
    # The mapping from name suffixes to icons.
    icon_map = {'actor': 'actor.png',
                'camera': 'camera.png',
                'coordinate': 'coordinate.png',
                'filter': 'filter.png',
                'lookuptable': 'lookuptable.png',
                'mapper': 'mapper.png',
                'polydata': 'polydata.png',
                'property': 'property.png',
                'reader': 'reader.png',
                'renderer': 'renderer.png',
                'rendererwindowinteractor': 'rendererwindowinteractor.png',
                'source': 'source.png',
                'texture': 'texture.png',
                'window': 'window.png',
                'writer': 'writer.png',
                }
    # Lower case the name.
    name = object_name.lower()
    # Test longer (more specific) suffixes first.  Plain dict iteration
    # order made results like 'rendererwindowinteractor' vs 'actor'
    # depend on hash order and therefore non-deterministic.
    for key in sorted(icon_map, key=len, reverse=True):
        if name.endswith(key):
            return icon_map[key]
    # No valid icon for this object.
    return ''
######################################################################
# `TreeGenerator` class.
######################################################################
class TreeGenerator(HasTraits):
    """Abstract interface for the objects that generate the pipeline
    tree via the `get_children` method and friends."""
    def has_children(self, obj):
        """Return True if object `obj` has children."""
        raise NotImplementedError
    def get_children(self, obj):
        """Return a dictionary containing the children of the object
        `obj`."""
        raise NotImplementedError
    def get_node(self, obj):
        """Return a node object representing `obj`."""
        raise NotImplementedError
    def get_nodes(self, menu):
        """Return a list of nodes for the tree editor.  The menu
        entries to use for the nodes are passed as `menu`."""
        raise NotImplementedError
######################################################################
# `SimpleTreeGenerator` class.
######################################################################
class SimpleTreeGenerator(TreeGenerator):
    """This particular class generates a simple pipeline
    representation. Not every possible object is obtained."""
    def has_children(self, obj):
        """Return True if the object has children, False if not.  This
        is very specific to tvtk objects."""
        # Render windows, renderers and collections always have kids.
        if isinstance(obj, (tvtk.RenderWindow, tvtk.Renderer,
                            tvtk.Collection)):
            return True
        # Any of these attributes implies at least one child object.
        for attribute in ['source', 'get_input', 'input', 'mapper', 'property',
                          'texture', 'text_property', 'volume_property',
                          'lookup_table', 'producer_port', 'producer']:
            if hasattr(obj, attribute):
                return True
        return False
    def get_children(self, obj):
        """Returns the child objects of a particular tvtk object in a
        dictionary, the keys are the trait names. This is used to
        generate the tree in the browser."""
        kids = {}
        # Store `x` in `kids` under `key`, keeping only TVTK objects
        # (or non-empty lists of them); None is stored as-is.
        def _add_kid(key, x):
            if x is None:
                kids[key] = None
            else:
                if type(x) in (type([]), type(())):
                    x1 = [i for i in x if isinstance(i, TVTKBase)]
                    if x1:
                        kids[key] = x1
                elif isinstance(x, TVTKBase):
                    kids[key] = x
        # Render windows and renderers are special-cased: their
        # children come from fixed attributes.
        if isinstance(obj, tvtk.RenderWindow):
            return {'renderers':obj.renderers}
        elif isinstance(obj, tvtk.Renderer):
            if hasattr(obj, 'view_props'):
                return {'view_props':obj.view_props,
                        'active_camera':obj.active_camera}
            else:
                return {'props':obj.props,
                        'active_camera':obj.active_camera}
        #if isinstance(obj, tvtk.Collection):
        #    _add_kid(obj)
        # Misc. properties.
        for attribute in ['mapper', 'property', 'texture',
                          'text_property', 'volume_property',
                          'lookup_table', 'producer']:
            if hasattr(obj, attribute):
                _add_kid(attribute, getattr(obj, attribute))
        # Check for sources and inputs.
        if hasattr(obj, 'number_of_sources'):
            srcs = [obj.get_source(i) \
                    for i in range(obj.number_of_sources)]
            _add_kid('source', srcs)
        elif hasattr(obj, 'source'):
            _add_kid('source', obj.source)
        if hasattr(obj, 'get_input'):
            inputs = []
            if hasattr(obj, 'number_of_input_ports'):
                if obj.number_of_input_ports:
                    inputs = [obj.get_input(i) \
                              for i in range(obj.number_of_input_ports)]
            else:
                inputs = [obj.get_input(i) \
                          for i in range(obj.number_of_inputs)]
            _add_kid('input', inputs)
        elif hasattr(obj, 'input'):
            _add_kid('input', obj.input)
        if hasattr(obj, 'producer_port'):
            _add_kid('producer_port', obj.producer_port)
        return kids
    def get_node(self, obj):
        """Get a node object representing the object passed."""
        # Branch node when the object has children, leaf otherwise.
        if self.has_children(obj):
            return TVTKBranchNode(object=obj, tree_generator=self)
        else:
            return TVTKLeafNode(object=obj)
    def get_nodes(self, menu):
        """Returns a list of nodes for the tree editor. The menu
        entries to use are given as `menu`"""
        nodes = [ObjectTreeNode(node_for=[TVTKBranchNode],
                                view=View(Group(Item('object', style='custom'),
                                                show_labels=False)),
                                auto_open=False,
                                children='children', label='name', menu=menu,
                                rename=False, delete=False, copy=False,
                                insert=False),
                 ObjectTreeNode(node_for=[TVTKLeafNode],
                                view=View(Group(Item('object', style='custom'),
                                                show_labels=False)),
                                auto_open=False,
                                label='name', menu=menu, rename=False,
                                delete=False, copy=False, insert=False),
                 ObjectTreeNode(node_for=[TVTKCollectionNode],
                                auto_open=True, children='children',
                                label='name', menu=menu, rename=False,
                                delete=False, copy=False, insert=False),
                 ]
        return nodes
######################################################################
# `FullTreeGenerator` class.
######################################################################
class FullTreeGenerator(SimpleTreeGenerator):
    """This particular class picks up a lot more children in the
    pipeline and is similar to the code used in MayaVi-1.x's pipeline
    browser."""
    def __init__(self, **traits):
        super(FullTreeGenerator, self).__init__(**traits)
        # Counter used by `_get_methods` to avoid recursing forever
        # through a transform's `Inverse` attribute.
        self.last_transform = 0
    def get_children(self, obj):
        """Returns the child objects of a particular tvtk object in a
        dictionary, the keys are the trait names. This is used to
        generate the tree in the browser."""
        vtk_obj = tvtk.to_vtk(obj)
        methods = self._get_methods(vtk_obj)
        kids = {}
        # Store `x` in `kids` under `key`, keeping only TVTK objects
        # (or lists/iterables containing them); None is stored as-is.
        def _add_kid(key, x):
            if x is None:
                kids[key] = None
            else:
                if type(x) in (type([]), type(())):
                    x1 = [i for i in x if isinstance(i, TVTKBase)]
                    if x1:
                        kids[key] = x1
                elif isinstance(x, TVTKBase):
                    if hasattr(x, '__iter__'):
                        # Don't add iterable objects that contain non
                        # acceptable nodes
                        if len(list(x)) and isinstance(list(x)[0], TVTKBase):
                            kids[key] = x
                    else:
                        kids[key] = x
        # Add every child discovered from the parsed method list.
        for method in methods:
            attr = camel2enthought(method[0])
            if hasattr(obj, attr):
                _add_kid(attr, getattr(obj, attr))
        # Check for sources and inputs.
        if hasattr(obj, 'number_of_sources'):
            srcs = [obj.get_source(i) \
                    for i in range(obj.number_of_sources)]
            _add_kid('source', srcs)
        elif hasattr(obj, 'source'):
            _add_kid('source', obj.source)
        if hasattr(obj, 'get_input'):
            inputs = []
            if hasattr(obj, 'number_of_input_ports'):
                if obj.number_of_input_ports:
                    # Sometimes not all the inputs can be retrieved using
                    # 'get_input', as they may be sources (for instance
                    # the ProbeFilter).
                    inputs = list()
                    for i in range(obj.number_of_input_ports):
                        try:
                            inputs.append(obj.get_input(i))
                        except TypeError:
                            pass
                    if not inputs:
                        inputs = [obj.get_input()]
            else:
                inputs = [obj.get_input(i) \
                          for i in range(obj.number_of_inputs)]
            _add_kid('input', inputs)
        elif hasattr(obj, 'input'):
            _add_kid('input', obj.input)
        if hasattr(obj, 'producer_port'):
            _add_kid('producer_port', obj.producer_port)
        return kids
    def has_children(self, obj):
        """Return True if the object has children, False if not.  This
        is very specific to tvtk objects."""
        if isinstance(obj, (tvtk.RenderWindow, tvtk.Renderer,
                            tvtk.Collection)):
            return True
        for attribute in ['source', 'get_input', 'input', 'mapper', 'property',
                          'texture', 'text_property', 'volume_property',
                          'lookup_table', 'producer_port', 'producer']:
            if hasattr(obj, attribute):
                return True
        # FIXME: This is inefficient. We probably should cache the
        # get_children call.
        if self.get_children(obj):
            return True
        return False
    ###########################################################################
    # Non-public interface.
    ###########################################################################
    def _get_methods(self, vtk_obj):
        """Obtain the various methods from the passed object.

        Works by parsing the `str` representation of the underlying VTK
        object -- a hack, but one that has served MayaVi-1.x for a very
        long time.  Returns a list of ``[name, value]`` pairs.
        """
        def _remove_method(name, methods, method_names):
            """Removes methods if they have a particular name."""
            try:
                idx = method_names.index(name)
            except ValueError:
                pass
            else:
                del methods[idx], method_names[idx]
            return methods, method_names
        # The following code basically gets the 'str' representation
        # of the VTK object and parses it to obtain information about
        # the object's children. It is a hack but has worked well for
        # a *very* long time with MayaVi-1.x and before.
        # Oops, this isn't a VTK object.
        if not hasattr(vtk_obj, 'GetClassName'):
            return []
        methods = str(vtk_obj)
        methods = methods.split("\n")
        del methods[0]
        # Keep only the first level of indented "Name: value" lines.
        patn = re.compile(" \S")
        for method in methods[:]:
            if patn.match(method):
                if method.find(":") == -1:
                    methods.remove(method)
                elif method[1].find("none") > -1:
                    methods.remove(method)
            else:
                methods.remove(method)
        # Props/Prop is deprecated in more recent VTK releases.
        for method in methods[:]:
            if method.strip()[:6] == "Props:":
                if hasattr(vtk_obj, "GetViewProps"):
                    methods.remove(method)
                    methods.append("ViewProps: ")
            elif method.strip()[:5] == "Prop:":
                if hasattr(vtk_obj, "GetViewProp"):
                    methods.remove(method)
                    methods.append("ViewProp: ")
        # Split each "Name: value" line into a [name, value] pair.
        method_names = []
        for i in range(0, len(methods)):
            strng = methods[i].replace(" ", "")
            methods[i] = strng.split(":")
            method_names.append(methods[i][0])
        # Interesting attributes the parsed repr does not expose.
        if re.match("vtk\w*Renderer", vtk_obj.GetClassName()):
            methods.append(["ActiveCamera", ""])
        if re.match("vtk\w*Assembly", vtk_obj.GetClassName()):
            methods.append(["Parts", ""])
            methods.append(["Volumes", ""])
            methods.append(["Actors", ""])
        # A transform's Inverse is itself a transform; only follow it
        # one level deep to avoid an endless chain in the tree.
        if vtk_obj.IsA('vtkAbstractTransform'):
            if self.last_transform > 0:
                _remove_method('Inverse', methods, method_names)
            else:
                self.last_transform += 1
        else:
            self.last_transform = 0
        # Some of these objects are removed because they aren't useful
        # in the browser.  Source and Input are checked for separately
        # in `get_children`, so they aren't needed here either.
        for name in('Output', 'FieldData', 'CellData', 'PointData',
                    'Source', 'Input', 'ExtentTranslator',
                    'Interactor', 'Lights', 'Information', 'Executive'):
            _remove_method(name, methods, method_names)
        return methods
######################################################################
# `CompositeIterable` class.
######################################################################
class CompositeIterable(HasTraits):
    """Treats a bunch of disparate objects as one single iterable.

    Every iterated object is wrapped with a suitable node class (via
    the tree generator) so it can be used directly in a Traits tree
    editor.
    """

    # Generator used to wrap each child in an appropriate tree node.
    tree_generator = Instance(TreeGenerator)

    def __init__(self, args, **traits):
        super(CompositeIterable, self).__init__(**traits)
        self.args = args

    def __iter__(self):
        wrap = self.tree_generator.get_node
        for arg in self.args:
            if is_iterable(arg):
                for element in arg:
                    yield wrap(element)
            else:
                yield wrap(arg)

    def __len__(self):
        # Iterable members contribute their own length; scalars count 1.
        return sum(len(arg) if is_iterable(arg) else 1
                   for arg in self.args)
######################################################################
# `TVTKLeafNode` class.
######################################################################
class TVTKLeafNode(TreeNodeObject):
    """Represents a leaf (childless node) in the tree view."""

    # The tvtk object being wrapped.
    object = Instance(TVTKBase)

    # Name to show on the view.
    name = Property

    # Work around problem with HasPrivateTraits.
    __ = Python

    def __hash__(self):
        # Hash on the underlying VTK object so distinct wrappers of the
        # same object hash alike.
        return hash(tvtk.to_vtk(self.object))

    def _get_name(self):
        # Property getter: display the wrapped object's class name.
        return self.object.__class__.__name__

    ######################################################################
    # `TreeNodeObject` interface
    ######################################################################
    def tno_get_icon(self, node, is_expanded):
        """Return the icon for this node, falling back to the inherited
        default when no specific icon is known."""
        icon = get_icon(self.name)
        return icon or super(TVTKLeafNode, self).tno_get_icon(
            node, is_expanded)
######################################################################
# `TVTKBranchNode` class.
######################################################################
class TVTKBranchNode(TreeNodeObject):
    """Represents a branch in the tree view. The `children` trait
    produces an iterable that represents the children of the branch.
    """
    # The tvtk object being wrapped.
    object = Instance(TVTKBase)
    # Children of the object.
    children = Property
    # Name to show on the view.
    name = Property
    # Tree generator to use.
    tree_generator = Instance(TreeGenerator)
    # Cache of children.
    children_cache = Dict
    # Work around problem with HasPrivateTraits.
    __ = Python

    def __init__(self, **traits):
        super(TVTKBranchNode, self).__init__(**traits)

    def __del__(self):
        # Best-effort cleanup: during interpreter shutdown the attributes
        # or modules this relies on may already be gone.  Narrowed from a
        # bare `except:` so SystemExit/KeyboardInterrupt are not swallowed.
        try:
            self._remove_listeners()
        except Exception:
            pass

    def __hash__(self):
        return hash(tvtk.to_vtk(self.object))

    def _get_children_from_cache(self):
        return [x for x in self.children_cache.values() if x is not None]

    def _create_children(self):
        """Query the tree generator for children and start listening
        for changes to them."""
        kids = self.tree_generator.get_children(self.object)
        self.children_cache = kids
        self._setup_listeners()

    def _setup_listeners(self):
        """Hook a change notification for every cached child so the tree
        refreshes when the underlying pipeline changes."""
        obj = self.object
        kids = self.children_cache
        for key, val in kids.items():
            if isinstance(val, tvtk.Collection):
                vtk_obj = tvtk.to_vtk(val)
                messenger.connect(vtk_obj, 'ModifiedEvent',
                                  self._notify_children)
            else:
                obj.on_trait_change(self._notify_children, key)

    def _remove_listeners(self):
        """Undo the connections made by `_setup_listeners`."""
        obj = self.object
        kids = self.children_cache
        for key, val in kids.items():
            if isinstance(val, tvtk.Collection):
                vtk_obj = tvtk.to_vtk(val)
                messenger.disconnect(vtk_obj, 'ModifiedEvent',
                                     self._notify_children)
            else:
                obj.on_trait_change(self._notify_children, key, remove=True)

    # Backwards-compatible aliases for the previously misspelled private
    # method names, in case external code ever called them.
    _setup_listners = _setup_listeners
    _remove_listners = _remove_listeners

    def _notify_children(self, obj=None, name=None, old=None, new=None):
        """Rebuild the child cache and fire a `children` property change."""
        old_val = self._get_children_from_cache()
        self._remove_listeners()
        self._create_children()
        new_val = self._get_children_from_cache()
        self.trait_property_changed('children', old_val, new_val)

    def _get_children(self):
        if not self.children_cache:
            self._create_children()
        kids = self._get_children_from_cache()
        tg = self.tree_generator
        return CompositeIterable(kids, tree_generator=tg)

    def _get_name(self):
        return self.object.__class__.__name__

    ######################################################################
    # `TreeNodeObject` interface
    ######################################################################
    def tno_get_icon(self, node, is_expanded):
        """ Returns the icon for a specified object.
        """
        icon = get_icon(self.name)
        if icon:
            return icon
        else:
            return super(TVTKBranchNode, self).tno_get_icon(node, is_expanded)
######################################################################
# `TVTKCollectionNode` class.
######################################################################
class TVTKCollectionNode(TreeNodeObject):
    """Represents a collection of typically unconnected roots in the
    tree view.
    """

    # List of child nodes.
    object = List(TVTKBase)

    # Children of the object (iterable over the wrapped roots).
    children = Property

    # Name to show on the view.
    name = Str

    # Tree generator to use.
    tree_generator = Instance(TreeGenerator)

    # Work around problem with HasPrivateTraits.
    __ = Python

    def __init__(self, **traits):
        super(TVTKCollectionNode, self).__init__(**traits)

    def _get_children(self):
        # Wrap every root object as a tree node via the generator.
        return CompositeIterable(self.object,
                                 tree_generator=self.tree_generator)
######################################################################
# `CloseHandler` class.
######################################################################
class UICloseHandler(TVTKBaseHandler):
    """This class cleans up after the UI for the object is closed."""

    # The browser associated with this UI.
    browser = Any

    def close(self, info, is_ok):
        """This method is invoked when the user closes the UI.

        Disconnects the browser's render listener that was attached when
        the view was opened, then allows the close to proceed.
        """
        info.object.on_trait_change(self.browser.render, remove=True)
        return True
######################################################################
# `PipelineBrowser` class.
######################################################################
class PipelineBrowser(HasTraits):
    """Shows a tree view of the VTK pipeline(s) of one or more render
    windows, and lets the user open a traits editor for any node by
    double-clicking it."""

    # The tree generator to use.
    tree_generator = Trait(FullTreeGenerator(),
                           Instance(TreeGenerator))

    # The TVTK render window(s) associated with this browser.
    renwins = List

    # The root object to view in the pipeline. If None (default), the
    # root object is the render_window of the Scene instance passed at
    # object instantiation time.
    root_object = List(TVTKBase)

    # Private traits.
    # The root of the tree to display.
    _root = Any

    ###########################################################################
    # `object` interface.
    ###########################################################################
    def __init__(self, renwin=None, **traits):
        """Initializes the object.

        Parameters
        ----------

        - renwin: `Scene` instance. Defaults to None.

          This may be passed in addition to the renwins attribute
          which can be a list of scenes.
        """
        super(PipelineBrowser, self).__init__(**traits)
        self.ui = None
        self.view = None
        if renwin:
            self.renwins.append(renwin)

        # Build the initial tree root before constructing the editor that
        # displays it.
        self._root_object_changed(self.root_object)
        # Context menu offered on the tree editor.
        menu = Menu(Action(name='Refresh', action='editor.update_editor'),
                    Action(name='Expand all', action='editor.expand_all'))
        self.menu = menu

        nodes = self.tree_generator.get_nodes(menu)

        self.tree_editor = TreeEditor(nodes=nodes,
                                      editable=False,
                                      orientation='vertical',
                                      hide_root=True,
                                      on_dclick=self._on_dclick)
        self.view = View(Group(Item(name='_root',
                                    editor=self.tree_editor,
                                    resizable=True),
                               show_labels=False,
                               show_border=False,
                               orientation='vertical'),
                         title='Pipeline browser',
                         help=False,
                         resizable=True, undo=False, revert=False,
                         width=.3, height=.3)

    ###########################################################################
    # `PipelineBrowser` interface.
    ###########################################################################
    def show(self, parent=None):
        """Show the tree view if not already show. If optional
        `parent` widget is passed, the tree is displayed inside the
        passed parent widget."""
        # If UI already exists, raise it and return.
        if self.ui and self.ui.control:
            try:
                # Raise() is wx-specific; other toolkits lack it, hence
                # the AttributeError guard.
                self.ui.control.Raise()
            except AttributeError:
                pass
            else:
                return
        else:
            # No active ui, create one.
            if parent:
                self.ui = self.view.ui(self, parent=parent, kind='subpanel')
            else:
                self.ui = self.view.ui(self, parent=parent)

    def update(self):
        """Update the tree view."""
        # This is a hack.
        if self.ui and self.ui.control:
            try:
                ed = self.ui._editors[0]
                ed.update_editor()
                self.ui.control.Refresh()
            except (AttributeError, IndexError):
                # UI may be partially torn down or have no editors yet.
                pass

    # Another name for update.
    refresh = update

    def render(self):
        """Calls render on all render windows associated with this
        browser."""
        for rw in self.renwins:
            rw.render()

    ###########################################################################
    # Non-public interface.
    ###########################################################################
    def _make_default_root(self):
        # Default tree root: one collection node wrapping every scene's
        # render window.
        tree_gen = self.tree_generator
        objs = [x.render_window for x in self.renwins]
        node = TVTKCollectionNode(object=objs, name="Root",
                                  tree_generator=tree_gen)
        return node

    def _tree_generator_changed(self, tree_gen):
        """Traits event handler."""
        # Rebuild the root node using the new generator, preserving the
        # currently displayed root objects if there are any.
        if self._root:
            root_obj = self._root.object
        else:
            root_obj = self.root_object
        if root_obj:
            ro = root_obj
            if not hasattr(root_obj, '__len__'):
                ro = [root_obj]
            self._root = TVTKCollectionNode(object=ro,
                                            name="Root",
                                            tree_generator=tree_gen)
        else:
            self._root = self._make_default_root()
        # The editor needs fresh node definitions from the new generator.
        self.tree_editor.nodes = tree_gen.get_nodes(self.menu)
        self.update()

    def _root_object_changed(self, root_obj):
        """Trait handler called when the root object is assigned to."""
        tg = self.tree_generator
        if root_obj:
            self._root = TVTKCollectionNode(object=root_obj, name="Root",
                                            tree_generator=tg)
        else:
            self._root = self._make_default_root()
            # Keep root_object in sync with the default root we created.
            self.root_object = self._root.object
        self.update()

    def _root_object_items_changed(self, list_event):
        """Trait handler called when the items of the list change."""
        self._root_object_changed(self.root_object)

    def _on_dclick(self, obj):
        """Callback that is called when nodes are double-clicked."""
        if hasattr(obj, 'object') and hasattr(obj.object, 'edit_traits'):
            object = obj.object
            view = object.trait_view()
            # Install a handler so closing the editor disconnects the
            # render listener attached below.
            view.handler = UICloseHandler(browser=self)
            object.on_trait_change(self.render)
            ui = object.edit_traits(view=view)
ui = object.edit_traits(view=view)
######################################################################
# Test cases.
######################################################################
def main(instantiate_gui=True):
    """Simple test case: show a cone in an ivtk viewer and attach a
    pipeline browser to its scene.  Returns (viewer, browser, actor)."""
    from tvtk.tools import ivtk

    viewer = ivtk.viewer(browser=False, instantiate_gui=instantiate_gui)
    cone = tvtk.ConeSource()
    mapper = tvtk.PolyDataMapper(input=cone.output)
    actor = tvtk.Actor(mapper=mapper)
    viewer.scene.add_actor(actor)
    viewer.scene.reset_zoom()

    browser = PipelineBrowser(viewer.scene)
    browser.show()
    return viewer, browser, actor
if __name__ == '__main__':
    from pyface.api import GUI
    # Create the GUI before building the scene so the widgets have an
    # application to attach to; the viewer instantiates its own windows,
    # hence instantiate_gui=False.
    gui = GUI()
    main(instantiate_gui=False)
    gui.start_event_loop()
| {
"repo_name": "alexandreleroux/mayavi",
"path": "tvtk/pipeline/browser.py",
"copies": "3",
"size": "29233",
"license": "bsd-3-clause",
"hash": 9132788576032677000,
"line_mean": 35.54125,
"line_max": 79,
"alpha_frac": 0.502445866,
"autogenerated": false,
"ratio": 4.592772977219167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6595218843219167,
"avg_score": null,
"num_lines": null
} |
""" A TVTK render window scene UI plugin. """
# Enthought library imports.
from envisage.api import Plugin
from traits.api import List
class SceneUIPlugin(Plugin):
    """ A TVTK render window scene UI plugin.

    This is the plugin that contributes actions, menus, preferences pages
    etc.
    """

    # Extension point Ids.
    ACTION_SETS = 'envisage.ui.workbench.action_sets'
    PREFERENCES_PAGES = 'envisage.ui.workbench.preferences_pages'

    #### 'IPlugin' interface ##################################################

    # The plugin's name (suitable for displaying to the user).
    name = 'TVTK Scene UI Plugin'

    # Our ID.
    id = 'tvtk.scene_ui'

    #### Extension points offered by this plugin ##############################

    # None.

    #### Contributions to extension points made by this plugin ################

    # Action sets contributed to the workbench UI.
    action_sets = List(contributes_to=ACTION_SETS)

    def _action_sets_default(self):
        """ Trait initializer. """
        # Imported lazily so UI code is only pulled in when the workbench
        # actually asks for the contribution.
        from tvtk.plugins.scene.ui.scene_ui_action_set import (
            SceneUIActionSet
        )
        return [SceneUIActionSet]

    # Preference pages contributed to the workbench.
    preferences_pages = List(contributes_to=PREFERENCES_PAGES)

    def _preferences_pages_default(self):
        """ Trait initializer. """
        # Lazy import for the same reason as above.
        from tvtk.plugins.scene.ui.scene_preferences_page import (
            ScenePreferencesPage
        )
        return [ScenePreferencesPage]
#### EOF ######################################################################
| {
"repo_name": "liulion/mayavi",
"path": "tvtk/plugins/scene/ui/scene_ui_plugin.py",
"copies": "5",
"size": "1491",
"license": "bsd-3-clause",
"hash": -6605028382514468000,
"line_mean": 25.1578947368,
"line_max": 79,
"alpha_frac": 0.5761234071,
"autogenerated": false,
"ratio": 4.176470588235294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7252593995335295,
"avg_score": null,
"num_lines": null
} |
""" A TVTK scene editor. """
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from apptools.preferences.api import get_default_preferences
from tvtk.pyface.tvtk_scene import TVTKScene
from tvtk.pyface.api import DecoratedScene
from pyface.workbench.api import Editor
from traits.api import Instance
#### Handy functions ##########################################################
def _make_id_generator():
    """ Return an ever-increasing number useful for creating unique Ids. """
    counter = 1
    while True:
        yield counter
        counter += 1

# Module-level shared generator instance; callers obtain the next unique
# id via next(_id_generator).
_id_generator = _make_id_generator()
class SceneEditor(Editor):
    """ A TVTK scene editor. """

    #### 'SceneEditor' interface ##############################################

    # The TVTK scene object.
    scene = Instance(TVTKScene)

    ###########################################################################
    # 'IWorkbenchPart' interface.
    ###########################################################################

    #### Trait initializers ###################################################

    def _id_default(self):
        """ Trait initializer. """
        return self.name

    def _name_default(self):
        """ Trait initializer. """
        # Each editor instance gets a unique, monotonically increasing
        # number from the module-level generator.
        return 'TVTK Scene %d' % (next(_id_generator))

    #### Methods ##############################################################

    def create_control(self, parent):
        """ Create the toolkit-specific control that represents the editor. """
        # We hold a reference to the scene itself to make sure it does not get
        # garbage collected (because we only return the scene's 'control' not
        # the scene itself). The scene is also referenced by the scene manager.
        self.scene = self._create_decorated_scene(parent)
        self.scene.render()
        return self.scene.control

    def destroy_control(self):
        """ Destroy the toolkit-specific control that represents the
        editor.
        """
        if self.scene is not None:
            # Close the scene to cleanly shut it down.
            self.scene.close()
        # Call the parent method.
        return super(SceneEditor, self).destroy_control()

    ###########################################################################
    # Private interface.
    ###########################################################################

    def _create_decorated_scene(self, parent):
        """ Create a new decorated scene configured from the stored
        preferences. """
        pref = get_default_preferences()
        # NOTE(review): preference values are stored as strings and turned
        # into Python objects with `eval`.  Preferences are local trusted
        # config here, but `ast.literal_eval` would be safer -- confirm
        # before changing.
        stereo = eval(pref.get('tvtk.scene.stereo'))
        scene = DecoratedScene(parent, stereo=stereo)

        # Set the scene's traits to preference values.
        scene.magnification = \
            eval(pref.get('tvtk.scene.magnification'))
        fg = eval(pref.get('tvtk.scene.foreground_color'))
        bg = eval(pref.get('tvtk.scene.background_color'))
        scene.foreground = fg
        scene.background = bg
        # FIXME: This seems necessary for some strange reason, if not
        # the actual background of the renderer never gets set even
        # though the renderer and the scene's background are synced.
        scene.renderer.background = scene.background
        return scene
#### EOF ######################################################################
| {
"repo_name": "dmsurti/mayavi",
"path": "tvtk/plugins/scene/scene_editor.py",
"copies": "2",
"size": "3381",
"license": "bsd-3-clause",
"hash": 1895553751413019100,
"line_mean": 31.5096153846,
"line_max": 79,
"alpha_frac": 0.5226264419,
"autogenerated": false,
"ratio": 4.722067039106145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6244693481006145,
"avg_score": null,
"num_lines": null
} |
"""A two dimensional table having row and column headers."""
from __future__ import print_function
from __future__ import absolute_import
import copy
from .display import H2ODisplay
from .utils.shared_utils import _is_list_of_lists, can_use_pandas
class H2OTwoDimTable(object):
    """A class representing an 2D table (for pretty printing output).

    `cell_values` holds the data row-major; `col_header` names the columns
    and `col_types` gives the H2O type string of each column.
    """

    def __init__(self, row_header=None, col_header=None, col_types=None,
                 table_header=None, raw_cell_values=None,
                 col_formats=None, cell_values=None, table_description=None):
        self.row_header = row_header
        self.col_header = col_header
        self.col_types = col_types
        self.table_header = table_header
        # Prefer pre-parsed values; otherwise coerce the raw column-major
        # values according to `col_types`.
        self.cell_values = cell_values if cell_values else self._parse_values(raw_cell_values, col_types)
        self.col_formats = col_formats
        self.table_description = table_description

    def as_data_frame(self):
        """Return the table as a pandas DataFrame when pandas is available,
        otherwise return self."""
        if can_use_pandas():
            import pandas
            pandas.options.display.max_colwidth = 70
            return pandas.DataFrame(self.cell_values, columns=self.col_header)
        return self

    def show(self, header=True):
        """Pretty-print the table; tables longer than 20 rows are shown
        truncated to the first and last 5 rows."""
        print()
        if header:
            print(self.table_header + ":", end=' ')
            if self.table_description: print(self.table_description)
        print()
        table = copy.deepcopy(self.cell_values)
        nr = 0
        # only set if we truly have multiple rows... not just one long row :)
        if _is_list_of_lists(table): nr = len(table)
        if nr > 20:  # create a truncated view of the table, first/last 5 rows
            trunc_table = []
            trunc_table += [v for v in table[:5]]
            trunc_table.append(["---"] * len(table[0]))
            trunc_table += [v for v in table[(nr - 5):]]
            table = trunc_table
        H2ODisplay(table, self.col_header, numalign="left", stralign="left")
        if nr > 20 and can_use_pandas(): print('\nSee the whole table with table.as_data_frame()')

    def __repr__(self):
        # Printing happens as a side effect; the returned repr is empty.
        self.show()
        return ""

    def _parse_values(self, values, types):
        """Coerce the column-major `values` in place (ints/floats per
        `types`, None -> "") and return them transposed to row-major."""
        if self.col_header[0] is None:
            self.col_header = self.col_header[1:]
            types = types[1:]
            values = values[1:]
        for col_index, column in enumerate(values):
            for row_index, row_value in enumerate(column):
                if types[col_index] == 'integer':
                    values[col_index][row_index] = "" if row_value is None else int(float(row_value))
                elif types[col_index] in ['double', 'float', 'long']:
                    values[col_index][row_index] = "" if row_value is None else float(row_value)
                else:  # string?
                    continue
        return list(zip(*values))  # transpose the values! <3 splat ops

    def __getitem__(self, item):
        if item in self.col_header:  # single col selection returns list
            return list(zip(*self.cell_values))[self.col_header.index(item)]
        elif isinstance(item, slice):  # row selection if item is slice returns H2OTwoDimTable
            # BUG FIX: this used `xrange`, which does not exist on Python 3
            # and raised NameError here; `range` works on both versions.
            self.cell_values = [self.cell_values[ii] for ii in range(*item.indices(len(self.cell_values)))]
            return self
        elif isinstance(item, list) and set(item).issubset(self.col_header):  # multiple col selection returns list of cols
            return [list(zip(*self.cell_values))[self.col_header.index(i)] for i in item]
        else:
            raise TypeError('can not support getting item for ' + str(item))

    def __setitem__(self, key, value):
        cols = list(zip(*self.cell_values))
        if len(cols[0]) != len(value): raise ValueError('value must be same length as columns')
        if key not in self.col_header:
            self.col_header.append(key)
            cols.append(tuple(value))
        else:
            cols[self.col_header.index(key)] = value
        self.cell_values = [list(x) for x in zip(*cols)]
"repo_name": "YzPaul3/h2o-3",
"path": "h2o-py/h2o/two_dim_table.py",
"copies": "1",
"size": "3819",
"license": "apache-2.0",
"hash": -717793542187626100,
"line_mean": 39.6382978723,
"line_max": 119,
"alpha_frac": 0.6554071747,
"autogenerated": false,
"ratio": 3.446750902527076,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9513176305522211,
"avg_score": 0.017796354340973077,
"num_lines": 94
} |
"""A two-dimension grid of numeric values, used for input and output to a stencil kernel.
"""
import numpy
import math
class StencilGrid(object):
    """A dense n-dimensional grid of numeric values, used for input and
    output to a stencil kernel."""

    def __init__(self, size):
        self.dim = len(size)
        self.data = numpy.zeros(size)
        self.shape = size
        self.ghost_depth = 1
        self.set_grid_variables()
        self.set_interior()
        # add default neighbor definition
        self.set_default_neighbor_definition()

    # Make the grid indexable like the underlying ndarray.
    def __getitem__(self, index):
        return self.data[index]

    def __setitem__(self, index, value):
        self.data[index] = value

    def set_grid_variables(self):
        self.grid_variables = ["DIM" + str(d) for d in range(self.dim)]

    def set_interior(self):
        """
        Sets the number of interior points in each dimension
        """
        self.interior = [extent - 2 * self.ghost_depth
                         for extent in self.shape]

    def set_default_neighbor_definition(self):
        """
        Sets the default for neighbors[0] and neighbors[1].  Note that
        neighbors[1] does not include the center point.
        """
        center = tuple(0 for _ in range(self.dim))
        # neighbors[0] is the center itself; neighbors[1] holds the
        # +/-1 offsets along each axis.
        self.neighbor_definition = [[center], []]
        for axis in range(self.dim):
            for delta in [0, 1, -1]:
                offset = list(center)
                offset[axis] += delta
                offset = tuple(offset)
                if offset != center:
                    self.neighbor_definition[1].append(offset)

    def interior_points(self):
        """
        Iterator over the interior points of the grid.  Only executed
        in pure Python mode; in SEJITS mode, it should be executed only
        in the translated language/library.
        """
        import itertools
        per_axis = [range(self.ghost_depth, self.shape[d] - self.ghost_depth)
                    for d in range(self.dim)]
        for point in itertools.product(*per_axis):
            yield tuple(point)

    def border_points(self):
        """
        Iterator over the border points of a grid.  Only executed in pure
        Python mode; in SEJITS mode, it should be executed only in the
        translated language/library.
        """
        # TODO
        return []

    def neighbors(self, center, neighbors_id):
        """
        Returns the list of neighbors with the given neighbors_id.  By
        default, IDs 0 and 1 give the list consisting of all points at a
        distance of 0 and 1 from the center point, respectively.  Uses
        neighbor_definition to determine what the neighbors are.
        """
        # yield one tuple per neighbor offset
        for offset in self.neighbor_definition[neighbors_id]:
            yield tuple(c + o for c, o in zip(center, offset))

    def __repr__(self):
        return self.data.__repr__()
def distance(x, y):
    """Euclidean distance between points ``x`` and ``y`` (iterates over
    the indices of ``x``)."""
    squared_diffs = [(x[i] - y[i]) ** 2 for i in range(len(x))]
    return math.sqrt(sum(squared_diffs))
| {
"repo_name": "mbdriscoll/asp-old",
"path": "specializers/stencil/stencil_grid.py",
"copies": "2",
"size": "2970",
"license": "bsd-3-clause",
"hash": 6041666372925128000,
"line_mean": 30.935483871,
"line_max": 102,
"alpha_frac": 0.5912457912,
"autogenerated": false,
"ratio": 3.9705882352941178,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5561834026494118,
"avg_score": null,
"num_lines": null
} |
"""A two-dimension grid of numeric values, used for input and output to a stencil kernel.
"""
import numpy
import math
class StencilGrid(object):
    """A dense n-dimensional grid of numeric values used as input/output
    for a stencil kernel.  `ghost_depth` cells on every face are treated
    as the border; the rest are interior points."""

    def __init__(self, size):
        # `size` is a sequence of per-dimension extents.
        self.dim = len(size)
        self.data = numpy.zeros(size)
        self.shape = size
        self.ghost_depth = 1
        self.set_grid_variables()
        self.set_interior()
        # add default neighbor definition
        self.neighbor_definition = []
        self.set_default_neighbor_definition()

    # want this to be indexable
    def __getitem__(self, x):
        return self.data[x]

    def __setitem__(self, x, y):
        self.data[x] = y

    def set_grid_variables(self):
        # Symbolic per-dimension names ("DIM0", "DIM1", ...).
        self.grid_variables = ["DIM"+str(x) for x in range(0,self.dim)]

    def set_interior(self):
        """
        Sets the number of interior points in each dimension
        """
        self.interior = [x-2*self.ghost_depth for x in self.shape]

    def set_default_neighbor_definition(self):
        """
        Sets the default for neighbors[0] and neighbors[1]. Note that neighbors[1]
        does not include the center point.
        """
        self.neighbor_definition = []
        # neighbors[0]: just the all-zero center offset.
        self.neighbor_definition.append([tuple([0 for x in range(self.dim)])])
        self.neighbor_definition.append([])
        # neighbors[1]: the +/-1 offsets along each axis (center excluded).
        for x in range(self.dim):
            for y in [0, 1, -1]:
                tmp = list(self.neighbor_definition[0][0])
                tmp[x] += y
                tmp = tuple(tmp)
                if tmp != self.neighbor_definition[0][0]:
                    self.neighbor_definition[1].append(tmp)

    def interior_points(self):
        """
        Iterator over the interior points of the grid. Only executed
        in pure Python mode; in SEJITS mode, it should be executed only
        in the translated language/library.
        """
        import itertools
        all_dims = [range(self.ghost_depth,self.shape[x]-self.ghost_depth) for x in range(0,self.dim)]
        for item in itertools.product(*all_dims):
            yield tuple(item)

    def border_points(self):
        """
        Iterator over the border points of a grid. Only executed in pure Python
        mode; in SEJITS mode, it should be executed only in the translated
        language/library.
        """
        # TODO
        return []

    def neighbors(self, center, neighbors_id):
        """
        Returns the list of neighbors with the given neighbors_id. By
        default, IDs 0 and 1 give the list consisting of all
        points at a distance of 0 and 1 from the center point,
        respectively. Uses neighbor_definition to determine what the
        neighbors are.
        """
        # import pprint
        # print( "neighbors_id %s" % neighbors_id )
        # pprint.pprint(self.neighbor_definition)
        # return tuples for each neighbor
        for neighbor in self.neighbor_definition[neighbors_id]:
            yield tuple(map(lambda a,b: a+b, list(center), list(neighbor)))

    def __repr__(self):
        return self.data.__repr__()
| {
"repo_name": "mbdriscoll/ctree",
"path": "examples/stencil_grid/stencil_grid.py",
"copies": "2",
"size": "3046",
"license": "bsd-2-clause",
"hash": -1937327913148585200,
"line_mean": 31.0631578947,
"line_max": 102,
"alpha_frac": 0.5912672357,
"autogenerated": false,
"ratio": 4.06675567423231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.565802290993231,
"avg_score": null,
"num_lines": null
} |
"""A two-way, reliable communication channel.
Byte arrays can be read and written; asyncio based implementation
"""
import struct
import asyncio
from abc import ABCMeta, abstractmethod
import saltchannel.util as util
from .exceptions import ComException, BadPeer
class ByteChannel(metaclass=ABCMeta):
    """Abstract base class for a two-way, reliable byte channel offering
    both async (`read`/`write`) and blocking (`read_sync`/`write_sync`)
    entry points."""

    def __init__(self, loop=None):
        # if channel will be used in sync context, need to create local asyncio loop
        # since internally it may still use asyncio
        self.loop = util.force_event_loop(loop=loop)

    @abstractmethod
    async def read(self):
        """Read and return one message as bytes."""
        pass

    @abstractmethod
    async def write(self, msg, *args, is_last=False):
        """Write one or more messages.  `is_last` presumably marks the
        final message of a session -- confirm against subclasses."""
        pass

    # @abstractmethod
    def read_sync(self):
        # Blocking counterpart of read(); optional for subclasses.
        pass

    # @abstractmethod
    def write_sync(self, msg, *args, is_last=False):
        # Blocking counterpart of write(); optional for subclasses.
        pass
class AsyncioChannel(ByteChannel, metaclass=util.Syncizer):
    """ByteChannel backed by asyncio stream reader/writer objects."""

    def __init__(self, reader, writer, loop=None):
        """
        reader - instance of dev/client_server_a/SaltChannelStreamReader()
        writer - instance of dev/client_server_a/SaltChannelStreamWriter()
        """
        super().__init__(loop=loop)
        self.reader = reader
        self.writer = writer

    async def read(self):
        # Delegate framing to the stream reader.
        return await self.reader.read_msg()

    async def write(self, msg, *args, is_last=False):
        # Queue every message, then flush once.
        for m in (msg,) + args:
            self.writer.write_msg(m)
        await self.writer.drain()

    def close(self):
        self.writer.close()
class SocketChannel(ByteChannel):
    """Blocking ByteChannel over a plain socket, framing each message
    with a 4-byte little-endian signed length prefix."""

    def __init__(self, sock):
        self.sock = sock

    async def read(self):
        # Delegates to the blocking implementation.
        return self.read_sync()

    async def write(self, msg, *args, is_last=False):
        # Delegates to the blocking implementation.
        self.write_sync(msg, *args, is_last=is_last)

    def read_sync(self):
        try:
            prefix, complete = self.recvall(4)
            if not complete:
                raise ComException("Unable to recv size prefix. NOT all requested data were obtained")
            (size,) = struct.unpack('<i', prefix)
            payload, complete = self.recvall(size)
            if not complete:
                raise ComException("Unable to recv msg. NOT all requested data were obtained")
            return payload
        except Exception as e:
            # Note: ComExceptions raised above are re-wrapped here as well,
            # matching the original behavior.
            raise ComException(e)

    def write_sync(self, message, *args, is_last=False):
        out = bytearray()
        try:
            for part in (message,) + args:
                out += struct.pack('<i', len(part))
                out += bytes(part)
            self.sock.sendall(bytes(out))
            # [TODO] do we need to close socket here if is_last == True ?
        except Exception as e:
            raise ComException(e)

    def recvall(self, count):
        # Read exactly `count` bytes; returns (data, False) on early EOF.
        received = bytearray()
        while count:
            piece = self.sock.recv(count)
            if not piece:
                return (bytes(received), False)
            received += piece
            count -= len(piece)
        return (bytes(received), True)
"repo_name": "assaabloy-ppi/salt-channel-python",
"path": "saltchannel/channel.py",
"copies": "1",
"size": "2995",
"license": "mit",
"hash": -7243546980260357000,
"line_mean": 29.5714285714,
"line_max": 101,
"alpha_frac": 0.6060100167,
"autogenerated": false,
"ratio": 3.998664886515354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003007060874477962,
"num_lines": 98
} |
"""A `Type` and `Op` classes to work with numpy.ndarrays symbolically."""
from __future__ import absolute_import, print_function, division
from six.moves import builtins
import sys
import warnings
import numpy
from six import integer_types
from six.moves import xrange
import numbers
import theano
from theano.compat import izip
from theano.configparser import config
from theano import gof
from theano.gof import Apply, Constant, Op, Variable
from theano.tensor import elemwise
from theano.tensor.var import (AsTensorError, TensorVariable,
TensorConstant,
_tensor_py_operators)
from theano.tensor.type import TensorType, values_eq_approx_always_true
from theano.tensor.type_other import NoneConst
from theano import scalar as scal
from functools import partial
from theano import compile, printing
from theano.printing import pprint, min_informative_str
# For history
from theano.compile import Rebroadcast, Shape, shape
# We use these exceptions as well.
import theano.scalar.sharedvar
from theano.gradient import grad_undefined
from theano.gradient import grad_not_implemented
from theano.gradient import DisconnectedType
# set up the external interface
from theano.tensor.elemwise import Elemwise, DimShuffle, CAReduce, Sum
import logging
_logger = logging.getLogger("theano.tensor.basic")

__docformat__ = "restructuredtext en"

# This is needed as we will hide it later
# (this module later defines tensor ops named `complex`, `any` and `all`,
# so keep references to the shadowed builtins).
python_complex = complex
python_any = any
python_all = all

# Define common subsets of dtypes (as strings), derived from the scalar
# type lists in theano.scalar.
complex_dtypes = list(map(str, scal.complex_types))
continuous_dtypes = list(map(str, scal.continuous_types))
float_dtypes = list(map(str, scal.float_types))
discrete_dtypes = list(map(str, scal.discrete_types))
all_dtypes = list(map(str, scal.all_types))
int_dtypes = list(map(str, scal.int_types))
uint_dtypes = list(map(str, scal.uint_types))
class ShapeError(Exception):
    """Raised when the shape cannot be computed."""
def check_equal_numpy(x, y):
    """
    Return True iff x and y are equal.

    Checks the dtype and shape if x and y are numpy.ndarray instances.
    """
    if isinstance(x, numpy.ndarray) and isinstance(y, numpy.ndarray):
        # Bug fix: use `all` -- every element must be (numerically) close.
        # The previous `numpy.any` made arrays "equal" as soon as a single
        # element matched, and made two empty arrays compare unequal.
        return (x.dtype == y.dtype and x.shape == y.shape and
                numpy.all(abs(x - y) < 1e-10))
    elif (isinstance(x, numpy.random.RandomState) and
          isinstance(y, numpy.random.RandomState)):
        # Compare the full RNG state element-wise.
        return python_all(numpy.all(a == b) for a, b in
                          izip(x.__getstate__(), y.__getstate__()))
    else:
        return x == y
# Register with theano.compile so modes that verify results (e.g.
# DebugMode) can compare outputs using this equality check.
compile.register_checker(check_equal_numpy)

# Functions decorated with `constructor` below are collected here so the
# documentation generator can list them.
__oplist_constructor_list = []
"""List of functions to be listed as op constructors in the oplist
(`gen_oplist`, doc/oplist.txt)."""
def constructor(f):
    """Add `f` to :doc:`oplist`.

    Make `f` appear as a constructor in the oplist (`gen_oplist`,
    doc/oplist.txt).
    """
    # Decorator: records the function and returns it unchanged.
    __oplist_constructor_list.append(f)
    return f
def __oplist_tag(thing, tag):
    # Append `tag` to the object's `__oplist_tags` list, creating the
    # attribute if it does not exist yet.
    existing = getattr(thing, '__oplist_tags', [])
    existing.append(tag)
    thing.__oplist_tags = existing
# Dead code: this branch never executes (`if 0:`).  It sketched a variant
# of `as_tensor_variable` that would leave values on the GPU instead of
# transferring them; kept for history.
if 0:
    # this starts to feel like we're enumerating all the types
    # the one place where this is used we should also allow for sparse
    # variables
    # - JB 20100226
    def as_cuda_or_tensor_variable(x, name=None, ndim=None):
        """
        Do the same as_tensor_variable,
        but do not transfer the value on the gpu.
        """
        if hasattr(x, '_as_CudaNdarrayVariable'):
            # TODO: pass name and ndim arguments
            return x._as_CudaNdarrayVariable()
        return as_tensor_variable(x, name, ndim)
def as_tensor_variable(x, name=None, ndim=None):
    """Return `x`, transformed into a `TensorType`.

    This function is often used by `make_node` methods of `Op` subclasses
    to turn ndarrays, numbers, `Scalar` instances, `Apply` instances and
    `TensorType` instances into valid input list elements.

    Parameters
    ----------
    x : Apply instance, Variable instance, numpy.ndarray, or number
        This thing will be transformed into a `Variable` in a sensible way. An
        ndarray argument will not be copied, but a list of numbers will be
        copied to make an ndarray.
    name : str or None
        If a new `Variable` instance is created, it will be named with this
        string.
    ndim : None or integer
        Return a Variable with this many dimensions.

    Raises
    ------
    ValueError
        If an `Apply` with more than one output is fetched or
        if `x` cannot be made into a Variable with `ndim` dimensions.
    AsTensorError
        If `x` cannot be converted to a TensorType Variable.

    """
    # Objects (e.g. sparse/GPU variables) may provide their own conversion.
    if hasattr(x, '_as_TensorVariable'):
        return x._as_TensorVariable()  # TODO: pass name and ndim arguments

    if isinstance(x, gof.Apply):
        # use Apply's default output mechanism
        if (x.op.default_output is None) and (len(x.outputs) != 1):
            raise ValueError(
                "It is ambiguous which output of a multi-output Op has"
                " to be fetched.", x)

        x = x.default_output()
    if isinstance(x, Variable):
        # Plain scalars are lifted to 0-d tensors.
        if isinstance(x.type, scal.Scalar):
            x = tensor_from_scalar(x)

        if not isinstance(x.type, TensorType):
            raise AsTensorError(
                "Variable type field must be a TensorType.", x, x.type)

        if ndim is None:
            return x
        else:
            if (x.type.ndim > ndim):
                # strip off leading broadcastable dimensions
                first_non_broadcastable = [idx for idx in xrange(x.ndim)
                                           if not x.broadcastable[idx]][0]
                x = x.dimshuffle(list(range(x.ndim))[first_non_broadcastable:])
                if x.ndim > ndim:
                    raise ValueError(
                        'TensorType could not be cast to have %i dimensions'
                        % ndim, x.type
                    )
                return x
            elif (x.type.ndim < ndim):
                # Pad with broadcastable dimensions on the left.
                return shape_padleft(x, n_ones=(ndim - x.type.ndim))
            else:
                return x

    # A list/tuple containing Variables is joined into one tensor if
    # possible; on failure we fall through to `constant` below.
    if isinstance(x, (tuple, list)) and python_any(isinstance(xi, Variable)
                                                   for xi in x):
        try:
            return stack(x)
        except (TypeError, ValueError):
            pass

    if isinstance(x, bool):
        raise AsTensorError(
            "Cannot cast True or False as a tensor variable. Please use 1 or "
            "0. This error might be caused by using the == operator on "
            "Variables. v == w does not do what you think it does, "
            "use theano.tensor.eq(v, w) instead.")
    try:
        return constant(x, name=name, ndim=ndim)
    except TypeError:
        try:
            str_x = str(x)
        except Exception:
            str_x = repr(x)
        raise AsTensorError("Cannot convert %s to TensorType" % str_x, type(x))

# this has a different name, because _as_tensor_variable is the
# function which ops use to upcast their arguments... this
# internal-use function is a good place to put debugging stuff, better
# than the global astensor.
_as_tensor_variable = as_tensor_variable

as_tensor = as_tensor_variable
class NumpyAutocaster(object):
    """
    This class is used to cast python ints and floats to numpy arrays.

    The behavior when called on scalar `x` depends on `config.cast_policy`:
        - 'numpy' will simply use the same type as found by `numpy.asarray(x)`.
        - 'numpy+floatX' will do the same, except it will use float32 instead
          of float64 if `x` is a Python float and `config.floatX` is set to
          'float32' (note that if `x` is a numpy scalar whose data type is
          float64, it is not modified since we assume the user is purposedly
          using float64).
        - 'custom' lets one define a tuple of data types such that:
            - if `x` is already a numpy scalar and its data type is in this
              tuple, then it is returned unchanged;
            - otherwise, the first data type in this tuple that can represent
              `x` without loss of precision will be used, unless `x` is a float
              and 'float32' is in the tuple (in which case `x` is cast as a
              float32);
            - if no data type can represent `x` without loss of precision, then
              the last data type in the tuple will be used.

    Parameters
    ----------
    dtypes: tuple of strings
        The ordered list of preferred data types (only used when
        `config.cast_policy` is set to 'custom', see the `NumpyAutocaster`
        help for details).

    """

    def __init__(self, dtypes):
        # Stored as a tuple so the preference order is immutable.
        self.dtypes = tuple(dtypes)

    def __call__(self, x):
        # Make sure we only deal with scalars.
        assert (isinstance(x, integer_types) or
                isinstance(x, float) or
                (isinstance(x, numpy.ndarray) and x.ndim == 0))

        if config.cast_policy == 'numpy':
            return numpy.asarray(x)
        elif config.cast_policy == 'numpy+floatX':
            rval = numpy.asarray(x)
            # Only Python floats (no `dtype` attribute) are downcast to
            # floatX; typed numpy scalars pass through unchanged.
            if ((not hasattr(x, 'dtype') and
                 rval.dtype in ('float64', 'float32') and
                 rval.dtype != config.floatX)):
                rval = theano._asarray(rval, dtype=config.floatX)
            return rval

        # The following is the original code, corresponding to the 'custom'
        # option for `config.cast_policy`.
        assert config.cast_policy == 'custom'

        try:
            # Pass through numpy scalars, since they are already typed on
            # purpose typically.
            if str(x.dtype) in self.dtypes:
                # No need to cast `x` into a new dtype. Note that we still
                # need to convert it into an array, because it may not be
                # one already (e.g. if x == numpy.float64(1.1)).
                return numpy.asarray(x)
        except AttributeError:
            # Means `x` has no 'dtype' attribute.
            pass

        # unsafe downcast of float64 variables when config.floatX == 'float32'
        # recall: float is numpy.float
        if ((isinstance(x, float) and
             config.floatX in self.dtypes and
             config.floatX != 'float64')):
            return theano._asarray(x, dtype=config.floatX)

        # Don't autocast to float16 unless config.floatX is float16
        try_dtypes = [d for d in self.dtypes
                      if config.floatX == 'float16' or d != 'float16']

        # Try preferred dtypes in order; stop at the first lossless cast.
        for dtype in try_dtypes:
            x_ = theano._asarray(x, dtype=dtype)
            if numpy.all(x == x_):
                break
        # returns either an exact x_==x, or the last cast x_
        return x_
# Default autocasters used when wrapping raw Python ints/floats in
# constants (see `constant_or_value` below).
autocast_int = NumpyAutocaster(('int8', 'int16', 'int32', 'int64'))
autocast_float = NumpyAutocaster(('float16', 'float32', 'float64'))
# autocast_float dtypes might be manipulated in tensor.__init__
#
# Note: it's a bit weird for a compiler to automatically downcast
# literals like this, and it might have implications for efficiency
# when mixing types. For example when you add 1.0 + dmatrix(), the
# 1.0 could be converted to float32, and require upcasting for the +
# operation at every position in the dmatrix. using
# theano._asarray(1.0, dtype='float64') will circumvent this
# autocasting, and in future, our ops might be smarter about factoring
# out upcasts. The advantage of this mechanism is to combine it with
# floatX so that 1.0 + xmatrix() will always have the same type as the
# xmatrix().
#
class autocast_float_as(object):
    """
    Context manager that temporarily overrides float autocasting.

    While the managed block is active, `autocast_float` tries the given
    dtypes (in the given order) instead of its usual preference list.
    Only meaningful when `config.cast_policy` is 'custom'; any other
    policy triggers an assertion error.

    This class might be convenient in some code, but it definitely
    helps to test the autocasting mechanism.

    Examples
    --------
    >>> with autocast_float_as('float32'):
    ...     assert (fvector() + 1.1).dtype == 'float32'  # temporary downcasting
    >>> assert (fvector() + 1.1).dtype == 'float64' # back to default behaviour

    """

    def __init__(self, *dtypes):
        assert config.cast_policy == 'custom'
        self.dtypes = dtypes

    def __enter__(self):
        assert config.cast_policy == 'custom'
        # Remember the previous preference list so it can be restored.
        self.old_dtypes = autocast_float.dtypes
        autocast_float.dtypes = self.dtypes

    def __exit__(self, *args):
        assert config.cast_policy == 'custom'
        autocast_float.dtypes = self.old_dtypes
def constant_or_value(x, rtype, name=None, ndim=None, dtype=None):
    """Return a symbolic constant (an `rtype` instance) with value `x`.

    Parameters
    ----------
    x
        Value to wrap.  Python ints/floats are autocast (see
        `autocast_int` / `autocast_float`), numpy arrays are used as-is
        (bool arrays are upcast to uint8), anything else goes through
        `numpy.asarray`.
    rtype
        Class to instantiate, e.g. `TensorConstant`.
    name
        Optional name for the resulting variable.
    ndim
        If not None, left-pad the broadcastable pattern with True entries
        so the result has exactly `ndim` dimensions.
    dtype
        If not None, force this dtype instead of inferring one.

    Raises
    ------
    TypeError
        `x` could not be converted to a numpy.ndarray.
    ValueError
        `x` could not be expanded to have ndim dimensions.

    """
    if dtype is not None:
        # in this case, the semantics are that the caller is forcing the dtype
        x_ = theano._asarray(x, dtype=dtype)
    else:
        # In this case, this function should infer the dtype according to the
        # autocasting rules. See autocasting above.
        x_ = None
        if rtype is TensorConstant and isinstance(x, integer_types):
            try:
                x_ = autocast_int(x)
            except OverflowError:
                # This is to imitate numpy behavior which tries to fit
                # bigger numbers into a uint64.
                x_ = theano._asarray(x, dtype='uint64')
        elif rtype is TensorConstant and isinstance(x, float):
            x_ = autocast_float(x)
        elif isinstance(x, numpy.ndarray):
            x_ = x
            # Currently we do not have a bool dtype in Theano.
            # So we upcast it to uint8 to avoid breaking our interface for
            # constant.
            if x.dtype == 'bool':
                x_ = numpy.asarray(x_, dtype='uint8')
        else:
            # Here x is probably a list or a tuple. If it contains a long,
            # we will behave like the current NumPy version: 1.7 and below,
            # it will only work if the long fits in int64. For NumPy 1.7.1+,
            # it will work if the long fits in int64 or uint64.
            x_ = numpy.asarray(x)

    assert type(x_) in [numpy.ndarray, numpy.memmap]

    bcastable = [d == 1 for d in x_.shape]
    if ndim is not None:
        if len(bcastable) < ndim:
            # Left-pad with broadcastable (length-1) dimensions.
            bcastable = [True] * (ndim - len(bcastable)) + bcastable
        elif len(bcastable) > ndim:
            # TODO: strip off dimensions of size 1
            raise ValueError(
                'ndarray could not be cast to constant with %i dimensions' %
                ndim)
        assert len(bcastable) == ndim

    try:
        if rtype is TensorConstant:
            rval = rtype(
                TensorType(dtype=x_.dtype, broadcastable=bcastable),
                x_.copy(),
                name=name)
            return rval
        else:
            # leave the shape out of the type
            return rtype(TensorType(dtype=x_.dtype, broadcastable=bcastable),
                         x_, name=name)
    except Exception:
        # BUGFIX: wrap `x` in a one-tuple.  `x` itself may be a tuple, in
        # which case `"%s" % x` would consume its items as format
        # arguments and raise a confusing formatting TypeError instead of
        # the intended message.
        raise TypeError("Could not convert %s to TensorType" % (x,), type(x))
def constant(x, name=None, ndim=None, dtype=None):
    """Return a `TensorConstant` wrapping `x`, possibly shared via a cache.

    Small single-element constants (integer dtypes, and float dtypes
    holding an integral value, with value between -10 and 10) are
    memoized by signature so that repeated use of the same literal yields
    the very same graph object.  This speeds up the Merge optimization
    for big graphs.  All broadcast patterns of such scalars are cached.
    """
    ret = constant_or_value(x, rtype=TensorConstant, name=name, ndim=ndim,
                            dtype=dtype)

    if not constant.enable:
        return ret

    sig = ret.signature()
    if sig in constant_cache:
        # Reuse the previously built constant for this signature.
        return constant_cache[sig]

    # Only consider caching cheap-to-keep values; the size check must come
    # first so the chained comparison below runs on a one-element array.
    if ret.data.size == 1 and (-10) <= ret.data <= 10:
        if (ret.dtype in int_dtypes or ret.dtype in uint_dtypes or
                (ret.dtype in float_dtypes and int(ret.data) == ret.data)):
            constant_cache[sig] = ret
            # Mark the node as cached; this is needed to raise a good
            # error to the user.
            ret.cached = True
    return ret


constant.enable = True
constant_cache = {}
def _obj_is_wrappable_as_tensor(x):
    """Return True iff `constant` can turn `x` into a TensorConstant."""
    try:
        constant(x)
    except TypeError:
        return False
    return True
# Module-level comparison tolerances, selected once at import time from
# config.tensor.cmp_sloppy; `_get_atol_rtol` below picks among them
# per-dtype.
if int(config.tensor.cmp_sloppy) > 1:
    # This config variable is a quick-and-dirty way to get low-precision
    # comparisons. For a more precise setting of these tolerances set
    # them explicitly in your user code by assigning, for example,
    # "theano.tensor.basic.float32_atol = ..."
    # When config.tensor.cmp_sloppy>1 we are even more sloppy. This is
    # useful to test the GPU as they don't use extended precision and
    # this causes differences bigger than the normal sloppy ones.
    float16_atol = 5e-3
    float16_rtol = 1e-2
    float32_atol = 5e-4
    float32_rtol = 1e-3
    float64_rtol = 1e-4
    float64_atol = 1e-3
elif int(config.tensor.cmp_sloppy):
    float16_atol = 1e-3
    float16_rtol = 5e-3
    float32_atol = 1e-4
    float32_rtol = 1e-3
    float64_rtol = 1e-4
    float64_atol = 1e-3
else:
    # If you change those values in a test don't forget to put them back
    # when the test ends. Don't forget the case when the test fails.
    float16_atol = 5e-4
    float16_rtol = 5e-4
    float32_atol = 1e-5
    float32_rtol = 1e-5
    # defaults in numpy.allclose
    # Don't be more strict than numpy's rtol:
    # it causes useless errors.
    float64_rtol = 1.0000000000000001e-05
    float64_atol = 1e-8
def _get_atol_rtol(a, b):
    """Pick default comparison tolerances from the widest tolerance class.

    float16 operands get the loosest tolerances, float32/complex64 get
    intermediate ones, and everything else uses the float64 defaults.
    Returns the pair ``(atol, rtol)``.
    """
    dtype_names = (str(a.dtype), str(b.dtype))
    if 'float16' in dtype_names:
        return float16_atol, float16_rtol
    if 'float32' in dtype_names or 'complex64' in dtype_names:
        return float32_atol, float32_rtol
    return float64_atol, float64_rtol
def _allclose(a, b, rtol=None, atol=None):
    """numpy.allclose with dtype-aware default tolerances.

    `rtol`/`atol`, when given, override the defaults chosen by
    `_get_atol_rtol` from the operands' dtypes.
    """
    a = numpy.asarray(a)
    b = numpy.asarray(b)
    atol_, rtol_ = _get_atol_rtol(a, b)
    if rtol is not None:
        rtol_ = rtol
    if atol is not None:
        atol_ = atol

    # Work around bug in Numpy, see
    # http://projects.scipy.org/numpy/ticket/1684
    # NOTE: `numpy.absolute(b) < 0` can only be true when taking the
    # absolute value overflowed (e.g. the most negative int); in that
    # case fall back to float64 before comparing.
    if str(b.dtype) in int_dtypes and (numpy.absolute(b) < 0).any():
        b = theano._asarray(b, dtype='float64')

    return numpy.allclose(a, b, atol=atol_, rtol=rtol_)
class NotScalarConstantError(Exception):
    """
    Exception raised by `get_scalar_constant_value` when its argument
    does not denote a scalar constant.
    """
class EmptyConstantError(NotScalarConstantError):
    """
    Exception raised by `get_scalar_constant_value` when its argument is
    an empty (zero-sized) constant.
    """
def numpy_scalar(data):
    """Return the scalar stored in the numpy ndarray `data`.

    Parameters
    ----------
    data : numpy.ndarray
        Array expected to hold exactly one numeric value.

    Returns
    -------
    numpy.ndarray
        `data` itself, unchanged, once it is known to hold a single
        numeric value.

    Raises
    ------
    EmptyConstantError
        If `data` is empty (all dimensions of size 0), e.g.
        numpy.array([]).
    NotScalarConstantError
        If `data` is non-numeric or not a scalar.

    """
    # This module shadows builtins such as `max` with symbolic versions,
    # so reach the real builtins explicitly.  The old code indexed
    # `__builtins__` like a dict, which is a CPython implementation
    # detail (it is a module object in __main__); importing the builtins
    # module is the portable way.
    try:
        import builtins as _builtins  # Python 3
    except ImportError:
        import __builtin__ as _builtins  # Python 2

    # handle case where data is numpy.array([])
    if (data.ndim > 0 and
        (len(data.shape) == 0 or
         _builtins.max(data.shape) == 0)):
        assert numpy.all(numpy.array([]) == data)
        raise EmptyConstantError()
    try:
        # The builtin `complex` accepts all numeric scalars; `numpy.complex`
        # was merely an alias for it and has been removed from recent
        # numpy releases.
        _builtins.complex(data)  # works for all numeric scalars
        return data
    except Exception:
        raise NotScalarConstantError(
            'v.data is non-numeric, non-scalar, or has more than one'
            ' unique value', data)
# Scalar Ops through which `get_scalar_constant_value` is willing to
# recurse: each is cheap to evaluate once its inputs are known constants.
get_scalar_constant_value_elemwises = (
    scal.Cast, scal.Switch,
    scal.NEQ, scal.EQ,
    scal.LT, scal.GT, scal.LE, scal.GE,
    scal.Sub, scal.Add, scal.Mod, scal.Mul,
    scal.IntDiv, scal.TrueDiv, scal.Minimum, scal.Maximum)
def get_scalar_constant_value(orig_v, elemwise=True,
                              only_process_constants=False):
    """Return the constant scalar(0-D) value underlying variable `v`.

    If `v` is the output of dimshuffles, fills, allocs, rebroadcasts,
    cast, OutputGuard, DeepCopyOp, ScalarFromTensor, ScalarOp, Elemwise
    and some pattern with Subtensor, this function digs through them.

    If `v` is not some view of constant scalar data, then raise a
    NotScalarConstantError.

    Parameters
    ----------
    elemwise : bool
        If False, we won't try to go into elemwise. So this call is faster.
        But we still investigate in Second Elemwise (as this is a substitute
        for Alloc)
    only_process_constants : bool
        If True, we only attempt to obtain the value of `orig_v` if it's
        directly constant and don't try to dig through dimshuffles, fills,
        allocs, and other to figure out its value.

    Notes
    -----
    There may be another function similar to this one in the code,
    but I'm not sure where it is.

    """
    # Iteratively walk up the graph, replacing `v` with the relevant input
    # whenever its owner op merely forwards / reshapes constant data.
    v = orig_v
    while True:
        if v is None:
            # None is not a scalar (and many uses of this function seem
            # to depend on passing it None)
            raise NotScalarConstantError()

        # Raw Python / numpy values: already constant.
        if isinstance(v, (numpy.integer, integer_types, float)):
            return numpy.asarray(v)

        if isinstance(v, numpy.ndarray):
            return numpy_scalar(v).copy()

        if isinstance(v, Constant):
            # Prefer the tag's unique_value when present (set by the
            # caching machinery) over the raw data.
            if getattr(v.tag, 'unique_value', None) is not None:
                data = v.tag.unique_value
            else:
                data = v.data
            return numpy_scalar(data).copy()

        if not only_process_constants and getattr(v, 'owner', None):
            # Pass-through ops: just look at their (first) input instead.
            if isinstance(v.owner.op, (Alloc, DimShuffle, Rebroadcast,
                                       compile.ops.OutputGuard,
                                       compile.DeepCopyOp)):
                v = v.owner.inputs[0]
                continue
            elif isinstance(v.owner.op, theano.compile.ops.Shape_i):
                # shape[i] of a constant is itself a constant.
                if isinstance(v.owner.inputs[0], Constant):
                    return numpy.asarray(
                        v.owner.inputs[0].data.shape[v.owner.op.i])
            # Don't act as the constant_folding optimization here as this
            # fct is used too early in the optimization phase. This would
            # mess with the stabilization optimization and be too slow.
            # We put all the scalar Ops used by get_canonical_form_slice()
            # to allow it to determine the broadcast pattern correctly.
            elif isinstance(v.owner.op, (ScalarFromTensor, TensorFromScalar)):
                return get_scalar_constant_value(v.owner.inputs[0])
            elif isinstance(v.owner.op, scal.ScalarOp):
                if isinstance(v.owner.op, scal.Second):
                    # We don't need both input to be constant for second
                    shp, val = v.owner.inputs
                    v = val
                    continue
                if isinstance(v.owner.op, get_scalar_constant_value_elemwises):
                    # Whitelisted scalar op: evaluate it on its (constant)
                    # inputs via perform().
                    const = [get_scalar_constant_value(i)
                             for i in v.owner.inputs]
                    ret = [[None]]
                    v.owner.op.perform(v.owner, const, ret)
                    return ret[0][0].copy()
            # In fast_compile, we don't enable local_fill_to_alloc, so
            # we need to investigate Second as Alloc. So elemwise
            # don't disable the check for Second.
            elif isinstance(v.owner.op, Elemwise):
                if isinstance(v.owner.op.scalar_op, scal.Second):
                    # We don't need both input to be constant for second
                    shp, val = v.owner.inputs
                    v = val
                    continue
                elif elemwise and isinstance(
                        v.owner.op.scalar_op,
                        get_scalar_constant_value_elemwises):
                    const = [get_scalar_constant_value(i)
                             for i in v.owner.inputs]
                    ret = [[None]]
                    v.owner.op.perform(v.owner, const, ret)
                    return ret[0][0].copy()
            elif (isinstance(v.owner.op, theano.tensor.subtensor.Subtensor) and
                  v.ndim == 0):
                # Scalar element taken out of a constant tensor.
                if isinstance(v.owner.inputs[0], TensorConstant):
                    cdata = tuple(v.owner.op.get_constant_idx(v.owner.inputs))
                    try:
                        return v.owner.inputs[0].data.__getitem__(cdata).copy()
                    except IndexError:
                        raise IndexError(
                            str(tuple(v.owner.op.idx_list)) +
                            " is not a valid index into " +
                            str(v.owner.inputs[0].data))

                # The index list 'idx_list' should have length the same
                # shape as the input.
                # TODO: implement the case where we take a scalar in a matrix
                assert len(v.owner.op.idx_list) == v.owner.inputs[0].ndim

                # Needed to make better graph in this test in
                # theano/tensor/tests/test_sharedvar.py:
                # test_shared_options.test_specify_shape_partial
                if ((v.owner.inputs[0].owner and
                     isinstance(v.owner.inputs[0].owner.op, Join) and
                     len(v.owner.op.idx_list) == 1)):
                    # Ensure the Join is joining only scalar variables (so that
                    # the constant value can be found at the same index as the
                    # one used in the sub-tensor).
                    if python_all(var.ndim == 0 for var in
                                  v.owner.inputs[0].owner.inputs[1:]):
                        idx = v.owner.op.idx_list[0]
                        if isinstance(idx, gof.Type):
                            idx = get_scalar_constant_value(v.owner.inputs[1])
                        # Note the '+ 1' is because the first argument to Join
                        # is the axis.
                        ret = v.owner.inputs[0].owner.inputs[idx + 1]
                        ret = get_scalar_constant_value(ret)
                        # join can cast implicitly its input in some case.
                        return theano._asarray(ret, dtype=v.type.dtype)
                    if python_all(var.ndim == 1 for var in
                                  v.owner.inputs[0].owner.inputs[1:]):
                        idx = v.owner.op.idx_list[0]
                        if isinstance(idx, gof.Type):
                            idx = get_scalar_constant_value(v.owner.inputs[1])
                        try:
                            # TODO: assert joined axis is 0.
                            # Locate which joined vector the index falls
                            # into, then recurse on that element.
                            length = 0
                            for joined in v.owner.inputs[0].owner.inputs[1:]:
                                ll = get_vector_length(joined)
                                if idx < length + ll:
                                    return get_scalar_constant_value(
                                        joined[idx - length])
                                length += ll
                        except TypeError:
                            pass
                        except ValueError:
                            pass

                elif (v.owner.inputs[0].owner and
                      isinstance(v.owner.inputs[0].owner.op,
                                 theano.tensor.opt.MakeVector) and
                      # MakeVector normally accept only scalar as input.
                      # We put this check in case there is change in the future
                      python_all(var.ndim == 0 for var in
                                 v.owner.inputs[0].owner.inputs) and
                      len(v.owner.op.idx_list) == 1):
                    idx = v.owner.op.idx_list[0]
                    if isinstance(idx, gof.Type):
                        idx = get_scalar_constant_value(v.owner.inputs[1])
                    # Python 2.4 does not support indexing with numpy.integer
                    # So we cast it.
                    idx = int(idx)
                    ret = v.owner.inputs[0].owner.inputs[idx]
                    ret = get_scalar_constant_value(ret)
                    # MakeVector can cast implicitly its input in some case.
                    return theano._asarray(ret, dtype=v.type.dtype)

                # This is needed when we take the grad as the Shape op
                # are not already changed into MakeVector
                owner = v.owner
                leftmost_parent = owner.inputs[0]
                if (leftmost_parent.owner and
                    isinstance(leftmost_parent.owner.op,
                               theano.tensor.Shape)):
                    op = owner.op
                    idx_list = op.idx_list
                    idx = idx_list[0]
                    if isinstance(idx, gof.Type):
                        idx = get_scalar_constant_value(owner.inputs[1])
                    grandparent = leftmost_parent.owner.inputs[0]
                    gp_broadcastable = grandparent.type.broadcastable
                    ndim = grandparent.type.ndim

                    if grandparent.owner and isinstance(grandparent.owner.op,
                                                        Rebroadcast):
                        # Merge the broadcast patterns through Rebroadcast.
                        ggp_broadcastable = grandparent.owner.inputs[0].broadcastable
                        l = [b1 or b2 for b1, b2 in zip(ggp_broadcastable,
                                                        gp_broadcastable)]
                        gp_broadcastable = tuple(l)

                    assert ndim == len(gp_broadcastable)

                    if not (idx < len(gp_broadcastable)):
                        msg = ("get_scalar_constant_value detected " +
                               "deterministic IndexError: x.shape[%d] " +
                               "when x.ndim=%d.") % (idx, ndim)
                        if config.exception_verbosity == 'high':
                            msg += ' x=%s' % min_informative_str(v)
                        else:
                            msg += ' x=%s' % str(v)
                        raise ValueError(msg)

                    # A broadcastable dimension always has length 1.
                    if gp_broadcastable[idx]:
                        return numpy.asarray(1)

        raise NotScalarConstantError(v)
# Easy constructors
def tensor(*args, **kwargs):
    """Build a `TensorType` from the arguments and return a variable of it.

    A ``name`` keyword argument, if given, is attached to the returned
    variable rather than passed to the type constructor.
    """
    var_name = kwargs.pop('name', None)
    ttype = TensorType(*args, **kwargs)
    return ttype(name=var_name)
def _multi(*fns):
    """Wrap variable constructor(s) so each can build several at once.

    Each wrapped constructor accepts either a count (``f(3)`` returns a
    list of three anonymous variables, ``f(1)`` a single one) or one or
    more names (``f('x', 'y')`` returns a list of named variables,
    ``f('x')`` a single one).

    Returns the single wrapped constructor when one function is given,
    otherwise a list of wrapped constructors.
    """
    def f2(f, *names):
        if names and isinstance(names[0], integer_types):
            # A count was given instead of names.
            # BUGFIX: this used to test `names == 1`, which compares a
            # tuple with an int and is never true, so a count of 1
            # produced a one-element list instead of a bare variable.
            if names[0] == 1:
                return f()
            else:
                return [f() for i in xrange(names[0])]
        if isinstance(names, tuple):
            if len(names) == 1:
                names = names[0]
        if len(names) == 1:
            return f(names)
        else:
            return [f(name) for name in names]
    if len(fns) == 1:
        # BUGFIX: this used to be `partial(f2, fns)`, binding the whole
        # tuple as `f`, which would crash as soon as the result was
        # called; bind the lone function instead.
        return partial(f2, fns[0])
    else:
        return [partial(f2, f) for f in fns]
# 0-d (scalar) TensorTypes, one per supported dtype.  Prefix convention:
# c/z = complex64/128, f/d = float32/64, b/w/i/l = int8/16/32/64.
cscalar = TensorType('complex64', ())
zscalar = TensorType('complex128', ())
fscalar = TensorType('float32', ())
dscalar = TensorType('float64', ())
bscalar = TensorType('int8', ())
wscalar = TensorType('int16', ())
iscalar = TensorType('int32', ())
lscalar = TensorType('int64', ())
def scalar(name=None, dtype=None):
    """Return a symbolic scalar (0-d) variable.

    Parameters
    ----------
    dtype: numeric
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.

    """
    if dtype is None:
        dtype = config.floatX
    # Use a dedicated local name: the previous `type` shadowed the builtin.
    scalar_type = TensorType(dtype, ())
    return scalar_type(name)
# Plural constructors, e.g. ``iscalars('x', 'y')`` or ``iscalars(2)``.
scalars, fscalars, dscalars, iscalars, lscalars = _multi(
    scalar, fscalar, dscalar, iscalar, lscalar)

# Groupings of the scalar types by kind, plus explicit aliases.
int_types = bscalar, wscalar, iscalar, lscalar
float_types = fscalar, dscalar
complex_types = cscalar, zscalar
int_scalar_types = int_types
float_scalar_types = float_types
complex_scalar_types = complex_types

# 1-d (vector) TensorTypes, one per supported dtype.
cvector = TensorType('complex64', (False, ))
zvector = TensorType('complex128', (False, ))
fvector = TensorType('float32', (False, ))
dvector = TensorType('float64', (False, ))
bvector = TensorType('int8', (False,))
wvector = TensorType('int16', (False,))
ivector = TensorType('int32', (False, ))
lvector = TensorType('int64', (False, ))
def vector(name=None, dtype=None):
    """Return a symbolic vector (1-d) variable.

    Parameters
    ----------
    dtype: numeric
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.

    """
    if dtype is None:
        dtype = config.floatX
    # Use a dedicated local name: the previous `type` shadowed the builtin.
    vector_type = TensorType(dtype, (False, ))
    return vector_type(name)
# Plural constructors, e.g. ``ivectors('x', 'y')`` or ``ivectors(2)``.
vectors, fvectors, dvectors, ivectors, lvectors = _multi(
    vector, fvector, dvector, ivector, lvector)

int_vector_types = bvector, wvector, ivector, lvector
float_vector_types = fvector, dvector
complex_vector_types = cvector, zvector

# 2-d (matrix) TensorTypes, one per supported dtype.
cmatrix = TensorType('complex64', (False, False))
zmatrix = TensorType('complex128', (False, False))
fmatrix = TensorType('float32', (False, False))
dmatrix = TensorType('float64', (False, False))
bmatrix = TensorType('int8', (False, False))
wmatrix = TensorType('int16', (False, False))
imatrix = TensorType('int32', (False, False))
lmatrix = TensorType('int64', (False, False))
def matrix(name=None, dtype=None):
    """Return a symbolic matrix (2-d) variable.

    Parameters
    ----------
    dtype: numeric
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.

    """
    if dtype is None:
        dtype = config.floatX
    # Use a dedicated local name: the previous `type` shadowed the builtin.
    matrix_type = TensorType(dtype, (False, False))
    return matrix_type(name)
# Plural constructors, e.g. ``imatrices('x', 'y')`` or ``imatrices(2)``.
matrices, fmatrices, dmatrices, imatrices, lmatrices = _multi(
    matrix, fmatrix, dmatrix, imatrix, lmatrix)

int_matrix_types = bmatrix, wmatrix, imatrix, lmatrix
float_matrix_types = fmatrix, dmatrix
complex_matrix_types = cmatrix, zmatrix

# "Row" TensorTypes: 2-d with a broadcastable first dimension.
crow = TensorType('complex64', (True, False))
zrow = TensorType('complex128', (True, False))
frow = TensorType('float32', (True, False))
drow = TensorType('float64', (True, False))
brow = TensorType('int8', (True, False))
wrow = TensorType('int16', (True, False))
irow = TensorType('int32', (True, False))
lrow = TensorType('int64', (True, False))
def row(name=None, dtype=None):
    """Return a symbolic row variable (ndim=2, broadcastable=[True,False]).

    Parameters
    ----------
    dtype: numeric type
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.

    """
    if dtype is None:
        dtype = config.floatX
    # Use a dedicated local name: the previous `type` shadowed the builtin.
    row_type = TensorType(dtype, (True, False))
    return row_type(name)
# Plural row constructors.
rows, frows, drows, irows, lrows = _multi(row, frow, drow, irow, lrow)

# "Column" TensorTypes: 2-d with a broadcastable second dimension.
ccol = TensorType('complex64', (False, True))
zcol = TensorType('complex128', (False, True))
fcol = TensorType('float32', (False, True))
dcol = TensorType('float64', (False, True))
bcol = TensorType('int8', (False, True))
wcol = TensorType('int16', (False, True))
icol = TensorType('int32', (False, True))
lcol = TensorType('int64', (False, True))
def col(name=None, dtype=None):
    """Return a symbolic column variable (ndim=2, broadcastable=[False,True]).

    Parameters
    ----------
    dtype : numeric
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.

    """
    if dtype is None:
        dtype = config.floatX
    # Use a dedicated local name: the previous `type` shadowed the builtin.
    col_type = TensorType(dtype, (False, True))
    return col_type(name)
# Plural column constructors.
cols, fcols, dcols, icols, lcols = _multi(col, fcol, dcol, icol, lcol)

# 3-d TensorTypes, one per supported dtype.
ctensor3 = TensorType('complex64', ((False,) * 3))
ztensor3 = TensorType('complex128', ((False,) * 3))
ftensor3 = TensorType('float32', ((False,) * 3))
dtensor3 = TensorType('float64', ((False,) * 3))
btensor3 = TensorType('int8', ((False,) * 3))
wtensor3 = TensorType('int16', ((False,) * 3))
itensor3 = TensorType('int32', ((False,) * 3))
ltensor3 = TensorType('int64', ((False,) * 3))
def tensor3(name=None, dtype=None):
    """Return a symbolic 3-D variable.

    Parameters
    ----------
    dtype: numeric type
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.

    """
    if dtype is None:
        dtype = config.floatX
    # Use a dedicated local name: the previous `type` shadowed the builtin.
    tensor3_type = TensorType(dtype, (False, False, False))
    return tensor3_type(name)
# Plural 3-d constructors.
tensor3s, ftensor3s, dtensor3s, itensor3s, ltensor3s = _multi(
    tensor3, ftensor3, dtensor3, itensor3, ltensor3)

# 4-d TensorTypes, one per supported dtype.
ctensor4 = TensorType('complex64', ((False,) * 4))
ztensor4 = TensorType('complex128', ((False,) * 4))
ftensor4 = TensorType('float32', ((False,) * 4))
dtensor4 = TensorType('float64', ((False,) * 4))
btensor4 = TensorType('int8', ((False,) * 4))
wtensor4 = TensorType('int16', ((False,) * 4))
itensor4 = TensorType('int32', ((False,) * 4))
ltensor4 = TensorType('int64', ((False,) * 4))
def tensor4(name=None, dtype=None):
    """Return a symbolic 4-D variable.

    Parameters
    ----------
    dtype: numeric type
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.

    """
    if dtype is None:
        dtype = config.floatX
    # Use a dedicated local name: the previous `type` shadowed the builtin.
    tensor4_type = TensorType(dtype, (False, False, False, False))
    return tensor4_type(name)
# Plural 4-d constructors.
tensor4s, ftensor4s, dtensor4s, itensor4s, ltensor4s = _multi(
    tensor4, ftensor4, dtensor4, itensor4, ltensor4)

# Historical alias.
Tensor = TensorType

# This bizarre push-import avoids a circular dependency.
elemwise.as_tensor_variable = as_tensor_variable
elemwise.TensorType = TensorType
elemwise.TensorVariable = TensorVariable
elemwise.TensorConstant = TensorConstant
#########################
# Utilities
#########################
def _scal_elemwise_with_nfunc(nfunc, nin, nout):
    """
    Replace a symbol definition with an elementwise version of the
    corresponding scalar Op.  If it is not None, the nfunc argument
    should be a string such that getattr(numpy, nfunc) implements
    a vectorized version of the elemwise operation. nin is the number
    of inputs expected by that function, and nout is the number of
    **destination** inputs it takes. That is, the function should
    take nin+nout inputs. nout == 0 means that the numpy function
    does not take a numpy array argument to put its result in.
    """
    def construct(symbol):
        symbolname = symbol.__name__
        inplace = symbolname.endswith('_inplace')
        msg = "inplace" if inplace else "no_inplace"
        n = "Elemwise{%s,%s}" % (symbolname, msg)

        nfunc_spec = nfunc and (nfunc, nin, nout)
        if inplace:
            # Derive the inplace scalar op from its out-of-place sibling.
            base_op = getattr(scal, symbolname[:-len('_inplace')])
            inplace_op = base_op.__class__(scal.transfer_type(0))
            rval = elemwise.Elemwise(inplace_op, {0: 0}, name=n,
                                     nfunc_spec=nfunc_spec)
        else:
            rval = elemwise.Elemwise(getattr(scal, symbolname), name=n,
                                     nfunc_spec=nfunc_spec)

        if getattr(symbol, '__doc__', False):
            rval.__doc__ = symbol.__doc__ + '\n' + rval.__doc__

        # for the meaning of this see the ./epydoc script
        # it makes epydoc display rval as if it were a function, not an object
        rval.__epydoc_asRoutine = symbol
        rval.__module__ = 'tensor'

        pprint.assign(rval, printing.FunctionPrinter(symbolname))

        return rval
    return construct
_scal_elemwise = _scal_elemwise_with_nfunc(None, None, None)
def _pack(x):
    """
    Return `x` as a list: its elements if it is iterable, otherwise `x`
    wrapped in a one-element list.
    """
    try:
        items = list(x)
    except TypeError:
        items = [x]
    return items
#########################
# Casting Operations
#########################
class TensorFromScalar(Op):
    """Op wrapping a theano scalar (`scal.Scalar`) as a 0-d TensorType."""

    __props__ = ()

    def make_node(self, s):
        # `s` must be a theano scalar; the output is a 0-d tensor of the
        # same dtype.
        assert isinstance(s.type, scal.Scalar)
        return Apply(self,
                     [s],
                     [tensor(dtype=s.type.dtype,
                             broadcastable=())])

    def perform(self, node, inp, out_):
        s, = inp
        out, = out_
        out[0] = numpy.asarray(s)

    def infer_shape(self, node, in_shapes):
        # The output is 0-d, so its shape is the empty tuple.
        return [()]

    def grad(self, inp, grads):
        s, = inp
        dt, = grads
        if s.type.dtype in float_dtypes:
            assert dt.type.dtype in float_dtypes
            return [scalar_from_tensor(dt)]

        # If the input dtype is an integer, then so is the output dtype,
        # and the "zero" gradient can be represented in that int dtype.
        # Currently, theano.grad insists that the dtype of the returned
        # gradient has a float dtype, so we use floatX.
        if s.type.dtype in discrete_dtypes:
            return [s.zeros_like().astype(theano.config.floatX)]

        raise NotImplementedError("grad not implemented for complex dtypes")


# Singleton instance used throughout the module.
tensor_from_scalar = TensorFromScalar()
class ScalarFromTensor(Op):
    """Op extracting the value of a 0-d TensorType as a theano scalar."""

    __props__ = ()

    def make_node(self, t):
        # `t` must be a 0-d tensor (empty broadcastable pattern).
        assert isinstance(t.type, TensorType)
        assert t.type.broadcastable == ()
        return Apply(self,
                     [t],
                     [scal.get_scalar_type(dtype=t.type.dtype).make_variable()]
                     )

    def perform(self, node, inp, out_):
        s, = inp
        out, = out_
        out[0] = s.flatten()[0]

    def infer_shape(self, node, in_shapes):
        return [()]

    def grad(self, inp, grads):
        s, = inp
        dt, = grads
        return [tensor_from_scalar(dt)]

    def R_op(self, inputs, eval_points):
        # The op is a simple view/copy, so the R-operator just applies it
        # to the evaluation points.
        if None in eval_points:
            return [None]
        return self.make_node(*eval_points).outputs

    def c_code(self, node, name, inputs, outputs, sub):
        x, = inputs
        z, = outputs
        fail = sub['fail']
        return """
        %(z)s = ((dtype_%(x)s*)(PyArray_DATA(%(x)s)))[0];
        """ % locals()

    def c_code_cache_version(self):
        return (1,)


# Singleton instance used throughout the module.
scalar_from_tensor = ScalarFromTensor()
# to be removed as we get the epydoc routine-documenting thing going
# -JB 20080924
def _conversion(real_value, name):
    """Mark `real_value` as a casting Op: set its module, tag it for the
    op list, and register a function-style pretty-printer under `name`."""
    real_value.__module__ = 'tensor.basic'
    __oplist_tag(real_value, 'casting')
    pprint.assign(real_value, printing.FunctionPrinter(name))
    return real_value
# These _convert_to_<type> functions have leading underscores to indicate
# that they should not be called directly. They do not perform sanity
# checks about what types you are casting to what. That logic is
# implemented by the `cast()` function below.
_convert_to_int8 = _conversion(
    elemwise.Elemwise(scal.convert_to_int8), 'int8')
"""Cast to 8-bit integer"""

_convert_to_int16 = _conversion(
    elemwise.Elemwise(scal.convert_to_int16), 'int16')
"""Cast to 16-bit integer"""

_convert_to_int32 = _conversion(
    elemwise.Elemwise(scal.convert_to_int32), 'int32')
"""Cast to 32-bit integer"""

_convert_to_int64 = _conversion(
    elemwise.Elemwise(scal.convert_to_int64), 'int64')
"""Cast to 64-bit integer"""

_convert_to_uint8 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint8), 'uint8')
"""Cast to unsigned 8-bit integer"""

_convert_to_uint16 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint16), 'uint16')
"""Cast to unsigned 16-bit integer"""

_convert_to_uint32 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint32), 'uint32')
"""Cast to unsigned 32-bit integer"""

_convert_to_uint64 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint64), 'uint64')
"""Cast to unsigned 64-bit integer"""

_convert_to_float16 = _conversion(
    elemwise.Elemwise(scal.convert_to_float16), 'float16')
"""Cast to half-precision floating point"""

_convert_to_float32 = _conversion(
    elemwise.Elemwise(scal.convert_to_float32), 'float32')
"""Cast to single-precision floating point"""

_convert_to_float64 = _conversion(
    elemwise.Elemwise(scal.convert_to_float64), 'float64')
"""Cast to double-precision floating point"""

_convert_to_complex64 = _conversion(
    elemwise.Elemwise(scal.convert_to_complex64), 'complex64')
"""Cast to single-precision complex"""

_convert_to_complex128 = _conversion(
    elemwise.Elemwise(scal.convert_to_complex128), 'complex128')
"""Cast to double-precision complex"""

# Dispatch table used by `cast` below, keyed by target dtype string.
_cast_mapping = {
    'int8': _convert_to_int8,
    'int16': _convert_to_int16,
    'int32': _convert_to_int32,
    'int64': _convert_to_int64,
    'uint8': _convert_to_uint8,
    'uint16': _convert_to_uint16,
    'uint32': _convert_to_uint32,
    'uint64': _convert_to_uint64,
    'float16': _convert_to_float16,
    'float32': _convert_to_float32,
    'float64': _convert_to_float64,
    'complex64': _convert_to_complex64,
    'complex128': _convert_to_complex128}
@constructor
def cast(x, dtype):
    """Symbolically cast `x` to a Tensor of type `dtype`."""
    if dtype == 'floatX':
        dtype = config.floatX

    _x = as_tensor_variable(x)
    source_dtype = _x.type.dtype
    if source_dtype == dtype:
        # Already the requested dtype: nothing to do.
        return _x
    if source_dtype.startswith('complex') and not dtype.startswith('complex'):
        raise TypeError((
            'Casting from complex to real is ambiguous: consider real(), '
            'imag(), angle() or abs()'))
    return _cast_mapping[dtype](x)
##########################
# Unary Operations
##########################
class MaxAndArgmax(Op):
"""
Calculate the max and argmax over a given axis or over all axes.
"""
nin = 2 # tensor, axis
nout = 2 # max val, max idx
E_axis = 'invalid axis'
__props__ = ()
    def make_node(self, x, axis=None):
        # Normalize `axis` into either NoneConst (reduce over all axes) or
        # a constant int vector of sorted, non-negative axis indices.
        x = _as_tensor_variable(x)
        if isinstance(axis, (integer_types, numpy.integer)):
            axis = [int(axis)]
        elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
            # 0-d ndarray: treat as a single axis index.
            axis = [int(axis)]
        elif isinstance(axis, (tuple, list, numpy.ndarray)):
            axis = [int(a) for a in axis]
            if axis == list(range(x.type.ndim)):
                # Reducing over every axis is the same as axis=None.
                axis = None
        elif isinstance(axis, Variable):
            if NoneConst.equals(axis):
                axis = None
            elif not isinstance(axis, TensorConstant):
                raise TypeError(
                    "MaxAndArgmax needs a constant axis. Got %s" % axis)
            else:
                assert (axis.dtype.startswith("int") or
                        axis.dtype.startswith("uint"))
                if isinstance(axis.data, (integer_types, numpy.integer)) or \
                   (isinstance(axis.data, numpy.ndarray) and
                        axis.data.ndim == 0):
                    axis = [int(axis.data)]
                elif isinstance(axis.data, (list, numpy.ndarray)):
                    axis = [int(i) for i in axis.data]

        # Make axis entries non-negative, and sort them
        if isinstance(axis, list):
            for idx in xrange(len(axis)):
                if axis[idx] < 0:
                    axis[idx] += x.type.ndim
            axis.sort()

        # Verify that axes are valid
        all_axes = []
        if isinstance(axis, list):
            for ax in axis:
                if ax < 0 or ax >= x.type.ndim:
                    raise ValueError(
                        'Invalid axis: %s (the number of dimensions of the '
                        'input is: %s)' % (ax, x.type.ndim))
                if ax not in all_axes:
                    all_axes.append(ax)
        else:
            all_axes = list(range(x.ndim))

        if axis is None or axis == list(range(x.type.ndim)):
            axis = NoneConst.clone()
        else:
            axis = _as_tensor_variable(all_axes)
            assert axis.ndim == 1
        inputs = [x, axis]

        # We keep the original broadcastable flags for dimensions on which
        # we do not perform the max / argmax.
        broadcastable = [b for i, b in enumerate(x.type.broadcastable)
                         if i not in all_axes]
        outputs = [tensor(x.type.dtype, broadcastable, name='max'),
                   tensor('int64', broadcastable, name='argmax')]
        return Apply(self, inputs, outputs)
def perform(self, node, inp, outs):
x, axes = inp
max, max_idx = outs
if axes is None:
axes = tuple(range(x.ndim))
else:
axes = tuple(int(ax) for ax in axes)
max[0] = theano._asarray(numpy.max(x, axes),
dtype=node.outputs[0].dtype)
# Numpy does not support multiple axes for argmax
# Work around
keep_axes = numpy.array([i for i in range(x.ndim) if i not in axes],
dtype='int64')
# Not-reduced axes in front
transposed_x = numpy.transpose(x, numpy.concatenate((keep_axes, axes)))
reshaped_x = transposed_x.reshape(transposed_x.shape[:len(keep_axes)] +
(-1,))
max_idx[0] = theano._asarray(numpy.argmax(reshaped_x, axis=-1),
dtype='int64')
def c_code(self, node, name, inp, out, sub):
x, axis = inp
max, argmax = out
fail = sub["fail"]
if NoneConst.equals(node.inputs[1]):
axis_code = "axis = NPY_MAXDIMS;"
else:
assert node.inputs[1].ndim == 1
# Fall back to perform() if there are multiple axes
if len(node.inputs[1].data) > 1:
raise NotImplementedError()
axis_code = """
axis = ((dtype_%(axis)s*)PyArray_DATA(%(axis)s))[0];
if(axis > PyArray_NDIM(%(x)s)-1 || axis < -PyArray_NDIM(%(x)s)){
PyErr_SetString(PyExc_ValueError,
"MaxAndArgmax, bad axis argument");
%(fail)s
}
""" % locals()
ret = """
int axis;
Py_CLEAR(%(max)s);
Py_CLEAR(%(argmax)s);//todo pass them as out parameter.
%(axis_code)s
%(max)s = (PyArrayObject*)PyArray_Max(%(x)s, axis, NULL);
if(%(max)s == NULL){
%(fail)s;
}
if(!PyArray_CheckExact(%(max)s)){
%(max)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(max)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);
if(%(max)s == NULL){
%(fail)s;
}
}
%(argmax)s = (PyArrayObject*)PyArray_ArgMax(%(x)s, axis, NULL);
if(%(argmax)s == NULL){
Py_CLEAR(%(max)s);
%(fail)s;
}
if(!PyArray_CheckExact(%(argmax)s)){
%(argmax)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(argmax)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);
if(%(argmax)s == NULL){
%(fail)s;
}
}
if(PyArray_TYPE(%(argmax)s) != NPY_INT64){
PyObject * tmp = PyArray_Cast(%(argmax)s, NPY_INT64);
if (NULL == tmp){
%(fail)s;
}
Py_DECREF(%(argmax)s);
%(argmax)s = (PyArrayObject*)tmp;
}
"""
return ret % locals()
def c_code_cache_version(self):
return (4,)
def infer_shape(self, node, shapes):
ishape, axis_shape = shapes
axis = node.inputs[1]
if axis.data is None:
return [(), ()]
rval = tuple([ishape[i] for (i, b) in enumerate(
node.inputs[0].type.broadcastable) if i not in axis.data])
return [rval, rval]
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return [None, None]
if not isinstance(inputs[1], theano.Constant):
raise ValueError(('R_op supported for arg_max only for '
'constant axis!'))
if inputs[1].data > 1:
raise ValueError(('R_op supported for arg_max only when '
' axis is 0 or 1'))
if inputs[0].ndim != 2:
raise ValueError(('R_op supported for arg_max only when '
' input is a matrix'))
max_vals, max_pos = self.make_node(*inputs).outputs
if inputs[1].data == 0:
return [eval_points[0][max_pos,
arange(eval_points[0].shape[1])], None]
else:
return [eval_points[0][arange(eval_points[0].shape[0]),
max_pos], None]
def grad(self, inp, grads):
# The strict sense mathematical gradient of the maximum function is
# not calculated here for it is not defined at every point where some
# coordinates are identical. However, since the latter set has null
# Lebesgue measure, the result may be interpreted as weak gradient.
# @note: This function should work correctly for L{vector}s.
# (x, y), (gz, gw)
# gz*dz/dx + gw*dw/dx, gz*dz/dy + gw*dw/dy
# gMax * dMax/dx + gArgMax * dArgMax/dx,
# gMax * dMax/daxis + gArgMax * dArgMax/daxis
# g_max has one less dimension than x, so you need to complete
# g_max to x's shape when axis=0 the broadcasting mechanism
# does it automatically
x, axis = inp
g_max, g_max_idx = grads
g_max_disconnected = isinstance(g_max.type, DisconnectedType)
g_max_idx_disconnected = isinstance(g_max_idx.type, DisconnectedType)
# if the op is totally disconnected, so are its inputs
if g_max_disconnected and g_max_idx_disconnected:
return [DisconnectedType()(), DisconnectedType()()]
axis_grad = grad_undefined(
self, 1, axis,
"argmax is not defined for non-integer axes so"
" argmax(x, axis+eps) is undefined")
# if the max is disconnected but the argmax is not,
# the gradient on its inputs is zero
if g_max_disconnected:
return [x.zeros_like(), axis_grad]
if NoneConst.equals(axis):
axis_ = list(range(x.ndim))
else:
axis_ = axis
xmax = max(x, axis_)
# Raise the g_max and xmax to the same number of dim as the input.
pattern = []
out_dim = 0
if NoneConst.equals(axis):
# We are taking the max/argmax over all dimensions.
axis = None
for i in xrange(x.ndim):
if axis is None or i in axis.data:
pattern.append('x')
else:
pattern.append(out_dim)
out_dim += 1
g_max_pad = DimShuffle(g_max.broadcastable, pattern)(g_max)
xmax_pad = DimShuffle(xmax.broadcastable, pattern)(xmax)
# Set the grad to the correct position.
g_x = eq(xmax_pad, x) * g_max_pad
return g_x, axis_grad
# Module-level singleton instance used by the max/argmax constructors below.
_max_and_argmax = MaxAndArgmax()
def makeKeepDims(x, y, axis):
    """
    Reintroduce in `y`, with length one, the axes of `x` that a prior
    reduction of `x` removed, so that the resulting tensor broadcasts
    correctly against the original tensor `x`.
    """
    x = as_tensor_variable(x)
    y = as_tensor_variable(y)
    # Normalize `axis` to a list of plain ints.
    if axis is None:
        axis = list(range(x.type.ndim))
    elif isinstance(axis, (integer_types, numpy.integer)):
        axis = [axis]
    elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
        axis = [int(axis)]
    else:
        axis = [int(a) for a in axis]
    # Wrap negative axes; only constant integer axes are supported.
    wrapped = []
    for ax in axis:
        if not isinstance(ax, integer_types):
            raise ValueError(
                "keepdims option can be used only with constant axis")
        wrapped.append(ax + x.type.ndim if ax < 0 else ax)
    # Build a dimshuffle pattern that inserts a broadcastable ('x') entry
    # at every reduced position and keeps the others in order.
    pattern = []
    kept = 0
    for dim, _ in enumerate(x.type.broadcastable):
        if dim in wrapped:
            pattern.append('x')
        else:
            pattern.append(kept)
            kept += 1
    return DimShuffle(y.type.broadcastable, pattern)(y)
@constructor
def max_and_argmax(a, axis=None, keepdims=False):
    """
    Return both the maximum elements and their indices along the given
    axis. When `axis` is None (the default), the max is taken over the
    flattened tensor.
    Parameters
    ----------
    keepdims : bool
        If True, the reduced axes are kept in the result with size one,
        so the outputs broadcast correctly against the original tensor.
    """
    max_out, arg_out = _max_and_argmax(a, axis)
    if keepdims:
        max_out = makeKeepDims(a, max_out, axis)
        arg_out = makeKeepDims(a, arg_out, axis)
    return [max_out, arg_out]
@constructor
def max(x, axis=None, keepdims=False):
    """
    Return the maximum elements along the given axis. When `axis` is
    None (the default), the max is taken over the flattened tensor.
    Parameters
    ----------
    keepdims : bool
        If True, the reduced axes are kept in the result with size one,
        so the result broadcasts correctly against the original tensor.
    Notes
    -----
    As with numpy, reducing a dimension of size 0 is an error.
    """
    # Two possible implementations: MaxAndArgmax (supports grad and R_op)
    # or CAReduce (faster, but optimizations replace MaxAndArgmax[0] with
    # CAReduce at compile time anyway). We therefore prefer MaxAndArgmax
    # for its feature coverage; it does not accept every axis argument,
    # so fall back to CAReduce when it refuses.
    try:
        result = max_and_argmax(x, axis)[0]
    except Exception:
        result = CAReduce(scal.maximum, axis)(x)
    if keepdims:
        result = makeKeepDims(x, result, axis)
    return result
@constructor
def argmax(x, axis=None, keepdims=False):
    """
    Return the indices of the maximum elements along the given axis.
    When `axis` is None (the default), the argmax is taken over the
    flattened tensor.
    Parameters
    ----------
    keepdims : bool
        If True, the reduced axes are kept in the result with size one,
        so the result broadcasts correctly against the original tensor.
    """
    # MaxAndArgmax.perform() computes both outputs, which wastes a pass
    # over the data in pure Python; the C implementation avoids this.
    result = max_and_argmax(x, axis)[1]
    if keepdims:
        result = makeKeepDims(x, result, axis)
    return result
@constructor
def min(x, axis=None, keepdims=False):
    """
    Return the minimum elements along the given axis. When `axis` is
    None (the default), the min is taken over the flattened tensor.
    Parameters
    ----------
    keepdims : bool
        If True, the reduced axes are kept in the result with size one,
        so the result broadcasts correctly against the original tensor.
    """
    x = as_tensor_variable(x)
    dtype_name = str(x.dtype)
    if dtype_name.startswith('float') or dtype_name in int_dtypes:
        # min(x) == -max(-x) for float and signed integer dtypes.
        return -max(-x, axis=axis, keepdims=keepdims)
    # Unsigned integer and complex dtypes would need special care.
    raise NotImplementedError()
@constructor
def argmin(x, axis=None, keepdims=False):
    """
    Return the indices of the minimum elements along the given axis.
    When `axis` is None (the default), the argmin is taken over the
    flattened tensor.
    Parameters
    ----------
    keepdims : bool
        If True, the reduced axes are kept in the result with size one,
        so the result broadcasts correctly against the original tensor.
    """
    x = as_tensor_variable(x)
    dtype_name = str(x.dtype)
    if dtype_name.startswith('float') or dtype_name in int_dtypes:
        # argmin(x) == argmax(-x) for float and signed integer dtypes.
        return argmax(-x, axis=axis, keepdims=keepdims)
    # Unsigned integer and complex dtypes would need special care.
    raise NotImplementedError()
@constructor
def smallest(*args):
    """
    Return the [elementwise] smallest of a variable number of arguments,
    like python's min.
    """
    if len(args) == 2:
        left, right = args
        return switch(left < right, left, right)
    # General case: stack the arguments and reduce along the new axis.
    return min(stack(args), axis=0)
@constructor
def largest(*args):
    """
    Return the [elementwise] largest of a variable number of arguments,
    like python's max.
    """
    if len(args) == 2:
        left, right = args
        return switch(left > right, left, right)
    # General case: stack the arguments and reduce along the new axis.
    return max(stack(args), axis=0)
##########################
# Comparison
##########################
# The bodies below are intentionally empty: the `_scal_elemwise` decorator
# (defined elsewhere in this file) presumably builds the elementwise Op from
# the function's name and docstring -- TODO confirm against the decorator.
@_scal_elemwise
def lt(a, b):
    """a < b"""
@_scal_elemwise
def gt(a, b):
    """a > b"""
@_scal_elemwise
def le(a, b):
    """a <= b"""
@_scal_elemwise
def ge(a, b):
    """a >= b"""
@_scal_elemwise
def eq(a, b):
    """a == b"""
@_scal_elemwise
def neq(a, b):
    """a != b"""
@_scal_elemwise
def isnan(a):
    """isnan(a)"""
@_scal_elemwise
def isinf(a):
    """isinf(a)"""
def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
    """
    Symbolic counterpart of Numpy's ``allclose``.
    Elements are compared with
    ``absolute(a - b) <= (atol + rtol * absolute(b))``.
    Parameters
    ----------
    a : tensor
        Input to compare.
    b : tensor
        Input to compare.
    rtol : float
        The relative tolerance parameter.
    atol : float
        The absolute tolerance parameter.
    equal_nan : bool
        Whether to consider nan's in the same place to be close.
    Returns
    -------
    bool
        A boolean value (of type int8, as returned by the tensor
        elementwise `all`) telling whether every element of `a` and `b`
        lies within the tolerance range above.
    Notes
    -----
    Not a symmetric equation. See Numpy's documentation.
    """
    elementwise_close = isclose(a, b, rtol, atol, equal_nan)
    return all(elementwise_close)
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
    """
    Implements Numpy's ``isclose`` on tensors.
    The tolerance values are positive, typically very small numbers. The
    relative difference (`rtol` * abs(`b`)) and the absolute difference
    `atol` are added together to compare against the absolute difference
    between `a` and `b`.
    ``absolute(a - b) <= (atol + rtol * absolute(b))``
    Parameters
    ----------
    a : tensor
        Input to compare.
    b : tensor
        Input to compare.
    rtol : float
        The relative tolerance parameter.
    atol : float
        The absolute tolerance parameter.
    equal_nan : bool
        Whether to consider nan's in the same place to be close
    Returns
    -------
    int8
        A boolean (int8) array where two arrays are element-wise equal
        within a tolerance.
    Notes
    -----
    Not a symmetric equation. See Numpy's documentation.
    Examples
    --------
    >>> import theano
    >>> import numpy as np
    >>> a = theano._asarray([1e10, 1e-7], dtype="float64")
    >>> b = theano._asarray([1.00001e10, 1e-8], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 0], dtype=int8)
    >>> a = theano._asarray([1e10, 1e-8], dtype="float64")
    >>> b = theano._asarray([1.00001e10, 1e-9], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 1], dtype=int8)
    >>> a = theano._asarray([1e10, 1e-8], dtype="float64")
    >>> b = theano._asarray([1.0001e10, 1e-9], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([0, 1], dtype=int8)
    >>> a = theano._asarray([1.0, np.nan], dtype="float64")
    >>> b = theano._asarray([1.0, np.nan], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 0], dtype=int8)
    >>> a = theano._asarray([1.0, np.nan], dtype="float64")
    >>> b = theano._asarray([1.0, np.nan], dtype="float64")
    >>> theano.tensor.isclose(a, b, equal_nan=True).eval()
    array([1, 1], dtype=int8)
    >>> a = theano._asarray([1.0, np.inf], dtype="float64")
    >>> b = theano._asarray([1.0, -np.inf], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 0], dtype=int8)
    >>> a = theano._asarray([1.0, np.inf], dtype="float64")
    >>> b = theano._asarray([1.0, np.inf], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 1], dtype=int8)
    """
    # close will be an int8 array of 1 where within tolerance
    # and 0 where not within tolerance or there was a nan or inf value.
    diff = abs(a - b)
    tolerance = atol + rtol * abs(b)
    close_prelim = le(diff, tolerance)
    a_nan = isnan(a)
    b_nan = isnan(b)
    nans = bitwise_or(a_nan, b_nan)
    a_inf = isinf(a)
    b_inf = isinf(b)
    infs = bitwise_or(a_inf, b_inf)
    nans_or_infs = bitwise_or(nans, infs)
    # close is now an array of 0's except where elements are not nan or inf
    # and are within the tolerance.
    close = bitwise_and(close_prelim, bitwise_not(nans_or_infs))
    # deal with signed inf values. this will make an array inf_eq of 0's
    # except where inf values have the same sign.
    both_infs = bitwise_and(a_inf, b_inf)
    inf_signs_eq = eq(a_inf * sgn(a), b_inf * sgn(b))
    inf_eq = bitwise_and(both_infs, inf_signs_eq)
    # now create the potential result combining close and inf_eq
    close_with_infs = bitwise_or(close, inf_eq)
    # deal with comparing nan's.
    if equal_nan:
        both_nans = bitwise_and(a_nan, b_nan)
        return bitwise_or(close_with_infs, both_nans)
    # otherwise nan's aren't considered close.
    else:
        return close_with_infs
##########################
# Condition
##########################
@_scal_elemwise
def switch(cond, ift, iff):
    """if cond then ift else iff"""
where = switch  # numpy-compatible name for the same op
##########################
# Bit-wise
##########################
# Elementwise bitwise ops; each is also exported under its numpy name.
@_scal_elemwise
def and_(a, b):
    """bitwise a & b"""
bitwise_and = and_  # numpy name for it
@_scal_elemwise
def or_(a, b):
    """bitwise a | b"""
bitwise_or = or_  # numpy name for it
@_scal_elemwise
def xor(a, b):
    """bitwise a ^ b"""
bitwise_xor = xor  # numpy name for it
@_scal_elemwise
def invert(a):
    """bitwise ~a"""
bitwise_not = invert  # numpy alias for it
##########################
# Math
##########################
@_scal_elemwise
def abs_(a):
    """|`a`|
    TensorVariable overloads the `TensorVariable.__abs__` operator so that
    this function is called when you type abs(a).
    """
# Pretty-print abs as |x| instead of a function call.
pprint.assign(abs_, printing.PatternPrinter(('|%(0)s|', -1000)))
# Elementwise exponential/logarithm/rounding-related ops; bodies are empty
# because the `_scal_elemwise` decorator builds the actual Op.
@_scal_elemwise
def exp(a):
    """e^`a`"""
@_scal_elemwise
def exp2(a):
    """2^`a`"""
@_scal_elemwise
def expm1(a):
    """e^`a` - 1"""
@_scal_elemwise
def neg(a):
    """-a"""
# numpy.reciprocal does integer division on integer inputs
# (which is not very interesting)
@_scal_elemwise
def inv(a):
    """1.0/a"""
@_scal_elemwise
def log(a):
    """base e logarithm of a"""
@_scal_elemwise
def log2(a):
    """base 2 logarithm of a"""
@_scal_elemwise
def log10(a):
    """base 10 logarithm of a"""
@_scal_elemwise
def log1p(a):
    """log(1+a)"""
@_scal_elemwise
def sgn(a):
    """sign of a"""
@_scal_elemwise
def ceil(a):
    """ceiling of a"""
@_scal_elemwise
def floor(a):
    """floor of a"""
@_scal_elemwise
def trunc(a):
    """trunc of a"""
@constructor
def iround(a, mode="half_away_from_zero"):
    """Round `a` with the given mode, then cast the result to int64."""
    rounded = round(a, mode)
    return cast(rounded, 'int64')
@constructor
def round(a, mode="half_away_from_zero"):
    """
    Round `a` elementwise.
    Parameters
    ----------
    a : tensor
        Input to round.
    mode : str
        One of "half_away_from_zero" (default) or "half_to_even".
    Raises
    ------
    ValueError
        If `mode` is not one of the supported modes. (ValueError is a
        subclass of Exception, so existing broad handlers still work.)
    """
    if mode == "half_away_from_zero":
        return round_half_away_from_zero(a)
    elif mode == "half_to_even":
        return round_half_to_even(a)
    else:
        # Raise the precise exception type for a bad argument value
        # instead of a bare Exception.
        raise ValueError("round mode %s is not implemented." % mode)
# Elementwise rounding, power and degree/radian conversion ops.
@_scal_elemwise
def round_half_to_even(a):
    """round_half_to_even(a)"""
@_scal_elemwise
def round_half_away_from_zero(a):
    """round_half_away_from_zero(a)"""
@_scal_elemwise
def sqr(a):
    """square of a"""
# alias to sqr, included to maintain similarity with numpy interface
square = sqr
@_scal_elemwise
def sqrt(a):
    """square root of a"""
@_scal_elemwise
def deg2rad(a):
    """convert degree a to radian"""
@_scal_elemwise
def rad2deg(a):
    """convert radian a to degree"""
# Elementwise trigonometric and hyperbolic ops.
@_scal_elemwise
def cos(a):
    """cosine of a"""
@_scal_elemwise
def arccos(a):
    """arccosine of a"""
@_scal_elemwise
def sin(a):
    """sine of a"""
@_scal_elemwise
def arcsin(a):
    """arcsine of a"""
@_scal_elemwise
def tan(a):
    """tangent of a"""
@_scal_elemwise
def arctan(a):
    """arctangent of a"""
@_scal_elemwise
def arctan2(a, b):
    """arctangent of a / b"""
@_scal_elemwise
def cosh(a):
    """hyperbolic cosine of a"""
@_scal_elemwise
def arccosh(a):
    """hyperbolic arc cosine of a"""
@_scal_elemwise
def sinh(a):
    """hyperbolic sine of a"""
@_scal_elemwise
def arcsinh(a):
    """hyperbolic arc sine of a"""
@_scal_elemwise
def tanh(a):
    """hyperbolic tangent of a"""
@_scal_elemwise
def arctanh(a):
    """hyperbolic arc tangent of a"""
# Elementwise special functions (error function family, gamma family,
# chi-squared survival function, Bessel functions).
@_scal_elemwise
def erf(a):
    """error function"""
@_scal_elemwise
def erfc(a):
    """complementary error function"""
@_scal_elemwise
def erfcx(a):
    """scaled complementary error function"""
@_scal_elemwise
def erfinv(a):
    """inverse error function"""
@_scal_elemwise
def erfcinv(a):
    """inverse complementary error function"""
@_scal_elemwise
def gamma(a):
    """gamma function"""
@_scal_elemwise
def gammaln(a):
    """log gamma function"""
@_scal_elemwise
def psi(a):
    """derivative of log gamma function"""
@_scal_elemwise
def chi2sf(x, k):
    """chi squared survival function"""
@_scal_elemwise
def j0(a):
    """Bessel function of the 0'th kind"""
@_scal_elemwise
def j1(a):
    """Bessel function of the 1'th kind"""
# Elementwise complex-number ops; `real` and `imag` are also exposed as
# read-only properties on tensor variables via _tensor_py_operators.
@_scal_elemwise
def real(z):
    """Return real component of complex-valued tensor `z`"""
_tensor_py_operators.real = property(real)
@_scal_elemwise
def imag(z):
    """Return imaginary component of complex-valued tensor `z`"""
_tensor_py_operators.imag = property(imag)
@_scal_elemwise
def angle(z):
    """Return polar-coordinate angle of complex-valued tensor `z`"""
@_scal_elemwise  # numpy.complex cannot build tensors
def complex(real, imag):
    """Return complex-valued tensor with `real` and `imag` components"""
@_scal_elemwise
def conj(z):
    """Return the complex conjugate of `z`."""
@_scal_elemwise
def complex_from_polar(abs, angle):
    """Return complex-valued tensor from polar coordinate specification."""
##########################
# Misc
##########################
# fill, _fill_inplace = _elemwise(scal.second, 'fill',
# """fill WRITEME (elemwise)""")
@_scal_elemwise
def second(a, b):
    """Create a matrix by filling the shape of a with b"""
# `fill` is an alias used throughout this file (see ones_like/zeros_like).
fill = second
pprint.assign(fill, printing.FunctionPrinter('fill'))
@constructor
def ones_like(model, dtype=None):
    """
    Symbolic equivalent of numpy.ones_like.
    Parameters
    ----------
    model : tensor
    dtype : data-type, optional
        Defaults to `model`'s dtype.
    Returns
    -------
    tensor
        A tensor with the shape of `model`, filled with ones of `dtype`.
    """
    fill_dtype = model.type.dtype if dtype is None else dtype
    return fill(model, constant(1.0, dtype=fill_dtype))
@constructor
def zeros_like(model, dtype=None):
    """
    Symbolic equivalent of numpy.zeros_like.
    Parameters
    ----------
    model : tensor
    dtype : data-type, optional
        Defaults to `model`'s dtype.
    Returns
    -------
    tensor
        A tensor with the shape of `model`, filled with zeros of `dtype`.
    """
    fill_dtype = model.type.dtype if dtype is None else dtype
    return fill(model, constant(0.0, dtype=fill_dtype))
def zeros(shape, dtype=None):
    """
    Create a Tensor filled with zeros, closer to Numpy's syntax than ``alloc``.
    """
    if dtype is None:
        dtype = config.floatX
    # Accept a bare scalar dimension as well as a list/tuple of dims.
    dims = shape if isinstance(shape, (list, tuple, TensorVariable)) else [shape]
    return alloc(numpy.array(0, dtype=dtype), *dims)
def ones(shape, dtype=None):
    """
    Create a Tensor filled with ones, closer to Numpy's syntax than ``alloc``.
    """
    if dtype is None:
        dtype = config.floatX
    # Accept a bare scalar dimension as well as a list/tuple of dims.
    dims = shape if isinstance(shape, (list, tuple, TensorVariable)) else [shape]
    return alloc(numpy.array(1, dtype=dtype), *dims)
class Nonzero(gof.Op):
    """
    Return the indices of the elements that are non-zero.
    Returns a matrix of shape (ndim, number of nonzero elements) such that
    element (i,j) is the index in the ith dimension of the jth non-zero
    element.
    Note this is different than NumPy, which returns a tuple of arrays, one for
    each dimension of the input array.
    Parameters
    ----------
    a : array_like
        Input array.
    Returns
    -------
    matrix
        Matrix containing the indices of the non-zero elements of a.
    See Also
    --------
    nonzero_values : Return the non-zero elements of the input array
    flatnonzero : Return the indices of the non-zero elements of the
        flattened input array.
    """
    __props__ = ()
    def make_node(self, a):
        a = as_tensor_variable(a)
        if a.ndim == 0:
            raise ValueError('Nonzero only supports non-scalar arrays.')
        # Output is always an int64 matrix of shape (a.ndim, #nonzero).
        output = [TensorType(dtype='int64', broadcastable=(False, False))()]
        return gof.Apply(self, [a], output)
    def perform(self, node, inp, out_):
        a = inp[0]
        out, = out_
        result_tuple = numpy.nonzero(a)
        if len(result_tuple[0]) > 0:
            result = numpy.vstack(result_tuple)
        else:
            # No non-zero elements: build an empty (ndim, 0) matrix, since
            # len(result_tuple) == a.ndim.
            result = numpy.zeros((len(result_tuple), 0))
        out[0] = result.astype('int64')
    def grad(self, inp, grads):
        # Output is integer indices, so the gradient is undefined.
        return [grad_undefined(self, 0, inp[0])]
# Module-level singleton instance used by nonzero()/flatnonzero() below.
_nonzero = Nonzero()
def nonzero(a, return_matrix=False):
    """
    Return the indices of the non-zero elements of `a`.
    If `return_matrix` is False (default, same as NumPy), return a tuple
    of vectors: the ith element of the jth vector is the index, in the
    jth dimension, of the ith non-zero element.
    If `return_matrix` is True (same as the Theano Op), return a matrix
    of shape (ndim, number of non-zero elements) in which element (i, j)
    is the index in the ith dimension of the jth non-zero element.
    Parameters
    ----------
    a : array_like
        Input array.
    return_matrix : bool
        If True, return a symbolic matrix; if False, a tuple of vectors.
        Defaults to False.
    Returns
    -------
    tuple of vectors or matrix
    See Also
    --------
    nonzero_values : Return the non-zero elements of the input array
    flatnonzero : Return the indices of the non-zero elements of the
        flattened input array.
    """
    matrix_result = _nonzero(a)
    if return_matrix:
        return matrix_result
    # Split the matrix into one index vector per input dimension.
    if a.ndim > 0:
        return tuple(matrix_result[i] for i in xrange(a.ndim))
    return (matrix_result[0],)
def flatnonzero(a):
    """
    Return a vector of indices that are non-zero in the flattened version
    of `a`; equivalent to ``nonzero(a.flatten(), return_matrix=True)[0]``.
    Parameters
    ----------
    a : tensor
        Input tensor.
    Returns
    -------
    vector
        The indices of the elements of ``a.flatten()`` that are non-zero.
    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input array.
    nonzero_values : Return the non-zero elements of the input array
    """
    if a.ndim == 0:
        raise ValueError('Nonzero only supports non-scalar arrays.')
    flat = a.flatten()
    return nonzero(flat, return_matrix=True)[0]
def nonzero_values(a):
    """
    Return a vector of the non-zero elements contained in `a`.
    NumPy's ``a[numpy.nonzero(a)]`` idiom is *NOT* supported by Theano;
    use ``tensor.nonzero_values(a)`` or ``a.nonzero_values()`` instead.
    This is equivalent to ``a.flatten()[tensor.flatnonzero(a)]``.
    Parameters
    ----------
    a : tensor
        Input tensor.
    Returns
    -------
    vector
        The non-zero elements of `a`.
    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input array.
    flatnonzero : Return the indices of the non-zero elements of the
        flattened input array.
    """
    nonzero_idx = flatnonzero(a)
    return a.flatten()[nonzero_idx]
class Tri(gof.Op):
    """
    Op producing an (N, M) matrix with ones at and below the k-th diagonal
    and zeros elsewhere, computed with numpy.tri. See the `tri` helper.
    """
    __props__ = ("dtype",)
    def __init__(self, dtype=None):
        # Default to the configured float type when no dtype is given.
        if dtype is None:
            dtype = config.floatX
        self.dtype = dtype
    def make_node(self, N, M, k):
        N = as_tensor_variable(N)
        M = as_tensor_variable(M)
        k = as_tensor_variable(k)
        return gof.Apply(
            self,
            [N, M, k],
            [TensorType(dtype=self.dtype, broadcastable=(False, False))()])
    def perform(self, node, inp, out_):
        N, M, k = inp
        out, = out_
        out[0] = numpy.tri(N, M, k, dtype=self.dtype)
    def infer_shape(self, node, in_shapes):
        # Output shape is (N, M), i.e. the first two inputs themselves.
        out_shape = [node.inputs[0], node.inputs[1]]
        return [out_shape]
    def grad(self, inp, grads):
        # Gradient is undefined with respect to the shape/diagonal inputs.
        return [grad_undefined(self, i, inp[i]) for i in xrange(3)]
def tri(N, M=None, k=0, dtype=None):
    """
    Build an array with ones at and below the given diagonal and zeros
    elsewhere.
    Parameters
    ----------
    N : int
        Number of rows in the array.
    M : int, optional
        Number of columns; defaults to `N`.
    k : int, optional
        The sub-diagonal at and below which the array is filled.
        `k` = 0 is the main diagonal, `k` < 0 is below it, `k` > 0 above.
    dtype : dtype, optional
        Data type of the returned array; defaults to the configured
        float type.
    Returns
    -------
    Array of shape (N, M)
        Lower triangle filled with ones, zero elsewhere; in other words
        ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
    """
    if dtype is None:
        dtype = config.floatX
    M = N if M is None else M
    return Tri(dtype)(N, M, k)
def tril(m, k=0):
    """
    Lower triangle of an array.
    Return a copy of `m` with the elements above the `k`-th diagonal
    zeroed.
    Parameters
    ----------
    m : array_like, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements. `k = 0` (the default) is
        the main diagonal, `k < 0` is below it and `k > 0` is above.
    Returns
    -------
    array, shape (M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.
    See Also
    --------
    triu : Same thing, only for the upper triangle.
    """
    # Multiply by a lower-triangular mask of ones.
    mask = tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype)
    return m * mask
def triu(m, k=0):
    """
    Upper triangle of an array.
    Return a copy of `m` with the elements below the `k`-th diagonal
    zeroed. Please refer to the documentation for `tril` for further
    details.
    See Also
    --------
    tril : Lower triangle of an array.
    """
    # Complement of the strictly-lower-triangular mask.
    lower_mask = tri(m.shape[0], m.shape[1], k=k - 1, dtype=m.dtype)
    return m * (1 - lower_mask)
class Eye(gof.Op):
    """
    Op producing a 2-D matrix with ones on the k-th diagonal and zeros
    elsewhere, computed with numpy.eye. See the `eye` helper.
    """
    __props__ = ("dtype", )
    def __init__(self, dtype=None):
        # Default to the configured float type when no dtype is given.
        if dtype is None:
            dtype = config.floatX
        self.dtype = dtype
    def make_node(self, n, m, k):
        n = as_tensor_variable(n)
        m = as_tensor_variable(m)
        k = as_tensor_variable(k)
        # All three inputs must be scalars.
        assert n.ndim == 0
        assert m.ndim == 0
        assert k.ndim == 0
        return gof.Apply(
            self,
            [n, m, k],
            [TensorType(dtype=self.dtype, broadcastable=(False, False))()])
    def perform(self, node, inp, out_):
        n, m, k = inp
        out, = out_
        out[0] = numpy.eye(n, m, k, dtype=self.dtype)
    def infer_shape(self, node, in_shapes):
        # Output shape is (n, m), i.e. the first two inputs themselves.
        out_shape = [node.inputs[0], node.inputs[1]]
        return [out_shape]
    def grad(self, inp, grads):
        # Gradient is undefined with respect to the shape/diagonal inputs.
        return [grad_undefined(self, i, inp[i]) for i in xrange(3)]
def eye(n, m=None, k=0, dtype=None):
    """Return a 2-D array with ones on the diagonal and zeros elsewhere.
    Parameters
    ----------
    n : int
        Number of rows in the output.
    m : int, optional
        Number of columns in the output; if None, defaults to `n`.
    k : int, optional
        Index of the diagonal: 0 (the default) refers to the main
        diagonal, a positive value to an upper diagonal, and a negative
        value to a lower diagonal.
    dtype : data-type, optional
        Data-type of the returned array; defaults to the configured
        float type.
    Returns
    -------
    ndarray of shape (n, m)
        All elements equal to zero, except for the `k`-th diagonal,
        whose values are equal to one.
    """
    if dtype is None:
        dtype = config.floatX
    m = n if m is None else m
    return Eye(dtype)(n, m, k)
def identity_like(x):
    """Return an identity matrix with the same 2-D shape and dtype as `x`."""
    n_rows, n_cols = x.shape[0], x.shape[1]
    return eye(n_rows, n_cols, k=0, dtype=x.dtype)
class Alloc(gof.Op):
"""Create a Tensor from an initial value and a desired shape.
alloc(value, shape0, shape1, ..., shapeN)
Returns an N-dimensional tensor initialized by `value` using something
equivalent to
z = numpy.zeros(shape, value.dtype)
z += value
The result has N dimensions, has the dtype of `value` and is obtained by
broadcasting value over the output ndarray.
This Op is used to replace fill() during optimizations because after shapes
are lifted, the first argument to fill can often be pruned from the graph.
"""
__props__ = ()
def validate_shape(self, shape):
sh = [as_tensor_variable(s) for s in shape]
bcast = []
for i, s in enumerate(sh):
if config.exception_verbosity == 'high':
s_as_str = '\n' + min_informative_str(s)
else:
s_as_str = str(s)
if s.type.dtype[:3] not in ('int', 'uin'):
raise TypeError('Shape arguments to Alloc must be integers, '
'but argument %s is not for apply node: %s' %
(i, s_as_str))
if s.ndim != 0:
raise TypeError(
"Each shape dimension to Alloc must be a scalar, ",
'but dimension %s have %d dimensions for apply node: %s' %
(i, s.ndim, s_as_str))
# if s is constant 1, then we're broadcastable in that dim
try:
const_shp = get_scalar_constant_value(s)
except NotScalarConstantError:
const_shp = None
bcast.append(1 == const_shp)
return sh, bcast
def make_node(self, value, *shape):
v = as_tensor_variable(value)
sh, bcast = self.validate_shape(shape)
if v.ndim > len(sh):
raise TypeError("The Alloc value to use has more dimensions"
" than the specified dimensions",
v.ndim, len(sh))
otype = TensorType(dtype=v.dtype, broadcastable=bcast)
return gof.Apply(self, [v] + sh, [otype()])
def perform(self, node, inputs, out_):
out, = out_
v = inputs[0]
sh = tuple([int(i) for i in inputs[1:]])
if out[0] is None or out[0].shape != sh:
if v.size == 1 and v.item() == 0:
out[0] = numpy.zeros(sh, dtype=v.dtype)
else:
out[0] = numpy.empty(sh, dtype=v.dtype)
out[0][...] = v # broadcast v to fill us up
else:
# reuse the allocated memory.
out[0][...] = v # broadcast v to fill us up
def c_code(self, node, name, inp, out, sub):
vv = inp[0]
ndim = len(inp[1:])
zz, = out
fail = sub['fail']
code = """
npy_intp shape[%(ndim)s];
""" % dict(ndim=ndim)
# Initialize shape
for i, shp_i in enumerate(inp[1:]):
code += """
shape[%(i)s] = ((dtype_%(shp_i)s*) PyArray_DATA(%(shp_i)s))[0];
""" % dict(i=i, shp_i=shp_i)
code += """
int need_new_out = (NULL == %(zz)s);
for (int i = 0; i < %(ndim)s; i++)
need_new_out = (need_new_out
|| (PyArray_DIMS(%(zz)s)[i] != shape[i]));
if (need_new_out)
{
Py_XDECREF(%(zz)s);
%(zz)s = (PyArrayObject*) PyArray_SimpleNew(%(ndim)s,
shape, PyArray_TYPE((PyArrayObject*) py_%(vv)s));
if (!%(zz)s)
{
PyErr_SetString(PyExc_MemoryError, "alloc failed");
%(fail)s
}
}
// This function takes care of broadcasting
PyArray_CopyInto(%(zz)s, %(vv)s);
""" % dict(vv=vv, ndim=ndim, zz=zz, fail=fail)
return code
def c_code_cache_version(self):
return (1,)
def infer_shape(self, node, input_shapes):
return [node.inputs[1:]]
def connection_pattern(self, node):
rval = [[True]]
for ipt in node.inputs[1:]:
rval.append([False])
return rval
def grad(self, inputs, grads):
x = inputs[0]
gz = grads[0]
n_axes_to_sum = gz.ndim - x.ndim
# The number of dimensions added
axis = list(range(n_axes_to_sum))
# The broadcasted dimensions
axis_broadcasted = []
axis_kept = []
for i, (ib, gb) in enumerate(
zip(inputs[0].broadcastable,
# We need the dimensions corresponding to x
grads[0].broadcastable[-inputs[0].ndim:])):
if ib and not gb:
axis_broadcasted.append(i + n_axes_to_sum)
else:
axis_kept.append(i)
gx = gz.sum(axis=axis + axis_broadcasted)
if axis_broadcasted:
new_order = ['x'] * x.ndim
for idx, axis in enumerate(axis_kept):
new_order[axis] = idx
gx = gx.dimshuffle(new_order)
# Dimshuffle to add back the broadcasted dims
# The *elements* of the output are not connected to
# the inputs that specify the shape. If you grow the
# shape by epsilon, the existing elements do not
# change.
return [gx] + [DisconnectedType()() for i in inputs[1:]]
    def __call__(self, val, *shapes, **kwargs):
        """
        If the alloc would be useless, this function returns val.
        If this function is called outside of a graph optimization context
        (for instance, it is manually called by a user building a graph),
        then we always return an Alloc node, to allow for DebugMode to check
        for size mismatches.
        If you always want an Alloc node, call make_node.
        """
        ret = super(Alloc, self).__call__(val, *shapes, **kwargs)
        try:
            # It makes optimization difficult when useless allocs are thrown
            # into the graph at every stage of optimization. This little logic
            # tries to help at least in some cases.
            # `val.fgraph` existing means we are inside an optimization pass;
            # identical types means the alloc would be a no-op.
            if hasattr(val, 'fgraph') and (val.type == ret.type):
                return val
        except AttributeError:
            # `val` may not be a Variable at all (e.g. a raw number).
            pass
        return ret
    def R_op(self, inputs, eval_points):
        """R-operator: alloc the evaluation point of the value with the
        same shape inputs; shapes contribute nothing to the directional
        derivative."""
        if eval_points[0] is None:
            return [None]
        return self(eval_points[0], *inputs[1:], **dict(return_list=True))
    def do_constant_folding(self, node):
        """Decide whether this Alloc node should be constant-folded.

        Folding is refused when the result would be copied anyway (graph
        output, inplace consumers) or when a GPU transfer should decide
        instead.
        """
        if not getattr(node.outputs[0], 'clients', []):
            # If there are no clients then there is no point doing constant
            # folding.
            return False
        for client in node.outputs[0].clients:
            if client[0] == 'output':
                # If the output is a constant, it will have to be deepcopied
                # each time the function is called. So we do not fold.
                return False
            elif (
                # The following ops work inplace of their input id 0.
                client[1] == 0 and
                isinstance(client[0].op, (
                    # Ops that will work inplace on the Alloc. So if they
                    # get constant_folded, they would copy the
                    # constant and this is less efficient.
                    # Not doing the constant folding could also lower
                    # the peak memory usage, as the "constant" won't
                    # always exist.
                    theano.tensor.subtensor.IncSubtensor,
                    theano.tensor.subtensor.AdvancedIncSubtensor1,
                    theano.tensor.subtensor.AdvancedIncSubtensor,
                    theano.tensor.blas.Gemv,
                    theano.tensor.blas_c.CGemv,
                    theano.tensor.blas.Ger,
                    theano.tensor.blas_c.CGer,
                    theano.tensor.blas_scipy.ScipyGer))):
                return False
            # If the clients is a transfer to the GPU, we don't want to
            # fold. We let the Alloc being moved to the GPU, then we
            # let the GPU algo decide if it need to fold it or not.
            elif client[0].op.__class__.__name__.lower().startswith("gpu"):
                return False
        return True
# Module-level singleton instance of the Alloc Op, with pretty-printing.
alloc = Alloc()
pprint.assign(alloc, printing.FunctionPrinter('alloc'))
def transfer(var, target):
    """
    Return a version of `var` transferred to `target`.
    `cpu` means a TensorType (on the CPU). Other types may define
    additional targets.
    Parameters
    ----------
    var : variable
        A theano variable
    target : str
        The target of the transfer
    """
    # The 'cpu' target is handled natively; any other target is delegated
    # to the handlers registered through `register_transfer`.
    if target == 'cpu':
        return as_tensor_variable(var)
    for handler in transfer._others:
        candidate = handler(var, target)
        if candidate is not None:
            return candidate
    raise ValueError("Can't transfer to target %s" % (target,))
# Registry of alternative-target transfer functions; populated by
# `register_transfer`.
transfer._others = []
def register_transfer(fn):
    """
    Register a transfer function for alternative targets.
    Parameters
    ----------
    fn : callable
        Called as ``fn(var, target)``; must return the transferred variable,
        or None if it does not handle `target`.
    """
    transfer._others.append(fn)
"""Create a duplicate of `a` (with duplicated storage)"""
tensor_copy = elemwise.Elemwise(scal.identity)
pprint.assign(tensor_copy, printing.IgnorePrinter())
@constructor
def sum(input, axis=None, dtype=None, keepdims=False, acc_dtype=None):
    """
    Compute the sum along the given axis(es) of a tensor `input`.

    When `axis` is None (the default), the sum is taken over the
    flattened tensor. See ``tensor.elemwise.Sum`` for full documentation,
    in particular the important warning about custom `acc_dtype`.

    Parameters
    ----------
    keepdims: bool
        If True, the reduced axes are kept in the result as size-one
        dimensions, so the result broadcasts correctly against `input`.
    """
    result = elemwise.Sum(axis=axis, dtype=dtype, acc_dtype=acc_dtype)(input)
    if keepdims:
        result = makeKeepDims(input, result, axis)
    return result
# Pretty-print any Sum Op as a function call named 'sum'.
pprint.assign(Sum(), printing.FunctionPrinter('sum'))
@constructor
def prod(input, axis=None, dtype=None, keepdims=False, acc_dtype=None,
         no_zeros_in_input=False):
    """
    Compute the product along the given axis(es) of a tensor `input`.

    When `axis` is None (the default), the product is taken over the
    flattened tensor. See ``tensor.elemwise.Prod`` for full documentation.

    Parameters
    ----------
    keepdims: bool
        If True, the reduced axes are kept in the result as size-one
        dimensions, so the result broadcasts correctly against `input`.
    """
    result = elemwise.Prod(axis, dtype=dtype, acc_dtype=acc_dtype,
                           no_zeros_in_input=no_zeros_in_input)(input)
    if keepdims:
        result = makeKeepDims(input, result, axis)
    return result
class Mean(elemwise.CAReduce):
    """One-pass mean reduction Op.

    Accumulates with scalar addition and divides by the number of
    elements. Always outputs float64 to protect against overflow.
    Only supports axis=None or a single axis.
    """
    def __init__(self, axis=None):
        elemwise.CAReduce.__init__(self, scal.add, axis)
        # Mean only supports full reduction or a single axis.
        assert self.axis is None or len(self.axis) == 1
    def __str__(self):
        if self.axis is not None:
            return "Mean{%s}" % (", ".join(str(x) for x in self.axis))
        else:
            return "Mean"
    def _output_dtype(self, idtype):
        # we want to protect against overflow
        return 'float64'
    def perform(self, node, inp, out):
        input, = inp
        output, = out
        if self.axis is None:
            axis = None
        else:
            axis = self.axis[0]
        # numpy.asarray is needed as otherwise we can end up with a
        # numpy scalar.
        output[0] = numpy.asarray(numpy.mean(input, dtype='float64',
                                             axis=axis))
    def c_code(self, node, name, inames, onames, sub):
        if self.axis is not None:
            # super(Op, self) deliberately skips CAReduce and hits Op's
            # default, which signals "no C implementation" for this case.
            return super(Op, self).c_code(node, name, inames, onames, sub)
        ret = elemwise.CAReduce.c_code(self, node, name, inames, onames, sub)
        # TODO: c_code perform support only axis is None
        # Divide the accumulated sum in place by the total element count.
        return ret + """
        *((double *)PyArray_DATA(%s)) /= PyArray_SIZE(%s);
        """ % (onames[0], inames[0])
# TODO: implement the grad. When done and tested, you can make this the default
# version.
# def grad(self, (x,), (gout,)):
# import pdb;pdb.set_trace()
# return grad(mean(x, self.axis, op=False),[x])
@constructor
def mean(input, axis=None, dtype=None, op=False, keepdims=False,
         acc_dtype=None):
    """
    Computes the mean value along the given axis(es) of a tensor `input`.
    Parameters
    ----------
    axis : None or int or (list of int) (see `Sum`)
        Compute the mean along this axis of the tensor.
        None means all axes (like numpy).
    dtype: None or string
        Dtype to cast the result of the inner summation into.
        For instance, by default, a sum of a float32 tensor will be
        done in float64 (acc_dtype would be float64 by default),
        but that result will be casted back in float32.
    op : bool
        If True, use the one-pass `Mean` Op (which is hard-coded to
        float64) instead of composing sum() and true_div().
    keepdims: bool
        If this is set to True, the axes which are reduced are
        left in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original tensor.
    acc_dtype: None or string
        Dtype to use for the inner summation. This will not
        necessarily be the dtype of the output (in particular
        if it is a discrete (int/uint) dtype, the output will
        be in a float type). If None, then we use the same rules as `sum()`.
    Notes
    -----
    For gpu, if you specify dtype=float32, everything will be done on the gpu.
    """
    input = as_tensor_variable(input)
    if op:
        # The Mean Op only supports float64; reject conflicting requests.
        if dtype not in (None, 'float64'):
            raise NotImplementedError(
                'The Mean op does not support the dtype argument, '
                'and will always use float64. If you want to specify '
                'the dtype, call tensor.mean(..., op=False).',
                dtype)
        if acc_dtype not in (None, 'float64'):
            # BUGFIX: attach the offending acc_dtype (not dtype) to the error.
            raise NotImplementedError(
                'The Mean op does not support the acc_dtype argument, '
                'and will always use float64. If you want to specify '
                'acc_dtype, call tensor.mean(..., op=False).',
                acc_dtype)
        out = Mean(axis)(input)
        if keepdims:
            out = makeKeepDims(input, out, axis)
        return out
    if dtype is not None:
        # The summation will be done with the specified dtype.
        # sum() will complain if it is not suitable.
        sum_dtype = dtype
    else:
        sum_dtype = None
    # float16 overflows way too fast for sum: accumulate in float32 unless
    # the caller explicitly asked for a float16 accumulator.
    # BUGFIX: this was previously `sum_dtype == 'float32'` (a comparison
    # whose result was discarded), making the guard a no-op.
    if ((sum_dtype == 'float16' or input.dtype == 'float16') and
            acc_dtype != 'float16'):
        sum_dtype = 'float32'
    s = sum(input, axis=axis, dtype=sum_dtype, keepdims=keepdims,
            acc_dtype=acc_dtype)
    shp = shape(input)
    # Cast shp into a float type
    # TODO Once we have a consistent casting policy, we could simply
    # use true_div.
    if s.dtype in ('float16', 'float32', 'complex64'):
        shp = cast(shp, 'float32')
    else:
        shp = cast(shp, 'float64')
    # Normalize `axis` to a list of ints covering the reduced dimensions.
    if axis is None:
        axis = list(range(input.ndim))
    elif isinstance(axis, (integer_types, numpy.integer)):
        axis = [axis]
    elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
        axis = [int(axis)]
    else:
        axis = [int(a) for a in axis]
    # This sequential division will possibly be optimized by Theano:
    for i in axis:
        s = true_div(s, shp[i])
    # If the input was float16, the sum above was done in float32;
    # cast the result back down to float16.
    if dtype == 'float16' or (dtype is None and input.dtype == 'float16'):
        s = cast(s, 'float16')
    s.name = 'mean'
    return s
@constructor
def var(input, axis=None, keepdims=False):
    """
    Compute the variance along the given axis(es) of a tensor `input`.

    Parameters
    ----------
    axis: None or int or (list of int) (see `Sum`)
        Axis (or axes) along which to compute the variance.
        None means all axes (like numpy).
    keepdims : bool
        If True, the reduced axes are kept as size-one dimensions so the
        result broadcasts correctly against `input`.

    Notes
    -----
    Uses the two-pass algorithm for more stable results.
    https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
    There exist other implementations that are even more stable, but probably
    slower.
    """
    # Normalize `axis` into a list of ints.
    ndim = input.type.ndim
    if axis is None:
        axis = list(range(ndim))
    elif isinstance(axis, (integer_types, numpy.integer)):
        axis = [axis]
    elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
        axis = [int(axis)]
    else:
        axis = [int(a) for a in axis]
    # Pass 1: the axis-wise mean; pass 2: mean of squared deviations.
    centered = input - mean(input, axis, keepdims=True)
    v = mean(centered ** 2, axis, keepdims=keepdims)
    v.name = 'var'
    return v
@constructor
def std(input, axis=None, keepdims=False):
    """
    Compute the standard deviation along the given axis(es) of `input`.

    Parameters
    ----------
    axis : None or int or (list of int) (see `Sum`)
        Axis (or axes) along which to compute the standard deviation.
        None means all axes (like numpy).
    keepdims : bool
        If True, the reduced axes are kept as size-one dimensions so the
        result broadcasts correctly against `input`.

    Notes
    -----
    Delegates to `var()`, which uses the two-pass algorithm for more
    stable results.
    https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
    There exist other implementations that are even more stable, but probably
    slower.
    """
    result = sqrt(var(input=input, axis=axis, keepdims=keepdims))
    result.name = 'std'
    return result
class Default(gof.Op):
    """
    Takes an input x and a default value.
    If the input is not None, a reference to it is returned.
    If the input is None, a copy of the default value is returned instead.
    The input and the default must have exactly the same type.
    """
    # Output 0 may alias input 0: we hand back x itself when it is not None.
    view_map = {0: [0]}
    __props__ = ()
    def make_node(self, x, default):
        """Build the Apply node; both arguments must share one type."""
        x = as_tensor_variable(x)
        default = as_tensor_variable(default)
        if x.type != default.type:
            raise TypeError('Both default() arguments must have same type',
                            x, default)
        return gof.Apply(self, [x, default], [default.type()])
    def perform(self, node, inp, out_):
        """Return x itself, or a copy of the default when x is None."""
        x, default = inp
        out, = out_
        if x is not None:
            out[0] = x
            return
        # why copy? Theano can't yet understand out[0] being a view of
        # either x or y, so we can be a view of x, but only a copy of y.
        out[0] = default.copy()
# Module-level singleton instance of the Default Op.
default = Default()
setdefault = default # legacy
##########################
# Arithmetics
##########################
@_scal_elemwise
def maximum(x, y):
    """elemwise maximum. See max for the maximum in one tensor"""
    # see decorator for function body
@_scal_elemwise
def minimum(x, y):
    """elemwise minimum. See min for the minimum in one tensor"""
    # see decorator for function body
def div_proxy(x, y):
    """Proxy for either true_div or int_div, depending on types of x, y."""
    x_is_int = as_tensor_variable(x).dtype in discrete_dtypes
    y_is_int = as_tensor_variable(y).dtype in discrete_dtypes
    # Let the scalar module pick the division Op; mirror its choice here.
    scalar_op = scal.int_or_true_div(x_is_int, y_is_int)
    if scalar_op is scal.int_div:
        return int_div(x, y)
    return true_div(x, y)
def divmod(x, y):
    """elementwise divmod, using floor_div and mod_check"""
    return floor_div(x, y), mod_check(x, y)
@_scal_elemwise
def add(a, *other_terms):
    """elementwise addition"""
    # see decorator for function body
@_scal_elemwise
def sub(a, b):
    """elementwise subtraction"""
    # see decorator for function body
@_scal_elemwise
def mul(a, *other_terms):
    """elementwise multiplication"""
    # see decorator for function body
@_scal_elemwise
def true_div(a, b):
    """elementwise [true] division (inverse of multiplication)"""
    # see decorator for function body
@_scal_elemwise
def int_div(a, b):
    """elementwise [floor] division (inverse of multiplication)"""
    # see decorator for function body
# floor_div and int_div are the same thing
floor_div = int_div
def ceil_intdiv(a, b):
    """
    Safely compute ceil(float_division(a, b)).
    Works for all dtypes, but mostly useful when a and b are int.
    """
    # If a and b are int with not many significant bits, we could
    # cast them to float to avoid doing the modulo. We do not know if this
    # is faster or not. But this is not safe for int64 as the cast will
    # lose precision.
    # e.g.: cast(cast(a, scalar.upcast(a, 'float32')) / b, scal.upcast(a, b))
    # We cast for the case when a and b are uint*. Otherwise neq will
    # force their upcast to int.
    # ceil(a / b) == a // b + (1 if a % b != 0 else 0)
    div = int_div(a, b)
    ret = cast(neq(a % b, 0), div.dtype) + div
    assert ret.dtype == scal.upcast(div.owner.inputs[0], div.owner.inputs[1])
    return ret
def mod_check(x, y):
    """Make sure we do not try to use complex numbers."""
    x_is_complex = as_tensor_variable(x).dtype in complex_dtypes
    y_is_complex = as_tensor_variable(y).dtype in complex_dtypes
    if x_is_complex or y_is_complex:
        # Currently forbidden.
        raise scal.Mod.complex_error
    return mod(x, y)
@_scal_elemwise
def mod(a, b):
    """elementwise modulo"""
    # see decorator for function body
@_scal_elemwise
def pow(a, b):
    """elementwise power"""
    # see decorator for function body
@_scal_elemwise
def clip(x, min, max):
    """
    Clip x to be between min and max.
    Notes
    -----
    When `x` is equal to the boundaries, the output is considered
    to be `x`, so at these points, the gradient of the cost wrt the output
    will be propagated to `x`, not to `min` nor `max`. In other words,
    on these points, the gradient wrt `x` will be equal to the gradient wrt
    the output, and the gradient wrt `min` and `max` will be zero.
    """
    # see decorator for function body
    # for grep: clamp, bound
# Pretty-printing for the basic arithmetic Ops (precedence, associativity).
pprint.assign(add, printing.OperatorPrinter('+', -2, 'either'))
pprint.assign(mul, printing.OperatorPrinter('*', -1, 'either'))
pprint.assign(sub, printing.OperatorPrinter('-', -2, 'left'))
pprint.assign(neg, printing.OperatorPrinter('-', 0, 'either'))
pprint.assign(true_div, printing.OperatorPrinter('/', -1, 'left'))
pprint.assign(int_div, printing.OperatorPrinter('//', -1, 'left'))
pprint.assign(pow, printing.OperatorPrinter('**', 1, 'right'))
##########################
# View Operations
##########################
def extract_constant(x, elemwise=True, only_process_constants=False):
    """
    This function is basically a call to tensor.get_scalar_constant_value.
    The main difference is the behaviour in case of failure. While
    get_scalar_constant_value raises a TypeError, this function returns x,
    as a tensor if possible. If x is a ScalarVariable from a
    scalar_from_tensor, we remove the conversion. If x is just a
    ScalarVariable, we convert it to a tensor with tensor_from_scalar.
    """
    try:
        x = get_scalar_constant_value(x,
                                      elemwise,
                                      only_process_constants)
    except NotScalarConstantError:
        # Not a constant: fall through and return x itself (as a tensor).
        pass
    if ((isinstance(x, scal.ScalarVariable) or
         isinstance(x, scal.sharedvar.ScalarSharedVariable))):
        if x.owner and isinstance(x.owner.op, ScalarFromTensor):
            # Undo the tensor->scalar conversion instead of stacking a
            # scalar->tensor conversion on top of it.
            x = x.owner.inputs[0]
        else:
            x = tensor_from_scalar(x)
    return x
def transpose(x, axes=None):
    """
    Reorder the dimensions of x. (Default: reverse them)
    This is a macro around dimshuffle that matches the numpy.transpose function.
    """
    reversed_order = list(range((x.ndim - 1), -1, -1))
    if axes is None:
        axes = reversed_order
    result = DimShuffle(x.broadcastable, axes, inplace=False)(x)
    # Mirror numpy's `.T` naming when this is a plain reversal of a named
    # variable's axes.
    if x.name and axes == reversed_order:
        result.name = x.name + '.T'
    return result
def batched_dot(a, b):
    """
    Compute the batched dot product of two variables:
    batched_dot(a, b)[i] = dot(a[i], b[i])
    Note that this batched_dot function does one of three things, in the
    following sequence:
    1. If either a or b is a vector, it returns the batched elementwise
       product without calling the Theano BatchedDot op.
    2. If both a and b have either 2 or 3 dimensions, it calls Theano's
       BatchedDot op on a and b.
    3. If either a or b has more than 3 dimensions, it calls Theano's
       batched_tensordot function with appropriate axes. The
       batched_tensordot function expresses high-dimensional batched
       dot products in terms of batched matrix-matrix dot products, so
       it may be possible to further optimize for performance.
    """
    a, b = as_tensor_variable(a), as_tensor_variable(b)
    if a.ndim == 0:
        raise TypeError("a must have at least one (batch) axis")
    elif b.ndim == 0:
        raise TypeError("b must have at least one (batch) axis")
    elif a.ndim == 1:
        # Vector case: broadcast a over all of b's non-batch dimensions.
        return a.dimshuffle(*([0] + ["x"] * (b.ndim - 1))) * b
    elif b.ndim == 1:
        return a * b.dimshuffle(*([0] + ["x"] * (a.ndim - 1)))
    elif a.ndim > 3 or b.ndim > 3:
        # High-rank case: contract a's last axis with b's second-to-last
        # (or axis 1 when b is a matrix).
        return batched_tensordot(
            a, b, [[a.ndim - 1], [numpy.maximum(1, b.ndim - 2)]])
    else:
        # avoid circular import
        return theano.tensor.blas.BatchedDot()(a, b)
def batched_tensordot(x, y, axes=2):
    """
    Compute a batched tensordot product.
    A hybrid of batched_dot and tensordot, this function computes the
    tensordot product between the two tensors, by iterating over the
    first dimension to perform a sequence of tensordots.
    Parameters
    ----------
    x : tensor
        A Tensor with sizes e.g.: for 3D (dim1, dim3, dim2)
    y : tensor
        A Tensor with sizes e.g.: for 3D (dim1, dim2, dim4)
    axes: int or array-like of length 2
        If an integer, the number of axes to sum over.
        If an array, it must have two array elements containing the axes to sum
        over in each tensor.
        If an integer i, it is converted to an array containing
        the last i dimensions of the first tensor and the first
        i dimensions of the second tensor (excluding the first
        (batch) dimension):
            axes = [list(range(a.ndim - i, b.ndim)), list(range(1,i+1))]
        If an array, its two elements must contain compatible axes
        of the two tensors. For example, [[1, 2], [2, 4]] means sum
        over the 2nd and 3rd axes of a and the 3rd and 5th axes of b.
        (Remember axes are zero-indexed!) The 2nd axis of a and the
        3rd axis of b must have the same shape; the same is true for
        the 3rd axis of a and the 5th axis of b.
    Like tensordot, this function uses a series of dimshuffles and
    reshapes to reduce the tensor dot product to a matrix or vector
    dot product. Finally, it calls batched_dot to compute the result.
    """
    # Delegate to the shared tensordot machinery in batched mode.
    return _tensordot_as_dot(x, y, axes, dot=batched_dot, batched=True)
def split(x, splits_size, n_splits, axis=0):
    """Convenience wrapper: partition `x` into `n_splits` pieces of sizes
    `splits_size` along `axis` using the `Split` Op."""
    return Split(n_splits)(x, axis, splits_size)
class Split(Op):
    """Partition a `TensorVariable` along some axis.
    Examples
    --------
    >>> x = vector()
    >>> splits = lvector()
    You have to declare right away how many split_points there will be.
    >>> ra, rb, rc = split(x, splits, n_splits = 3, axis = 0)
    >>> f = function([x, splits], [ra, rb, rc])
    >>> a, b, c = f([0,1,2,3,4,5], [3, 2, 1])
    a == [0,1,2]
    b == [3, 4]
    c == [5]
    """
    len_splits = None
    """A Split instance will have this many outputs, and require that
    the splits argument to `perform` have exactly this many elements.
    """
    __props__ = ("len_splits",)
    def __init__(self, len_splits):
        self.len_splits = int(len_splits)
    def __str__(self):
        return self.__class__.__name__ + "{%s}" % self.len_splits
    def make_node(self, x, axis, splits):
        """Build an Apply node with `len_splits` outputs of x's type."""
        x = as_tensor_variable(x)
        axis = as_tensor_variable(axis)
        splits = as_tensor_variable(splits)
        if splits.type not in int_vector_types:
            raise TypeError('splits must have type tensor.lvector',
                            splits.type)
        if axis.type not in int_types:
            raise TypeError('axis must have type lscalar', axis.type)
        # # The following lines are necessary if we allow splits of zero
        # if isinstance(axis, gof.Constant):
        #     x = unbroadcast(x, int(axis.data))
        # else:
        #     x = unbroadcast(x, *range(x.type.ndim))
        inputs = [x, axis, splits]
        outputs = [x.type() for i in xrange(self.len_splits)]
        return Apply(self, inputs, outputs)
    def perform(self, node, inputs, outputs):
        """Slice x into `len_splits` contiguous pieces along `axis`."""
        x, axis, splits = inputs
        # in python 2.4, x.shape[numpy.asarray(1)] don't work.
        if sys.version_info[0:2] == (2, 4) and axis.size == 1:
            axis = int(axis)
        try:
            len_along_axis = x.shape[axis]
        except Exception:
            # Was a bare `except:`; narrowed so that e.g. KeyboardInterrupt
            # is not swallowed.
            raise ValueError('Split.perform() with axis=(%s) is invalid'
                             ' for x.shape==(%s)'
                             % (axis, x.shape))
        if len(splits) != self.len_splits:
            raise ValueError('In Split.perform(), len(splits) != len_splits.',
                             (len(splits), self.len_splits))
        if numpy.sum(splits) != len_along_axis:
            raise ValueError('The splits sum to %s, expected %s' %
                             (numpy.sum(splits), len_along_axis))
        if python_any([nb < 0 for nb in splits]):
            raise ValueError('Split: you tried to make an ndarray with a '
                             'negative number of elements.')
        # Checking is done, let's roll the splitting algorithm!
        # Basically we step along the given axis of x, extracting
        # subtensors of size splits[i] as we go along.
        general_key = [slice(None, None, None) for s in x.shape]
        lower_idx = 0
        for i in xrange(self.len_splits):
            upper_idx = lower_idx + splits[i]
            general_key[axis] = slice(lower_idx, upper_idx, None)
            outputs[i][0] = x.__getitem__(tuple(general_key)).copy()
            lower_idx = upper_idx
    def infer_shape(self, node, in_shapes):
        """Each output has x's shape with the split size substituted
        along the split axis."""
        axis = node.inputs[1]
        splits = node.inputs[2]
        shp_x, shp_axis, shp_splits = in_shapes
        out_shapes = []
        for i in xrange(self.len_splits):
            temp = as_tensor_variable(shp_x)
            temp = theano.tensor.subtensor.set_subtensor(temp[axis], splits[i])
            temp = [temp[i] for i in xrange(len(shp_x))]
            out_shapes.append(temp)
        return out_shapes
    def grad(self, inputs, g_outputs):
        """Join the gradients along the axis that was used to split x."""
        x, axis, n = inputs
        outputs = self(*inputs, **dict(return_list=True))
        # If all the output gradients are disconnected, then so are the inputs
        if python_all([isinstance(g.type, DisconnectedType)
                       for g in g_outputs]):
            return [DisconnectedType()(),
                    grad_undefined(self, 1, axis),
                    grad_undefined(self, 2, n)]
        # Else, we have to make them zeros before joining them
        new_g_outputs = []
        for o, g in zip(outputs, g_outputs):
            if isinstance(g.type, DisconnectedType):
                new_g_outputs.append(o.zeros_like())
            else:
                new_g_outputs.append(g)
        return [join(axis, *new_g_outputs),
                grad_undefined(self, 1, axis),
                grad_undefined(self, 2, n)]
    def R_op(self, inputs, eval_points):
        """R-operator: split the evaluation point exactly like the input."""
        if eval_points[0] is None:
            # BUGFIX: was `[None for i in self.len_splits]`, which iterates
            # over an int and raises TypeError; we need one None per output.
            return [None for i in xrange(self.len_splits)]
        return self.make_node(eval_points[0], *inputs[1:]).outputs
def addbroadcast(x, *axes):
    """
    Make the input broadcastable in the specified axes.

    For example, addbroadcast(x, 0) will make the first dimension of
    x broadcastable. When performing the function, if the length of
    x along that dimension is not 1, a ValueError will be raised.
    We apply the opt here not to pollute the graph especially during
    the gpu optimization.

    Parameters
    ----------
    x : tensor_like
        Input theano tensor.
    axis : an int or an iterable object such as list or tuple of int values
        The dimension along which the tensor x should be broadcastable.
        If the length of x along these dimensions is not 1, a ValueError will
        be raised.

    Returns
    -------
    tensor
        A theano tensor, which is broadcastable along the specified dimensions.
    """
    # Mark every requested axis as broadcastable (flag=True).
    flag_pairs = [(axis, True) for axis in axes]
    rval = Rebroadcast(*flag_pairs)(x)
    return theano.tensor.opt.apply_rebroadcast_opt(rval)
def unbroadcast(x, *axes):
    """
    Make the input impossible to broadcast in the specified axes.
    For example, unbroadcast(x, 0) will make the first dimension
    of x no longer broadcastable, even if its length is 1 at runtime.
    We apply the opt here not to pollute the graph especially during
    the gpu optimization
    Parameters
    ----------
    x : tensor_like
        Input theano tensor.
    axis : an int or an iterable object such as list or tuple of int values
        The dimension along which the tensor x should be unbroadcastable.
    Returns
    -------
    tensor
        A theano tensor, which is unbroadcastable along the specified dimensions.
    """
    rval = Rebroadcast(*[(axis, False) for axis in axes])(x)
    return theano.tensor.opt.apply_rebroadcast_opt(rval)
def patternbroadcast(x, broadcastable):
    """
    Make the input adopt a specific broadcasting pattern.

    `broadcastable` must be iterable. For example,
    patternbroadcast(x, (True, False)) will make the first
    dimension of x broadcastable and the second dimension
    not broadcastable, so x will now be a row.
    We apply the opt here not to pollute the graph especially during the gpu
    optimization.

    Parameters
    ----------
    x : tensor_like
        Input theano tensor.
    broadcastable : an iterable object such as list or tuple of bool values
        A set of boolean values indicating whether a dimension should be
        broadcastable or not. If the length of x along these dimensions is
        not 1, a ValueError will be raised.

    Returns
    -------
    tensor
        A theano tensor, which is unbroadcastable along the specified dimensions.
    """
    # Pair each axis index with its requested broadcast flag.
    flag_pairs = [(dim, flag) for dim, flag in enumerate(broadcastable)]
    rval = Rebroadcast(*flag_pairs)(x)
    return theano.tensor.opt.apply_rebroadcast_opt(rval)
class Join(Op):
"""
Concatenate multiple `TensorVariable`s along some axis.
The axis must be given as first argument. All tensors must have the same
shape along all dimensions other than this axis.
Of course, TensorVariable instances do not have a shape, so this error
cannot be caught until runtime. See `perform()`.
See Also
--------
stack : For joins involving scalar values
Examples
--------
>>> x, y, z = tensor.matrix(), tensor.matrix(), tensor.matrix()
>>> u = tensor.vector()
>>> r = join(0, x, y, z)
>>> c = join(1, x, y, z)
>>> join(2, x, y, z) # WRONG: the axis has to be an index into the shape
>>> join(0, x, u) # WRONG: joined tensors must have the same rank
"""
check_input = False
__props__ = ()
def make_node(self, *axis_and_tensors):
"""
Parameters
----------
axis: an Int or integer-valued Variable
tensors
A variable number (but not zero) of tensors to
concatenate along the specified axis. These tensors must have
the same shape along all dimensions other than this axis.
Returns
-------
A symbolic Variable
It has the same ndim as the input tensors, and the most inclusive
dtype.
"""
axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
if not tensors:
raise ValueError('Cannot join an empty list of tensors')
as_tensor_variable_args = [as_tensor_variable(x) for x in tensors]
dtypes = [x.type.dtype for x in as_tensor_variable_args]
out_dtype = scal.upcast(*dtypes)
def output_maker(bcastable):
return tensor(dtype=out_dtype, broadcastable=bcastable)
return self._make_node_internal(
axis, tensors, as_tensor_variable_args, output_maker)
def _make_node_internal(self, axis, tensors,
as_tensor_variable_args, output_maker):
if not python_all(targs.type.ndim for targs
in as_tensor_variable_args):
raise TypeError('Join cannot handle arguments of dimension 0.'
' For joining scalar values, see @stack')
# Handle single-tensor joins immediately.
if len(as_tensor_variable_args) == 1:
bcastable = list(as_tensor_variable_args[0].type.broadcastable)
else:
# When the axis is fixed, a dimension should be
# broadcastable if at least one of the inputs is
# broadcastable on that dimension (see justification below),
# except for the axis dimension.
# Initialize bcastable all false, and then fill in some trues with
# the loops.
bcastable = [False] * len(
as_tensor_variable_args[0].type.broadcastable)
ndim = len(bcastable)
# Axis can also be a constant
if not isinstance(axis, integer_types):
try:
# Note : `get_scalar_constant_value` returns a ndarray not
# an int
axis = int(get_scalar_constant_value(axis))
except NotScalarConstantError:
pass
if isinstance(axis, integer_types):
# Basically, broadcastable -> length 1, but the
# converse does not hold. So we permit e.g. T/F/T
# joins, and if they fail at runtime they fail, but if
# they don't then it means that the argument where
# that broadcastable flag was False had length 1 along
# this dimension, and therefore this dimension should
# be broadcastable for the output.
if axis < -ndim:
raise IndexError("Join axis %d out of bounds [0, %d)" %
(axis, ndim))
if axis < 0:
axis += ndim
for x in as_tensor_variable_args:
for current_axis, bflag in enumerate(x.type.broadcastable):
# Constant negative axis can no longer be negative at
# this point. It safe to compare this way.
if current_axis == axis:
continue
if bflag:
bcastable[current_axis] = True
try:
bcastable[axis] = False
except IndexError:
raise ValueError('Join argument "axis" is out of range'
' (given input dimensions)')
else:
# When the axis may vary, no dimension can be guaranteed to be
# broadcastable.
bcastable = [False] * len(
as_tensor_variable_args[0].type.broadcastable)
if not python_all([x.ndim == len(bcastable)
for x in as_tensor_variable_args[1:]]):
raise TypeError("Join() can only join tensors with the same "
"number of dimensions.")
inputs = [as_tensor_variable(axis)] + list(as_tensor_variable_args)
if inputs[0].type not in int_types:
raise TypeError('Axis could not be cast to an integer type',
axis, inputs[0].type, int_types)
outputs = [output_maker(bcastable)]
node = Apply(self, inputs, outputs)
return node
def perform(self, node, axis_and_tensors, out_):
out, = out_
axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
ndim = tensors[0].ndim
if axis < -ndim:
raise IndexError("Join axis %d out of bounds [0, %d)" %
(axis, ndim))
out[0] = theano._asarray(numpy.concatenate(tensors, axis=axis),
dtype=node.outputs[0].type.dtype)
def c_code_cache_version(self):
return (3,)
def c_code(self, node, name, inputs, outputs, sub):
axis, tensors = inputs[0], inputs[1:]
input_1 = tensors[0]
l = len(tensors)
out, = outputs
fail = sub['fail']
adtype = node.inputs[0].type.dtype_specs()[1]
code = """
PyObject* list = PyList_New(%(l)s);
""" % locals()
for i, inp in enumerate(tensors):
code += """
Py_INCREF(%(inp)s);
PyList_SetItem(list, %(i)s, (PyObject*)%(inp)s);
""" % locals()
code += """
//PyObject* PyArray_Concatenate(PyObject* obj, int axis)
int axis = ((%(adtype)s *)PyArray_DATA(%(axis)s))[0];
int ndim = PyArray_NDIM(%(input_1)s);
if( axis < -ndim ){
PyErr_Format(PyExc_IndexError,
"Join axis %%d out of bounds [0, %%d)", axis, ndim);
%(fail)s
}
Py_XDECREF(%(out)s);
%(out)s = (PyArrayObject *)PyArray_Concatenate(list, axis);
Py_DECREF(list);
if(!%(out)s){
%(fail)s
}
""" % locals()
return code
def R_op(self, inputs, eval_points):
if None in eval_points[1:]:
return [None]
return self.make_node(inputs[0], *eval_points[1:]).outputs
def grad(self, axis_and_tensors, grads):
""" The gradient wrt a join op is a `Split`, used to partition
the gradient along the `axis` which was used for joining.
"""
gz, = grads
axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
rval = [grad_undefined(self, 0, axis)]
dtypes = [as_tensor_variable(x).type.dtype for x in tensors]
out_dtype = scal.upcast(*dtypes)
if 'float' in out_dtype or 'complex' in out_dtype:
# assume that this is differentiable
split = Split(len(tensors))
split_gz = split(gz, axis, stack([shape(x)[axis]
for x in tensors]))
# If there is only one split, it might not be in a list.
if not isinstance(split_gz, list):
split_gz = [split_gz]
# Split.make_node isn't always able to infer the right
# broadcast. As the grad need to keep the information,
# read it if needed.
split_gz = [patternbroadcast(g, t.broadcastable)
for t, g in zip(tensors, split_gz)]
rval = rval + split_gz
else:
# the output has integer type, so the gradient through it
# is 0
rval = rval + [tensor.zeros_like(dtype=config.floatX)
for tensor in tensors]
return rval
def infer_shape(self, node, ishapes):
    """Symbolic output shape: the join axis is the sum of the inputs'
    extents on that axis; every other axis keeps the first input's extent.
    """
    # ishapes[0] contains the size of the axis on which we join
    # Join op should get at least one input to join
    assert len(ishapes) > 1
    n_dim = len(ishapes[1])
    for shp in ishapes[1:]:
        assert shp is not None
        assert len(shp) == n_dim
    # The joining dimension could be negative, but we need it to be
    # in [0, n_dim) in the loop below.
    # An axis < -n_dim or >= ndim would be invalid, but this is
    # not checked here. An Assert op would be a way of addressing that,
    # but it may disrupt optimizations.
    join_dim = switch(ge(node.inputs[0], 0),
                      node.inputs[0],
                      node.inputs[0] + n_dim)
    out_shapes = []
    for dim in xrange(n_dim):
        # we have to deal with 2 possible cases in here :
        #   a) we are dealing with the dimension for which we join
        #      (called t_side from true side of the if, where the if
        #      compares current dimension with the joining dimension)
        #   b) a non joining dimension ( in which maybe a symbolic
        #      assertion can be used to make sure all tensors have
        #      the same number of elements on this non-joined dimension
        #      this is f_side
        # initialize
        t_side = ishapes[1][dim]
        f_side = ishapes[1][dim]
        # loop over tensors and sum for the joining dimension
        for shp in ishapes[2:]:
            t_side = t_side + shp[dim]
        # return the dimensions found
        out_shapes.append(switch(eq(dim, join_dim),
                                 t_side, f_side))
    return [tuple(out_shapes)]
"""
Convenience function to concatenate `TensorType`s along the given axis.
Parameters
----------
tensors : list of tensors (or list-like)
A list of tensors to be concatenated along the given axis.
The shapes of the tensors to be concatenated must be all
identical, except in the dimension (`axis`) on which they are to
be joined.
axis : int (symbolic or literal)
On which dimension should the tensors be joined? The `axis`
must be a valid index into the shape of the tensors to be
concatenated.
The `axis` parameter may either be an integer or an object that
can be converted to a scalar using `as_scalar`(`axis`). In the
former case, the axis is fixed at construction, while in the
latter it may vary over time depending on the value of the
`axis` variable.
"""
join = Join()
pprint.assign(lambda pstate, r: r.owner and isinstance(r.owner.op, Join),
printing.FunctionPrinter('join'))
def roll(x, shift, axis=None):
    """
    Convenience function to roll TensorTypes along the given axis.
    Syntax copies numpy.roll function.
    Parameters
    ----------
    x : tensor_like
        Input tensor.
    shift : int (symbolic or literal)
        The number of places by which elements are shifted.
    axis : int (symbolic or literal), optional
        The axis along which elements are shifted. By default, the array
        is flattened before shifting, after which the original
        shape is restored. A negative literal axis counts from the end,
        as in NumPy.
    Returns
    -------
    tensor
        Output tensor, with the same shape as ``x``.
    """
    if axis is None:
        if x.ndim > 1:
            # Flatten, roll the vector, then restore the original shape.
            y = x.flatten()
            return roll(y, shift, axis=0).reshape(x.shape)
        else:
            axis = 0
    # Normalize a negative (literal) axis. The slice lists below are built
    # with Python list repetition, which silently produces lists of the
    # wrong length for a negative axis, so normalization is required.
    if axis < 0:
        axis += x.ndim
    # A slice of all elements in a dimension ':'
    allslice = slice(None)
    # List of slices describing the front half [:, :, shift:, :]
    front_slice = slice(-shift, None)
    front_list = ([allslice] * axis + [front_slice] +
                  [allslice] * (x.ndim - axis - 1))
    # List of slices describing the back half [:, :, :shift, :]
    end_slice = slice(0, -shift)
    end_list = ([allslice] * axis + [end_slice] +
                [allslice] * (x.ndim - axis - 1))
    # Joining the rotated front and back halves realizes the roll.
    return join(axis,
                x.__getitem__(tuple(front_list)),
                x.__getitem__(tuple(end_list)))
@constructor
def shape_padleft(t, n_ones=1):
    """Reshape `t` by left-padding the shape with `n_ones` 1s.
    See Also
    --------
    shape_padaxis
    shape_padright
    Dimshuffle
    """
    var = as_tensor_variable(t)
    # Prepend `n_ones` broadcastable dims, keep existing dims in order.
    pattern = ['x'] * n_ones + list(xrange(var.type.ndim))
    return DimShuffle(var.broadcastable, pattern)(var)
@constructor
def shape_padright(t, n_ones=1):
    """Reshape `t` by right-padding the shape with `n_ones` 1s.
    See Also
    --------
    shape_padaxis
    shape_padleft
    Dimshuffle
    """
    var = as_tensor_variable(t)
    # Keep existing dims in order, then append `n_ones` broadcastable dims.
    pattern = list(xrange(var.type.ndim)) + ['x'] * n_ones
    return DimShuffle(var.broadcastable, pattern)(var)
@constructor
def shape_padaxis(t, axis):
    """Reshape `t` by inserting 1 at the dimension `axis`.
    Example
    -------
    >>> tensor = theano.tensor.tensor3()
    >>> theano.tensor.shape_padaxis(tensor, axis=0)
    DimShuffle{x,0,1,2}.0
    >>> theano.tensor.shape_padaxis(tensor, axis=1)
    DimShuffle{0,x,1,2}.0
    >>> theano.tensor.shape_padaxis(tensor, axis=3)
    DimShuffle{0,1,2,x}.0
    >>> theano.tensor.shape_padaxis(tensor, axis=-1)
    DimShuffle{0,1,2,x}.0
    See Also
    --------
    shape_padleft
    shape_padright
    Dimshuffle
    """
    var = as_tensor_variable(t)
    # The output has one more dimension than the input; `axis` must be a
    # valid insertion point into that output shape.
    out_ndim = var.ndim + 1
    if not -out_ndim <= axis < out_ndim:
        raise IndexError(
            'axis {0} is out of bounds [-{1}, {1})'.format(axis, out_ndim))
    if axis < 0:
        axis += out_ndim
    pattern = list(xrange(var.type.ndim))
    pattern.insert(axis, 'x')
    return DimShuffle(var.broadcastable, pattern)(var)
@constructor
def stack(*tensors, **kwargs):
    """Stack tensors in sequence on given axis (default is 0).
    Take a sequence of tensors and stack them on given axis to make a single
    tensor. The size in dimension `axis` of the result will be equal to the number
    of tensors passed.
    Note: The interface stack(*tensors) is deprecated, you should use
    stack(tensors, axis=0) instead.
    Parameters
    ----------
    tensors : list or tuple of tensors
        A list of tensors to be stacked.
    axis : int
        The index of the new axis. Default value is 0.
    Examples
    --------
    >>> a = theano.tensor.scalar()
    >>> b = theano.tensor.scalar()
    >>> c = theano.tensor.scalar()
    >>> x = theano.tensor.stack([a, b, c])
    >>> x.ndim # x is a vector of length 3.
    1
    >>> a = theano.tensor.tensor4()
    >>> b = theano.tensor.tensor4()
    >>> c = theano.tensor.tensor4()
    >>> x = theano.tensor.stack([a, b, c])
    >>> x.ndim # x is a 5d tensor.
    5
    >>> rval = x.eval(dict((t, np.zeros((2, 2, 2, 2))) for t in [a, b, c]))
    >>> rval.shape # 3 tensors are stacked on axis 0
    (3, 2, 2, 2, 2)
    >>> x = theano.tensor.stack([a, b, c], axis=3)
    >>> x.ndim
    5
    >>> rval = x.eval(dict((t, np.zeros((2, 2, 2, 2))) for t in [a, b, c]))
    >>> rval.shape # 3 tensors are stacked on axis 3
    (2, 2, 2, 3, 2)
    >>> x = theano.tensor.stack([a, b, c], axis=-2)
    >>> x.ndim
    5
    >>> rval = x.eval(dict((t, np.zeros((2, 2, 2, 2))) for t in [a, b, c]))
    >>> rval.shape # 3 tensors are stacked on axis -2
    (2, 2, 2, 3, 2)
    """
    # ---> Remove this when moving to the new interface:
    if not tensors and not kwargs:
        raise Exception('theano.tensor.stack(tensors, axis) must have at least'
                        ' one parameter')
    if not kwargs and not isinstance(tensors[0], (list, tuple)):
        # Old interface: stack(a, b, c) — every positional arg is a tensor.
        warnings.warn('stack(*tensors) interface is deprecated, use'
                      ' stack(tensors, axis=0) instead.', DeprecationWarning,
                      stacklevel=3)
        axis = 0
    elif 'tensors' in kwargs:
        # New interface with the sequence passed by keyword.
        tensors = kwargs['tensors']
        if 'axis' in kwargs:
            axis = kwargs['axis']
        else:
            axis = 0
    else:
        # New interface: first positional arg is the sequence of tensors;
        # axis may be positional (second arg) or a keyword.
        if len(tensors) == 2:
            axis = tensors[1]
        elif 'axis' in kwargs:
            axis = kwargs['axis']
        else:
            axis = 0
        tensors = tensors[0]
    # <--- Until here.
    if len(tensors) == 0:
        raise Exception('tensors is empty. You should at least provide one'
                        ' tensor to theano.tensor.stack(tensors, axis).')
    # If all tensors are scalars of the same type, call make_vector.
    # It makes the graph simpler, by not adding DimShuffles and Rebroadcasts
    # This should be an optimization!
    # Doing it here make the graph less canonicalized
    # (more type need to be understood by all optimization)
    # And DebugMode can't detect error in this code as it is not in an
    # optimization.
    # See ticket #660
    if numpy.all(
        [  # in case there is direct int in tensors.
            isinstance(t, (numpy.number, float, integer_types,
                           python_complex)) or
            (isinstance(t, Variable) and
             isinstance(t.type, TensorType) and
             t.ndim == 0)
            for t in tensors]):
        # in case there is direct int
        tensors = list(map(as_tensor_variable, tensors))
        dtype = scal.upcast(*[i.dtype for i in tensors])
        return theano.tensor.opt.MakeVector(dtype)(*tensors)
    # General case: give each tensor a new length-1 axis, then join on it.
    return join(axis, *[shape_padaxis(t, axis) for t in tensors])
@constructor
def concatenate(tensor_list, axis=0):
    """Alias for `join`(axis, *tensor_list).
    This function is similar to `join`, but uses the signature of
    numpy's concatenate function.
    Raises
    ------
    TypeError
        The tensor_list must be a tuple or list.
    """
    # Guard against the common mistake of writing concatenate(x, y)
    # instead of concatenate((x, y)).
    if isinstance(tensor_list, (tuple, list)):
        return join(axis, *tensor_list)
    raise TypeError(
        "The 'tensors' argument must be either a tuple "
        "or a list, make sure you did not forget () or [] around "
        "arguments of concatenate.", tensor_list)
def get_vector_length(v):
    """Return the run-time length of a symbolic vector.
    Parameters
    ----------
    v
        A rank-1 TensorType variable.
    Raises
    ------
    TypeError
        `v` hasn't the proper type.
    ValueError
        No special case applies, the length is not known.
    In general this is not possible, but for a number of special cases
    the length can be determined at compile / graph-construction time.
    This function implements these special cases.
    """
    v = as_tensor_variable(v)
    if v.ndim != 1:
        raise TypeError("argument must be symbolic vector, got '%s'" %
                        v)
    if v.type.broadcastable[0]:
        # A broadcastable (only) dimension has length 1 by definition.
        return 1
    if isinstance(v, gof.Constant) and v.type.ndim == 1:
        # Constant vector: just measure its data.
        return len(v.data)
    if v.owner and isinstance(v.owner.op, theano.tensor.opt.MakeVector):
        # MakeVector output: one element per input.
        return len(v.owner.inputs)
    if v.owner and isinstance(v.owner.op, Shape):
        # shape(x) has one element per dimension of x.
        return v.owner.inputs[0].type.ndim
    # If we take a slice, we know how many elements it will result in
    if ((v.owner and
         isinstance(v.owner.op, theano.tensor.subtensor.Subtensor) and
         isinstance(v.owner.op.idx_list[0], slice) and
         v.owner.inputs[0].owner and
         isinstance(v.owner.inputs[0].owner.op, theano.compile.ops.Shape))):
        # v is shape(x)[start:stop:step]: extract constant slice bounds and
        # clamp them against x's number of dimensions (the sliced vector's
        # length) to compute the resulting slice length.
        start = extract_constant(theano.tensor.subtensor.get_idx_list(
            v.owner.inputs, v.owner.op.idx_list)[0].start)
        stop = extract_constant(theano.tensor.subtensor.get_idx_list(
            v.owner.inputs, v.owner.op.idx_list)[0].stop)
        step = extract_constant(theano.tensor.subtensor.get_idx_list(
            v.owner.inputs, v.owner.op.idx_list)[0].step)
        ndim = v.owner.inputs[0].owner.inputs[0].ndim
        types = (numbers.Integral, numpy.integer)
        if start is None:
            start = 0
        elif isinstance(start, types) and start < 0:
            start += ndim
            if start < 0:
                start = 0
        if stop is None:
            stop = ndim
        elif isinstance(stop, types):
            if stop > ndim:
                stop = ndim
            elif stop < 0:
                stop += ndim
        if step is None:
            step = 1
        if (isinstance(stop, types) and
                isinstance(start, types) and
                isinstance(step, types) and
                start >= 0 and stop >= 0 and
                step > 0 and stop >= start):
            return (stop - start - 1) // step + 1
    # No special case matched: report the unknown-length variable.
    if isinstance(v, Variable):
        msg = theano.printing.debugprint(v, file='str')
    else:
        msg = str(v)
    raise ValueError("length not known: %s" % msg)
@constructor
def horizontal_stack(*args):
    """
    Horizontally stack two L{TensorType}s.
    Stack two L{TensorType}s along the second axis (column wise). These
    L{TensorType}s must have the same shape along all dimensions but the
    second.
    """
    # Note: unlike numpy's hstack/vstack, these helpers deliberately require
    # 2-D inputs, because numpy's behavior on 1-D arrays is potentially
    # confusing; the distinct names emphasize that divergence.
    assert len(args) >= 2
    assert all(arg.type.ndim == 2 for arg in args)
    return concatenate(args, axis=1)
@constructor
def vertical_stack(*args):
    """
    Vertically stack two L{TensorType}s.
    Stack two L{TensorType}s along the first axis (row wise). These
    L{TensorType}s must have the same shape along all dimensions but the
    first.
    """
    assert len(args) >= 2
    assert all(arg.type.ndim == 2 for arg in args)
    return concatenate(args, axis=0)
class Reshape(Op):
    """Perform a reshape operation of the input x to the new shape shp.
    The number of dimensions to which to reshape to (ndim) must be
    known at graph build time.
    """
    view_map = {0: [0]}  # output 0 is potentially aliased to inputs [0]
    _f16_ok = True
    check_input = False
    __props__ = ("ndim",)
    # name does not participate because it doesn't affect computations

    def __init__(self, ndim, name=None):
        self.ndim = ndim
        self.name = name

    def __str__(self):
        return '%s{%s}' % (self.__class__.__name__, self.ndim)

    def make_node(self, x, shp):
        """Build the Apply node, inferring output broadcastability from
        any shape entries that are known to be 1 at graph-build time."""
        x = as_tensor_variable(x)
        shp_orig = shp
        shp = as_tensor_variable(shp, ndim=1)
        if not (shp.dtype.startswith('int') or
                (isinstance(shp, TensorConstant) and shp.data.size == 0)):
            # It raises an error if shp is not of integer type,
            # except when shp is constant and empty
            # (in this case, shp.dtype does not matter anymore).
            raise TypeError("Shape must be integers", shp, shp.dtype)
        assert shp.ndim == 1
        if isinstance(shp, TensorConstant):
            # Constant shape: a dim is broadcastable exactly when it is 1.
            bcast = [s == 1 for s in shp.data]
            return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcast)])
        else:
            bcasts = [False] * self.ndim
            shp_list = shp_orig
            if hasattr(shp_orig, "ndim") and shp_orig.ndim == 0:
                shp_list = [shp_orig]
            for index in xrange(self.ndim):
                y = shp_list[index]
                y = as_tensor_variable(y)
                # Try to see if we can infer that y has a constant value of 1.
                # If so, that dimension should be broadcastable.
                try:
                    bcasts[index] = (
                        hasattr(y, 'get_scalar_constant_value') and
                        y.get_scalar_constant_value() == 1)
                except NotScalarConstantError:
                    pass
            return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcasts)])

    def perform(self, node, inp, out_):
        """Python implementation: delegate to numpy.reshape."""
        x, shp = inp
        out, = out_
        if (len(shp) != self.ndim):
            raise ValueError('shape argument to Reshape.perform has incorrect'
                             ' length %i'
                             ', should be %i' % (len(shp), self.ndim), shp)
        try:
            out[0] = numpy.reshape(x, shp)
        except Exception:
            raise ValueError('Cannot reshape input of shape %s to shape %s' %
                             (x.shape, shp))
        if not out[0].flags.aligned:
            raise RuntimeError("numpy.reshape returned a not aligned tensor."
                               " NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
                               " this problem for some input shape/new shape"
                               " combinations. Use another NumPy version."
                               " Input shape: %s, input stride: %s,"
                               " new_shape: %s, new_strides: %s." % (
                                   x.shape, x.strides, shp, out[0].strides))

    def connection_pattern(self, node):
        # Gradient flows through x only; the shape input carries no gradient.
        return [[True], [False]]

    def grad(self, inp, grads):
        """Gradient: reshape the output gradient back to x's shape."""
        x, shp = inp
        g_out, = grads
        return [reshape(g_out, shape(x), ndim=x.ndim),
                DisconnectedType()()]

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return [None]
        return self(eval_points[0], *inputs[1:], **dict(return_list=True))

    def infer_shape(self, node, ishapes):
        # inputs[1] can contain at most one value of '-1', meaning the actual
        # shape of the output will be automatically computed by reshape, so
        # that the total number of elements stays the same.
        # TODO: Maybe put that formula here?
        # It's not trivial, because we would have to check if the product of
        # all the non-minus-one shapes is a divisor of the product of the
        # original shapes.
        # The following expression leads to cycles in feature_shape,
        # because it tries to replace the Shape_i node by the switch
        # statement, which depends on Shape_i.
        # return [tuple([switch(eq(node.inputs[1][i], -1),
        #                       theano.tensor.opt.Shape_i(i)(node.outputs[0]),
        #                       node.inputs[1][i])
        #                for i in xrange(self.ndim)]
        #               )]
        # Here, we only simplify if the shape (node.inputs[1]) is a constant,
        # ideally it would suffice to check that it is always non-negative.
        # If current variable is a scalar and its dimensionality should
        # change to self.ndim, then use size 1 for all new dimensions.
        if len(ishapes[0]) == 0:
            return [(1,) * self.ndim]
        requ = node.inputs[1]
        if isinstance(requ, theano.tensor.TensorConstant):
            requ = list(requ.data)
            requ_part = [ele for ele in requ if ele != -1]
            crit = len(requ) - len(requ_part)
            if crit == 1 and len(requ_part) > 0:
                # One -1 entry: fill it with the remaining element count.
                missing = mul(*ishapes[0]) // mul(*requ_part)
                for i, ele in enumerate(requ):
                    if ele == -1:
                        requ[i] = missing
            elif crit == 1:  # we reshape to -1
                requ = [mul(*ishapes[0])] if ishapes[0] else [1]
            elif crit > 1:
                raise ValueError('shape argument to Reshape.perform'
                                 ' must have at most one entry equal to -1')
            return [requ]
        else:
            new_dims = [node.inputs[1][i] for i in xrange(self.ndim)]
            # since new_dims can have negative value (-1), the
            # multiplication of all values should be negated
            # to give a positive value.
            # To avoid optimization complexity, we avoid checking
            # for the case when there are two or more '-1' values.
            if self.ndim:
                rest_size = (mul(*ishapes[0]) // -mul(*new_dims))
                return [tuple([switch(eq(new_dims[i], -1),
                                      rest_size,
                                      new_dims[i])
                               for i in xrange(self.ndim)])]
            # NOTE(review): when self.ndim == 0 this branch falls through and
            # implicitly returns None — confirm whether [()] was intended.

    def c_code_cache_version(self):
        return (6,)

    def c_code(self, node, name, inputs, outputs, sub):
        """C implementation via PyArray_Newshape; only for TensorVariable
        inputs, otherwise fall back to the default (Python) implementation."""
        if isinstance(node.inputs[0], TensorVariable):
            x, shp = inputs
            z, = outputs
            new_ndim = self.ndim
            sdtype = node.inputs[1].type.dtype_specs()[1]
            fail = sub['fail']
            return """
            assert (PyArray_NDIM(%(shp)s) == 1);
            npy_intp new_dims[%(new_ndim)s];
            PyArray_Dims newshape;
            newshape.ptr = new_dims;
            newshape.len = %(new_ndim)s;
            for (int ii = 0; ii < %(new_ndim)s; ++ii)
            {
                // -- We do not want an explicit cast here. the shp can be any
                // -- int* dtype. The compiler will explicitly upcast it, but
                // -- will err if this will downcast. This could happen if the
                // -- user pass an int64 dtype, but npy_intp endup being int32.
                new_dims[ii] = ((%(sdtype)s*)(
                        PyArray_BYTES(%(shp)s) +
                        ii * PyArray_STRIDES(%(shp)s)[0]))[0];
            }
            Py_XDECREF(%(z)s);
            %(z)s = (PyArrayObject *) PyArray_Newshape(%(x)s, &newshape,
                                                       NPY_CORDER);
            if (!%(z)s)
            {
                //The error message should have been set by PyArray_Newshape
                %(fail)s;
            }
            if (!PyArray_ISALIGNED(%(z)s)) {
                PyErr_Format(
                    PyExc_RuntimeError,
                    "PyArray_Newshape returned an object that isn't aligned!"
                    " NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
                    " this problem for some input shape/new shape"
                    " combinations. Use another NumPy version.");
                %(fail)s;
            }
            """ % locals()
        else:
            return Op.c_code(self, node, name, inputs, outputs, sub)
def reshape(x, newshape, ndim=None, name=None):
    """Reshape `x` to `newshape`, inferring `ndim` from `newshape` when
    it is not given explicitly."""
    if ndim is None:
        newshape = as_tensor_variable(newshape)
        if newshape.ndim != 1:
            raise TypeError(
                "New shape in reshape must be a vector or a list/tuple of"
                " scalar. Got %s after conversion to a vector." % newshape)
        try:
            ndim = get_vector_length(newshape)
        except ValueError:
            raise ValueError(
                "The length of the provided shape (%s) cannot "
                "be automatically determined, so Theano is not able "
                "to know what the number of dimensions of the reshaped "
                "variable will be. You can provide the 'ndim' keyword "
                "argument to 'reshape' to avoid this problem." % newshape)
    return Reshape(ndim, name)(x, newshape)
class Flatten(Op):
    """
    Flatten a tensor.
    Flattens a tensor to `outdim` dimensions by preserving the leading
    outdim - 1 shape components.
    .. note:: The interface Flatten(Op) is deprecated, you should use flatten.
    """
    view_map = {0: [0]}  # output may be a view of the input
    check_input = False
    __props__ = ("outdim",)

    def __init__(self, outdim=1):
        warnings.warn(
            "Flatten class is deprecated, "
            "please use flatten method instead.",
            DeprecationWarning,
            stacklevel=4)
        self.outdim = int(outdim)

    def __str__(self):
        return '%s{%s}' % (self.__class__.__name__, self.outdim)

    def make_node(self, x):
        t_x = as_tensor_variable(x)
        # NOTE(review): the checks below mix `x` and `t_x`; presumably
        # callers always pass something that already has ndim/broadcastable
        # attributes — confirm.
        if self.outdim < 1 or (x.ndim and self.outdim > x.ndim):
            raise ValueError('invalid output ndimensions (%i) for tensor of '
                             'rank %i' % (self.outdim, t_x.ndim))
        # Infer the broadcastable pattern of the output. For every dimension
        # unaffected by the flatten, the broadcast flag should be unchanged.
        # For the dimension resulting from the collapse of other dimensions,
        # it should be broadcastable iff all the collapsed dimensions were
        # broadcastable.
        bcast_kept_dims = x.broadcastable[:self.outdim - 1]
        bcast_new_dim = python_all(x.broadcastable[self.outdim - 1:])
        broadcastable = bcast_kept_dims + (bcast_new_dim,)
        return gof.Apply(self, [t_x], [tensor(x.type.dtype,
                                              broadcastable)])

    def perform(self, node, inp, out_):
        """Python implementation: reshape to the target number of dims."""
        x, = inp
        out, = out_
        outdim = self.outdim
        if outdim == 1:
            try:
                out[0] = x.reshape(x.size)
            except AttributeError:
                out[0] = x.reshape((numpy.prod(x.shape),))
        elif outdim == len(x.shape):
            # Already the right rank: pass through unchanged.
            out[0] = x
        else:
            # Keep the first outdim-1 dims; collapse the rest into one.
            newshape = (x.shape[:outdim - 1] +
                        (numpy.prod(x.shape[outdim - 1:]),))
            out[0] = x.reshape(newshape)

    def infer_shape(self, node, in_shapes):
        in_shp, = in_shapes
        part1 = in_shp[:self.outdim - 1]
        part2 = in_shp[self.outdim - 1:]
        if len(part2) > 1:
            part2 = (prod(part2, dtype='int64'),)
        elif len(part2) == 1:
            # We do not want to force an upcast of part2 if its length is 1
            pass
        else:
            if len(in_shp) == 0 and self.outdim == 1:
                # Flattening a scalar yields a length-1 vector.
                part2 = (1,)
            else:
                raise ValueError('invalid output ndimensions (%i) for tensor '
                                 'of rank %i' % (self.outdim, len(in_shp)))
        out_shape = (part1 + part2)
        return [out_shape]

    def grad(self, inp, grads):
        """Gradient: reshape the output gradient back to x's shape."""
        x, = inp
        g_out, = grads
        return [reshape(g_out, shape(x), x.ndim)]

    def R_op(self, inputs, eval_points):
        if None in eval_points:
            return [None]
        return self.make_node(*eval_points).outputs

    def c_code_cache_version(self):
        return (1, 1)

    def c_code(self, node, name, inputs, outputs, sub):
        """C implementation mirroring perform(), via PyArray_Newshape."""
        x, = inputs
        out, = outputs
        outdim = self.outdim
        fail = sub['fail']
        return """
        if (%(outdim)s == PyArray_NDIM(%(x)s))
        {
            Py_XDECREF(%(out)s);
            Py_XINCREF(%(x)s);
            %(out)s = %(x)s;
        }
        else
        {
            Py_XDECREF(%(out)s);
            if (%(outdim)s == 1)
            {
                npy_intp size = PyArray_SIZE(%(x)s);
                PyArray_Dims newshape;
                newshape.ptr = &size;
                newshape.len = 1;
                %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
                                                           &newshape,
                                                           NPY_CORDER);
            }
            else
            {
                npy_intp *oldshape = PyArray_DIMS(%(x)s);
                npy_intp newshape_dims[%(outdim)s];
                int i;
                for (i = 0; i < %(outdim)s - 1; ++i)
                    newshape_dims[i] = oldshape[i];
                newshape_dims[i] = 1;
                for (int j = %(outdim)s - 1; j < PyArray_NDIM(%(x)s); ++j)
                    newshape_dims[i] *= oldshape[j];
                PyArray_Dims newshape;
                newshape.ptr = newshape_dims;
                newshape.len = %(outdim)s;
                %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
                                                           &newshape,
                                                           NPY_CORDER);
            }
        }
        if (!%(out)s)
        {
            //The error message should have been set by
            // PyArray_Newshape
            %(fail)s;
        }
        if (!PyArray_ISALIGNED(%(out)s)) {
            PyErr_Format(
                PyExc_RuntimeError,
                "PyArray_Newshape returned an object that isn't"
                " aligned! NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
                " this problem for some input shape/new shape"
                " combinations. Use another NumPy version.");
            %(fail)s;
        }
        """ % locals()
def is_flat(var, outdim=1):
    """Return True iff `var` has exactly `outdim` dimensions.

    Typically called after `flatten`, which keeps the first outdim-1
    dimension sizes of a variable intact and collapses the remaining
    dimensions into the last one, producing a variable of rank outdim.

    Parameters
    ----------
    var : theano.tensor.var.TensorVariable
        The variable whose dimensionality is checked.
    outdim : int
        The expected number of dimensions.

    Returns
    -------
    bool
        Whether var's number of dimensions equals outdim.
    """
    return outdim == var.ndim
def flatten(x, outdim=1):
    """
    Reshapes the variable x by keeping
    the first outdim-1 dimension size(s) of x the same,
    and making the last dimension size of x equal to
    the multiplication of its remaining dimension size(s).
    Parameters
    ----------
    x : theano.tensor.var.TensorVariable
        the variable that should be reshaped.
    outdim : int
        the number of dimensions of the returned variable
    Returns
    -------
    theano.tensor.var.TensorVariable
        the flattened variable with dimensionality of outdim
    """
    # Any input variable can be flattened to have outdim of 1,
    # even if it's a scalar. Otherwise, outdim must be positive
    # and smaller than x.ndim.
    if outdim < 1 or (outdim > 1 and outdim > x.ndim):
        raise ValueError('outdim %s out of bound [1, %d)'
                         % (outdim, x.ndim + 1))
    if outdim > 1:
        # Keep the leading outdim-1 dims; -1 collapses the rest.
        dims = tuple(x.shape[:outdim - 1]) + (-1,)
    else:
        dims = (-1,)
    x_reshaped = x.reshape(dims)
    # Re-attach broadcast information lost by reshape: kept leading dims
    # keep their flags; the collapsed dim is broadcastable iff all the
    # collapsed dims were.
    bcast_kept_dims = x.broadcastable[:outdim - 1]
    bcast_new_dim = python_all(x.broadcastable[outdim - 1:])
    broadcastable = bcast_kept_dims + (bcast_new_dim,)
    x_reshaped = theano.tensor.addbroadcast(
        x_reshaped, *filter(lambda i: broadcastable[i], range(outdim)))
    return x_reshaped
# class TileGrad(Op):
# """
# Calculates the gradient of the Tile Op.
# """
# # this is so weird, I can't think of how to make this a general thing.
# def make_node(self, x, reps, g_out):
# return gof.Apply(self, [x, reps, g_out], [x.type()])
#
# def perform(self, node, inp, out):
# x, reps, g_out = inp
# gx, = out
# xsh = x.shape
# if len(reps) == 2 and reps[1] == 1 and len(x.shape) == 1:
# gx[0] = numpy.sum(g_out, axis=0)
# else:
# raise NotImplementedError('x.shape, reps combination not '
# 'supported', (x.shape, reps))
#
# tilegrad = TileGrad()
class Tile(Op):
    """
    Construct an array by repeating the input x according to reps pattern.
    .. note:: Deprecated
        Use tile() instead.
    Tiles its input according to reps. The length of reps is the number of
    dimension of x and contains the number of times to tile x in each
    dimension.
    See Also
    --------
    numpy.tile : http://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html
    """
    __props__ = ("ndim",)

    def __init__(self, ndim):
        self.ndim = ndim

    def __str__(self):
        return self.__class__.__name__ + "{ndim=%d}" % self.ndim

    def make_node(self, x, reps):
        warnings.warn((
            "Tile op is deprecated, use tile function instead."), stacklevel=3)
        x = as_tensor_variable(x)
        reps = as_tensor_variable(reps)
        # Output rank is fixed by self.ndim; no dimension is broadcastable.
        return gof.Apply(self, [x, reps], [tensor(x.type.dtype, [False] *
                                                  self.ndim)])

    def perform(self, node, inp, out_):
        """Python implementation: delegate to numpy.tile."""
        x, reps = inp
        out, = out_
        res = numpy.tile(x, reps)
        if res.ndim != self.ndim:
            raise ValueError(
                'Tile.perform produced incorrect number of dimensions')
        if (numpy.asarray(reps) == 1).all():
            # In that case, some NumPy version return a view! As this
            # op isn't declared as inplace, we need to check that and
            # copy the data.
            if numpy.may_share_memory(res, x):
                res = res.copy()
        out[0] = res

    def infer_shape(self, node, in_shapes):
        # Note: in contrast with numpy, it is assumed that x.shape and reps
        # have equal length; see also tile function below
        # Note: if reps were to be allowed not to be a constant and x.shape
        # and reps to be unequal, the following block of code could be used:
        # prepend 1 to x.shape if needed
        # if self.ndim > x.ndim:
        #     shp = concatenate(ones(self.ndim - x.ndim), shp)
        # prepend 1 to reps if needed
        # reps = concatenate(ones(self.ndim - reps.shape[0]), reps)
        x, reps = node.inputs
        shp = in_shapes[0]
        tiled_shp = shp * reps
        out_shape = []
        for i in xrange(self.ndim):
            out_shape.append(tiled_shp[i])
        return [out_shape]

    def grad(self, inp, grads):
        x, reps = inp
        g_out, = grads
        # return [tilegrad(x, reps, g_out), None]
        raise NotImplementedError()
def tile(x, reps, ndim=None):
    """
    Tile input array `x` according to `reps`.
    See the docstring of `numpy.tile` for details.
    'reps' can be constant integer (e.g. 3), constant vector(e.g. [2 3]),
    symbolic scalar (e.g. tensor.iscalar()), symbolic vector (e.g. tensor.ivector())
    or a list of symbolic scalar (e.g. [tensor.iscalar(), tensor.iscalar()]).
    ndim is the number of the dimensions of the output, if it is provided, ndim
    should be equal or larger than x.ndim and len(reps), otherwise, we will use
    max(x.ndim, len(reps)) as ndim. If reps is symbolic vector, the ndim has to
    be provided.
    """
    if ndim is not None and ndim < x.ndim:
        raise ValueError("ndim should be equal or larger than x.ndim")
    # if reps is tensor.scalar, integer or tensor.vector, we convert it to a list.
    if not isinstance(reps, (list, tuple)):
        reps_astensor = as_tensor_variable(reps)
        ndim_check = reps_astensor.ndim
        if reps_astensor.dtype not in theano.tensor.discrete_dtypes:
            raise ValueError("elements of reps must be integer dtype")
        # tensor.scalar/integer case
        if ndim_check == 0:
            reps = [reps]
        # tensor.vector case
        elif ndim_check == 1:
            if ndim is None:
                raise ValueError("if reps is tensor.vector, you should specify "
                                 "the ndim")
            else:
                offset = ndim - reps.shape[0]
                # assert that reps.shape[0] does not exceed ndim
                offset = theano.tensor.opt.assert_(offset, ge(offset, 0))
                # if reps.ndim is less than x.ndim, we pad the reps with
                # "1" so that reps will have the same ndim as x.
                reps_ = [switch(i < offset, 1, reps[i - offset]) for i in range(ndim)]
                reps = reps_
        # other raise error
        else:
            raise ValueError("the dimension of reps should not exceed 1")
    else:
        if ndim is not None and len(reps) > ndim:
            raise ValueError("len(reps) should be equal or less than ndim")
        if not numpy.all([isinstance(r, integer_types) or
                          (isinstance(r, TensorVariable) and
                           r.dtype in theano.tensor.discrete_dtypes) for r in reps]):
            raise ValueError("elements of reps must be scalars of integer dtype")
    # if reps.ndim is less than x.ndim, we pad the reps with
    # "1" so that reps will have the same ndim as x.
    reps = list(reps)
    if ndim is None:
        ndim = builtins.max(len(reps), x.ndim)
    if len(reps) < ndim:
        reps = [1] * (ndim - len(reps)) + reps
    # Implementation: broadcast x into an alloc of shape reps + shape, then
    # interleave the rep axes with the data axes via dimshuffle and collapse
    # each (rep, dim) pair with a final reshape.
    shape = [1] * (ndim - x.ndim) + [x.shape[i] for i in xrange(x.ndim)]
    alloc_shape = reps + shape
    y = alloc(x, *alloc_shape)
    shuffle_ind = numpy.arange(ndim * 2).reshape(2, ndim)
    shuffle_ind = shuffle_ind.transpose().flatten()
    y = y.dimshuffle(*shuffle_ind)
    new_shapes = [sh * reps[i] for i, sh in enumerate(shape)]
    y = y.reshape(new_shapes)
    return y
class ARange(Op):
    """Create an array containing evenly spaced values within a given interval.
    Parameters and behaviour are the same as numpy.arange().
    """
    __props__ = ("dtype",)

    def __init__(self, dtype):
        self.dtype = dtype

    def make_node(self, start, stop, step):
        start, stop, step = map(as_tensor_variable, (start, stop, step))
        assert start.ndim == 0
        assert stop.ndim == 0
        assert step.ndim == 0
        inputs = [start, stop, step]
        # Output is always a (non-broadcastable) vector of self.dtype.
        outputs = [tensor(self.dtype, (False,))]
        return Apply(self, inputs, outputs)

    @theano.configparser.change_flags(warn_float64='ignore')
    def infer_shape(self, node, i_shapes):
        # Note start, stop and step can be float numbers.
        start, stop, step = node.inputs

        def is_constant_value(var, value):
            # True iff `var` is a graph-time constant equal to `value`.
            try:
                v = get_scalar_constant_value(var)
                return numpy.all(v == value)
            except NotScalarConstantError:
                pass
            return False

        def upcast(var):
            if ('int' in var.dtype and
                    # We do not want to cast uint64 to int64 as this can
                    # loose information. If we upcast uint64 with int64,
                    # this give float64. This is safer then checking for
                    # uint64 in case we support [u]int128 or other in the
                    # future.
                    scal.upcast(var.dtype, 'int64') == 'int64'):
                return cast(var, 'int64')
            return var

        if is_constant_value(step, 1):
            if is_constant_value(start, 0):
                # arange(0, stop, 1) has exactly `stop` elements.
                return [(cast(stop, 'int64'),)]
            else:
                stop = upcast(stop)
                start = upcast(start)
                return [(maximum(cast(stop - start, 'int64'), 0),)]
        else:
            # General case: ceil((stop - start) / step), clipped at 0.
            stop = upcast(stop)
            start = upcast(start)
            return [(maximum(cast(ceil(cast((stop - start), 'float64') / step),
                                  'int64'), 0),)]

    def perform(self, node, inp, out_):
        start, stop, step = inp
        out, = out_
        start = start.item()
        stop = stop.item()
        step = step.item()
        out[0] = numpy.arange(start, stop, step, dtype=self.dtype)

    def connection_pattern(self, node):
        # start and step influence output values; stop only the shape,
        # so it is reported as disconnected.
        return [[True], [False], [True]]

    def grad(self, inputs, grads):
        start, stop, step = inputs
        gz, = grads
        # start and step affect the output values
        # but the outputs are integers so there's
        # no gradient through them
        # stop does not affect the output values,
        # just the output shape, so it is disconnected
        return [start.zeros_like(),
                DisconnectedType()(),
                step.zeros_like()]

    def R_op(self, inputs, eval_points):
        return [None]
# Cache of ARange op instances, one per output dtype.
_arange = {}


def arange(start, stop=None, step=1, dtype=None):
    """Symbolic counterpart of numpy.arange; the output dtype follows
    config.cast_policy when not given explicitly."""
    # If only one argument is provided, it is in fact the "stop" argument,
    # and start is 0.
    if stop is None:
        start, stop = 0, start
    start, stop, step = map(as_tensor_variable, (start, stop, step))
    # If dtype is not provided, infer it from the other arguments
    if dtype is None:
        dtype = scal.upcast(start.type.dtype, stop.type.dtype, step.type.dtype)
        # don't try to be stingy and byte-optimize, this leads to
        # overflow problems.
        if dtype.startswith('int'):
            dtype = 'int64'
        if dtype.startswith('uint'):
            dtype = 'uint64'
        if config.cast_policy in ('numpy', 'numpy+floatX'):
            # We enforce numpy semantics, except in the special case where
            # `config.cast_policy` is 'numpy+floatX' and we want to use float32
            # rather than float64.
            # As an example, if `start`, `stop` and `step` are all int32,
            # `numpy.arange` returns an int64 array (on 64-bit platforms),
            # while the upcast above returns int32.
            numpy_dtype = numpy.arange(
                start=numpy.array(0, dtype=start.dtype),
                stop=numpy.array(1, dtype=stop.dtype),
                step=numpy.array(1, dtype=step.dtype)).dtype
            if numpy_dtype != dtype:
                if (config.cast_policy == 'numpy+floatX' and
                        config.floatX == 'float32' and
                        numpy_dtype == 'float64' and
                        # No explicit float64 in the three arguments?
                        python_all(
                            dt != 'float64'
                            for dt in [s.dtype for s in (start, stop, step)])):
                    # We use float32 instead.
                    assert dtype != 'float64'
                    dtype = 'float32'
                else:
                    # We use the same dtype as numpy instead of the result of
                    # the upcast.
                    dtype = str(numpy_dtype)
    if dtype not in _arange:
        _arange[dtype] = ARange(dtype)
    return _arange[dtype](start, stop, step)
class _nd_grid(object):
    """Create a dense n-dimensional 'meshgrid' with equally spaced points.

    Used to create the instance ``mgrid`` and ``ogrid`` which act similarly
    to their numpy equivalents.

    Parameters
    ----------
    sparse : boolean, optional, default=False
        Specifying False leads to the equivalent of numpy's mgrid
        functionality. Specifying True leads to the equivalent of ogrid.

    Examples
    --------
    >>> a = T.mgrid[0:5, 0:3]
    >>> a[0].eval()
    array([[0, 0, 0],
           [1, 1, 1],
           [2, 2, 2],
           [3, 3, 3],
           [4, 4, 4]], dtype=int8)
    >>> a[1].eval()
    array([[0, 1, 2],
           [0, 1, 2],
           [0, 1, 2],
           [0, 1, 2],
           [0, 1, 2]], dtype=int8)
    >>> b = T.ogrid[0:5, 0:3]
    >>> b[0].eval()
    array([[0],
           [1],
           [2],
           [3],
           [4]], dtype=int8)
    >>> b[1].eval()
    array([[0, 1, 2]], dtype=int8)
    """

    def __init__(self, sparse=False):
        self.sparse = sparse

    def __getitem__(self, *args):
        # One slice per output dimension.
        ndim = len(args[0])
        for sl in args[0]:
            if isinstance(sl.step, python_complex):
                raise NotImplementedError("Not implemented for slices "
                                          "whose step is complex")
        # Build one 1-d range per slice, then reshape each so it spans
        # its own axis (length in axis j, 1 elsewhere).
        ranges = [arange(sl.start or 0,
                         sl.stop,
                         sl.step or 1) for sl in args[0]]
        shapes = [tuple([1] * j + [r.shape[0]] + [1] * (ndim - 1 - j))
                  for j, r in enumerate(ranges)]
        ranges = [r.reshape(shape) for r, shape in zip(ranges, shapes)]
        if self.sparse:
            grids = ranges
        else:
            # Densify by multiplying each range with ones spanning the
            # other axes (broadcasting fills in the grid).
            grids = []
            ones = [ones_like(r) for r in ranges]
            for i in range(ndim):
                grid = 1
                for j in range(ndim):
                    if j == i:
                        grid = grid * ranges[j]
                    else:
                        grid = grid * ones[j]
                grids.append(grid)
        return grids
# Dense and sparse ("open") grid constructors, analogous to numpy's
# ``numpy.mgrid`` and ``numpy.ogrid``.
mgrid = _nd_grid()
ogrid = _nd_grid(sparse=True)
class PermuteRowElements(Op):
    """Permute the elements of each row (inner-most dim) of a tensor.

    A permutation will be applied to every row (vector) of the input tensor x.
    Depending on the dimensionality of x and the permutation tensor y,
    different cases are possible.

    If y.ndim = 1, y is a single permutation, that will be applied to every
    vector of x. For instance, if x is a matrix, the same permutation will be
    applied to each row of x.

    If x.ndim = y.ndim, each row of x corresponds to a row of y, containing
    a permutation that will be applied to that row. For instance, if x and y
    are two matrices, a different permutation will be applied to each row of x.

    If x.ndim > y.ndim, y will be broadcasted to fit x, then each row (vector)
    of x will be reordered according to the corresponding row of y. (This is
    a generalization of the first case).

    If x.ndim = 1, every permutation in y will be applied to x, and the output
    will contain all the results.

    If x.ndim < y.ndim, x will be broadcasted to fit y, and different
    permutations contained in y will be applied to each vector in x. (This is
    a generalization of the previous case).

    If the "inverse" argument is True, the Op will perform the inverse
    permutation instead.
    """
    __props__ = ()

    def make_node(self, x, y, inverse):
        x = as_tensor_variable(x)
        y = as_tensor_variable(y)
        if inverse:  # as_tensor_variable does not accept booleans
            inverse = as_tensor_variable(1)
        else:
            inverse = as_tensor_variable(0)

        # y should contain integers
        assert (y.type.dtype.startswith('int') or
                y.type.dtype.startswith('uint'))
        # Inverse should be an integer scalar
        assert (inverse.type.ndim == 0 and
                (inverse.type.dtype.startswith('int') or
                 inverse.type.dtype.startswith('uint')))

        # Match shapes of x and y
        x_dim = x.type.ndim
        y_dim = y.type.ndim

        if x_dim > y_dim:
            y = shape_padleft(y, n_ones=(x_dim - y_dim))
        elif x_dim < y_dim:
            x = shape_padleft(x, n_ones=(y_dim - x_dim))

        # Compute the broadcastable pattern of the output
        out_broadcastable = [xb and yb for xb, yb in
                             izip(x.type.broadcastable, y.type.broadcastable)]
        out_type = tensor(dtype=x.type.dtype, broadcastable=out_broadcastable)

        inputlist = [x, y, inverse]
        outputlist = [out_type]
        return Apply(self, inputlist, outputlist)

    def _rec_perform(self, node, x, y, inverse, out, curdim):
        """Perform the permutation by doing a recursion over the input
        dimensions.

        For every dimension, starting with the leftmost, the right set of
        indices is determined (depending if broadcasting or not), then
        the function is recursively called on the appropriate subtensors.

        The terminal case is reached when the current tensors are vector,
        then the permutation contained in y is applied to x.

        Parameters
        ----------
        x : tensor
            The input tensor, on which the permutation is applied.
        y : tensor
            Tensor containing the permutations to apply.
        out : tensor
            Tensor storing the output result.
        curdim : int
            Counter of the current depth of recursion.
        inverse
            Whether to apply permutations or their inverse.
        """
        if len(x.shape) == 1:
            # Numpy advanced indexing works in this case
            if inverse:
                out[y] = x[:]
            else:
                out[:] = x[y]
            if (numpy.__version__ <= '1.6.1' and
                    out.size != numpy.uint32(out.size)):
                warnings.warn(
                    'Numpy versions 1.6.1 and below have a bug preventing '
                    'advanced indexing from correctly filling arrays that '
                    'are too big (>= 2^32 elements). It is possible that '
                    'out (%s), with shape %s, is not correctly filled.'
                    % (out, out.shape))
        else:
            xs0 = x.shape[0]
            ys0 = y.shape[0]
            if xs0 == ys0:
                for i in xrange(xs0):
                    self._rec_perform(node, x[i], y[i], inverse, out[i],
                                      curdim + 1)
            elif ys0 == 1 and node.inputs[1].type.broadcastable[curdim]:
                # Broadcast y
                for i in xrange(xs0):
                    self._rec_perform(node, x[i], y[0], inverse, out[i],
                                      curdim + 1)
            elif xs0 == 1 and node.inputs[0].type.broadcastable[curdim]:
                # Broadcast x
                for i in xrange(ys0):
                    self._rec_perform(node, x[0], y[i], inverse, out[i],
                                      curdim + 1)
            else:
                raise ValueError('Dimension mismatch: %s, %s' % (xs0, ys0))

    def perform(self, node, inp, out):
        x, y, inverse = inp
        outs, = out
        x_s = x.shape
        y_s = y.shape
        assert len(x_s) == len(y_s)

        # Make sure the output is big enough
        out_s = []
        for xdim, ydim in izip(x_s, y_s):
            if xdim == ydim:
                outdim = xdim
            elif xdim == 1:
                outdim = ydim
            elif ydim == 1:
                outdim = xdim
            else:
                raise ValueError('Dimension mismatch: %s, %s' % (xdim, ydim))
            out_s.append(outdim)

        # BUGFIX: ndarray.shape is a tuple and a tuple never compares
        # equal to a list, so comparing against the list `out_s` was
        # always unequal and the output buffer was never reused.
        out_s = tuple(out_s)
        if outs[0] is None or outs[0].shape != out_s:
            outs[0] = numpy.empty(out_s, dtype=x.dtype)

        self._rec_perform(node, x, y, inverse, outs[0], curdim=0)

    def infer_shape(self, node, in_shapes):
        shp_x = in_shapes[0]
        shp_y = in_shapes[1]
        assert len(shp_x) == len(shp_y)
        out_shape = []
        for i in xrange(len(shp_x)):
            out_shape.append(maximum(shp_x[i], shp_y[i]))
        return [out_shape]

    def grad(self, inp, grads):
        x, y, inverse = inp
        gz, = grads
        # First, compute the gradient wrt the broadcasted x.
        # If 'inverse' is False (0), apply the inverse of y on gz.
        # Else, apply y on gz.
        gx = permute_row_elements(gz, y, eq(inverse, 0))

        # If x has been broadcasted along some axes, we need to sum
        # the gradient over these axes, but keep the dimension (as
        # broadcastable)
        broadcasted_dims = [dim for dim in xrange(gz.type.ndim)
                            if x.type.broadcastable[dim] and
                            not gz.type.broadcastable[dim]]
        gx = Sum(axis=broadcasted_dims)(gx)

        # Sum(...) removed the dimensions in broadcasted_dims,
        # so we need to put them back.
        newdims = []
        i = 0
        for dim in xrange(gz.type.ndim):
            if dim in broadcasted_dims:
                newdims.append('x')
            else:
                newdims.append(i)
                i += 1

        gx = DimShuffle(gx.type.broadcastable, newdims)(gx)
        assert gx.type.broadcastable == x.type.broadcastable

        # if x is an integer type, then so is the output.
        # this means f(x+eps) = f(x) so the gradient with respect
        # to x is zero
        if x.type.dtype.find('int') != -1:
            gx = x.zeros_like()

        # The elements of y and of inverse both affect the output,
        # so they are connected to the output,
        # and the transformation isn't defined if their values
        # are non-integer, so the gradient with respect to them is
        # undefined.
        # BUGFIX: `inverse` is input index 2 (not 1); the index is
        # reported in grad_undefined's message.
        return [gx, grad_undefined(self, 1, y),
                grad_undefined(self, 2, inverse)]
_permute_row_elements = PermuteRowElements()
def permute_row_elements(x, y, inverse=0):
    """Permute the elements of each row (inner-most dim) of `x` according
    to the permutation(s) `y`. See `PermuteRowElements` for the
    broadcasting cases and the meaning of `inverse`."""
    return _permute_row_elements(x, y, inverse)
def inverse_permutation(perm):
    """Computes the inverse of permutations.

    Each row of input should contain a permutation of the first integers.
    """
    # Applying `perm` in inverse mode to [0, 1, ..., n-1] yields the
    # inverse of each row's permutation.
    return permute_row_elements(
        arange(perm.shape[-1], dtype=perm.dtype),
        perm,
        inverse=True)
#########################
# Linalg : Dot
#########################
#
# For BLAS-related ops see blas.py
#
# TODO: Dotinv should go here, Eigs, Svd, etc.
class Dot(Op):
    """
    Computes the dot product of two variables. For two matrices, this is
    equivalent to matrix multiplication. For two vectors, this is the inner
    product.

    Notes
    -----
    Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops
    (see tensor.blas).
    Vector-vector products are sometimes optimized to Ger or CGer (see
    tensor.blas).
    Matrix-vector products are sometimes optimized to Gemv, CGemv (see
    tensor.blas).
    """
    __props__ = ()

    # the rationale for Dot22 is related to getting GEMM Ops into the
    # graph. See Dot22 in tensor.blas for details.

    def make_node(self, *inputs):
        inputs = list(map(as_tensor_variable, inputs))

        if len(inputs) != 2:
            raise TypeError(
                'theano.tensor.Dot: 2 arguments required, %d given ' %
                len(inputs))
        if inputs[0].ndim not in (1, 2):
            raise TypeError(
                'theano.tensor.Dot: input 0 (0-indexed) must have ndim of '
                '1 or 2, %d given. Consider calling theano.tensor.dot '
                'instead.' % inputs[0].ndim)
        if inputs[1].ndim not in (1, 2):
            raise TypeError(
                'theano.tensor.Dot: input 1 (0-indexed) must have ndim of '
                '1 or 2, %d given. Consider calling theano.tensor.dot '
                'instead.' % inputs[1].ndim)

        i_broadcastables = [input.type.broadcastable for input in inputs]
        bx, by = i_broadcastables
        # len(by) is 1 or 2 (validated above), so bz is always bound.
        if len(by) == 2:  # y is a matrix
            bz = bx[:-1] + by[-1:]
        elif len(by) == 1:  # y is vector
            bz = bx[:-1]

        i_dtypes = [input.type.dtype for input in inputs]
        outputs = [tensor(scal.upcast(*i_dtypes), bz)]
        return Apply(self, inputs, outputs)

    def perform(self, node, inp, out):
        x, y = inp
        z, = out

        # the asarray is here because dot between two vectors
        # gives a numpy float object but we need to return a 0d
        # ndarray
        z[0] = numpy.asarray(numpy.dot(x, y))

    def grad(self, inp, grads):
        x, y = inp
        gz, = grads
        xdim, ydim, gdim = x.type.ndim, y.type.ndim, gz.type.ndim

        # grad is scalar, so x is vector and y is vector
        if gdim == 0:
            xgrad = gz * y
            ygrad = gz * x

        # x is vector, y is matrix, grad is vector
        elif xdim == 1 and ydim == 2:
            xgrad = dot(gz, y.T)
            ygrad = outer(x.T, gz)

        # x is matrix, y is vector, grad is vector
        elif xdim == 2 and ydim == 1:
            xgrad = outer(gz, y.T)
            ygrad = dot(x.T, gz)

        # x is matrix, y is matrix, grad is matrix
        elif xdim == ydim == 2:
            xgrad = dot(gz, y.T)
            ygrad = dot(x.T, gz)

        # If x or y contain broadcastable dimensions but only one of
        # them know that a matching dimensions is broadcastable, the
        # above code don't always return the right broadcast pattern.
        # This cause problem down the road. See gh-1461.
        if xgrad.broadcastable != x.broadcastable:
            xgrad = patternbroadcast(xgrad, x.broadcastable)
        if ygrad.broadcastable != y.broadcastable:
            ygrad = patternbroadcast(ygrad, y.broadcastable)

        rval = xgrad, ygrad

        # The gradient is only defined for float dtypes; integer inputs
        # would make these asserts fail.
        for elem in rval:
            assert elem.dtype.find('float') != -1

        return rval

    def R_op(self, inputs, eval_points):
        # R_op for a \dot b evaluted at c for a and d for b is
        # simply c \dot b + a \dot d
        assert len(inputs) == 2
        assert len(eval_points) == 2
        if eval_points[0] is None and eval_points[1] is None:
            return [None]

        # When test values are enabled, fetch them so the eval points'
        # shapes can be checked against the inputs'.
        debugger_available = config.compute_test_value != 'off'

        if debugger_available:
            try:
                iv0 = gof.op.get_test_value(inputs[0])
            except AttributeError:
                gof.op.missing_test_message(
                    'first input passed to Dot.R_op has no test value')
                debugger_available = False
            try:
                iv1 = gof.op.get_test_value(inputs[1])
            except AttributeError:
                gof.op.missing_test_message(
                    'second input passed to Dot.R_op has no test value')
                debugger_available = False
            if eval_points[0]:
                try:
                    ev0 = gof.op.get_test_value(eval_points[0])
                except AttributeError:
                    gof.op.missing_test_message(
                        'first eval point passed to Dot.R_op '
                        'has no test value')
                    debugger_available = False
            if eval_points[1]:
                try:
                    ev1 = gof.op.get_test_value(eval_points[1])
                except AttributeError:
                    gof.op.missing_test_message(
                        'second eval point passed to Dot.R_op '
                        'has no test value')
                    debugger_available = False

        if debugger_available:
            input_values = [iv0, iv1]
            eval_point_values = [ev0, ev1]

            for i in xrange(2):
                if eval_point_values[i] is not None and \
                   input_values[i].shape != eval_point_values[i].shape:
                    raise ValueError(
                        'input ' + str(i) + ' and eval_point ' + str(i) +
                        ' to Dot.R_op should have the same shape, but '
                        'their shapes are %s and %s, respectively' % (
                            str(input_values[i].shape),
                            str(eval_point_values[i].shape)))

        if eval_points[0]:
            t1 = self(eval_points[0], inputs[1])
        if eval_points[1]:
            t2 = self(inputs[0], eval_points[1])

        if eval_points[0] and eval_points[1]:
            return [t1 + t2]
        elif eval_points[0]:
            return [t1]
        else:
            return [t2]

    def infer_shape(self, node, shapes):
        xshp, yshp = shapes
        x, y = node.inputs

        # vector / vector
        if x.ndim == 1 and y.ndim == 1:
            return [()]
        # matrix / vector
        if x.ndim == 2 and y.ndim == 1:
            return [xshp[:-1]]
        # vector / matrix
        if x.ndim == 1 and y.ndim == 2:
            return [yshp[-1:]]
        # matrix / matrix
        if x.ndim == 2 and y.ndim == 2:
            return [xshp[:-1] + yshp[-1:]]
        raise NotImplementedError()

    def __str__(self):
        return "dot"
# Single shared Dot instance; pretty-printed as an infix "middle dot".
_dot = Dot()
pprint.assign(_dot, printing.OperatorPrinter(printing.special['middle_dot'],
                                             -1, 'left'))
def dot(a, b):
    """
    Computes the dot product of two variables.

    For two matrices, this is equivalent to matrix multiplication.
    For two vectors, this is the inner product.
    When one variable is a scalar, this is like elementwise multiplication.
    For N dimensions, this is a sum product over the last axis
    of the first array and the second-to-last axis of the second array:

        dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])

    Note that this dot function does one of three things, in the following
    sequence:

    1. If either a or b is scalar, it returns the elementwise product
       without calling the Theano Dot op.
    2. If either a or b has more than 2 dimensions, it calls Theano's
       tensordot function with appropriate axes. The tensordot function
       expresses high-dimensional dot products in terms of 2D matrix
       multiplications, so it may be possible to further optimize for
       performance.
    3. If both a and b have either 1 or 2 dimensions, it calls Theano's
       Dot op on a and b.

    Notes
    -----
    Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops
    (see tensor.blas).
    Vector-vector products are sometimes optimized to Ger or CGer (see
    tensor.blas).
    Matrix-vector products are sometimes optimized to Gemv, CGemv (see
    tensor.blas).
    """
    a, b = as_tensor_variable(a), as_tensor_variable(b)

    if a.ndim == 0 or b.ndim == 0:
        return a * b
    elif a.ndim > 2 or b.ndim > 2:
        return tensordot(a, b, [[a.ndim - 1], [numpy.maximum(0, b.ndim - 2)]])
    else:
        return _dot(a, b)
#########################
# Linalg : TensorDot
#########################
def _tensordot_as_dot(a, b, axes, dot, batched):
    """
    Reduces a tensor dot product to a matrix or vector dot product. Based
    on code from Tijmen Tieleman's gnumpy
    (http://www.cs.toronto.edu/~tijmen/gnumpy.html).

    Please see the documentation of tensordot for the meaning of the a, b
    and axes arguments.

    :param dot: a function that accepts two symbolic variables and computes
                the appropriate dot product (e.g. dot, batched_dot)
    :type dot: function

    :param batched: whether to treat the first axis of a and b as a batch
                    axis. If so, this axis will be preserved in the output,
                    allowing this function to be used also for batched
                    tensor dot products.
    :type batched: boolean

    :returns: a tensor with shape equal to the concatenation of a's shape
              (less any dimensions that were summed over) and b's shape
              (less the first dimension and any dimensions that were summed
              over).
    :rtype: symbolic tensor
    """
    a, b = as_tensor_variable(a), as_tensor_variable(b)

    if not numpy.isscalar(axes) and len(axes) != 2:
        raise ValueError('Axes should be an integer or a '
                         'list/tuple of len 2 (%s was provided)'
                         % str(axes))

    # if 'axes' is a number of axes to multiply and sum over (trailing axes
    # of a, leading axes of b), we can just reshape and use dot.
    elif numpy.isscalar(axes):
        axes = int(axes)

        for operand_name, operand in (("a", a), ("b", b)):
            if axes > operand.ndim:
                raise ValueError(
                    'axes can not be larger than the dimension of %s '
                    '(%s.ndim=%i, axes=%i)'
                    % (operand_name, operand_name, operand.ndim, axes))
            if batched and axes == operand.ndim:
                raise ValueError(
                    'axes to sum over must not include the batch axis '
                    'of %s (%s.ndim=%i, axes=%i)'
                    % (operand_name, operand_name, operand.ndim, axes))

        batch_axes = 1 if batched else 0
        a_outaxes = slice(0, a.ndim - axes)
        b_outaxes = slice(batch_axes + axes, b.ndim)
        outshape = concatenate([a.shape[a_outaxes], b.shape[b_outaxes]])
        outbcast = a.broadcastable[a_outaxes] + b.broadcastable[b_outaxes]
        outndim = len(outbcast)

        # Collapse a to (other, summed) and b to (summed, other) so a
        # single 2-d dot computes the contraction.
        a_shape = [1] * 2
        b_shape = [1] * 2

        # compute total size of summed axes
        for i in xrange(0, axes):
            a_shape[1] *= a.shape[-(i + 1)]
            b_shape[0] *= b.shape[batch_axes + i]
        # compute total size of other axes
        for i in xrange(0, a.ndim - axes - batch_axes):
            a_shape[0] *= a.shape[batch_axes + i]
        for i in xrange(0, b.ndim - axes - batch_axes):
            b_shape[1] *= b.shape[-(i + 1)]

        if batched:
            a_shape.insert(0, a.shape[0])
            b_shape.insert(0, b.shape[0])

        a_reshaped = a.reshape(a_shape)
        b_reshaped = b.reshape(b_shape)

        out_reshaped = dot(a_reshaped, b_reshaped)
        out = out_reshaped.reshape(outshape, outndim)
        # Make sure the broadcastable pattern of the result is correct,
        # since some shape information can be lost in the reshapes.
        return patternbroadcast(out, outbcast)

    # if 'axes' is a list, transpose a and b such that the summed axes of a
    # are last and the summed axes of b are first.
    else:
        axes = [_pack(axes_) for axes_ in axes]

        if len(axes[0]) != len(axes[1]):
            raise ValueError('Axes elements must have the same length.')

        for i, (operand_name, operand) in enumerate((("a", a),
                                                     ("b", b))):
            if len(axes[i]) > operand.ndim:
                raise ValueError(
                    'axes[%i] should be array_like with length less than '
                    'the dimensions of %s (%s.ndim=%i, len(axes[0])=%i).' %
                    (i, operand_name, operand_name, operand.ndim,
                     len(axes[i])))
            if len(axes[i]) > 0 and numpy.max(axes[i]) >= operand.ndim:
                raise ValueError(
                    'axes[%i] contains dimensions greater than or equal '
                    'to %s.ndim (%s.ndim=%i, max(axes[0])=%i).' %
                    (i, operand_name, operand_name, operand.ndim,
                     numpy.max(numpy.array(axes[i]))))
            if batched and 0 in axes[i]:
                raise ValueError(
                    'axes to sum over must not contain the batch axis '
                    '(axes[%i]=%s)' %
                    (i, axes[i]))

        batch_axes = [0] if batched else []
        other_axes = [[x for x in xrange(operand.ndim)
                       if x not in axes[i] and x not in batch_axes]
                      for i, operand in enumerate((a, b))]

        a_shuffled = a.dimshuffle(batch_axes + other_axes[0] + axes[0])
        b_shuffled = b.dimshuffle(batch_axes + axes[1] + other_axes[1])

        # now that a and b are in the right order, recur with integer axes
        return _tensordot_as_dot(a_shuffled, b_shuffled, len(axes[0]),
                                 dot=dot, batched=batched)
def tensordot(a, b, axes=2):
    """
    Compute a generalized dot product over provided axes.

    Given two tensors a and b, tensordot computes a generalized dot product over
    the provided axes. Theano's implementation reduces all expressions to
    matrix or vector dot products and is based on code from Tijmen Tieleman's
    gnumpy (http://www.cs.toronto.edu/~tijmen/gnumpy.html).

    Parameters
    ----------
    a: symbolic tensor
        The first tensor variable.
    b: symbolic tensor
        The second tensor variable
    axes: int or array-like of length 2
        If an integer, the number of axes to sum over.
        If an array, it must have two array elements containing the axes
        to sum over in each tensor.

        Note that the default value of 2 is not guaranteed to work
        for all values of a and b, and an error will be raised if
        that is the case. The reason for keeping the default is to
        maintain the same signature as numpy's tensordot function
        (and np.tensordot raises analogous errors for non-compatible
        inputs).

        If an integer i, it is converted to an array containing
        the last i dimensions of the first tensor and the first
        i dimensions of the second tensor:
            axes = [list(range(a.ndim - i, a.ndim)), list(range(i))]

        If an array, its two elements must contain compatible axes
        of the two tensors. For example, [[1, 2], [2, 0]] means sum
        over the 2nd and 3rd axes of a and the 3rd and 1st axes of b.
        (Remember axes are zero-indexed!) The 2nd axis of a and the
        3rd axis of b must have the same shape; the same is true for
        the 3rd axis of a and the 1st axis of b.

    Returns
    -------
    symbolic tensor
        A tensor with shape equal to the concatenation of a's shape
        (less any dimensions that were summed over) and b's shape
        (less any dimensions that were summed over).

    Examples
    --------
    It may be helpful to consider an example to see what tensordot does.
    Theano's implementation is identical to NumPy's. Here a has shape (2, 3, 4)
    and b has shape (5, 6, 4, 3). The axes to sum over are [[1, 2], [3, 2]] --
    note that a.shape[1] == b.shape[3] and a.shape[2] == b.shape[2]; these axes
    are compatible. The resulting tensor will have shape (2, 5, 6) -- the
    dimensions that are not being summed:

    >>> a = np.random.random((2,3,4))
    >>> b = np.random.random((5,6,4,3))

    #tensordot
    >>> c = np.tensordot(a, b, [[1,2],[3,2]])

    #loop replicating tensordot
    >>> a0, a1, a2 = a.shape
    >>> b0, b1, _, _ = b.shape
    >>> cloop = np.zeros((a0,b0,b1))

    #loop over non-summed indices -- these exist
    #in the tensor product.
    >>> for i in range(a0):
    ...     for j in range(b0):
    ...         for k in range(b1):
    ...             #loop over summed indices -- these don't exist
    ...             #in the tensor product.
    ...             for l in range(a1):
    ...                 for m in range(a2):
    ...                     cloop[i,j,k] += a[i,l,m] * b[j,k,m,l]

    >>> np.allclose(c, cloop)
    True

    This specific implementation avoids a loop by transposing a and b such that
    the summed axes of a are last and the summed axes of b are first. The
    resulting arrays are reshaped to 2 dimensions (or left as vectors, if
    appropriate) and a matrix or vector dot product is taken. The result is
    reshaped back to the required output dimensions.

    In an extreme case, no axes may be specified. The resulting tensor
    will have shape equal to the concatenation of the shapes of a and b:

    >>> c = np.tensordot(a, b, 0)
    >>> print(a.shape)
    (2,3,4)
    >>> print(b.shape)
    (5,6,4,3)
    >>> print(c.shape)
    (2,3,4,5,6,4,3)

    See the documentation of numpy.tensordot for more examples.
    """
    return _tensordot_as_dot(a, b, axes, dot=dot, batched=False)
def outer(x, y):
    """Return vector-vector outer product.

    If an input isn't a vector, we flatten it first.
    """
    left = x if x.ndim == 1 else x.flatten()
    right = y if y.ndim == 1 else y.flatten()
    # Promote to a column and a row, then take a matrix dot product.
    column = left.dimshuffle(0, 'x')
    row = right.dimshuffle('x', 0)
    return dot(column, row)
def any(x, axis=None, keepdims=False):
    """Symbolic reduction: True where any element along `axis` is true."""
    result = elemwise.Any(axis)(x)
    if not keepdims:
        return result
    return makeKeepDims(x, result, axis)
def all(x, axis=None, keepdims=False):
    """Symbolic reduction: True where all elements along `axis` are true."""
    result = elemwise.All(axis)(x)
    if not keepdims:
        return result
    return makeKeepDims(x, result, axis)
# Some NumPy version like 1.9.2 return a view for numpy.diagonal.
# Probe once at import time; Diagonal uses this flag to declare a
# view_map when the output aliases the input.
x = numpy.zeros((4, 4))
numpy_diagonal_return_view = numpy.may_share_memory(numpy.diagonal(x), x)
del x
class Diagonal(Op):
    """Return specified diagonals.

    Parameters
    ----------
    x
        A tensor variable with x.ndim >= 2.

    Returns
    -------
    vector
        A vector representing the diagonal elements.
    """
    __props__ = ("offset", "axis1", "axis2")

    def __init__(self, offset=0, axis1=0, axis2=1):
        # When numpy.diagonal returns a view (probed at import time
        # above), declare that output 0 aliases input 0.
        if numpy_diagonal_return_view:
            self.view_map = {0: [0]}
        self.offset = offset
        self.axis1 = axis1
        self.axis2 = axis2

    def make_node(self, x):
        x = as_tensor_variable(x)
        assert x.ndim >= 2
        # Taking a diagonal removes one dimension.
        return Apply(self, [x], [tensor(dtype=x.dtype,
                                        broadcastable=[False] * (x.ndim - 1))])

    def perform(self, node, inputs, outputs):
        (x,) = inputs
        (z,) = outputs
        z[0] = x.diagonal(self.offset, self.axis1, self.axis2)

    def grad(self, inputs, gout):
        (x,) = inputs
        (gz,) = gout
        # Gradient of Diagonal is not implemented.
        return [grad_not_implemented(self, 0, x)]

    def infer_shape(self, node, shapes):
        in_shape, = shapes
        dim1 = in_shape[self.axis1]
        dim2 = in_shape[self.axis2]
        out_shape = [d for i, d in enumerate(in_shape)
                     if i not in (self.axis1, self.axis2)]
        # The following logic is inspired by C code of PyArray_Diagonal().
        offset = self.offset
        if offset > 0:
            diag_size = clip(dim2 - offset, 0, dim1)
        elif offset < 0:
            diag_size = clip(dim1 + offset, 0, dim2)
        else:
            diag_size = minimum(dim1, dim2)
        out_shape.append(diag_size)
        return [tuple(out_shape)]
def diagonal(a, offset=0, axis1=0, axis2=1):
    """Return the diagonal of `a` given by `offset`, `axis1` and `axis2`.

    The common case (main diagonal of the first two axes) is delegated
    to `nlinalg.extract_diag`; other cases use the `Diagonal` Op.
    """
    if (offset, axis1, axis2) == (0, 0, 1):
        return theano.tensor.nlinalg.extract_diag(a)
    return Diagonal(offset, axis1, axis2)(a)
class Diag(Op):
    """Build a matrix with the input vector on its main diagonal
    (the 1-d case of ``numpy.diag``)."""
    __props__ = ()

    def make_node(self, diag):
        diag = as_tensor_variable(diag)
        if diag.type.ndim != 1:
            raise TypeError('data argument must be a vector', diag.type)
        return Apply(self, [diag], [matrix(dtype=diag.dtype)])

    def perform(self, node, inputs, outputs):
        (z,) = outputs
        z[0] = numpy.diag(inputs[0])

    def grad(self, inputs, gout):
        (gz,) = gout
        # Only the diagonal of the output gradient flows back to the
        # input vector.
        return [diagonal(gz)]

    def infer_shape(self, nodes, shapes):
        # Output is a square matrix with side equal to the vector length.
        return [(shapes[0][0],) * 2]
def diag(v, k=0):
    """Numpy-style ``diag``: build a diagonal matrix from a vector, or
    extract the k-th diagonal from a matrix."""
    ndim = v.ndim
    if ndim == 1:
        assert k == 0, "diagonals other than main are not implemented"
        return Diag()(v)
    if ndim == 2:
        return diagonal(v, k)
    raise ValueError("Input must be 1- or 2-d.")
def stacklists(arg):
    """
    Recursively stack lists of tensors to maintain similar structure.

    This function can create a tensor from a shaped list of scalars:

    Examples
    --------
    >>> from theano.tensor import stacklists, scalars, matrices
    >>> from theano import function
    >>> a, b, c, d = scalars('abcd')
    >>> X = stacklists([[a, b], [c, d]])
    >>> f = function([a, b, c, d], X)
    >>> f(1, 2, 3, 4)
    array([[ 1.,  2.],
           [ 3.,  4.]], dtype=float32)

    We can also stack arbitrarily shaped tensors. Here we stack matrices into
    a 2 by 2 grid:

    >>> from numpy import ones
    >>> a, b, c, d = matrices('abcd')
    >>> X = stacklists([[a, b], [c, d]])
    >>> f = function([a, b, c, d], X)
    >>> x = ones((4, 4), 'float32')
    >>> f(x, x, x, x).shape
    (2, 2, 4, 4)
    """
    # Leaves (non-sequences) are returned untouched; sequences are
    # stacked after recursing into each element.
    if not isinstance(arg, (tuple, list)):
        return arg
    return stack([stacklists(element) for element in arg])
def ptp(a, axis=None):
    """
    Range of values (maximum - minimum) along an axis.

    The name of the function comes from the acronym for peak to peak.

    Parameters
    ----------
    a
        Input tensor.
    axis
        Axis along which to find the peaks. By default, flatten the array.

    Returns
    -------
    array
        A new array holding the result.
    """
    a = as_tensor_variable(a)

    # `max` and `min` here are this module's symbolic reductions, not the
    # Python builtins.
    out = max(a, axis) - min(a, axis)

    return out
def power(x, y):
    """Elementwise exponentiation: equivalent to ``x ** y``."""
    return pow(x, y)
def swapaxes(y, axis1, axis2):
    """Return `y` with `axis1` and `axis2` interchanged (via dimshuffle)."""
    var = as_tensor_variable(y)
    order = list(range(var.ndim))
    order[axis1], order[axis2] = order[axis2], order[axis1]
    return var.dimshuffle(order)
def choose(a, choices, out=None, mode='raise'):
    """
    Construct an array from an index array and a set of arrays to choose from.

    First of all, if confused or uncertain, definitely look at the Examples -
    in its full generality, this function is less simple than it might seem
    from the following code description (below ndi = numpy.lib.index_tricks):

    np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)]).

    But this omits some subtleties. Here is a fully general summary:

    Given an ``index`` array (a) of integers and a sequence of n arrays
    (choices), a and each choice array are first broadcast, as necessary,
    to arrays of a common shape; calling these Ba and
    Bchoices[i], i = 0,...,n-1 we have that, necessarily,
    Ba.shape == Bchoices[i].shape for each i.
    Then, a new array with shape Ba.shape is created as follows:

    - if mode=raise (the default), then, first of all, each element of a
      (and thus Ba) must be in the range [0, n-1]; now, suppose that
      i (in that range) is the value at the (j0, j1, ..., jm) position in Ba -
      then the value at the same position in the new array is the value in
      Bchoices[i] at that same position;

    - if mode=wrap, values in a (and thus Ba) may be any (signed) integer;
      modular arithmetic is used to map integers outside the range [0, n-1]
      back into that range; and then the new array is constructed as above;

    - if mode=clip, values in a (and thus Ba) may be any (signed) integer;
      negative integers are mapped to 0; values greater than n-1 are mapped
      to n-1; and then the new array is constructed as above.

    Parameters
    ----------
    a : int array
        This array must contain integers in [0, n-1], where n is the number of
        choices, unless mode=wrap or mode=clip, in which cases any integers
        are permissible.
    choices : sequence of arrays
        Choice arrays. a and all of the choices must be broadcastable to
        the same shape. If choices is itself an array (not recommended),
        then its outermost dimension (i.e., the one corresponding to
        choices.shape[0]) is taken as defining the ``sequence``.
    out : array, optional
        Accepted only to keep the same function signature as NumPy;
        writing into preallocated storage is not supported, so any
        value other than None raises.
    mode : {``raise`` (default), ``wrap``, ``clip``}, optional
        Specifies how indices outside [0, n-1] will be treated:
        ``raise`` : an exception is raised
        ``wrap`` : value becomes value mod n
        ``clip`` : values < 0 are mapped to 0, values > n-1 are mapped to n-1

    Returns
    -------
    merged_array - array
        The merged result.

    Raises
    ------
    ValueError - shape mismatch
        If a and each choice array are not all broadcastable to the same shape.
    """
    # An `assert` here would be stripped under `python -O`, silently
    # ignoring `out`; raise explicitly instead.
    if out is not None:
        raise ValueError(
            "choose does not support an `out` argument; it exists only "
            "to match NumPy's signature")
    return Choose(mode)(a, choices)
class Choose(Op):
    """Elementwise selection among `choices` driven by an integer index
    array, with `mode` controlling out-of-range index handling.

    See the `choose` function above for the full semantics.
    """
    __props__ = ('mode',)

    def __init__(self, mode):
        assert mode in ("raise", "wrap", "clip")
        self.mode = mode

    def infer_shape(self, node, shapes):
        if isinstance(node.inputs[1], TensorVariable):
            # We have padded node.inputs[0] to the right number of
            # dimensions for the output
            out_shape = []
            for sh1, sh2, b1 in zip(shapes[0],
                                    shapes[1][1:],
                                    node.inputs[0].broadcastable):
                if b1:
                    out_shape.append(sh2)
                else:
                    out_shape.append(sh1)
            return [tuple(out_shape)]
        else:
            import theano.typed_list
            assert isinstance(node.inputs[1],
                              theano.typed_list.TypedListVariable)
            raise ShapeError("Case not implemented")
        # NOTE: dead code that followed the if/else (both branches
        # return or raise) has been removed.

    def make_node(self, a, choices):
        # Import here as it isn't imported by default and we can't
        # import at the top as it would cause circular import.
        import theano.typed_list
        a = as_tensor_variable(a)
        if a.dtype not in theano.tensor.discrete_dtypes:
            raise TypeError(
                'choose first argument must have an [u]int* dtype. Got %s.'
                % a.dtype)

        if isinstance(choices, (tuple, list,
                                theano.typed_list.TypedListVariable)):
            choice = theano.typed_list.make_list(choices)
            choice_ndim = choice.ttype.ndim
            choice_bcast = choice.ttype.broadcastable
        else:
            choice = as_tensor_variable(choices)
            # The leading axis of `choice` indexes the alternatives, so it
            # does not count toward the output rank.
            choice_ndim = choice.ndim - 1
            choice_bcast = choice.broadcastable[1:]
        out_ndim = numpy.max([a.ndim, choice_ndim])

        # Make explicit all added broadcastable dimensions.
        a = shape_padleft(a, out_ndim - a.ndim)
        if len(choice_bcast) != out_ndim:
            if isinstance(choice.type, TensorType):
                choice = choice.dimshuffle(0,
                                           *(('x',) * (out_ndim - choice_ndim) +
                                             tuple(range(1, choice.ndim))))
                choice_ndim = choice.ndim - 1
                choice_bcast = choice.broadcastable[1:]
            else:
                raise NotImplementedError(
                    "We currently didn't implemented that case. "
                    "To make it work, explicitly add dimensions "
                    "of size one for dimensions that will be broadcasted")

        # The output is broadcastable only where both inputs are.
        bcast = [False] * out_ndim
        for idx, (b1, b2) in enumerate(
                zip(a.broadcastable,
                    (True,) * (out_ndim - choice_ndim) + choice_bcast)):
            if b1 and b2:
                bcast[idx] = True
        o = TensorType(choice.dtype, bcast)
        return Apply(self, [a, choice], [o()])

    def perform(self, node, inputs, outputs):
        (z,) = outputs
        a = inputs[0]
        choice = inputs[1]
        # TODO reuse out?
        z[0] = numpy.choose(a, choice, mode=self.mode)
class AllocEmpty(gof.Op):
"""Implement Alloc on the cpu, but without initializing memory."""
__props__ = ("dtype",)
# specify the type of the data
def __init__(self, dtype):
    # Normalize to lowercase so equal dtypes produce equal Ops via the
    # "dtype" prop.
    assert isinstance(dtype, str), dtype
    self.dtype = dtype.lower()
def validate_shape(self, shape):
    """Check the shape arguments are integer scalars and build the
    output variable.

    Returns the list of symbolic shape entries and a fresh output whose
    broadcastable pattern marks dimensions that are constant 1.
    """
    sh = [as_tensor_variable(s) for s in shape]
    bcast = []
    for s in sh:
        if s.type.dtype[:3] not in ('int', 'uin'):
            raise TypeError('Shape arguments must be integers', s)
        # if s is constant 1, then we're broadcastable in that dim
        try:
            const_shp = get_scalar_constant_value(s)
        except NotScalarConstantError:
            const_shp = None
        bcast.append(1 == const_shp)
    otype = TensorType(dtype=self.dtype, broadcastable=bcast)
    output = otype()
    return sh, output
def make_node(self, *shape):
shape, output = self.validate_shape(shape)
output.tag.values_eq_approx = values_eq_approx_always_true
# The outut can contain nan/inf. output.type is a new
# instance, so we can do this only for that variable.
output.type.filter_checks_isfinite = False
# We can't reuse filter_checks_isfinite as by default it is
# False and it is set to true only in DebugMode.
# We can't set it in the type as other make_node can reuse the type.
# We can't set it in the variable as it isn't copied when we copy
# the variale. So we set it in the tag.
output.tag.nan_guard_mode_check = False
return Apply(self, shape, [output])
def debug_perform(self, node, inputs, out_):
self.perform(node, inputs, out_)
out_[0][0].fill(-123456789)
def perform(self, node, inputs, out_):
out, = out_
sh = tuple([int(i) for i in inputs])
if out[0] is None or out[0].shape != sh:
out[0] = numpy.empty(sh, dtype=self.dtype)
def c_code(self, node, name, inputs, out_, sub):
dtype = "NPY_" + self.dtype.upper()
out, = out_
fail = sub['fail']
shps = inputs
nd = len(shps)
str = "npy_intp dims[%(nd)s];\n" % locals()
for idx, sh in enumerate(shps):
str += "dims[%(idx)s] =" \
"((npy_intp)((dtype_%(sh)s*)" \
" PyArray_DATA(%(sh)s))[0]);\n" % locals()
# Validate that the output storage exists
str += "if(%(out)s==NULL\n" % locals()
for idx, sh in enumerate(shps):
str += "||PyArray_DIMS(%(out)s)[%(idx)s]!=dims[%(idx)s]" % locals()
str += """){
/* Reference received to invalid output variable.
Decrease received reference's ref count and allocate new
output variable */
Py_XDECREF(%(out)s);
%(out)s = (PyArrayObject*)PyArray_EMPTY(%(nd)s,
dims,
%(dtype)s,
0);
if (!%(out)s)
{
PyErr_SetString(PyExc_MemoryError, "alloc failed");
%(fail)s;
}
}
""" % locals()
return str
def infer_shape(self, node, input_shapes):
return [node.inputs]
def c_code_cache_version(self):
return (3,)
def do_constant_folding(self, node):
return False
def connection_pattern(self, node):
return [[False] for i in node.inputs]
def grad(self, inputs, grads):
return [DisconnectedType()() for i in inputs]
def R_op(self, inputs, eval_points):
return [zeros(inputs, self.dtype)]
| {
"repo_name": "JazzeYoung/VeryDeepAutoEncoder",
"path": "theano/tensor/basic.py",
"copies": "1",
"size": "211461",
"license": "bsd-3-clause",
"hash": -4765078037086965000,
"line_mean": 32.3482100615,
"line_max": 121,
"alpha_frac": 0.5681378599,
"autogenerated": false,
"ratio": 3.8549083948591742,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9921940464077397,
"avg_score": 0.00022115813635559427,
"num_lines": 6341
} |
"""A `Type` and `Op` classes to work with numpy.ndarrays symbolically."""
from six.moves import builtins
import sys
import warnings
import numpy
from six import integer_types
from six.moves import xrange
import numbers
import theano
from theano.compat import izip
from theano.configparser import config
from theano import gof
from theano.gof import Apply, Constant, Op, Variable
from theano.tensor import elemwise
from theano.tensor.var import (AsTensorError, TensorVariable,
TensorConstant,
_tensor_py_operators)
from theano.tensor.type import TensorType, values_eq_approx_always_true
from theano.tensor.type_other import NoneConst
from theano import scalar as scal
from functools import partial
from theano import compile, printing
from theano.printing import pprint, min_informative_str
# For history
from theano.compile import Rebroadcast, Shape, shape
# We use these exceptions as well.
import theano.scalar.sharedvar
from theano.gradient import grad_undefined
from theano.gradient import grad_not_implemented
from theano.gradient import DisconnectedType
# set up the external interface
from theano.tensor.elemwise import Elemwise, DimShuffle, CAReduce, Sum
import logging
_logger = logging.getLogger("theano.tensor.basic")
__docformat__ = "restructuredtext en"
# This is needed as we will hide it later: this module defines tensor
# constructors that reuse these builtin names.
python_complex = complex
python_any = any
python_all = all

# Define common subsets of dtypes (as strings).
complex_dtypes = list(map(str, scal.complex_types))
continuous_dtypes = list(map(str, scal.continuous_types))
float_dtypes = list(map(str, scal.float_types))
discrete_dtypes = list(map(str, scal.discrete_types))
all_dtypes = list(map(str, scal.all_types))
int_dtypes = list(map(str, scal.int_types))
uint_dtypes = list(map(str, scal.uint_types))
class ShapeError(Exception):
    """Error signalling that a shape could not be computed."""
def check_equal_numpy(x, y):
    """
    Return True iff x and y are equal.

    Checks the dtype and shape if x and y are numpy.ndarray instances;
    all elements must then agree within 1e-10 absolute tolerance.
    """
    if isinstance(x, numpy.ndarray) and isinstance(y, numpy.ndarray):
        # BUGFIX: was numpy.any, which declared two arrays "equal" as soon
        # as a single element matched (and equal empty arrays unequal).
        return (x.dtype == y.dtype and x.shape == y.shape and
                numpy.all(abs(x - y) < 1e-10))
    elif (isinstance(x, numpy.random.RandomState) and
          isinstance(y, numpy.random.RandomState)):
        return python_all(numpy.all(a == b) for a, b in
                          izip(x.__getstate__(), y.__getstate__()))
    else:
        return x == y
# Make check_equal_numpy the equality checker used by theano.compile.
compile.register_checker(check_equal_numpy)

__oplist_constructor_list = []
"""List of functions to be listed as op constructors in the oplist
(`gen_oplist`, doc/oplist.txt)."""
def constructor(f):
    """Add `f` to :doc:`oplist`.

    Make `f` appear as a constructor in the oplist (`gen_oplist`,
    doc/oplist.txt).  Usable as a decorator: returns `f` unchanged.
    """
    __oplist_constructor_list.append(f)
    return f
def __oplist_tag(thing, tag):
    """Record `tag` on `thing` by appending to its `__oplist_tags` list."""
    existing_tags = getattr(thing, '__oplist_tags', [])
    existing_tags.append(tag)
    thing.__oplist_tags = existing_tags
if 0:
    # Deliberately disabled (dead code kept for reference):
    # this starts to feel like we're enumerating all the types
    # the one place where this is used we should also allow for sparse
    # variables
    # - JB 20100226
    def as_cuda_or_tensor_variable(x, name=None, ndim=None):
        """
        Do the same as_tensor_variable,
        but do not transfer the value on the gpu.
        """
        if hasattr(x, '_as_CudaNdarrayVariable'):
            # TODO: pass name and ndim arguments
            return x._as_CudaNdarrayVariable()
        return as_tensor_variable(x, name, ndim)
def as_tensor_variable(x, name=None, ndim=None):
    """Return `x`, transformed into a `TensorType`.

    This function is often used by `make_node` methods of `Op` subclasses
    to turn ndarrays, numbers, `Scalar` instances, `Apply` instances and
    `TensorType` instances into valid input list elements.

    Parameters
    ----------
    x : Apply instance, Variable instance, numpy.ndarray, or number
        This thing will be transformed into a `Variable` in a sensible way. An
        ndarray argument will not be copied, but a list of numbers will be
        copied to make an ndarray.
    name : str or None
        If a new `Variable` instance is created, it will be named with this
        string.
    ndim : None or integer
        Return a Variable with this many dimensions. Raise TypeError if it's
        not possible.

    Raises
    ------
    ValueError
        If an `Apply` with more than one output is fetched.
    AsTensorError
        If `x` cannot be converted to a TensorType Variable.

    """
    # Objects can opt in to the conversion protocol.
    if hasattr(x, '_as_TensorVariable'):
        return x._as_TensorVariable()  # TODO: pass name and ndim arguments
    if isinstance(x, gof.Apply):
        # use Apply's default output mechanism
        if (x.op.default_output is None) and (len(x.outputs) != 1):
            raise ValueError(
                "It is ambiguous which output of a multi-output Op has"
                " to be fetched.", x)
        x = x.default_output()
    if isinstance(x, Variable):
        if isinstance(x.type, scal.Scalar):
            x = tensor_from_scalar(x)
        if not isinstance(x.type, TensorType):
            raise AsTensorError(
                "Variable type field must be a TensorType.", x, x.type)
        if ndim is None:
            return x
        else:
            if (x.type.ndim > ndim):
                # strip off leading broadcastable dimensions
                first_non_broadcastable = [idx for idx in xrange(x.ndim)
                                           if not x.broadcastable[idx]][0]
                x = x.dimshuffle(list(range(x.ndim))[first_non_broadcastable:])
                if x.ndim > ndim:
                    raise ValueError(
                        'TensorType could not be cast to have %i dimensions'
                        % ndim, x.type
                    )
                return x
            elif (x.type.ndim < ndim):
                # Pad with broadcastable dimensions on the left.
                return shape_padleft(x, n_ones=(ndim - x.type.ndim))
            else:
                return x
    # A sequence containing Variables cannot become a constant; stack it.
    if isinstance(x, (tuple, list)) and python_any(isinstance(xi, Variable)
                                                   for xi in x):
        try:
            return stack(x)
        except (TypeError, ValueError):
            pass
    if isinstance(x, bool):
        raise AsTensorError(
            "Cannot cast True or False as a tensor variable. Please use 1 or "
            "0. This error might be caused by using the == operator on "
            "Variables. v == w does not do what you think it does, "
            "use theano.tensor.eq(v, w) instead.")
    try:
        return constant(x, name=name, ndim=ndim)
    except TypeError:
        try:
            str_x = str(x)
        except Exception:
            str_x = repr(x)
        raise AsTensorError("Cannot convert %s to TensorType" % str_x, type(x))
# this has a different name, because _as_tensor_variable is the
# function which ops use to upcast their arguments... this
# internal-use function is a good place to put debugging stuff, better
# than the global astensor.
_as_tensor_variable = as_tensor_variable

# Public short alias.
as_tensor = as_tensor_variable
class NumpyAutocaster(object):
    """
    This class is used to cast python ints and floats to numpy arrays.

    The behavior when called on scalar `x` depends on `config.cast_policy`:
    - 'numpy' will simply use the same type as found by `numpy.asarray(x)`.
    - 'numpy+floatX' will do the same, except it will use float32 instead
      of float64 if `x` is a Python float and `config.floatX` is set to
      'float32' (note that if `x` is a numpy scalar whose data type is
      float64, it is not modified since we assume the user is purposedly
      using float64).
    - 'custom' lets one define a tuple of data types such that:
      - if `x` is already a numpy scalar and its data type is in this
        tuple, then it is returned unchanged;
      - otherwise, the first data type in this tuple that can represent
        `x` without loss of precision will be used, unless `x` is a float
        and 'float32' is in the tuple (in which case `x` is cast as a
        float32);
      - if no data type can represent `x` without loss of precision, then
        the last data type in the tuple will be used.

    Parameters
    ----------
    dtypes: tuple of strings
        The ordered list of preferred data types (only used when
        `config.cast_policy` is set to 'custom', see the `NumpyAutocaster`
        help for details).

    """

    def __init__(self, dtypes):
        self.dtypes = tuple(dtypes)

    def __call__(self, x):
        # Make sure we only deal with scalars.
        assert (isinstance(x, integer_types) or
                isinstance(x, float) or
                (isinstance(x, numpy.ndarray) and x.ndim == 0))

        if config.cast_policy == 'numpy':
            return numpy.asarray(x)
        elif config.cast_policy == 'numpy+floatX':
            rval = numpy.asarray(x)
            # Only Python floats (no 'dtype' attribute) are downcast.
            if ((not hasattr(x, 'dtype') and
                 rval.dtype in ('float64', 'float32') and
                 rval.dtype != config.floatX)):
                rval = theano._asarray(rval, dtype=config.floatX)
            return rval

        # The following is the original code, corresponding to the 'custom'
        # option for `config.cast_policy`.
        assert config.cast_policy == 'custom'

        try:
            # Pass through numpy scalars, since they are already typed on
            # purpose typically.
            if str(x.dtype) in self.dtypes:
                # No need to cast `x` into a new dtype. Note that we still
                # need to convert it into an array, because it may not be
                # one already (e.g. if x == numpy.float64(1.1)).
                return numpy.asarray(x)
        except AttributeError:
            # Means `x` has no 'dtype' attribute.
            pass

        # unsafe downcast of float64 variables when config.floatX == 'float32'
        # recall: float is numpy.float
        if ((isinstance(x, float) and
             config.floatX in self.dtypes and
             config.floatX != 'float64')):
            return theano._asarray(x, dtype=config.floatX)

        # Don't autocast to float16 unless config.floatX is float16
        try_dtypes = [d for d in self.dtypes
                      if config.floatX == 'float16' or d != 'float16']

        # Pick the first dtype that represents x exactly.
        for dtype in try_dtypes:
            x_ = theano._asarray(x, dtype=dtype)
            if numpy.all(x == x_):
                break
        # returns either an exact x_==x, or the last cast x_
        return x_
# Default autocasters used by constant_or_value below.
autocast_int = NumpyAutocaster(('int8', 'int16', 'int32', 'int64'))
autocast_float = NumpyAutocaster(('float16', 'float32', 'float64'))

# autocast_float dtypes might be manipulated in tensor.__init__
#
# Note: it's a bit weird for a compiler to automatically downcast
# literals like this, and it might have implications for efficiency
# when mixing types. For example when you add 1.0 + dmatrix(), the
# 1.0 could be converted to float32, and require upcasting for the +
# operation at every position in the dmatrix. using
# theano._asarray(1.0, dtype='float64') will circumvent this
# autocasting, and in future, our ops might be smarter about factoring
# out upcasts. The advantage of this mechanism is to combine it with
# floatX so that 1.0 + xmatrix() will always have the same type as the
# xmatrix().
#
class autocast_float_as(object):
    """
    Temporarily adjust autocasting behavior.

    This class makes it possible to temporarily and locally adjust autocasting
    behavior when `config.cast_policy` is set to 'custom'.
    If `config.cast_policy` is not 'custom', an exception is raised.

    This class might be convenient in some code, but it definitely
    helps to test the autocasting mechanism.

    Examples
    --------
    >>> with autocast_float_as('float32'):
    ...     assert (fvector() + 1.1).dtype == 'float32'  # temporary downcasting
    >>> assert (fvector() + 1.1).dtype == 'float64'  # back to default behaviour

    """

    def __init__(self, *dtypes):
        self.dtypes = dtypes
        assert config.cast_policy == 'custom'

    def __enter__(self):
        assert config.cast_policy == 'custom'
        # Swap the dtypes of the module-global `autocast_float`,
        # remembering the previous ones for __exit__.
        self.old_dtypes = autocast_float.dtypes
        autocast_float.dtypes = self.dtypes

    def __exit__(self, *args):
        assert config.cast_policy == 'custom'
        # Restore whatever was active before entering the context.
        autocast_float.dtypes = self.old_dtypes
def constant_or_value(x, rtype, name=None, ndim=None, dtype=None):
    """Return a symbolic `Constant` with value `x`.

    When `dtype` is None and `rtype` is `TensorConstant`, the autocasting
    rules (see `NumpyAutocaster` above) choose the dtype.

    Raises
    ------
    TypeError
        `x` could not be converted to a numpy.ndarray.
    ValueError
        `x` could not be expanded to have ndim dimensions.

    """
    if dtype is not None:
        # in this case, the semantics are that the caller is forcing the dtype
        x_ = theano._asarray(x, dtype=dtype)
    else:
        # In this case, this function should infer the dtype according to the
        # autocasting rules. See autocasting above.
        x_ = None
        if rtype is TensorConstant and isinstance(x, integer_types):
            try:
                x_ = autocast_int(x)
            except OverflowError:
                # This is to imitate numpy behavior which tries to fit
                # bigger numbers into a uint64.
                x_ = theano._asarray(x, dtype='uint64')
        elif rtype is TensorConstant and isinstance(x, float):
            x_ = autocast_float(x)
        elif isinstance(x, numpy.ndarray):
            x_ = x
            # Currently we do not have a bool dtype in Theano.
            # So we upcast it to uint8 to avoid breaking our interface for
            # constant.
            if x.dtype == 'bool':
                x_ = numpy.asarray(x_, dtype='uint8')
        else:
            # Here x is probably a list or a tuple. If it contains a long,
            # we will behave like the current NumPy version: 1.7 and below,
            # it will only work if the long fits in int64. For NumPy 1.7.1+,
            # it will work if the long fits in int64 or uint64.
            x_ = numpy.asarray(x)

    assert type(x_) in [numpy.ndarray, numpy.memmap]

    # Dimensions of size 1 are marked broadcastable.
    bcastable = [d == 1 for d in x_.shape]
    if ndim is not None:
        if len(bcastable) < ndim:
            # Pad with broadcastable dimensions on the left.
            bcastable = [True] * (ndim - len(bcastable)) + bcastable
        elif len(bcastable) > ndim:
            # TODO: strip off dimensions of size 1
            raise ValueError(
                'ndarray could not be cast to constant with %i dimensions' %
                ndim)
        assert len(bcastable) == ndim

    try:
        if rtype is TensorConstant:
            rval = rtype(
                TensorType(dtype=x_.dtype, broadcastable=bcastable),
                x_.copy(),
                name=name)
            return rval
        else:
            # leave the shape out of the type
            return rtype(TensorType(dtype=x_.dtype, broadcastable=bcastable),
                         x_, name=name)
    except Exception:
        raise TypeError("Could not convert %s to TensorType" % x, type(x))
def constant(x, name=None, ndim=None, dtype=None):
    """Return a `TensorConstant` with value `x`, possibly from a cache."""
    ret = constant_or_value(x, rtype=TensorConstant, name=name, ndim=ndim,
                            dtype=dtype)

    # We create a small cache of frequently used constant.
    # This speed up the Merge optimization for big graph.
    # We want to cache all scalar to don't merge as frequently constants.
    # But we don't want to cache too much stuff
    # So we cache integer with dtype [u]int and float where the value is
    # between -10 and 10
    # We want to cache all broadcast pattern for scalar.
    if not constant.enable:
        return ret
    sig = ret.signature()
    if (sig not in constant_cache and ret.data.size == 1 and
            (-10) <= ret.data <= 10 and
            (ret.dtype in int_dtypes or ret.dtype in uint_dtypes or
             (ret.dtype in float_dtypes and int(ret.data) == ret.data))):
        constant_cache[sig] = ret
        # This is needed to raise a good error to the user.
        ret.cached = True

    # Equal small constants thus share a single cached object.
    return constant_cache.get(sig, ret)
# Module-level switch and storage for the constant cache above.
constant.enable = True
constant_cache = {}
def _obj_is_wrappable_as_tensor(x):
    """Return True iff `constant` can wrap `x` as a TensorConstant."""
    try:
        constant(x)
    except TypeError:
        return False
    return True
# Module-level tolerances consumed by _allclose below, chosen once at
# import time from config.tensor.cmp_sloppy.
if int(config.tensor.cmp_sloppy) > 1:
    # This config variable is a quick-and-dirty way to get low-precision
    # comparisons. For a more precise setting of these tolerances set
    # them explicitly in your user code by assigning, for example,
    # "theano.tensor.basic.float32_atol = ..."
    # When config.tensor.cmp_sloppy>1 we are even more sloppy. This is
    # useful to test the GPU as they don't use extended precision and
    # this cause some difference bigger then the normal sloppy.
    float16_atol = 5e-3
    float16_rtol = 1e-2

    float32_atol = 5e-4
    float32_rtol = 1e-3

    float64_rtol = 1e-4
    float64_atol = 1e-3
elif int(config.tensor.cmp_sloppy):
    float16_atol = 1e-3
    float16_rtol = 5e-3

    float32_atol = 1e-4
    float32_rtol = 1e-3

    float64_rtol = 1e-4
    float64_atol = 1e-3
else:
    # If you change those value in test don't forget to put them back
    # when the test end. Don't forget the case when the test fail.
    float16_atol = 5e-4
    float16_rtol = 5e-4

    float32_atol = 1e-5
    float32_rtol = 1e-5

    # defaults in numpy.allclose
    # Don't be more strict then numpy rtol
    # It cause useless error.
    float64_rtol = 1.0000000000000001e-05
    float64_atol = 1e-8
def _get_atol_rtol(a, b):
    """Pick (atol, rtol) from the lowest-precision dtype among `a` and `b`.

    float16 inputs get the loosest module-level tolerances,
    float32/complex64 the intermediate ones, everything else the
    float64 defaults.
    """
    dtype_pair = (str(a.dtype), str(b.dtype))
    if 'float16' in dtype_pair:
        return float16_atol, float16_rtol
    if 'float32' in dtype_pair or 'complex64' in dtype_pair:
        return float32_atol, float32_rtol
    return float64_atol, float64_rtol
def _allclose(a, b, rtol=None, atol=None):
    """numpy.allclose with dtype-dependent default tolerances.

    Tolerances default to the module-level float16/32/64 values picked by
    `_get_atol_rtol`; explicit `rtol` / `atol` arguments override them.
    """
    a = numpy.asarray(a)
    b = numpy.asarray(b)
    atol_, rtol_ = _get_atol_rtol(a, b)
    if rtol is not None:
        rtol_ = rtol
    if atol is not None:
        atol_ = atol

    # Work around bug in Numpy, see
    # http://projects.scipy.org/numpy/ticket/1684
    # abs() of the most negative value of a signed integer dtype wraps
    # around to a negative number, so `abs(b) < 0` flags exactly the
    # values that trigger the numpy bug; upcast those to float64.
    if str(b.dtype) in int_dtypes and (numpy.absolute(b) < 0).any():
        b = theano._asarray(b, dtype='float64')

    return numpy.allclose(a, b, atol=atol_, rtol=rtol_)
class NotScalarConstantError(Exception):
    """Signal that `get_scalar_constant_value` was applied to something
    that is not a scalar constant."""
class EmptyConstantError(NotScalarConstantError):
    """Signal that `get_scalar_const_value` was applied to a constant
    with no elements (e.g. ``numpy.array([])``)."""
def numpy_scalar(data):
    """Return a scalar stored in a numpy ndarray.

    Parameters
    ----------
    data : numpy.ndarray
        Expected to hold exactly one element.

    Raises
    ------
    EmptyConstantError
        If `data` holds no element at all (e.g. numpy.array([])).
    NotScalarConstantError
        If the numpy ndarray is not a scalar.

    """
    # handle case where data is numpy.array([]): with ndim > 0, a
    # zero-length dimension is the only way to hold no element.
    # `builtins.max` is used because this module shadows `max` with a
    # tensor op (the original reached into __builtins__ for the same
    # reason, which breaks when __builtins__ is a module, not a dict).
    if data.ndim > 0 and builtins.max(data.shape) == 0:
        assert numpy.all(numpy.array([]) == data)
        raise EmptyConstantError()
    try:
        # Use the builtin complex explicitly: `numpy.complex` (a mere
        # alias of the builtin) was removed in NumPy 1.24, and this
        # module also shadows `complex` with a tensor constructor.
        builtins.complex(data)  # works for all numeric scalars
        return data
    except Exception:
        raise NotScalarConstantError(
            'v.data is non-numeric, non-scalar, or has more than one'
            ' unique value', data)
# Scalar ops whose output get_scalar_constant_value can evaluate eagerly
# (via op.perform) once all of their inputs are known constants.
get_scalar_constant_value_elemwises = (
    scal.Cast, scal.Switch,
    scal.NEQ, scal.EQ,
    scal.LT, scal.GT, scal.LE, scal.GE,
    scal.Sub, scal.Add, scal.Mod, scal.Mul,
    scal.IntDiv, scal.TrueDiv, scal.Minimum, scal.Maximum)
def get_scalar_constant_value(orig_v, elemwise=True,
                              only_process_constants=False):
    """Return the constant scalar(0-D) value underlying variable `v`.

    If `v` is the output of dimshuffles, fills, allocs, rebroadcasts,
    cast, OutputGuard, DeepCopyOp, ScalarFromTensor, ScalarOp, Elemwise
    and some pattern with Subtensor, this function digs through them.

    If `v` is not some view of constant scalar data, then raise a
    NotScalarConstantError.

    Parameters
    ----------
    orig_v
        The variable (or raw number/ndarray) to inspect.
    elemwise : bool
        If False, we won't try to go into elemwise. So this call is faster.
    only_process_constants : bool
        If True, we only attempt to obtain the value of `orig_v` if it's
        directly constant and don't try to dig through dimshuffles, fills,
        allocs, and other to figure out its value.

    Notes
    -----
    There may be another function similar to this one in the code,
    but I'm not sure where it is.

    """
    v = orig_v
    # Walk down the graph, one pass-through op per iteration.
    while True:
        if v is None:
            # None is not a scalar (and many uses of this function seem
            # to depend on passing it None)
            raise NotScalarConstantError()

        if isinstance(v, (numpy.integer, integer_types, float)):
            return numpy.asarray(v)

        if isinstance(v, numpy.ndarray):
            return numpy_scalar(v)

        if isinstance(v, Constant):
            if getattr(v.tag, 'unique_value', None) is not None:
                data = v.tag.unique_value
            else:
                data = v.data
            return numpy_scalar(data)

        if not only_process_constants and getattr(v, 'owner', None):
            if isinstance(v.owner.op, (Alloc, DimShuffle, Rebroadcast,
                                       compile.ops.OutputGuard,
                                       compile.DeepCopyOp)):
                # These ops only forward/reshape their first input.
                v = v.owner.inputs[0]
                continue
            elif isinstance(v.owner.op, theano.compile.ops.Shape_i):
                if isinstance(v.owner.inputs[0], Constant):
                    return numpy.asarray(
                        v.owner.inputs[0].data.shape[v.owner.op.i])
            # Don't act as the constant_folding optimization here as this
            # fct is used too early in the optimization phase. This would
            # mess with the stabilization optimization and be too slow.
            # We put all the scalar Ops used by get_canonical_form_slice()
            # to allow it to determine the broadcast pattern correctly.
            elif isinstance(v.owner.op, (ScalarFromTensor, TensorFromScalar)):
                return get_scalar_constant_value(v.owner.inputs[0])
            elif isinstance(v.owner.op, scal.ScalarOp):
                if isinstance(v.owner.op, scal.Second):
                    # We don't need both input to be constant for second
                    shp, val = v.owner.inputs
                    v = val
                    continue
                if isinstance(v.owner.op, get_scalar_constant_value_elemwises):
                    # Evaluate the op eagerly on the constant inputs.
                    const = [get_scalar_constant_value(i)
                             for i in v.owner.inputs]
                    ret = [[None]]
                    v.owner.op.perform(v.owner, const, ret)
                    return ret[0][0]
            elif elemwise and isinstance(v.owner.op, Elemwise):
                if isinstance(v.owner.op.scalar_op, scal.Second):
                    # We don't need both input to be constant for second
                    shp, val = v.owner.inputs
                    v = val
                    continue
                elif isinstance(v.owner.op.scalar_op,
                                get_scalar_constant_value_elemwises):
                    const = [get_scalar_constant_value(i)
                             for i in v.owner.inputs]
                    ret = [[None]]
                    v.owner.op.perform(v.owner, const, ret)
                    return ret[0][0]
            elif (isinstance(v.owner.op, theano.tensor.subtensor.Subtensor) and
                  v.ndim == 0):
                if isinstance(v.owner.inputs[0], TensorConstant):
                    cdata = tuple(v.owner.op.get_constant_idx(v.owner.inputs))
                    try:
                        return v.owner.inputs[0].data.__getitem__(cdata)
                    except IndexError:
                        raise IndexError(
                            str(tuple(v.owner.op.idx_list)) +
                            " is not a valid index into " +
                            str(v.owner.inputs[0].data))

                # The index list 'idx_list' should have length the same
                # shape as the input.
                # TODO: implement the case where we take a scalar in a matrix
                assert len(v.owner.op.idx_list) == v.owner.inputs[0].ndim

                # Needed to make better graph in this test in
                # theano/tensor/tests/test_sharedvar.py:
                # test_shared_options.test_specify_shape_partial
                if ((v.owner.inputs[0].owner and
                     isinstance(v.owner.inputs[0].owner.op, Join) and
                     len(v.owner.op.idx_list) == 1)):
                    # Ensure the Join is joining only scalar variables (so that
                    # the constant value can be found at the same index as the
                    # one used in the sub-tensor).
                    if python_all(var.ndim == 0 for var in
                                  v.owner.inputs[0].owner.inputs[1:]):
                        idx = v.owner.op.idx_list[0]
                        if isinstance(idx, gof.Type):
                            idx = get_scalar_constant_value(v.owner.inputs[1])
                        # Note the '+ 1' is because the first argument to Join
                        # is the axis.
                        ret = v.owner.inputs[0].owner.inputs[idx + 1]
                        ret = get_scalar_constant_value(ret)
                        # join can cast implicitly its input in some case.
                        return theano._asarray(ret, dtype=v.type.dtype)
                    if python_all(var.ndim == 1 for var in
                                  v.owner.inputs[0].owner.inputs[1:]):
                        idx = v.owner.op.idx_list[0]
                        if isinstance(idx, gof.Type):
                            idx = get_scalar_constant_value(v.owner.inputs[1])
                        try:
                            # TODO: assert joined axis is 0.
                            # Locate the joined vector containing index idx.
                            length = 0
                            for joined in v.owner.inputs[0].owner.inputs[1:]:
                                ll = get_vector_length(joined)
                                if idx < length + ll:
                                    return get_scalar_constant_value(
                                        joined[idx - length])
                                length += ll
                        except TypeError:
                            pass
                        except ValueError:
                            pass

                elif (v.owner.inputs[0].owner and
                      isinstance(v.owner.inputs[0].owner.op,
                                 theano.tensor.opt.MakeVector) and
                      # MakeVector normally accept only scalar as input.
                      # We put this check in case there is change in the future
                      python_all(var.ndim == 0 for var in
                                 v.owner.inputs[0].owner.inputs) and
                      len(v.owner.op.idx_list) == 1):
                    idx = v.owner.op.idx_list[0]
                    if isinstance(idx, gof.Type):
                        idx = get_scalar_constant_value(v.owner.inputs[1])
                    # Python 2.4 does not support indexing with numpy.integer
                    # So we cast it.
                    idx = int(idx)
                    ret = v.owner.inputs[0].owner.inputs[idx]
                    ret = get_scalar_constant_value(ret)
                    # MakeVector can cast implicitly its input in some case.
                    return theano._asarray(ret, dtype=v.type.dtype)

                # This is needed when we take the grad as the Shape op
                # are not already changed into MakeVector
                owner = v.owner
                leftmost_parent = owner.inputs[0]
                if (leftmost_parent.owner and
                        isinstance(leftmost_parent.owner.op,
                                   theano.tensor.Shape)):
                    op = owner.op
                    idx_list = op.idx_list
                    idx = idx_list[0]
                    if isinstance(idx, gof.Type):
                        idx = get_scalar_constant_value(owner.inputs[1])
                    grandparent = leftmost_parent.owner.inputs[0]
                    gp_broadcastable = grandparent.type.broadcastable
                    ndim = grandparent.type.ndim
                    if grandparent.owner and isinstance(grandparent.owner.op,
                                                        Rebroadcast):
                        ggp_broadcastable = grandparent.owner.inputs[0].broadcastable
                        l = [b1 or b2 for b1, b2 in zip(ggp_broadcastable,
                                                        gp_broadcastable)]
                        gp_broadcastable = tuple(l)

                    assert ndim == len(gp_broadcastable)

                    if not (idx < len(gp_broadcastable)):
                        msg = ("get_scalar_constant_value detected " +
                               "deterministic IndexError: x.shape[%d] " +
                               "when x.ndim=%d.") % (idx, ndim)
                        if config.exception_verbosity == 'high':
                            msg += ' x=%s' % min_informative_str(v)
                        else:
                            msg += ' x=%s' % str(v)
                        raise ValueError(msg)

                    # A broadcastable dimension always has length 1.
                    if gp_broadcastable[idx]:
                        return numpy.asarray(1)

        raise NotScalarConstantError(v)
# Easy constructors

def tensor(*args, **kwargs):
    """Build a `TensorType` from `args`/`kwargs` and return a fresh
    variable of it, named by the optional `name` keyword."""
    var_name = kwargs.pop('name', None)
    ttype = TensorType(*args, **kwargs)
    return ttype(name=var_name)
def _multi(*fns):
    """Vectorize variable-constructor(s) over counts or name lists.

    Each wrapped constructor can be called with an integer n (build n
    anonymous variables), with several name strings, or with a single
    multi-character string whose characters are used as individual names.
    """
    def f2(f, *names):
        if names and isinstance(names[0], integer_types):
            # NOTE(review): `names` is a tuple here, so `names == 1` can
            # never be true and the branch below is dead; presumably
            # `names[0] == 1` was intended. As written, callers always
            # receive a list -- verify call sites before "fixing" this.
            if names == 1:
                return f()
            else:
                return [f() for i in xrange(names[0])]
        if isinstance(names, tuple):
            if len(names) == 1:
                names = names[0]
        if len(names) == 1:
            return f(names)
        else:
            # For a multi-character string, iterates per character.
            return [f(name) for name in names]
    if len(fns) == 1:
        # NOTE(review): this passes the *tuple* `fns` (not `fns[0]`) to
        # partial; the single-constructor path looks unused/untested.
        return partial(f2, fns)
    else:
        return [partial(f2, f) for f in fns]
# Pre-built 0-d (scalar) tensor types, one per dtype.
cscalar = TensorType('complex64', ())
zscalar = TensorType('complex128', ())
fscalar = TensorType('float32', ())
dscalar = TensorType('float64', ())
bscalar = TensorType('int8', ())
wscalar = TensorType('int16', ())
iscalar = TensorType('int32', ())
lscalar = TensorType('int64', ())
def scalar(name=None, dtype=None):
    """Return a symbolic scalar variable.

    Parameters
    ----------
    dtype: numeric
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.

    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, ())(name)
scalars, fscalars, dscalars, iscalars, lscalars = _multi(
    scalar, fscalar, dscalar, iscalar, lscalar)

# Dtype groupings used for dispatch elsewhere in the module.
int_types = bscalar, wscalar, iscalar, lscalar
float_types = fscalar, dscalar
complex_types = cscalar, zscalar
int_scalar_types = int_types
float_scalar_types = float_types
complex_scalar_types = complex_types

# Pre-built 1-d (vector) tensor types, one per dtype.
cvector = TensorType('complex64', (False, ))
zvector = TensorType('complex128', (False, ))
fvector = TensorType('float32', (False, ))
dvector = TensorType('float64', (False, ))
bvector = TensorType('int8', (False,))
wvector = TensorType('int16', (False,))
ivector = TensorType('int32', (False, ))
lvector = TensorType('int64', (False, ))
def vector(name=None, dtype=None):
    """Return a symbolic vector variable.

    Parameters
    ----------
    dtype: numeric
        None means to use theano.config.floatX.
    name
        A name to attach to this variable

    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (False, ))(name)
vectors, fvectors, dvectors, ivectors, lvectors = _multi(
    vector, fvector, dvector, ivector, lvector)

int_vector_types = bvector, wvector, ivector, lvector
float_vector_types = fvector, dvector
complex_vector_types = cvector, zvector

# Pre-built 2-d (matrix) tensor types, one per dtype.
cmatrix = TensorType('complex64', (False, False))
zmatrix = TensorType('complex128', (False, False))
fmatrix = TensorType('float32', (False, False))
dmatrix = TensorType('float64', (False, False))
bmatrix = TensorType('int8', (False, False))
wmatrix = TensorType('int16', (False, False))
imatrix = TensorType('int32', (False, False))
lmatrix = TensorType('int64', (False, False))
def matrix(name=None, dtype=None):
    """Return a symbolic matrix variable.

    Parameters
    ----------
    dtype: numeric
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.

    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (False, False))(name)
matrices, fmatrices, dmatrices, imatrices, lmatrices = _multi(
    matrix, fmatrix, dmatrix, imatrix, lmatrix)

int_matrix_types = bmatrix, wmatrix, imatrix, lmatrix
float_matrix_types = fmatrix, dmatrix
complex_matrix_types = cmatrix, zmatrix

# Pre-built 2-d "row" types (first dimension broadcastable).
crow = TensorType('complex64', (True, False))
zrow = TensorType('complex128', (True, False))
frow = TensorType('float32', (True, False))
drow = TensorType('float64', (True, False))
brow = TensorType('int8', (True, False))
wrow = TensorType('int16', (True, False))
irow = TensorType('int32', (True, False))
lrow = TensorType('int64', (True, False))
def row(name=None, dtype=None):
    """Return a symbolic row variable (ndim=2, broadcastable=[True,False]).

    Parameters
    ----------
    dtype: numeric type
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.

    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (True, False))(name)
rows, frows, drows, irows, lrows = _multi(row, frow, drow, irow, lrow)

# Pre-built 2-d "column" types (second dimension broadcastable).
ccol = TensorType('complex64', (False, True))
zcol = TensorType('complex128', (False, True))
fcol = TensorType('float32', (False, True))
dcol = TensorType('float64', (False, True))
bcol = TensorType('int8', (False, True))
wcol = TensorType('int16', (False, True))
icol = TensorType('int32', (False, True))
lcol = TensorType('int64', (False, True))
def col(name=None, dtype=None):
    """Return a symbolic column variable (ndim=2, broadcastable=[False,True]).

    Parameters
    ----------
    dtype : numeric
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.

    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (False, True))(name)
cols, fcols, dcols, icols, lcols = _multi(col, fcol, dcol, icol, lcol)

# Pre-built 3-d tensor types, one per dtype.
ctensor3 = TensorType('complex64', ((False,) * 3))
ztensor3 = TensorType('complex128', ((False,) * 3))
ftensor3 = TensorType('float32', ((False,) * 3))
dtensor3 = TensorType('float64', ((False,) * 3))
btensor3 = TensorType('int8', ((False,) * 3))
wtensor3 = TensorType('int16', ((False,) * 3))
itensor3 = TensorType('int32', ((False,) * 3))
ltensor3 = TensorType('int64', ((False,) * 3))
def tensor3(name=None, dtype=None):
    """Return a symbolic 3-D variable.

    Parameters
    ----------
    dtype: numeric type
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.

    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (False, False, False))(name)
tensor3s, ftensor3s, dtensor3s, itensor3s, ltensor3s = _multi(
    tensor3, ftensor3, dtensor3, itensor3, ltensor3)

# Pre-built 4-d tensor types, one per dtype.
ctensor4 = TensorType('complex64', ((False,) * 4))
ztensor4 = TensorType('complex128', ((False,) * 4))
ftensor4 = TensorType('float32', ((False,) * 4))
dtensor4 = TensorType('float64', ((False,) * 4))
btensor4 = TensorType('int8', ((False,) * 4))
wtensor4 = TensorType('int16', ((False,) * 4))
itensor4 = TensorType('int32', ((False,) * 4))
ltensor4 = TensorType('int64', ((False,) * 4))
def tensor4(name=None, dtype=None):
    """Return a symbolic 4-D variable.

    Parameters
    ----------
    dtype: numeric type
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.

    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (False, False, False, False))(name)
# Plural constructors for 4-D variables (see `_multi`).
tensor4s, ftensor4s, dtensor4s, itensor4s, ltensor4s = _multi(
    tensor4, ftensor4, dtensor4, itensor4, ltensor4)
# Historical alias.
Tensor = TensorType
# This bizarre push-import avoids a circular dependency: `elemwise` cannot
# import these names from this module at import time, so we assign them
# onto the `elemwise` module object after they exist.
elemwise.as_tensor_variable = as_tensor_variable
elemwise.TensorType = TensorType
elemwise.TensorVariable = TensorVariable
elemwise.TensorConstant = TensorConstant
#########################
# Utilities
#########################
def _scal_elemwise_with_nfunc(nfunc, nin, nout):
    """
    Replace a symbol definition with an elementwise version of the
    corresponding scalar Op. If it is not None, the nfunc argument
    should be a string such that getattr(numpy, nfunc) implements
    a vectorized version of the elemwise operation. nin is the number
    of inputs expected by that function, and nout is the number of
    **destination** inputs it takes. That is, the function should
    take nin+nout inputs. nout == 0 means that the numpy function
    does not take a numpy array argument to put its result in.

    Returns a decorator: the decorated function's name selects the scalar
    Op (``getattr(scal, name)``) and its docstring is prepended to the
    resulting Elemwise's docstring.
    """
    def construct(symbol):
        symbolname = symbol.__name__
        inplace = symbolname.endswith('_inplace')
        if inplace:
            msg = "inplace"
        else:
            msg = "no_inplace"
        n = "Elemwise{%s,%s}" % (symbolname, msg)
        if inplace:
            # Strip the '_inplace' suffix to find the scalar op, then
            # rebuild it with a transfer type; {0: 0} is the Elemwise
            # inplace pattern (output 0 overwrites input 0).
            scalar_op = getattr(scal, symbolname[:-len('_inplace')])
            inplace_scalar_op = scalar_op.__class__(scal.transfer_type(0))
            rval = elemwise.Elemwise(inplace_scalar_op, {0: 0}, name=n,
                                     nfunc_spec=(nfunc and (nfunc, nin, nout)))
        else:
            scalar_op = getattr(scal, symbolname)
            rval = elemwise.Elemwise(scalar_op, name=n,
                                     nfunc_spec=(nfunc and (nfunc, nin, nout)))
        if getattr(symbol, '__doc__', False):
            rval.__doc__ = symbol.__doc__ + '\n' + rval.__doc__
        # for the meaning of this see the ./epydoc script
        # it makes epydoc display rval as if it were a function, not an object
        rval.__epydoc_asRoutine = symbol
        rval.__module__ = 'tensor'
        pprint.assign(rval, printing.FunctionPrinter(symbolname))
        return rval
    return construct
# Common case: no numpy nfunc counterpart is registered.
_scal_elemwise = _scal_elemwise_with_nfunc(None, None, None)
def _pack(x):
"""
Convert x to a list if it is an iterable, otherwise wrap it in a list.
"""
try:
return list(x)
except TypeError:
return [x]
#########################
# Casting Operations
#########################
class TensorFromScalar(Op):
    """Op wrapping a theano.scalar variable into a 0-d tensor."""
    __props__ = ()
    def make_node(self, s):
        # `s` must be a theano.scalar variable; output is a 0-d tensor
        # of the same dtype.
        assert isinstance(s.type, scal.Scalar)
        return Apply(self,
                     [s],
                     [tensor(dtype=s.type.dtype,
                             broadcastable=())])
    def perform(self, node, inp, out_):
        s, = inp
        out, = out_
        out[0] = numpy.asarray(s)
    def infer_shape(self, node, in_shapes):
        # Output is always 0-d, whatever the input.
        return [()]
    def grad(self, inp, grads):
        s, = inp
        dt, = grads
        if s.type.dtype in float_dtypes:
            assert dt.type.dtype in float_dtypes
            return [scalar_from_tensor(dt)]
        # If the input dtype is an integer, then so is the output dtype,
        # and the "zero" gradient can be represented in that int dtype.
        # Currently, theano.grad insists that the dtype of the returned
        # gradient has a float dtype, so we use floatX.
        if s.type.dtype in discrete_dtypes:
            return [s.zeros_like().astype(theano.config.floatX)]
        raise NotImplementedError("grad not implemented for complex dtypes")
# Singleton instance used as the public constructor.
tensor_from_scalar = TensorFromScalar()
class ScalarFromTensor(Op):
    """Op extracting the theano.scalar value out of a 0-d tensor."""
    __props__ = ()
    def make_node(self, t):
        # Only 0-d tensors (empty broadcastable pattern) are accepted.
        assert isinstance(t.type, TensorType)
        assert t.type.broadcastable == ()
        return Apply(self,
                     [t],
                     [scal.get_scalar_type(dtype=t.type.dtype).make_variable()]
                     )
    def perform(self, node, inp, out_):
        s, = inp
        out, = out_
        # flatten() gives a 1-element array for a 0-d input.
        out[0] = s.flatten()[0]
    def infer_shape(self, node, in_shapes):
        return [()]
    def grad(self, inp, grads):
        s, = inp
        dt, = grads
        return [tensor_from_scalar(dt)]
    def R_op(self, inputs, eval_points):
        if None in eval_points:
            return [None]
        return self.make_node(*eval_points).outputs
    def c_code(self, node, name, inputs, outputs, sub):
        x, = inputs
        z, = outputs
        fail = sub['fail']
        # Read the single element straight out of the array's buffer.
        return """
        %(z)s = ((dtype_%(x)s*)(PyArray_DATA(%(x)s)))[0];
        """ % locals()
    def c_code_cache_version(self):
        return (1,)
# Singleton instance used as the public constructor.
scalar_from_tensor = ScalarFromTensor()
# to be removed as we get the epydoc routine-documenting thing going
# -JB 20080924
def _conversion(real_value, name):
    """Tag `real_value` as a casting op and register its pretty-printer."""
    __oplist_tag(real_value, 'casting')
    real_value.__module__ = 'tensor.basic'
    pprint.assign(real_value, printing.FunctionPrinter(name))
    return real_value
# These _convert_to_<type> functions have leading underscores to indicate that
# they should not be called directly. They do not perform sanity checks about
# what types you are casting to what. That logic is implemented by the
# `cast()` function below.
_convert_to_int8 = _conversion(
    elemwise.Elemwise(scal.convert_to_int8), 'int8')
"""Cast to 8-bit integer"""
_convert_to_int16 = _conversion(
    elemwise.Elemwise(scal.convert_to_int16), 'int16')
"""Cast to 16-bit integer"""
_convert_to_int32 = _conversion(
    elemwise.Elemwise(scal.convert_to_int32), 'int32')
"""Cast to 32-bit integer"""
_convert_to_int64 = _conversion(
    elemwise.Elemwise(scal.convert_to_int64), 'int64')
"""Cast to 64-bit integer"""
_convert_to_uint8 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint8), 'uint8')
"""Cast to unsigned 8-bit integer"""
_convert_to_uint16 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint16), 'uint16')
"""Cast to unsigned 16-bit integer"""
_convert_to_uint32 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint32), 'uint32')
"""Cast to unsigned 32-bit integer"""
_convert_to_uint64 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint64), 'uint64')
"""Cast to unsigned 64-bit integer"""
_convert_to_float16 = _conversion(
    elemwise.Elemwise(scal.convert_to_float16), 'float16')
"""Cast to half-precision floating point"""
_convert_to_float32 = _conversion(
    elemwise.Elemwise(scal.convert_to_float32), 'float32')
"""Cast to single-precision floating point"""
_convert_to_float64 = _conversion(
    elemwise.Elemwise(scal.convert_to_float64), 'float64')
"""Cast to double-precision floating point"""
_convert_to_complex64 = _conversion(
    elemwise.Elemwise(scal.convert_to_complex64), 'complex64')
"""Cast to single-precision complex"""
_convert_to_complex128 = _conversion(
    elemwise.Elemwise(scal.convert_to_complex128), 'complex128')
"""Cast to double-precision complex"""
# dtype name -> elemwise conversion op, used by `cast()` below.
_cast_mapping = {
    'int8': _convert_to_int8,
    'int16': _convert_to_int16,
    'int32': _convert_to_int32,
    'int64': _convert_to_int64,
    'uint8': _convert_to_uint8,
    'uint16': _convert_to_uint16,
    'uint32': _convert_to_uint32,
    'uint64': _convert_to_uint64,
    'float16': _convert_to_float16,
    'float32': _convert_to_float32,
    'float64': _convert_to_float64,
    'complex64': _convert_to_complex64,
    'complex128': _convert_to_complex128}
@constructor
def cast(x, dtype):
    """Symbolically cast `x` to a Tensor of type `dtype`.

    ``dtype`` may be the string 'floatX', which resolves to the configured
    ``config.floatX``. Casting complex to real is refused.
    """
    if dtype == 'floatX':
        dtype = config.floatX
    _x = as_tensor_variable(x)
    src_dtype = _x.type.dtype
    if src_dtype == dtype:
        # Nothing to do: already the requested dtype.
        return _x
    if src_dtype.startswith('complex') and not dtype.startswith('complex'):
        raise TypeError((
            'Casting from complex to real is ambiguous: consider real(), '
            'imag(), angle() or abs()'))
    return _cast_mapping[dtype](x)
##########################
# Unary Operations
##########################
class MaxAndArgmax(Op):
    """
    Calculate the max and argmax over a given axis or over all axes.

    Inputs are the tensor and a constant axis specification; outputs are
    the maximum values and their (int64) indices along the reduced axes.
    """
    nin = 2  # tensor, axis
    nout = 2  # max val, max idx
    E_axis = 'invalid axis'
    __props__ = ()
    def make_node(self, x, axis=None):
        x = _as_tensor_variable(x)
        # Normalize the many accepted axis forms (int, 0-d array,
        # list/tuple/array, symbolic constant, None) to either None
        # (= reduce all axes) or a list of ints.
        if isinstance(axis, (integer_types, numpy.integer)):
            axis = [int(axis)]
        elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
            axis = [int(axis)]
        elif isinstance(axis, (tuple, list, numpy.ndarray)):
            axis = [int(a) for a in axis]
            if axis == list(range(x.type.ndim)):
                axis = None
        elif isinstance(axis, Variable):
            if NoneConst.equals(axis):
                axis = None
            elif not isinstance(axis, TensorConstant):
                raise TypeError(
                    "MaxAndArgmax needs a constant axis. Got %s" % axis)
            else:
                assert (axis.dtype.startswith("int") or
                        axis.dtype.startswith("uint"))
                if isinstance(axis.data, (integer_types, numpy.integer)) or \
                   (isinstance(axis.data, numpy.ndarray) and
                        axis.data.ndim == 0):
                    axis = [int(axis.data)]
                elif isinstance(axis.data, (list, numpy.ndarray)):
                    axis = [int(i) for i in axis.data]
        # Make axis entries non-negative, and sort them
        if isinstance(axis, list):
            for idx in xrange(len(axis)):
                if axis[idx] < 0:
                    axis[idx] += x.type.ndim
            axis.sort()
        # Verify that axes are valid
        all_axes = []
        if isinstance(axis, list):
            for ax in axis:
                if ax < 0 or ax >= x.type.ndim:
                    raise ValueError(
                        'Invalid axis: %s (the number of dimensions of the '
                        'input is: %s)' % (ax, x.type.ndim))
                if ax not in all_axes:
                    all_axes.append(ax)
        else:
            all_axes = list(range(x.ndim))
        # Reducing over every axis is canonicalized to the None constant.
        if axis is None or axis == list(range(x.type.ndim)):
            axis = NoneConst.clone()
        else:
            axis = _as_tensor_variable(all_axes)
            assert axis.ndim == 1
        inputs = [x, axis]
        # We keep the original broadcastable flags for dimensions on which
        # we do not perform the max / argmax.
        broadcastable = [b for i, b in enumerate(x.type.broadcastable)
                         if i not in all_axes]
        outputs = [tensor(x.type.dtype, broadcastable, name='max'),
                   tensor('int64', broadcastable, name='argmax')]
        return Apply(self, inputs, outputs)
    def perform(self, node, inp, outs):
        x, axes = inp
        max, max_idx = outs
        if axes is None:
            axes = tuple(range(x.ndim))
        else:
            axes = tuple(int(ax) for ax in axes)
        max[0] = theano._asarray(numpy.max(x, axes),
                                 dtype=node.outputs[0].dtype)
        # Numpy does not support multiple axes for argmax
        # Work around
        keep_axes = numpy.array([i for i in range(x.ndim) if i not in axes],
                                dtype='int64')
        # Not-reduced axes in front
        transposed_x = numpy.transpose(x, numpy.concatenate((keep_axes, axes)))
        reshaped_x = transposed_x.reshape(transposed_x.shape[:len(keep_axes)] +
                                          (-1,))
        # Collapse all reduced axes into one, then argmax over it.
        max_idx[0] = theano._asarray(numpy.argmax(reshaped_x, axis=-1),
                                     dtype='int64')
    def c_code(self, node, name, inp, out, sub):
        x, axis = inp
        max, argmax = out
        fail = sub["fail"]
        if NoneConst.equals(node.inputs[1]):
            # NPY_MAXDIMS tells the numpy C-API to reduce over all axes.
            axis_code = "axis = NPY_MAXDIMS;"
        else:
            assert node.inputs[1].ndim == 1
            # Fall back to perform() if there are multiple axes
            if len(node.inputs[1].data) > 1:
                raise NotImplementedError()
            axis_code = """
            axis = ((dtype_%(axis)s*)PyArray_DATA(%(axis)s))[0];
            if(axis > PyArray_NDIM(%(x)s)-1 || axis < -PyArray_NDIM(%(x)s)){
                PyErr_SetString(PyExc_ValueError,
                "MaxAndArgmax, bad axis argument");
                %(fail)s
            }
            """ % locals()
        ret = """
        int axis;
        Py_CLEAR(%(max)s);
        Py_CLEAR(%(argmax)s);//todo pass them as out parameter.
        %(axis_code)s
        %(max)s = (PyArrayObject*)PyArray_Max(%(x)s, axis, NULL);
        if(%(max)s == NULL){
            PyErr_SetString(PyExc_ValueError,
                         "MaxAndArgmax, max failed");
            %(fail)s;
        }
        if(!PyArray_CheckExact(%(max)s)){
            %(max)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(max)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);
            if(%(max)s == NULL){
                %(fail)s;
            }
        }
        %(argmax)s = (PyArrayObject*)PyArray_ArgMax(%(x)s, axis, NULL);
        if(%(argmax)s == NULL){
            PyErr_SetString(PyExc_ValueError, "MaxAndArgmax, argmax failed");
            Py_CLEAR(%(max)s);
            %(fail)s;
        }
        if(!PyArray_CheckExact(%(argmax)s)){
            %(argmax)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(argmax)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);
            if(%(argmax)s == NULL){
                %(fail)s;
            }
        }
        if(PyArray_TYPE(%(argmax)s) != NPY_INT64){
            PyObject * tmp = PyArray_Cast(%(argmax)s, NPY_INT64);
            if (NULL == tmp){
                %(fail)s;
            }
            Py_DECREF(%(argmax)s);
            %(argmax)s = (PyArrayObject*)tmp;
        }
        """
        return ret % locals()
    def c_code_cache_version(self):
        return (3,)
    def infer_shape(self, node, shapes):
        ishape, axis_shape = shapes
        axis = node.inputs[1]
        if axis.data is None:
            # Full reduction: both outputs are scalars.
            return [(), ()]
        rval = tuple([ishape[i] for (i, b) in enumerate(
            node.inputs[0].type.broadcastable) if i not in axis.data])
        return [rval, rval]
    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return [None, None]
        if not isinstance(inputs[1], theano.Constant):
            raise ValueError(('R_op supported for arg_max only for '
                              'constant axis!'))
        if inputs[1].data > 1:
            raise ValueError(('R_op supported for arg_max only when '
                              ' axis is 0 or 1'))
        if inputs[0].ndim != 2:
            raise ValueError(('R_op supported for arg_max only when '
                              ' input is a matrix'))
        max_vals, max_pos = self.make_node(*inputs).outputs
        # Select the eval_points entries at the argmax positions.
        if inputs[1].data == 0:
            return [eval_points[0][max_pos,
                                   arange(eval_points[0].shape[1])], None]
        else:
            return [eval_points[0][arange(eval_points[0].shape[0]),
                                   max_pos], None]
    def grad(self, inp, grads):
        # The strict sense mathematical gradient of the maximum function is
        # not calculated here for it is not defined at every point where some
        # coordinates are identical. However, since the latter set has null
        # Lebesgue measure, the result may be interpreted as weak gradient.
        # @note: This function should work correctly for L{vector}s.
        # (x, y), (gz, gw)
        # gz*dz/dx + gw*dw/dx, gz*dz/dy + gw*dw/dy
        # gMax * dMax/dx + gArgMax * dArgMax/dx,
        # gMax * dMax/daxis + gArgMax * dArgMax/daxis
        # g_max has one less dimension than x, so you need to complete
        # g_max to x's shape when axis=0 the broadcasting mechanism
        # does it automatically
        x, axis = inp
        g_max, g_max_idx = grads
        g_max_disconnected = isinstance(g_max.type, DisconnectedType)
        g_max_idx_disconnected = isinstance(g_max_idx.type, DisconnectedType)
        # if the op is totally disconnected, so are its inputs
        if g_max_disconnected and g_max_idx_disconnected:
            return [DisconnectedType()(), DisconnectedType()()]
        axis_grad = grad_undefined(
            self, 1, axis,
            "argmax is not defined for non-integer axes so"
            " argmax(x, axis+eps) is undefined")
        # if the max is disconnected but the argmax is not,
        # the gradient on its inputs is zero
        if g_max_disconnected:
            return [x.zeros_like(), axis_grad]
        if NoneConst.equals(axis):
            axis_ = list(range(x.ndim))
        else:
            axis_ = axis
        xmax = max(x, axis_)
        # Raise the g_max and xmax to the same number of dim as the input.
        pattern = []
        out_dim = 0
        if NoneConst.equals(axis):
            # We are taking the max/argmax over all dimensions.
            axis = None
        for i in xrange(x.ndim):
            if axis is None or i in axis.data:
                pattern.append('x')
            else:
                pattern.append(out_dim)
                out_dim += 1
        g_max_pad = DimShuffle(g_max.broadcastable, pattern)(g_max)
        xmax_pad = DimShuffle(xmax.broadcastable, pattern)(xmax)
        # Set the grad to the correct position.
        g_x = eq(xmax_pad, x) * g_max_pad
        return g_x, axis_grad
# Singleton instance used by max_and_argmax/max/argmax below.
_max_and_argmax = MaxAndArgmax()
def makeKeepDims(x, y, axis):
    """
    Reintroduces in y with length one the axes of x which have been left out
    in a prior reduction of x. With this option, the resulting tensor will
    broadcast correctly against the original tensor x.

    `axis` may be None (all axes), a single (possibly negative) integer, a
    0-d array, or a sequence of integers; only constant axes are supported.
    """
    x = as_tensor_variable(x)
    y = as_tensor_variable(y)
    if axis is None:
        axis = list(range(x.type.ndim))
    elif isinstance(axis, (integer_types, numpy.integer)):
        axis = [axis]
    elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
        axis = [int(axis)]
    else:
        axis = [int(a) for a in axis]
    newaxis = []
    for a in axis:
        if not isinstance(a, integer_types):
            raise ValueError(
                "keepdims option can be used only with constant axis")
        if a < 0:
            # Negative axes count from the end.
            a += x.type.ndim
        newaxis.append(a)
    i = 0
    new_dims = []
    for j, _ in enumerate(x.type.broadcastable):
        if j in newaxis:
            # Reduced axis: re-insert as a broadcastable dimension.
            new_dims.append('x')
        else:
            new_dims.append(i)
            i += 1
    return DimShuffle(y.type.broadcastable, new_dims)(y)
@constructor
def max_and_argmax(a, axis=None, keepdims=False):
    """
    Return the maximum elements and their indices along `axis`.

    With axis=None (the default) the reduction runs over the flattened
    tensor.

    Parameters
    ----------
    keepdims : bool
        When True, the reduced axes are kept in the result as size-one
        dimensions so the result broadcasts correctly against `a`.
    """
    max_out, arg_out = _max_and_argmax(a, axis)
    if not keepdims:
        return [max_out, arg_out]
    # Re-insert the reduced axes as broadcastable dimensions.
    return [makeKeepDims(a, max_out, axis),
            makeKeepDims(a, arg_out, axis)]
@constructor
def max(x, axis=None, keepdims=False):
    """
    Return the maximum elements along `axis` (over the flattened tensor
    when axis is None).

    Parameters
    ----------
    keepdims: bool
        When True, the reduced axes are kept in the result as size-one
        dimensions so the result broadcasts correctly against `x`.

    Notes
    -----
    Like numpy, reducing a dimension of shape 0 is an error.
    """
    # Two possible implementations: MaxAndArgmax supports grad/R_op, so it
    # is preferred; CAReduce is faster but the optimizer swaps in CAReduce
    # for MaxAndArgmax[0] at compile time anyway.  Some axis arguments are
    # only supported by CAReduce, hence the fallback.
    try:
        result = max_and_argmax(x, axis)[0]
    except Exception:
        result = CAReduce(scal.maximum, axis)(x)
    if keepdims:
        result = makeKeepDims(x, result, axis)
    return result
@constructor
def argmax(x, axis=None, keepdims=False):
    """
    Return the indices of the maximum elements along `axis` (over the
    flattened tensor when axis is None).

    Parameters
    ----------
    keepdims : bool
        When True, the reduced axes are kept in the result as size-one
        dimensions so the result broadcasts correctly against `x`.
    """
    # In Python mode (MaxAndArgmax.perform) this goes through the data
    # twice; the C implementation of Argmax avoids the extra pass.
    result = max_and_argmax(x, axis)[1]
    if not keepdims:
        return result
    return makeKeepDims(x, result, axis)
@constructor
def min(x, axis=None, keepdims=False):
    """
    Return the minimum elements along `axis` (over the flattened tensor
    when axis is None).

    Parameters
    ----------
    keepdims: bool
        When True, the reduced axes are kept in the result as size-one
        dimensions so the result broadcasts correctly against `x`.
    """
    x = as_tensor_variable(x)
    dtype_str = str(x.dtype)
    if not (dtype_str.startswith('float') or dtype_str in int_dtypes):
        # Unsigned integer / complex dtypes need special care.
        raise NotImplementedError()
    # min(x) == -max(-x) for the supported dtypes.
    return -max(-x, axis=axis, keepdims=keepdims)
@constructor
def argmin(x, axis=None, keepdims=False):
    """
    Return the indices of the minimum elements along `axis` (over the
    flattened tensor when axis is None).

    Parameters
    ----------
    keepdims: bool
        When True, the reduced axes are kept in the result as size-one
        dimensions so the result broadcasts correctly against `x`.
    """
    x = as_tensor_variable(x)
    dtype_str = str(x.dtype)
    if not (dtype_str.startswith('float') or dtype_str in int_dtypes):
        # Unsigned integer / complex dtypes need special care.
        raise NotImplementedError()
    # argmin(x) == argmax(-x) for the supported dtypes.
    return argmax(-x, axis=axis, keepdims=keepdims)
@constructor
def smallest(*args):
    """
    Elementwise smallest of a variable number of arguments, like Python's
    built-in min.
    """
    if len(args) != 2:
        # General case: stack and reduce along the new leading axis.
        return min(stack(args), axis=0)
    u, v = args
    return switch(u < v, u, v)
@constructor
def largest(*args):
    """
    Elementwise largest of a variable number of arguments, like Python's
    built-in max.
    """
    if len(args) != 2:
        # General case: stack and reduce along the new leading axis.
        return max(stack(args), axis=0)
    u, v = args
    return switch(u > v, u, v)
##########################
# Comparison
##########################
# Each stub below is replaced by an Elemwise Op by the `_scal_elemwise`
# decorator: the function name selects the scalar op and the docstring
# documents the elementwise operation.
@_scal_elemwise
def lt(a, b):
    """a < b"""
@_scal_elemwise
def gt(a, b):
    """a > b"""
@_scal_elemwise
def le(a, b):
    """a <= b"""
@_scal_elemwise
def ge(a, b):
    """a >= b"""
@_scal_elemwise
def eq(a, b):
    """a == b"""
@_scal_elemwise
def neq(a, b):
    """a != b"""
@_scal_elemwise
def isnan(a):
    """isnan(a)"""
@_scal_elemwise
def isinf(a):
    """isinf(a)"""
def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
    """
    Symbolic equivalent of Numpy's ``allclose``:

    ``absolute(a - b) <= (atol + rtol * absolute(b))``

    Parameters
    ----------
    a : tensor
        Input to compare.
    b : tensor
        Input to compare.
    rtol : float
        The relative tolerance parameter.
    atol : float
        The absolute tolerance parameter.
    equal_nan: bool
        Whether to consider nan's in the same place to be close.

    Returns
    -------
    bool
        A boolean value (of type int8, as returned by the tensor
        elementwise `all` function) telling whether every element of a and
        b is within the tolerance above.

    Notes
    -----
    Not a symmetric equation. See Numpy's documentation.
    """
    per_element = isclose(a, b, rtol, atol, equal_nan)
    return all(per_element)
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
    """
    Implements Numpy's ``isclose`` on tensors.

    The tolerance values are positive, typically very small numbers. The
    relative difference (`rtol` * abs(`b`)) and the absolute difference
    `atol` are added together to compare against the absolute difference
    between `a` and `b`.

    ``absolute(a - b) <= (atol + rtol * absolute(b))``

    Parameters
    ----------
    a : tensor
        Input to compare.
    b : tensor
        Input to compare.
    rtol : float
        The relative tolerance parameter.
    atol : float
        The absolute tolerance parameter.
    equal_nan : bool
        Whether to consider nan's in the same place to be close

    Returns
    -------
    int8
        A boolean (int8) array where two arrays are element-wise equal
        within a tolerance.

    Notes
    -----
    Not a symmetric equation. See Numpy's documentation.

    Examples
    --------
    >>> import theano
    >>> import numpy as np
    >>> a = theano._asarray([1e10, 1e-7], dtype="float64")
    >>> b = theano._asarray([1.00001e10, 1e-8], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 0], dtype=int8)
    >>> a = theano._asarray([1e10, 1e-8], dtype="float64")
    >>> b = theano._asarray([1.00001e10, 1e-9], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 1], dtype=int8)
    >>> a = theano._asarray([1e10, 1e-8], dtype="float64")
    >>> b = theano._asarray([1.0001e10, 1e-9], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([0, 1], dtype=int8)
    >>> a = theano._asarray([1.0, np.nan], dtype="float64")
    >>> b = theano._asarray([1.0, np.nan], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 0], dtype=int8)
    >>> a = theano._asarray([1.0, np.nan], dtype="float64")
    >>> b = theano._asarray([1.0, np.nan], dtype="float64")
    >>> theano.tensor.isclose(a, b, equal_nan=True).eval()
    array([1, 1], dtype=int8)
    >>> a = theano._asarray([1.0, np.inf], dtype="float64")
    >>> b = theano._asarray([1.0, -np.inf], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 0], dtype=int8)
    >>> a = theano._asarray([1.0, np.inf], dtype="float64")
    >>> b = theano._asarray([1.0, np.inf], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 1], dtype=int8)
    """
    # close will be an int8 array of 1 where within tolerance
    # and 0 where not within tolerance or there was a nan or inf value.
    diff = abs(a - b)
    tolerance = atol + rtol * abs(b)
    close_prelim = le(diff, tolerance)
    a_nan = isnan(a)
    b_nan = isnan(b)
    nans = bitwise_or(a_nan, b_nan)
    a_inf = isinf(a)
    b_inf = isinf(b)
    infs = bitwise_or(a_inf, b_inf)
    nans_or_infs = bitwise_or(nans, infs)
    # close is now an array of 0's except where elements are not nan or inf
    # and are within the tolerance.
    close = bitwise_and(close_prelim, bitwise_not(nans_or_infs))
    # deal with signed inf values. this will make an array inf_eq of 0's
    # except where inf values have the same sign.
    both_infs = bitwise_and(a_inf, b_inf)
    inf_signs_eq = eq(a_inf * sgn(a), b_inf * sgn(b))
    inf_eq = bitwise_and(both_infs, inf_signs_eq)
    # now create the potential result combining close and inf_eq
    close_with_infs = bitwise_or(close, inf_eq)
    # deal with comparing nan's.
    if equal_nan:
        both_nans = bitwise_and(a_nan, b_nan)
        return bitwise_or(close_with_infs, both_nans)
    # otherwise nan's aren't considered close.
    else:
        return close_with_infs
##########################
# Condition
##########################
@_scal_elemwise
def switch(cond, ift, iff):
    """if cond then ift else iff"""
# numpy-style alias (numpy.where).
where = switch
##########################
# Bit-wise
##########################
# Elemwise bitwise operators, each with its numpy-style alias.
@_scal_elemwise
def and_(a, b):
    """bitwise a & b"""
bitwise_and = and_  # numpy name for it
@_scal_elemwise
def or_(a, b):
    """bitwise a | b"""
bitwise_or = or_  # numpy name for it
@_scal_elemwise
def xor(a, b):
    """bitwise a ^ b"""
bitwise_xor = xor  # numpy name for it
@_scal_elemwise
def invert(a):
    """bitwise ~a"""
bitwise_not = invert  # numpy alias for it
##########################
# Math
##########################
# Elemwise math stubs: the `_scal_elemwise` decorator replaces each with
# the corresponding Elemwise Op; the docstring documents the operation.
@_scal_elemwise
def abs_(a):
    """|`a`|
    TensorVariable overloads the `TensorVariable.__abs__` operator so that
    this function is called when you type abs(a).
    """
pprint.assign(abs_, printing.PatternPrinter(('|%(0)s|', -1000)))
@_scal_elemwise
def exp(a):
    """e^`a`"""
@_scal_elemwise
def exp2(a):
    """2^`a`"""
@_scal_elemwise
def expm1(a):
    """e^`a` - 1"""
@_scal_elemwise
def neg(a):
    """-a"""
# numpy.reciprocal does integer division on integer inputs
# (which is not very interesting)
@_scal_elemwise
def inv(a):
    """1.0/a"""
@_scal_elemwise
def log(a):
    """base e logarithm of a"""
@_scal_elemwise
def log2(a):
    """base 2 logarithm of a"""
@_scal_elemwise
def log10(a):
    """base 10 logarithm of a"""
@_scal_elemwise
def log1p(a):
    """log(1+a)"""
@_scal_elemwise
def sgn(a):
    """sign of a"""
@_scal_elemwise
def ceil(a):
    """ceiling of a"""
@_scal_elemwise
def floor(a):
    """floor of a"""
@_scal_elemwise
def trunc(a):
    """trunc of a"""
@constructor
def iround(a, mode="half_away_from_zero"):
    """cast(round(a,mode),'int64')"""
    return cast(round(a, mode), 'int64')
@constructor
def round(a, mode="half_away_from_zero"):
    """round_mode(a) with mode in [half_away_from_zero, half_to_even].

    Parameters
    ----------
    a : tensor
        Input to round elementwise.
    mode : str
        Rounding mode, "half_away_from_zero" (default) or "half_to_even".

    Raises
    ------
    ValueError
        If `mode` is not a supported rounding mode.  (ValueError replaces
        the generic Exception previously raised; callers catching
        Exception still work.)
    """
    if mode == "half_away_from_zero":
        return round_half_away_from_zero(a)
    elif mode == "half_to_even":
        return round_half_to_even(a)
    else:
        raise ValueError("round mode %s is not implemented." % mode)
# The two rounding-mode Elemwise ops dispatched to by `round` above.
@_scal_elemwise
def round_half_to_even(a):
    """round_half_to_even(a)"""
@_scal_elemwise
def round_half_away_from_zero(a):
    """round_half_away_from_zero(a)"""
# More elemwise math stubs (see note above `abs_`).
@_scal_elemwise
def sqr(a):
    """square of a"""
# alias to sqr, included to maintain similarity with numpy interface
square = sqr
@_scal_elemwise
def sqrt(a):
    """square root of a"""
@_scal_elemwise
def deg2rad(a):
    """convert degree a to radian"""
@_scal_elemwise
def rad2deg(a):
    """convert radian a to degree"""
@_scal_elemwise
def cos(a):
    """cosine of a"""
@_scal_elemwise
def arccos(a):
    """arccosine of a"""
@_scal_elemwise
def sin(a):
    """sine of a"""
@_scal_elemwise
def arcsin(a):
    """arcsine of a"""
@_scal_elemwise
def tan(a):
    """tangent of a"""
@_scal_elemwise
def arctan(a):
    """arctangent of a"""
@_scal_elemwise
def arctan2(a, b):
    """arctangent of a / b"""
@_scal_elemwise
def cosh(a):
    """hyperbolic cosine of a"""
@_scal_elemwise
def arccosh(a):
    """hyperbolic arc cosine of a"""
@_scal_elemwise
def sinh(a):
    """hyperbolic sine of a"""
@_scal_elemwise
def arcsinh(a):
    """hyperbolic arc sine of a"""
@_scal_elemwise
def tanh(a):
    """hyperbolic tangent of a"""
@_scal_elemwise
def arctanh(a):
    """hyperbolic arc tangent of a"""
@_scal_elemwise
def erf(a):
    """error function"""
@_scal_elemwise
def erfc(a):
    """complementary error function"""
@_scal_elemwise
def erfcx(a):
    """scaled complementary error function"""
@_scal_elemwise
def erfinv(a):
    """inverse error function"""
@_scal_elemwise
def erfcinv(a):
    """inverse complementary error function"""
@_scal_elemwise
def gamma(a):
    """gamma function"""
@_scal_elemwise
def gammaln(a):
    """log gamma function"""
@_scal_elemwise
def psi(a):
    """derivative of log gamma function"""
@_scal_elemwise
def chi2sf(x, k):
    """chi squared survival function"""
@_scal_elemwise
def j0(a):
    """Bessel function of the 0'th kind"""
@_scal_elemwise
def j1(a):
    """Bessel function of the 1'th kind"""
@_scal_elemwise
def real(z):
    """Return real component of complex-valued tensor `z`"""
# Expose as a property on tensor variables: z.real
_tensor_py_operators.real = property(real)
@_scal_elemwise
def imag(z):
    """Return imaginary component of complex-valued tensor `z`"""
# Expose as a property on tensor variables: z.imag
_tensor_py_operators.imag = property(imag)
@_scal_elemwise
def angle(z):
    """Return polar-coordinate angle of complex-valued tensor `z`"""
@_scal_elemwise  # numpy.complex cannot build tensors
def complex(real, imag):
    """Return complex-valued tensor with `real` and `imag` components"""
@_scal_elemwise
def conj(z):
    """Return the complex conjugate of `z`."""
@_scal_elemwise
def complex_from_polar(abs, angle):
    """Return complex-valued tensor from polar coordinate specification."""
##########################
# Misc
##########################
# fill, _fill_inplace = _elemwise(scal.second, 'fill',
# """fill WRITEME (elemwise)""")
@_scal_elemwise
def second(a, b):
    """Create a matrix by filling the shape of a with b"""
# `fill` is the conventional name for `second` in this module.
fill = second
pprint.assign(fill, printing.FunctionPrinter('fill'))
@constructor
def ones_like(model, dtype=None):
    """equivalent of numpy.ones_like

    When `dtype` is None, the dtype of `model` is used.
    """
    fill_dtype = model.type.dtype if dtype is None else dtype
    return fill(model, constant(1.0, dtype=fill_dtype))
@constructor
def zeros_like(model, dtype=None):
    """equivalent of numpy.zeros_like

    When `dtype` is None, the dtype of `model` is used.
    """
    fill_dtype = model.type.dtype if dtype is None else dtype
    return fill(model, constant(0.0, dtype=fill_dtype))
def zeros(shape, dtype=None):
    """
    Create a Tensor filled with zeros, closer to Numpy's syntax than ``alloc``.
    """
    if dtype is None:
        dtype = config.floatX
    if not isinstance(shape, (list, tuple, TensorVariable)):
        # A bare scalar shape becomes a 1-element shape list.
        shape = [shape]
    return alloc(numpy.array(0, dtype=dtype), *shape)
def ones(shape, dtype=None):
    """
    Create a Tensor filled with ones, closer to Numpy's syntax than ``alloc``.
    """
    if dtype is None:
        dtype = config.floatX
    if not isinstance(shape, (list, tuple, TensorVariable)):
        # A bare scalar shape becomes a 1-element shape list.
        shape = [shape]
    return alloc(numpy.array(1, dtype=dtype), *shape)
class Nonzero(gof.Op):
    """
    Return the indices of the elements that are non-zero.

    Returns a matrix of shape (ndim, number of nonzero elements) such that
    element (i,j) is the index in the ith dimension of the jth non-zero
    element.

    Note this is different than NumPy, which returns a tuple of arrays, one for
    each dimension of the input array.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    matrix
        Matrix containing the indices of the non-zero elements of a.

    See Also
    --------
    nonzero_values : Return the non-zero elements of the input array
    flatnonzero : Return the indices of the non-zero elements of the
        flattened input array.

    """
    __props__ = ()
    def make_node(self, a):
        a = as_tensor_variable(a)
        if a.ndim == 0:
            raise ValueError('Nonzero only supports non-scalar arrays.')
        output = [TensorType(dtype='int64', broadcastable=(False, False))()]
        return gof.Apply(self, [a], output)
    def perform(self, node, inp, out_):
        a = inp[0]
        out, = out_
        result_tuple = numpy.nonzero(a)
        if len(result_tuple[0]) > 0:
            result = numpy.vstack(result_tuple)
        else:
            # No nonzero elements: empty (ndim, 0) matrix.
            result = numpy.zeros((len(result_tuple), 0))
        out[0] = result.astype('int64')
    def grad(self, inp, grads):
        # Indices are not differentiable w.r.t. the input values.
        return [grad_undefined(self, 0, inp[0])]
# Singleton instance used by the nonzero* helpers below.
_nonzero = Nonzero()
def nonzero(a, return_matrix=False):
    """
    Return the indices of the non-zero elements of `a`.

    If return_matrix is False (default, same as NumPy):
        A tuple of vectors, one per dimension: the ith element of the jth
        vector is the index of the ith non-zero element of the input along
        the jth dimension.
    If return_matrix is True (same as the Theano Op):
        A matrix of shape (ndim, number of nonzero elements) whose (i, j)
        entry is the index in dimension i of the jth non-zero element.

    Parameters
    ----------
    a : array_like
        Input array.
    return_matrix : bool
        If True, returns a symbolic matrix. If False, returns a tuple of
        arrays. Defaults to False.

    Returns
    -------
    tuple of vectors or matrix

    See Also
    --------
    nonzero_values : Return the non-zero elements of the input array
    flatnonzero : Return the indices of the non-zero elements of the
        flattened input array.
    """
    res = _nonzero(a)
    if return_matrix:
        return res
    if a.ndim > 0:
        # One index vector (matrix row) per dimension.
        return tuple([res[i] for i in xrange(a.ndim)])
    return tuple([res[0]])
def flatnonzero(a):
    """
    Return a vector of indices that are non-zero in the flattened version
    of `a`; equivalent to nonzero(a.flatten(), return_matrix=True)[0].

    Parameters
    ----------
    a : tensor
        Input tensor

    Returns
    -------
    vector
        Indices of the non-zero elements of `a.flatten()`.

    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input array.
    nonzero_values : Return the non-zero elements of the input array
    """
    if a.ndim == 0:
        raise ValueError('Nonzero only supports non-scalar arrays.')
    flat_view = a.flatten()
    return nonzero(flat_view, return_matrix=True)[0]
def nonzero_values(a):
    """
    Return a vector of the non-zero elements contained in the input array.

    The NumPy idiom ``a[numpy.nonzero(a)]`` is *NOT* supported by Theano;
    use ``tensor.nonzero_values(a)`` or ``a.nonzero_values()`` instead.
    This is equivalent to ``a.flatten()[tensor.flatnonzero(a)]``.

    Parameters
    ----------
    a : tensor
        Input tensor

    Returns
    -------
    vector
        Output vector, containing the non-zero elements of a.

    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input array.
    flatnonzero : Return the indices of the non-zero elements of the
        flattened input array.

    """
    flat = a.flatten()
    return flat[flatnonzero(a)]
class Tri(gof.Op):
    """Op building an (N, M) matrix of ones at and below diagonal k."""

    __props__ = ("dtype",)

    def __init__(self, dtype=None):
        # Default to the configured float type.
        self.dtype = config.floatX if dtype is None else dtype

    def make_node(self, N, M, k):
        N, M, k = [as_tensor_variable(arg) for arg in (N, M, k)]
        out_type = TensorType(dtype=self.dtype, broadcastable=(False, False))
        return gof.Apply(self, [N, M, k], [out_type()])

    def perform(self, node, inp, out_):
        N, M, k = inp
        out, = out_
        out[0] = numpy.tri(N, M, k, dtype=self.dtype)

    def infer_shape(self, node, in_shapes):
        # Output shape is (N, M), i.e. the first two inputs.
        return [[node.inputs[0], node.inputs[1]]]

    def grad(self, inp, grads):
        # All inputs are integer shape/offset values: gradient undefined.
        return [grad_undefined(self, i, inp[i]) for i in xrange(3)]
def tri(N, M=None, k=0, dtype=None):
    """
    An array with ones at and below the given diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the array.
    M : int, optional
        Number of columns in the array.
        By default, `M` is taken equal to `N`.
    k : int, optional
        The sub-diagonal at and below which the array is filled.
        `k` = 0 is the main diagonal, while `k` < 0 is below it,
        and `k` > 0 is above. The default is 0.
    dtype : dtype, optional
        Data type of the returned array. The default is float.

    Returns
    -------
    Array of shape (N, M)
        Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.

    """
    if dtype is None:
        dtype = config.floatX
    n_cols = N if M is None else M
    return Tri(dtype)(N, n_cols, k)
def tril(m, k=0):
    """
    Lower triangle of an array.

    Return a copy of an array with elements above the `k`-th diagonal zeroed.

    Parameters
    ----------
    m : array_like, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements. `k = 0` (the default) is the
        main diagonal, `k < 0` is below it and `k > 0` is above.

    Returns
    -------
    array, shape (M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.

    See Also
    --------
    triu : Same thing, only for the upper triangle.

    """
    # Mask out everything above diagonal k.
    mask = tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype)
    return m * mask
def triu(m, k=0):
    """
    Upper triangle of an array.

    Return a copy of a matrix with the elements below the `k`-th diagonal
    zeroed.

    Please refer to the documentation for `tril` for further details.

    See Also
    --------
    tril : Lower triangle of an array.

    """
    # Keep everything at and above diagonal k by removing the strictly
    # lower triangle (ones at and below diagonal k - 1).
    lower_mask = tri(m.shape[0], m.shape[1], k=k - 1, dtype=m.dtype)
    return m * (1 - lower_mask)
class Eye(gof.Op):
    """Op building an (n, m) matrix with ones on the k-th diagonal."""

    __props__ = ("dtype", )

    def __init__(self, dtype=None):
        self.dtype = config.floatX if dtype is None else dtype

    def make_node(self, n, m, k):
        n, m, k = [as_tensor_variable(arg) for arg in (n, m, k)]
        # All three inputs must be scalar.
        assert n.ndim == 0
        assert m.ndim == 0
        assert k.ndim == 0
        out_type = TensorType(dtype=self.dtype, broadcastable=(False, False))
        return gof.Apply(self, [n, m, k], [out_type()])

    def perform(self, node, inp, out_):
        n, m, k = inp
        out, = out_
        out[0] = numpy.eye(n, m, k, dtype=self.dtype)

    def infer_shape(self, node, in_shapes):
        # Output shape is (n, m), i.e. the first two inputs.
        return [[node.inputs[0], node.inputs[1]]]

    def grad(self, inp, grads):
        # n, m, k are integer-valued: gradient undefined.
        return [grad_undefined(self, i, inp[i]) for i in xrange(3)]
def eye(n, m=None, k=0, dtype=None):
    """Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    n : int
        Number of rows in the output.
    m : int, optional
        Number of columns in the output. If None, defaults to `n`.
    k : int, optional
        Index of the diagonal: 0 (the default) refers to the main diagonal,
        a positive value refers to an upper diagonal, and a negative value
        to a lower diagonal.
    dtype : data-type, optional
        Data-type of the returned array.

    Returns
    -------
    ndarray of shape (n, m)
        An array where all elements are equal to zero, except for the `k`-th
        diagonal, whose values are equal to one.

    """
    if dtype is None:
        dtype = config.floatX
    n_cols = n if m is None else m
    return Eye(dtype)(n, n_cols, k)
def identity_like(x):
    """Return a matrix shaped like `x` with ones on the main diagonal."""
    n_rows = x.shape[0]
    n_cols = x.shape[1]
    return eye(n_rows, n_cols, k=0, dtype=x.dtype)
class Alloc(gof.Op):
    """Create a Tensor from an initial value and a desired shape.

    alloc(value, shape0, shape1, ..., shapeN)

    Returns an N-dimensional tensor initialized by `value` using something
    equivalent to

        z = numpy.zeros(shape, value.dtype)
        z += value

    The result has N dimensions, has the dtype of `value` and is obtained by
    broadcasting value over the output ndarray.

    This Op is used to replace fill() during optimizations because after shapes
    are lifted, the first argument to fill can often be pruned from the graph.

    """

    __props__ = ()

    def validate_shape(self, shape):
        # Return (sh, bcast): the shape entries converted to scalar integer
        # tensor variables, and the inferred broadcastable pattern (a
        # dimension is broadcastable iff its shape is the constant 1).
        sh = [as_tensor_variable(s) for s in shape]
        bcast = []
        for i, s in enumerate(sh):
            # Build a printable description of s for the error messages.
            if config.exception_verbosity == 'high':
                s_as_str = '\n' + min_informative_str(s)
            else:
                s_as_str = str(s)
            if s.type.dtype[:3] not in ('int', 'uin'):
                raise TypeError('Shape arguments to Alloc must be integers, '
                                'but argument %s is not for apply node: %s' %
                                (i, s_as_str))
            if s.ndim != 0:
                raise TypeError(
                    "Each shape dimension to Alloc must be a scalar, ",
                    'but dimension %s have %d dimensions for apply node: %s' %
                    (i, s.ndim, s_as_str))
            # if s is constant 1, then we're broadcastable in that dim
            try:
                const_shp = get_scalar_constant_value(s)
            except NotScalarConstantError:
                const_shp = None
            bcast.append(1 == const_shp)
        return sh, bcast

    def make_node(self, value, *shape):
        # The value may have fewer dimensions than the requested shape;
        # broadcasting fills in the rest.
        v = as_tensor_variable(value)
        sh, bcast = self.validate_shape(shape)
        if v.ndim > len(sh):
            raise TypeError("The Alloc value to use has more dimensions"
                            " than the specified dimensions",
                            v.ndim, len(sh))
        otype = TensorType(dtype=v.dtype, broadcastable=bcast)
        return gof.Apply(self, [v] + sh, [otype()])

    def perform(self, node, inputs, out_):
        out, = out_
        v = inputs[0]
        sh = tuple([int(i) for i in inputs[1:]])
        if out[0] is None or out[0].shape != sh:
            if v.size == 1 and v.item() == 0:
                # Fast path: numpy.zeros already fills the buffer.
                out[0] = numpy.zeros(sh, dtype=v.dtype)
            else:
                out[0] = numpy.empty(sh, dtype=v.dtype)
                out[0][...] = v  # broadcast v to fill us up
        else:
            # reuse the allocated memory.
            out[0][...] = v  # broadcast v to fill us up

    def c_code(self, node, name, inp, out, sub):
        # Emit C that (re)allocates the output only when its shape changed,
        # then broadcast-copies the value into it.
        vv = inp[0]
        ndim = len(inp[1:])
        zz, = out
        fail = sub['fail']
        code = """
            npy_intp shape[%(ndim)s];
            """ % dict(ndim=ndim)
        # Initialize shape
        for i, shp_i in enumerate(inp[1:]):
            code += """
                shape[%(i)s] = ((dtype_%(shp_i)s*) PyArray_DATA(%(shp_i)s))[0];
                """ % dict(i=i, shp_i=shp_i)
        code += """
            int need_new_out = (NULL == %(zz)s);
            for (int i = 0; i < %(ndim)s; i++)
                need_new_out = (need_new_out
                                || (PyArray_DIMS(%(zz)s)[i] != shape[i]));
            if (need_new_out)
            {
                Py_XDECREF(%(zz)s);
                %(zz)s = (PyArrayObject*) PyArray_SimpleNew(%(ndim)s,
                    shape, PyArray_TYPE((PyArrayObject*) py_%(vv)s));
                if (!%(zz)s)
                {
                    PyErr_SetString(PyExc_MemoryError, "alloc failed");
                    %(fail)s
                }
            }
            // This function takes care of broadcasting
            PyArray_CopyInto(%(zz)s, %(vv)s);
            """ % dict(vv=vv, ndim=ndim, zz=zz, fail=fail)
        return code

    def c_code_cache_version(self):
        return (1,)

    def infer_shape(self, node, input_shapes):
        # The output shape is exactly the shape inputs (inputs[1:]).
        return [node.inputs[1:]]

    def connection_pattern(self, node):
        # Only the value input (index 0) is connected to the output; the
        # shape inputs are disconnected (see grad below).
        rval = [[True]]
        for ipt in node.inputs[1:]:
            rval.append([False])
        return rval

    def grad(self, inputs, grads):
        x = inputs[0]
        gz = grads[0]
        n_axes_to_sum = gz.ndim - x.ndim
        # The number of dimensions added
        axis = list(range(n_axes_to_sum))
        # The broadcasted dimensions
        axis_broadcasted = []
        axis_kept = []
        for i, (ib, gb) in enumerate(
                zip(inputs[0].broadcastable,
                    # We need the dimensions corresponding to x
                    grads[0].broadcastable[-inputs[0].ndim:])):
            if ib and not gb:
                axis_broadcasted.append(i + n_axes_to_sum)
            else:
                axis_kept.append(i)
        # Sum the gradient over the added and the broadcasted dimensions.
        gx = gz.sum(axis=axis + axis_broadcasted)
        if axis_broadcasted:
            new_order = ['x'] * x.ndim
            for idx, axis in enumerate(axis_kept):
                new_order[axis] = idx
            # Dimshuffle to add back the broadcasted dims
            gx = gx.dimshuffle(new_order)
        # The *elements* of the output are not connected to
        # the inputs that specify the shape. If you grow the
        # shape by epsilon, the existing elements do not
        # change.
        return [gx] + [DisconnectedType()() for i in inputs[1:]]

    def __call__(self, val, *shapes, **kwargs):
        """
        If the alloc would be useless, this function returns val.

        If this function is called outside of a graph optimization context
        (for instance, it is manually called by a user building a graph),
        then we always return an Alloc node, to allow for DebugMode to check
        for size mismatches.

        If you always want an Alloc node, call make_node.

        """
        ret = super(Alloc, self).__call__(val, *shapes, **kwargs)
        try:
            # It makes optimization difficult when useless allocs are thrown
            # into the graph at every stage of optimization. This little logic
            # tries to help at least in some cases.
            if hasattr(val, 'fgraph') and (val.type == ret.type):
                return val
        except AttributeError:
            pass
        return ret

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return [None]
        return self(eval_points[0], *inputs[1:], **dict(return_list=True))

    def do_constant_folding(self, node):
        # Heuristic: avoid folding when the result would be copied anyway or
        # when downstream ops would rather work in-place on the Alloc.
        if not getattr(node.outputs[0], 'clients', []):
            # If there are no clients then there is no point doing constant
            # folding.
            return False
        for client in node.outputs[0].clients:
            if client[0] == 'output':
                # If the output is a constant, it will have to be deepcopied
                # each time the function is called. So we do not fold.
                return False
            elif (
                    # The following ops work inplace of their input id 0.
                    client[1] == 0 and
                    isinstance(client[0].op, (
                        # Ops that will work inplace on the Alloc. So if they
                        # get constant_folded, they would copy the
                        # constant and this is less efficient.
                        # Not doing the constant folding could also lower
                        # the peak memory usage, as the "constant" won't
                        # always exist.
                        theano.tensor.subtensor.IncSubtensor,
                        theano.tensor.subtensor.AdvancedIncSubtensor1,
                        theano.tensor.subtensor.AdvancedIncSubtensor,
                        theano.tensor.blas.Gemv,
                        theano.tensor.blas_c.CGemv,
                        theano.tensor.blas.Ger,
                        theano.tensor.blas_c.CGer,
                        theano.tensor.blas_scipy.ScipyGer))):
                return False
            # If the clients is a transfer to the GPU, we don't want to
            # fold. We let the Alloc being moved to the GPU, then we
            # let the GPU algo decide if it need to fold it or not.
            elif client[0].op.__class__.__name__.lower().startswith("gpu"):
                return False
        return True
# Module-level singleton of the Alloc Op, plus its pretty-printer rule.
alloc = Alloc()
pprint.assign(alloc, printing.FunctionPrinter('alloc'))
def transfer(var, target):
    """
    Return a version of `var` transferred to `target`.

    `cpu` mean a TensorType (on the CPU). Other types may define
    additional targets.

    Parameters
    ----------
    var : variable
        A theano variable
    target : str
        The target of the transfer

    """
    if target == 'cpu':
        return as_tensor_variable(var)
    # Ask each registered transfer function in turn; the first non-None
    # answer wins.
    for trans in transfer._others:
        res = trans(var, target)
        if res is not None:
            return res
    raise ValueError("Can't transfer to target %s" % (target,))
# Registry of alternative-target transfer functions (see register_transfer).
transfer._others = []
def register_transfer(fn):
    """
    Register a transfer function for alternative targets.

    The callable is tried by `transfer` when the target is not 'cpu'; it
    should return a transferred variable or None if it cannot handle the
    target.

    Parameters
    ----------
    fn : callable

    """
    transfer._others.append(fn)
"""Create a duplicate of `a` (with duplicated storage)"""
tensor_copy = elemwise.Elemwise(scal.identity)
pprint.assign(tensor_copy, printing.IgnorePrinter())
@constructor
def sum(input, axis=None, dtype=None, keepdims=False, acc_dtype=None):
    """
    Computes the sum along the given axis(es) of a tensor `input`.

    When axis is None (the default value), the sum is performed
    over the flattened tensor.

    For full documentation see ``tensor.elemwise.Sum``.
    In particular please pay attention to the important warning when using
    a custom acc_dtype.

    Parameters
    ----------
    keepdims: bool
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.

    """
    sum_op = elemwise.Sum(axis=axis, dtype=dtype, acc_dtype=acc_dtype)
    result = sum_op(input)
    if keepdims:
        result = makeKeepDims(input, result, axis)
    return result
pprint.assign(Sum(), printing.FunctionPrinter('sum'))
@constructor
def prod(input, axis=None, dtype=None, keepdims=False, acc_dtype=None,
         no_zeros_in_input=False):
    """
    Computes the product along the given axis(es) of a tensor `input`.

    When axis is None (the default value), the product is performed
    over the flattened tensor.

    For full documentation see ``tensor.elemwise.Prod``.

    Parameters
    ----------
    keepdims: bool
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.

    """
    prod_op = elemwise.Prod(axis, dtype=dtype, acc_dtype=acc_dtype,
                            no_zeros_in_input=no_zeros_in_input)
    result = prod_op(input)
    if keepdims:
        result = makeKeepDims(input, result, axis)
    return result
class Mean(elemwise.CAReduce):
    """Mean reduction Op: accumulates with scal.add, then divides by size.

    Only supports reduction over all axes (axis=None) or a single axis.
    Always accumulates and outputs in float64 (see _output_dtype).
    """

    def __init__(self, axis=None):
        elemwise.CAReduce.__init__(self, scal.add, axis)
        # Only None (all axes) or a single axis is supported.
        assert self.axis is None or len(self.axis) == 1

    def __str__(self):
        if self.axis is not None:
            return "Mean{%s}" % (", ".join(str(x) for x in self.axis))
        else:
            return "Mean"

    def _output_dtype(self, idtype):
        # we want to protect against overflow
        return 'float64'

    def perform(self, node, inp, out):
        input, = inp
        output, = out
        if self.axis is None:
            axis = None
        else:
            axis = self.axis[0]
        # numpy.asarray is needed as otherwise we can end up with a
        # numpy scalar.
        output[0] = numpy.asarray(numpy.mean(input, dtype='float64',
                                             axis=axis))

    def c_code(self, node, name, inames, onames, sub):
        if self.axis is not None:
            # NOTE(review): super(Op, self) skips everything up to and
            # including Op in the MRO -- presumably a way to disable the
            # CAReduce C implementation when an axis is given; confirm.
            return super(Op, self).c_code(node, name, inames, onames, sub)
        # Reuse the CAReduce-generated sum, then divide by the input size.
        ret = elemwise.CAReduce.c_code(self, node, name, inames, onames, sub)
        # TODO: c_code perform support only axis is None
        return ret + """
        *((double *)PyArray_DATA(%s)) /= PyArray_SIZE(%s);
        """ % (onames[0], inames[0])

    # TODO: implement the grad. When done and tested, you can make this the default
    # version.
    #    def grad(self, (x,), (gout,)):
    #      import pdb;pdb.set_trace()
    #      return grad(mean(x, self.axis, op=False),[x])
@constructor
def mean(input, axis=None, dtype=None, op=False, keepdims=False,
         acc_dtype=None):
    """
    Computes the mean value along the given axis(es) of a tensor `input`.

    Parameters
    ----------
    axis : None or int or (list of int) (see `Sum`)
        Compute the mean along this axis of the tensor.
        None means all axes (like numpy).
    dtype: None or string
        Dtype to cast the result of the inner summation into.
        For instance, by default, a sum of a float32 tensor will be
        done in float64 (acc_dtype would be float64 by default),
        but that result will be casted back in float32.
    op : bool
        If True, use the Mean Op (which always works in float64) instead
        of the sum/division graph built below.
    keepdims: bool
        If this is set to True, the axes which are reduced are
        left in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original tensor.
    acc_dtype: None or string
        Dtype to use for the inner summation. This will not
        necessarily be the dtype of the output (in particular
        if it is a discrete (int/uint) dtype, the output will
        be in a float type). If None, then we use the same rules as `sum()`.

    Notes
    -----
    For gpu, if you specify dtype=float32, everything will be done on the gpu.

    """
    input = as_tensor_variable(input)
    if op:
        # The Mean Op only supports float64 output/accumulation.
        if dtype not in (None, 'float64'):
            raise NotImplementedError(
                'The Mean op does not support the dtype argument, '
                'and will always use float64. If you want to specify '
                'the dtype, call tensor.mean(..., op=False).',
                dtype)
        if acc_dtype not in (None, 'float64'):
            raise NotImplementedError(
                'The Mean op does not support the acc_dtype argument, '
                'and will always use float64. If you want to specify '
                'acc_dtype, call tensor.mean(..., op=False).',
                dtype)
        out = Mean(axis)(input)
        if keepdims:
            out = makeKeepDims(input, out, axis)
        return out

    if dtype is not None:
        # The summation will be done with the specified dtype.
        # sum() will complain if it is not suitable.
        sum_dtype = dtype
    else:
        sum_dtype = None
    # float16 overflows way too fast for sum
    if ((sum_dtype == 'float16' or input.dtype == 'float16') and
            acc_dtype != 'float16'):
        # BUG FIX: this line previously read `sum_dtype == 'float32'`,
        # a no-op comparison, so the intended float32 upcast never took
        # effect and float16 inputs were summed in float16.
        sum_dtype = 'float32'

    s = sum(input, axis=axis, dtype=sum_dtype, keepdims=keepdims,
            acc_dtype=acc_dtype)
    shp = shape(input)

    # Cast shp into a float type
    # TODO Once we have a consistent casting policy, we could simply
    # use true_div.
    if s.dtype in ('float16', 'float32', 'complex64'):
        shp = cast(shp, 'float32')
    else:
        shp = cast(shp, 'float64')

    # Normalize axis to a list of ints covering every reduced dimension.
    if axis is None:
        axis = list(range(input.ndim))
    elif isinstance(axis, (integer_types, numpy.integer)):
        axis = [axis]
    elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
        axis = [int(axis)]
    else:
        axis = [int(a) for a in axis]

    # This sequential division will possibly be optimized by Theano:
    for i in axis:
        s = true_div(s, shp[i])

    # The division above was done in float32/float64; cast back down when
    # a float16 result is expected.
    if dtype == 'float16' or (dtype is None and input.dtype == 'float16'):
        s = cast(s, 'float16')
    s.name = 'mean'
    return s
@constructor
def var(input, axis=None, keepdims=False):
    """
    Computes the variance along the given axis(es) of a tensor `input`.

    Parameters
    ----------
    axis: None or int or (list of int) (see `Sum`)
        Compute the variance along this axis of the tensor.
        None means all axes (like numpy).
    keepdims : bool
        If this is set to True, the axes which are reduced are
        left in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original tensor.

    Notes
    -----
    It uses the two-pass algorithm for more stable results.
    https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
    There exist other implementations that are even more stable, but probably
    slower.

    """
    # Normalize axis to a list of ints.
    ndim = input.type.ndim
    if axis is None:
        axis = list(range(ndim))
    elif isinstance(axis, (integer_types, numpy.integer)):
        axis = [axis]
    elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
        axis = [int(axis)]
    else:
        axis = [int(a) for a in axis]
    # First pass: the axis-wise mean; second pass: mean of squared deviations.
    centered = input - mean(input, axis, keepdims=True)
    v = mean(centered ** 2, axis, keepdims=keepdims)
    v.name = 'var'
    return v
@constructor
def std(input, axis=None, keepdims=False):
    """
    Computes the standard deviation along the given axis(es) of a tensor `input`.

    Parameters
    ----------
    axis : None or int or (list of int) (see `Sum`)
        Compute the standard deviation along this axis of the tensor.
        None means all axes (like numpy).
    keepdims : bool
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result will
        broadcast correctly against the original tensor.

    Notes
    -----
    It calls `var()` and `var()` uses the two-pass algorithm for more stable
    results.
    https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
    There exist other implementations that are even more stable, but probably
    slower.

    """
    variance = var(input=input, axis=axis, keepdims=keepdims)
    ret = sqrt(variance)
    ret.name = 'std'
    return ret
class Default(gof.Op):
    """
    Takes an input x and a default value.

    If the input is not None, a reference to it is returned.
    If the input is None, a copy of the default value is returned instead.
    The input and the default must have exactly the same type.

    """

    view_map = {0: [0]}
    __props__ = ()

    def make_node(self, x, default):
        x = as_tensor_variable(x)
        default = as_tensor_variable(default)
        if x.type != default.type:
            raise TypeError('Both default() arguments must have same type',
                            x, default)
        return gof.Apply(self, [x, default], [default.type()])

    def perform(self, node, inp, out_):
        x, default = inp
        out, = out_
        if x is not None:
            out[0] = x
        else:
            # why copy? Theano can't yet understand out[0] being a view of
            # either x or y, so we can be a view of x, but only a copy of y.
            out[0] = default.copy()
# Module-level singleton instance of the Default Op.
default = Default()
setdefault = default  # legacy alias
##########################
# Arithmetics
##########################
@_scal_elemwise
def maximum(x, y):
    """elemwise maximum. See max for the maximum in one tensor"""
    # see decorator for function body
@_scal_elemwise
def minimum(x, y):
    """elemwise minimum. See min for the minimum in one tensor"""
    # see decorator for function body
def div_proxy(x, y):
    """Proxy for either true_div or int_div, depending on types of x, y."""
    x_is_discrete = as_tensor_variable(x).dtype in discrete_dtypes
    y_is_discrete = as_tensor_variable(y).dtype in discrete_dtypes
    f = scal.int_or_true_div(x_is_discrete, y_is_discrete)
    if f is scal.int_div:
        return int_div(x, y)
    return true_div(x, y)
# NOTE: shadows the builtin divmod within this module.
def divmod(x, y):
    """elementwise divmod, using floor_div and mod_check"""
    return floor_div(x, y), mod_check(x, y)
@_scal_elemwise
def add(a, *other_terms):
    """elementwise addition"""
    # see decorator for function body
@_scal_elemwise
def sub(a, b):
    """elementwise subtraction"""
    # see decorator for function body
@_scal_elemwise
def mul(a, *other_terms):
    """elementwise multiplication"""
    # see decorator for function body
@_scal_elemwise
def true_div(a, b):
    """elementwise [true] division (inverse of multiplication)"""
    # see decorator for function body
@_scal_elemwise
def int_div(a, b):
    """elementwise [floor] division (inverse of multiplication)"""
    # see decorator for function body
# floor_div and int_div are the same thing
floor_div = int_div
def ceil_intdiv(a, b):
    """
    Safely compute ceil(float_division(a, b)).

    Works for all dtypes, but mostly useful when a and b are int.

    """
    # If a and b are int with not many significant bits, we could
    # cast them to float to avoid doing the modulo. We do not know if this
    # is faster or not. But this is not safe for int64 as the cast will
    # lose precision.
    # e.g.: cast(cast(a, scalar.upcast(a, 'float32')) / b, scal.upcast(a, b))

    # We cast for the case when a and b are uint*. Otherwise neq will
    # force their upcast to int.
    div = int_div(a, b)
    has_remainder = cast(neq(a % b, 0), div.dtype)
    ret = has_remainder + div
    assert ret.dtype == scal.upcast(div.owner.inputs[0], div.owner.inputs[1])
    return ret
def mod_check(x, y):
    """Make sure we do not try to use complex numbers."""
    x_is_complex = as_tensor_variable(x).dtype in complex_dtypes
    y_is_complex = as_tensor_variable(y).dtype in complex_dtypes
    if x_is_complex or y_is_complex:
        # Currently forbidden.
        raise scal.Mod.complex_error
    return mod(x, y)
@_scal_elemwise
def mod(a, b):
    """elementwise modulo"""
    # see decorator for function body
@_scal_elemwise
def pow(a, b):
    """elementwise power"""
    # see decorator for function body
@_scal_elemwise
def clip(x, min, max):
    """
    Clip x to be between min and max.

    Notes
    -----
    When `x` is equal to the boundaries, the output is considered
    to be `x`, so at these points, the gradient of the cost wrt the output
    will be propagated to `x`, not to `min` nor `max`. In other words,
    on these points, the gradient wrt `x` will be equal to the gradient wrt
    the output, and the gradient wrt `min` and `max` will be zero.

    """
    # see decorator for function body
    # for grep: clamp, bound
# Pretty-printing rules for the arithmetic Ops:
# OperatorPrinter(symbol, precedence, associativity).
pprint.assign(add, printing.OperatorPrinter('+', -2, 'either'))
pprint.assign(mul, printing.OperatorPrinter('*', -1, 'either'))
pprint.assign(sub, printing.OperatorPrinter('-', -2, 'left'))
pprint.assign(neg, printing.OperatorPrinter('-', 0, 'either'))
pprint.assign(true_div, printing.OperatorPrinter('/', -1, 'left'))
pprint.assign(int_div, printing.OperatorPrinter('//', -1, 'left'))
pprint.assign(pow, printing.OperatorPrinter('**', 1, 'right'))
##########################
# View Operations
##########################
def extract_constant(x, elemwise=True, only_process_constants=False):
    """
    This function is basically a call to tensor.get_scalar_constant_value.

    The main difference is the behaviour in case of failure. While
    get_scalar_constant_value raises a TypeError, this function returns x,
    as a tensor if possible. If x is a ScalarVariable from a
    scalar_from_tensor, we remove the conversion. If x is just a
    ScalarVariable, we convert it to a tensor with tensor_from_scalar.

    """
    try:
        x = get_scalar_constant_value(x, elemwise, only_process_constants)
    except NotScalarConstantError:
        # Not a constant: fall through and return x (possibly converted).
        pass
    is_scalar_var = isinstance(
        x, (scal.ScalarVariable, scal.sharedvar.ScalarSharedVariable))
    if is_scalar_var:
        if x.owner and isinstance(x.owner.op, ScalarFromTensor):
            # Undo the tensor -> scalar conversion instead of stacking
            # another conversion on top.
            x = x.owner.inputs[0]
        else:
            x = tensor_from_scalar(x)
    return x
def transpose(x, axes=None):
    """
    Reorder the dimensions of x. (Default: reverse them)

    This is a macro around dimshuffle that matches the numpy.transpose
    function.

    """
    # Compute the full-reversal order once; it is both the default and the
    # condition for the '.T' naming below.
    reversed_order = list(range((x.ndim - 1), -1, -1))
    if axes is None:
        axes = reversed_order
    ret = DimShuffle(x.broadcastable, axes, inplace=False)(x)
    if x.name and axes == reversed_order:
        ret.name = x.name + '.T'
    return ret
def batched_dot(a, b):
    """
    Compute the batched dot product of two variables:

        batched_dot(a, b)[i] = dot(a[i], b[i])

    Note that this batched_dot function does one of three things, in the
    following sequence:

        1.  If either a or b is a vector, it returns the batched elementwise
            product without calling the Theano BatchedDot op.
        2.  If both a and b have either 2 or 3 dimensions, it calls Theano's
            BatchedDot op on a and b.
        3.  If either a or b has more than 3 dimensions, it calls Theano's
            batched_tensordot function with appropriate axes. The
            batched_tensordot function expresses high-dimensional batched
            dot products in terms of batched matrix-matrix dot products, so
            it may be possible to futherize optimize for performance.

    """
    a, b = as_tensor_variable(a), as_tensor_variable(b)
    if a.ndim == 0:
        raise TypeError("a must have at least one (batch) axis")
    if b.ndim == 0:
        raise TypeError("b must have at least one (batch) axis")
    # Vector cases: a batched elementwise product with broadcasting.
    if a.ndim == 1:
        return a.dimshuffle(*([0] + ["x"] * (b.ndim - 1))) * b
    if b.ndim == 1:
        return a * b.dimshuffle(*([0] + ["x"] * (a.ndim - 1)))
    # High-dimensional case: reduce to batched matrix products.
    if a.ndim > 3 or b.ndim > 3:
        return batched_tensordot(
            a, b, [[a.ndim - 1], [numpy.maximum(1, b.ndim - 2)]])
    # avoid circular import
    return theano.tensor.blas.BatchedDot()(a, b)
def batched_tensordot(x, y, axes=2):
    """
    Compute a batched tensordot product.

    A hybrid of batched_dot and tensordot, this function computes the
    tensordot product between the two tensors, by iterating over the
    first dimension to perform a sequence of tensordots.

    Parameters
    ----------
    x : tensor
        A Tensor with sizes e.g.: for 3D (dim1, dim3, dim2)
    y : tensor
        A Tensor with sizes e.g.: for 3D (dim1, dim2, dim4)
    axes: int or array-like of length 2
        If an integer, the number of axes to sum over.
        If an array, it must have two array elements containing the axes to
        sum over in each tensor.

        If an integer i, it is converted to an array containing
        the last i dimensions of the first tensor and the first
        i dimensions of the second tensor (excluding the first
        (batch) dimension):

            axes = [list(range(a.ndim - i, b.ndim)), list(range(1,i+1))]

        If an array, its two elements must contain compatible axes
        of the two tensors. For example, [[1, 2], [2, 4]] means sum
        over the 2nd and 3rd axes of a and the 3rd and 5th axes of b.
        (Remember axes are zero-indexed!) The 2nd axis of a and the
        3rd axis of b must have the same shape; the same is true for
        the 3rd axis of a and the 5th axis of b.

    Like tensordot, this function uses a series of dimshuffles and
    reshapes to reduce the tensor dot product to a matrix or vector
    dot product. Finally, it calls batched_dot to compute the result.

    """
    # Delegate to the shared helper, using batched_dot as the inner product.
    return _tensordot_as_dot(x, y, axes, dot=batched_dot, batched=True)
def split(x, splits_size, n_splits, axis=0):
    """Partition `x` into `n_splits` pieces of sizes `splits_size` along `axis`."""
    split_op = Split(n_splits)
    return split_op(x, axis, splits_size)
class Split(Op):
    """Partition a `TensorVariable` along some axis.

    Examples
    --------
    >>> x = vector()
    >>> splits = lvector()
    You have to declare right away how many split_points there will be.
    >>> ra, rb, rc = split(x, splits, n_splits = 3, axis = 0)
    >>> f = function([x, splits], [ra, rb, rc])
    >>> a, b, c = f([0,1,2,3,4,5], [3, 2, 1])
    a == [0,1,2]
    b == [3, 4]
    c == [5]

    """

    # A Split instance will have this many outputs, and require that
    # the splits argument to `perform` have exactly this many elements.
    len_splits = None

    __props__ = ("len_splits",)

    def __init__(self, len_splits):
        self.len_splits = int(len_splits)

    def __str__(self):
        return self.__class__.__name__ + "{%s}" % self.len_splits

    def make_node(self, x, axis, splits):
        """Build the Apply node; `axis` must be an lscalar, `splits` an lvector."""
        x = as_tensor_variable(x)
        axis = as_tensor_variable(axis)
        splits = as_tensor_variable(splits)

        if splits.type not in int_vector_types:
            raise TypeError('splits must have type tensor.lvector',
                            splits.type)
        if axis.type not in int_types:
            raise TypeError('axis must have type lscalar', axis.type)

        # # The following lines are necessary if we allow splits of zero
        # if isinstance(axis, gof.Constant):
        #     x = unbroadcast(x, int(axis.data))
        # else:
        #     x = unbroadcast(x, *range(x.type.ndim))

        inputs = [x, axis, splits]
        outputs = [x.type() for i in xrange(self.len_splits)]

        return Apply(self, inputs, outputs)

    def perform(self, node, inputs, outputs):
        """Extract len_splits consecutive subtensors of x along axis."""
        x, axis, splits = inputs
        # in python 2.4, x.shape[numpy.asarray(1)] don't work.
        if sys.version_info[0:2] == (2, 4) and axis.size == 1:
            axis = int(axis)

        try:
            len_along_axis = x.shape[axis]
        except Exception:
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt and SystemExit.
            raise ValueError('Split.perform() with axis=(%s) is invalid'
                             ' for x.shape==(%s)'
                             % (axis, x.shape))
        if len(splits) != self.len_splits:
            raise ValueError('In Split.perform(), len(splits) != len_splits.',
                             (len(splits), self.len_splits))

        if numpy.sum(splits) != len_along_axis:
            raise ValueError('The splits sum to %s, expected %s' %
                             (numpy.sum(splits), len_along_axis))
        if python_any([nb < 0 for nb in splits]):
            raise ValueError('Split: you tried to make an ndarray with a '
                             'negative number of elements.')

        # Checking is done, let's roll the splitting algorithm!
        # Basically we step along the given axis of x, extracting
        # subtensors of size splits[i] as we go along.
        general_key = [slice(None, None, None) for s in x.shape]
        lower_idx = 0
        for i in xrange(self.len_splits):
            upper_idx = lower_idx + splits[i]
            general_key[axis] = slice(lower_idx, upper_idx, None)
            outputs[i][0] = x.__getitem__(tuple(general_key)).copy()
            lower_idx = upper_idx

    def infer_shape(self, node, in_shapes):
        axis = node.inputs[1]
        splits = node.inputs[2]
        shp_x, shp_axis, shp_splits = in_shapes
        out_shapes = []
        for i in xrange(self.len_splits):
            # Each output has x's shape, but with splits[i] along `axis`.
            temp = as_tensor_variable(shp_x)
            temp = theano.tensor.subtensor.set_subtensor(temp[axis], splits[i])
            temp = [temp[i] for i in xrange(len(shp_x))]
            out_shapes.append(temp)

        return out_shapes

    def grad(self, inputs, g_outputs):
        """Join the gradients along the axis that was used to split x."""
        x, axis, n = inputs
        outputs = self(*inputs, **dict(return_list=True))
        # If all the output gradients are disconnected, then so are the inputs
        if python_all([isinstance(g.type, DisconnectedType)
                       for g in g_outputs]):
            return [DisconnectedType()(),
                    grad_undefined(self, 1, axis),
                    grad_undefined(self, 2, n)]
        # Else, we have to make them zeros before joining them
        new_g_outputs = []
        for o, g in zip(outputs, g_outputs):
            if isinstance(g.type, DisconnectedType):
                new_g_outputs.append(o.zeros_like())
            else:
                new_g_outputs.append(g)

        return [join(axis, *new_g_outputs),
                grad_undefined(self, 1, axis),
                grad_undefined(self, 2, n)]

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            # BUG FIX: this previously read `for i in self.len_splits`,
            # iterating over an int, which raised TypeError whenever this
            # branch was taken.
            return [None for i in xrange(self.len_splits)]
        return self.make_node(eval_points[0], *inputs[1:]).outputs
def addbroadcast(x, *axes):
    """
    Make the input broadcastable in the specified axes.

    For example, addbroadcast(x, 0) will make the first dimension of
    x broadcastable. When performing the function, if the length of
    x along that dimension is not 1, a ValueError will be raised.

    We apply the opt here not to pollute the graph especially during
    the gpu optimization.

    Parameters
    ----------
    x : tensor_like
        Input theano tensor.
    axis : an int or an iterable object such as list or tuple of int values
        The dimension along which the tensor x should be broadcastable.
        If the length of x along these dimensions is not 1, a ValueError will
        be raised.

    Returns
    -------
    tensor
        A theano tensor, which is broadcastable along the specified dimensions.

    """
    flags = [(axis, True) for axis in axes]
    rval = Rebroadcast(*flags)(x)
    return theano.tensor.opt.apply_rebroadcast_opt(rval)
def unbroadcast(x, *axes):
    """
    Make the input impossible to broadcast in the specified axes.

    For example, unbroadcast(x, 0) will make the first dimension
    of x unbroadcastable. When performing the function, if the length
    of x along that dimension is not 1, a ValueError will be raised.

    We apply the opt here not to pollute the graph especially during
    the gpu optimization.

    Parameters
    ----------
    x : tensor_like
        Input theano tensor.
    axis : an int or an iterable object such as list or tuple of int values
        The dimension along which the tensor x should be unbroadcastable.
        If the length of x along these dimensions is not 1, a ValueError will
        be raised.

    Returns
    -------
    tensor
        A theano tensor, which is unbroadcastable along the specified
        dimensions.
    """
    # (axis, False) clears the broadcastable flag on each requested axis.
    rval = Rebroadcast(*[(axis, False) for axis in axes])(x)
    return theano.tensor.opt.apply_rebroadcast_opt(rval)
def patternbroadcast(x, broadcastable):
    """
    Make the input adopt a specific broadcasting pattern.

    Broadcastable must be iterable. For example,
    patternbroadcast(x, (True, False)) will make the first
    dimension of x broadcastable and the second dimension
    not broadcastable, so x will now be a row.

    The rebroadcast optimization is applied immediately so the extra
    Rebroadcast node does not pollute the graph (especially during the
    GPU optimization).

    Parameters
    ----------
    x : tensor_like
        Input theano tensor.
    broadcastable : an iterable object such as list or tuple of bool values
        A set of boolean values indicating whether a dimension should be
        broadcastable or not. If the length of x along these dimensions is
        not 1, a ValueError will be raised.

    Returns
    -------
    tensor
        A theano tensor with the requested broadcastable pattern.
    """
    # enumerate() yields the same (dimension index, flag) pairs as the
    # original explicit xrange-based comprehension.
    axis_flags = list(enumerate(broadcastable))
    rebroadcasted = Rebroadcast(*axis_flags)(x)
    return theano.tensor.opt.apply_rebroadcast_opt(rebroadcasted)
class Join(Op):
    """
    Concatenate multiple `TensorVariable`s along some axis.

    The axis must be given as first argument. All tensors must have the same
    shape along all dimensions other than this axis.
    Of course, TensorVariable instances do not have a shape, so this error
    cannot be caught until runtime. See `perform()`.

    See Also
    --------
    stack : For joins involving scalar values

    Examples
    --------
    >>> x, y, z = tensor.matrix(), tensor.matrix(), tensor.matrix()
    >>> u = tensor.vector()
    >>> r = join(0, x, y, z)
    >>> c = join(1, x, y, z)
    >>> join(2, x, y, z)    # WRONG: the axis has to be an index into the shape
    >>> join(0, x, u)       # WRONG: joined tensors must have the same rank
    """
    check_input = False
    __props__ = ()

    def make_node(self, *axis_and_tensors):
        """
        Parameters
        ----------
        axis: an Int or integer-valued Variable
        tensors
            A variable number (but not zero) of tensors to
            concatenate along the specified axis. These tensors must have
            the same shape along all dimensions other than this axis.

        Returns
        -------
        A symbolic Variable
            It has the same ndim as the input tensors, and the most inclusive
            dtype.
        """
        axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
        if not tensors:
            raise ValueError('Cannot join an empty list of tensors')
        as_tensor_variable_args = [as_tensor_variable(x) for x in tensors]
        # Output dtype is the upcast of all the input dtypes.
        dtypes = [x.type.dtype for x in as_tensor_variable_args]
        out_dtype = scal.upcast(*dtypes)

        def output_maker(bcastable):
            return tensor(dtype=out_dtype, broadcastable=bcastable)

        return self._make_node_internal(
            axis, tensors, as_tensor_variable_args, output_maker)

    def _make_node_internal(self, axis, tensors,
                            as_tensor_variable_args, output_maker):
        # Compute the output broadcastable pattern; `output_maker` builds the
        # actual output variable (overridable so GPU subclasses can reuse
        # this logic).
        if not python_all(targs.type.ndim for targs
                          in as_tensor_variable_args):
            raise TypeError('Join cannot handle arguments of dimension 0.'
                            ' For joining scalar values, see @stack')
        # Handle single-tensor joins immediately.
        if len(as_tensor_variable_args) == 1:
            bcastable = list(as_tensor_variable_args[0].type.broadcastable)
        else:
            # When the axis is fixed, a dimension should be
            # broadcastable if at least one of the inputs is
            # broadcastable on that dimension (see justification below),
            # except for the axis dimension.
            # Initialize bcastable all false, and then fill in some trues with
            # the loops.
            bcastable = [False] * len(
                as_tensor_variable_args[0].type.broadcastable)
            ndim = len(bcastable)
            # Axis can also be a constant
            if not isinstance(axis, integer_types):
                try:
                    # Note : `get_scalar_constant_value` returns a ndarray not
                    # an int
                    axis = int(get_scalar_constant_value(axis))
                except NotScalarConstantError:
                    pass
            if isinstance(axis, integer_types):
                # Basically, broadcastable -> length 1, but the
                # converse does not hold. So we permit e.g. T/F/T
                # joins, and if they fail at runtime they fail, but if
                # they don't then it means that the argument where
                # that broadcastable flag was False had length 1 along
                # this dimension, and therefore this dimension should
                # be broadcastable for the output.
                if axis < -ndim:
                    raise IndexError("Join axis %d out of bounds [0, %d)" %
                                     (axis, ndim))
                if axis < 0:
                    axis += ndim
                for x in as_tensor_variable_args:
                    for current_axis, bflag in enumerate(x.type.broadcastable):
                        # Constant negative axis can no longer be negative at
                        # this point. It safe to compare this way.
                        if current_axis == axis:
                            continue
                        if bflag:
                            bcastable[current_axis] = True
                try:
                    # The join axis itself is never broadcastable.
                    bcastable[axis] = False
                except IndexError:
                    raise ValueError('Join argument "axis" is out of range'
                                     ' (given input dimensions)')
            else:
                # When the axis may vary, no dimension can be guaranteed to be
                # broadcastable.
                bcastable = [False] * len(
                    as_tensor_variable_args[0].type.broadcastable)
        if not python_all([x.ndim == len(bcastable)
                           for x in as_tensor_variable_args[1:]]):
            raise TypeError("Join() can only join tensors with the same "
                            "number of dimensions.")
        inputs = [as_tensor_variable(axis)] + list(as_tensor_variable_args)
        if inputs[0].type not in int_types:
            raise TypeError('Axis could not be cast to an integer type',
                            axis, inputs[0].type, int_types)
        outputs = [output_maker(bcastable)]
        node = Apply(self, inputs, outputs)
        return node

    def perform(self, node, axis_and_tensors, out_):
        # Numeric evaluation: delegate to numpy.concatenate, then cast back
        # to the symbolic output dtype.
        out, = out_
        axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
        ndim = tensors[0].ndim
        # Match the bounds check done in the C implementation; too-positive
        # axes are left for numpy.concatenate to reject.
        if axis < -ndim:
            raise IndexError("Join axis %d out of bounds [0, %d)" %
                             (axis, ndim))
        out[0] = theano._asarray(numpy.concatenate(tensors, axis=axis),
                                 dtype=node.outputs[0].type.dtype)

    def c_code_cache_version(self):
        return (3,)

    def c_code(self, node, name, inputs, outputs, sub):
        # Generate C code that builds a Python list of the input arrays and
        # calls PyArray_Concatenate on it.
        axis, tensors = inputs[0], inputs[1:]
        input_1 = tensors[0]
        l = len(tensors)
        out, = outputs
        fail = sub['fail']
        adtype = node.inputs[0].type.dtype_specs()[1]
        code = """
        PyObject* list = PyList_New(%(l)s);
        """ % locals()
        for i, inp in enumerate(tensors):
            # PyList_SetItem steals a reference, hence the Py_INCREF.
            code += """
            Py_INCREF(%(inp)s);
            PyList_SetItem(list, %(i)s, (PyObject*)%(inp)s);
            """ % locals()
        code += """
        //PyObject* PyArray_Concatenate(PyObject* obj, int axis)
        int axis = ((%(adtype)s *)PyArray_DATA(%(axis)s))[0];
        int ndim = PyArray_NDIM(%(input_1)s);
        if( axis < -ndim ){
            PyErr_Format(PyExc_IndexError,
                         "Join axis %%d out of bounds [0, %%d)", axis, ndim);
            %(fail)s
        }
        Py_XDECREF(%(out)s);
        %(out)s = (PyArrayObject *)PyArray_Concatenate(list, axis);
        Py_DECREF(list);
        if(!%(out)s){
            %(fail)s
        }
        """ % locals()
        return code

    def R_op(self, inputs, eval_points):
        # R-op: join the evaluation points the same way the inputs were
        # joined.  All tensor eval points must be known.
        if None in eval_points[1:]:
            return [None]
        return self.make_node(inputs[0], *eval_points[1:]).outputs

    def grad(self, axis_and_tensors, grads):
        """ The gradient wrt a join op is a `Split`, used to partition
        the gradient along the `axis` which was used for joining.
        """
        gz, = grads
        axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
        # The gradient wrt the axis (an integer) is undefined.
        rval = [grad_undefined(self, 0, axis)]
        dtypes = [as_tensor_variable(x).type.dtype for x in tensors]
        out_dtype = scal.upcast(*dtypes)
        if 'float' in out_dtype or 'complex' in out_dtype:
            # assume that this is differentiable
            split = Split(len(tensors))
            split_gz = split(gz, axis, stack([shape(x)[axis]
                                             for x in tensors]))
            # If there is only one split, it might not be in a list.
            if not isinstance(split_gz, list):
                split_gz = [split_gz]
            # Split.make_node isn't always able to infer the right
            # broadcast. As the grad need to keep the information,
            # read it if needed.
            split_gz = [patternbroadcast(g, t.broadcastable)
                        for t, g in zip(tensors, split_gz)]
            rval = rval + split_gz
        else:
            # the output has integer type, so the gradient through it
            # is 0
            # NOTE: the comprehension variable `tensor` shadows the
            # module-level `tensor` constructor inside this expression only.
            rval = rval + [tensor.zeros_like(dtype=config.floatX)
                           for tensor in tensors]
        return rval

    def infer_shape(self, node, ishapes):
        # ishapes[0] contains the size of the axis on which we join
        # Join op should get at least one input to join
        assert len(ishapes) > 1
        n_dim = len(ishapes[1])
        for shp in ishapes[1:]:
            assert shp is not None
            assert len(shp) == n_dim
        # The joining dimension could be negative, but we need it to be
        # in [0, n_dim) in the loop below.
        # An axis < -n_dim or >= ndim would be invalid, but this is
        # not checked here. An Assert op would be a way of addressing that,
        # but it may disrupt optimizations.
        join_dim = switch(ge(node.inputs[0], 0),
                          node.inputs[0],
                          node.inputs[0] + n_dim)
        out_shapes = []
        for dim in xrange(n_dim):
            # we have to deal with 2 possible cases in here :
            #   a) we are dealing with the dimension for which we join
            #     (called t_side from true side of the if, where the if
            #     compares current dimension with the joining dimension)
            #   b) a non joining dimension ( in which maybe a symbolic
            #      assertion can be used to make sure all tensors have
            #      the same number of elements on this non-joined dimension
            #      this is f_side
            # initialize
            t_side = ishapes[1][dim]
            f_side = ishapes[1][dim]
            # loop over tensors and sum for the joining dimension
            for shp in ishapes[2:]:
                t_side = t_side + shp[dim]
            # return the dimensions found
            out_shapes.append(switch(eq(dim, join_dim),
                              t_side, f_side))
        return [tuple(out_shapes)]
"""
Convenience function to concatenate `TensorType`s along the given axis.
Parameters
----------
tensors : list of tensors (or list-like)
A list of tensors to be concatenated along the given axis.
The shapes of the tensors to be concatenated must be all
identical, except in the dimension (`axis`) on which they are to
be joined.
axis : int (symbolic or literal)
On which dimension should the tensors be joined? The `axis`
must be a valid index into the shape of the tensors to be
concatenated.
The `axis` parameter may either be an integer or an object that
can be converted to a scalar using `as_scalar`(`axis`). In the
former case, the axis is fixed at construction, while in the
latter it may vary over time depending on the value of the
`axis` variable.
"""
join = Join()
pprint.assign(lambda pstate, r: r.owner and isinstance(r.owner.op, Join),
printing.FunctionPrinter('join'))
def roll(x, shift, axis=None):
    """
    Convenience function to roll TensorTypes along the given axis.

    Syntax copies numpy.roll function.

    Parameters
    ----------
    x : tensor_like
        Input tensor.
    shift : int (symbolic or literal)
        The number of places by which elements are shifted.
    axis : int (symbolic or literal), optional
        The axis along which elements are shifted. By default, the array
        is flattened before shifting, after which the original
        shape is restored.

    Returns
    -------
    tensor
        Output tensor, with the same shape as ``x``.
    """
    if axis is None:
        if x.ndim > 1:
            # Flatten, roll along the single remaining axis, then restore
            # the original shape.
            return roll(x.flatten(), shift, axis=0).reshape(x.shape)
        axis = 0
    # Full slice (':') for every dimension other than `axis`.
    full = slice(None)
    before = [full] * axis
    after = [full] * (x.ndim - axis - 1)
    # The trailing `shift` elements come first in the result ...
    front = before + [slice(-shift, None)] + after
    # ... followed by the remaining leading elements.
    back = before + [slice(0, -shift)] + after
    return join(axis, x[tuple(front)], x[tuple(back)])
@constructor
def shape_padleft(t, n_ones=1):
    """Reshape `t` by left-padding the shape with `n_ones` 1s.

    See Also
    --------
    shape_padaxis
    shape_padright
    Dimshuffle
    """
    _t = as_tensor_variable(t)
    # Prepend `n_ones` broadcastable ('x') dimensions, keep the rest in order.
    dims = ['x'] * n_ones + list(range(_t.type.ndim))
    return DimShuffle(_t.broadcastable, dims)(_t)
@constructor
def shape_padright(t, n_ones=1):
    """Reshape `t` by right-padding the shape with `n_ones` 1s.

    See Also
    --------
    shape_padaxis
    shape_padleft
    Dimshuffle
    """
    _t = as_tensor_variable(t)
    # Keep existing dimensions in order, append `n_ones` broadcastable
    # ('x') dimensions at the end.
    dims = list(range(_t.type.ndim)) + ['x'] * n_ones
    return DimShuffle(_t.broadcastable, dims)(_t)
@constructor
def shape_padaxis(t, axis):
    """Reshape `t` by inserting 1 at the dimension `axis`.

    Example
    -------
    >>> tensor = theano.tensor.tensor3()
    >>> theano.tensor.shape_padaxis(tensor, axis=0)
    DimShuffle{x,0,1,2}.0
    >>> theano.tensor.shape_padaxis(tensor, axis=1)
    DimShuffle{0,x,1,2}.0
    >>> theano.tensor.shape_padaxis(tensor, axis=3)
    DimShuffle{0,1,2,x}.0
    >>> theano.tensor.shape_padaxis(tensor, axis=-1)
    DimShuffle{0,1,2,x}.0

    See Also
    --------
    shape_padleft
    shape_padright
    Dimshuffle
    """
    _t = as_tensor_variable(t)
    # The result has one more dimension, so valid axes are [-ndim, ndim).
    ndim = _t.ndim + 1
    if axis < -ndim or axis >= ndim:
        msg = 'axis {0} is out of bounds [-{1}, {1})'.format(axis, ndim)
        raise IndexError(msg)
    if axis < 0:
        axis += ndim
    # Insert a broadcastable ('x') dimension at the requested position.
    dims = list(range(_t.type.ndim))
    dims.insert(axis, 'x')
    return DimShuffle(_t.broadcastable, dims)(_t)
@constructor
def stack(*tensors, **kwargs):
    """Stack tensors in sequence on given axis (default is 0).

    Take a sequence of tensors and stack them on given axis to make a single
    tensor. The size in dimension `axis` of the result will be equal to the
    number of tensors passed.

    Note: The interface stack(*tensors) is deprecated, you should use
    stack(tensors, axis=0) instead.

    Parameters
    ----------
    tensors : list or tuple of tensors
        A list of tensors to be stacked.
    axis : int
        The index of the new axis. Default value is 0.

    Examples
    --------
    >>> a = theano.tensor.scalar()
    >>> b = theano.tensor.scalar()
    >>> c = theano.tensor.scalar()
    >>> x = theano.tensor.stack([a, b, c])
    >>> x.ndim # x is a vector of length 3.
    1
    >>> a = theano.tensor.tensor4()
    >>> b = theano.tensor.tensor4()
    >>> c = theano.tensor.tensor4()
    >>> x = theano.tensor.stack([a, b, c])
    >>> x.ndim # x is a 5d tensor.
    5
    >>> rval = x.eval(dict((t, np.zeros((2, 2, 2, 2))) for t in [a, b, c]))
    >>> rval.shape # 3 tensors are stacked on axis 0
    (3, 2, 2, 2, 2)
    >>> x = theano.tensor.stack([a, b, c], axis=3)
    >>> x.ndim
    5
    >>> rval = x.eval(dict((t, np.zeros((2, 2, 2, 2))) for t in [a, b, c]))
    >>> rval.shape # 3 tensors are stacked on axis 3
    (2, 2, 2, 3, 2)
    >>> x = theano.tensor.stack([a, b, c], axis=-2)
    >>> x.ndim
    5
    >>> rval = x.eval(dict((t, np.zeros((2, 2, 2, 2))) for t in [a, b, c]))
    >>> rval.shape # 3 tensors are stacked on axis -2
    (2, 2, 2, 3, 2)
    """
    # ---> Remove this when moving to the new interface:
    if not tensors and not kwargs:
        raise Exception('theano.tensor.stack(tensors, axis) must have at least'
                        ' one parameter')
    if not kwargs and not isinstance(tensors[0], (list, tuple)):
        # Deprecated variadic interface: stack(t1, t2, ...), axis fixed at 0.
        warnings.warn('stack(*tensors) interface is deprecated, use'
                      ' stack(tensors, axis=0) instead.', DeprecationWarning,
                      stacklevel=3)
        axis = 0
    elif 'tensors' in kwargs:
        # New interface with `tensors` passed by keyword.
        tensors = kwargs['tensors']
        if 'axis' in kwargs:
            axis = kwargs['axis']
        else:
            axis = 0
    else:
        # New interface with the tensor list passed positionally; `axis`
        # may be the second positional argument or a keyword.
        if len(tensors) == 2:
            axis = tensors[1]
        elif 'axis' in kwargs:
            axis = kwargs['axis']
        else:
            axis = 0
        tensors = tensors[0]
    # <--- Until here.
    if len(tensors) == 0:
        raise Exception('tensors is empty. You should at least provide one'
                        ' tensor to theano.tensor.stack(tensors, axis).')
    # If all tensors are scalars of the same type, call make_vector.
    # It makes the graph simpler, by not adding DimShuffles and Rebroadcasts
    # This should be an optimization!
    # Doing it here make the graph less canonicalized
    # (more type need to be understood by all optimization)
    # And DebugMode can't detect error in this code as it is not in an
    # optimization.
    # See ticket #660
    if numpy.all(
        [  # in case there is direct int in tensors.
            isinstance(t, (numpy.number, float, integer_types,
                           python_complex)) or
            (isinstance(t, Variable) and
             isinstance(t.type, TensorType) and
             t.ndim == 0)
            for t in tensors]):
        # in case there is direct int
        tensors = list(map(as_tensor_variable, tensors))
        dtype = scal.upcast(*[i.dtype for i in tensors])
        return theano.tensor.opt.MakeVector(dtype)(*tensors)
    # General case: give every tensor a new broadcastable dim at `axis`,
    # then join along it.
    return join(axis, *[shape_padaxis(t, axis) for t in tensors])
@constructor
def concatenate(tensor_list, axis=0):
    """Alias for `join`(axis, *tensor_list).

    This function is similar to `join`, but uses the signature of
    numpy's concatenate function.

    Raises
    ------
    TypeError
        The tensor_list must be a tuple or list.
    """
    # Catch the common mistake of writing concatenate(x, y) instead of
    # concatenate((x, y)).
    if isinstance(tensor_list, (tuple, list)):
        return join(axis, *tensor_list)
    raise TypeError(
        "The 'tensors' argument must be either a tuple "
        "or a list, make sure you did not forget () or [] around "
        "arguments of concatenate.", tensor_list)
def get_vector_length(v):
    """Return the run-time length of a symbolic vector.

    Parameters
    ----------
    v
        A rank-1 TensorType variable.

    Raises
    ------
    TypeError
        `v` hasn't the proper type.
    ValueError
        No special case applies, the length is not known.

    In general this is not possible, but for a number of special cases
    the length can be determined at compile / graph-construction time.
    This function implements these special cases.
    """
    v = as_tensor_variable(v)
    if v.ndim != 1:
        raise TypeError("argument must be symbolic vector, got '%s'" %
                        v)
    # A broadcastable first (only) dimension means length 1 by definition.
    if v.type.broadcastable[0]:
        return 1
    # Constant vector: length is just the data length.
    if isinstance(v, gof.Constant) and v.type.ndim == 1:
        return len(v.data)
    # MakeVector(e1, e2, ...): one element per input.
    if v.owner and isinstance(v.owner.op, theano.tensor.opt.MakeVector):
        return len(v.owner.inputs)
    # shape(x): one entry per dimension of x.
    if v.owner and isinstance(v.owner.op, Shape):
        return v.owner.inputs[0].type.ndim
    # If we take a slice, we know how many elements it will result in
    # (pattern: shape(x)[start:stop:step] with a constant-enough slice).
    if ((v.owner and
         isinstance(v.owner.op, theano.tensor.subtensor.Subtensor) and
         isinstance(v.owner.op.idx_list[0], slice) and
         v.owner.inputs[0].owner and
         isinstance(v.owner.inputs[0].owner.op, theano.compile.ops.Shape))):
        start = extract_constant(theano.tensor.subtensor.get_idx_list(
            v.owner.inputs, v.owner.op.idx_list)[0].start)
        stop = extract_constant(theano.tensor.subtensor.get_idx_list(
            v.owner.inputs, v.owner.op.idx_list)[0].stop)
        step = extract_constant(theano.tensor.subtensor.get_idx_list(
            v.owner.inputs, v.owner.op.idx_list)[0].step)
        # The sliced vector is a shape, so its length is the ndim of the
        # tensor whose shape was taken.
        ndim = v.owner.inputs[0].owner.inputs[0].ndim
        types = (numbers.Integral, numpy.integer)
        # Normalize start/stop/step the way Python slicing does, clamping
        # to [0, ndim].
        if start is None:
            start = 0
        elif isinstance(start, types) and start < 0:
            start += ndim
            if start < 0:
                start = 0
        if stop is None:
            stop = ndim
        elif isinstance(stop, types):
            if stop > ndim:
                stop = ndim
            elif stop < 0:
                stop += ndim
        if step is None:
            step = 1
        if (isinstance(stop, types) and
                isinstance(start, types) and
                isinstance(step, types) and
                start >= 0 and stop >= 0 and
                step > 0 and stop >= start):
            # Standard formula for the number of elements in a slice.
            return (stop - start - 1) // step + 1
    if isinstance(v, Variable):
        msg = theano.printing.debugprint(v, file='str')
    else:
        msg = str(v)
    raise ValueError("length not known: %s" % msg)
@constructor
def horizontal_stack(*args):
    """
    Horizontally stack two L{TensorType}s.

    Stack two L{TensorType}s along the second axis (column wise). These
    L{TensorType}s must have the same shape along all dimensions but the
    second.
    """
    # Note: 'horizontal_stack' and 'vertical_stack' do not behave exactly like
    # Numpy's hstack and vstack functions. This is intended, because Numpy's
    # functions have potentially confusing/incoherent behavior (try them on 1D
    # arrays). If this is fixed in a future version of Numpy, it may be worth
    # trying to get closer to Numpy's way of doing things. In the meantime,
    # better keep different names to emphasize the implementation divergences.
    assert len(args) >= 2
    for matrix in args:
        assert matrix.type.ndim == 2
    return concatenate(args, axis=1)
@constructor
def vertical_stack(*args):
    """
    Vertically stack two L{TensorType}s.

    Stack two L{TensorType}s along the first axis (row wise). These
    L{TensorType}s must have the same shape along all dimensions but the
    first.
    """
    assert len(args) >= 2
    for matrix in args:
        assert matrix.type.ndim == 2
    return concatenate(args, axis=0)
class Reshape(Op):
    """Perform a reshape operation of the input x to the new shape shp.

    The number of dimensions to which to reshape to (ndim) must be
    known at graph build time.
    """
    view_map = {0: [0]}  # output 0 is potentially aliased to inputs [0]
    _f16_ok = True
    check_input = False
    __props__ = ("ndim",)
    # name does not participate because it doesn't affect computations

    def __init__(self, ndim, name=None):
        # ndim: rank of the reshaped output, fixed at construction.
        self.ndim = ndim
        self.name = name

    def __str__(self):
        return '%s{%s}' % (self.__class__.__name__, self.ndim)

    def make_node(self, x, shp):
        x = as_tensor_variable(x)
        shp_orig = shp
        shp = as_tensor_variable(shp, ndim=1)
        if not (shp.dtype.startswith('int') or
                (isinstance(shp, TensorConstant) and shp.data.size == 0)):
            # It raises an error if shp is not of integer type,
            # except when shp is constant and empty
            # (in this case, shp.dtype does not matter anymore).
            raise TypeError("Shape must be integers", shp, shp.dtype)
        assert shp.ndim == 1
        if isinstance(shp, TensorConstant):
            # Constant shape: dimension is broadcastable iff its size is 1.
            bcast = [s == 1 for s in shp.data]
            return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcast)])
        else:
            bcasts = [False] * self.ndim
            shp_list = shp_orig
            if hasattr(shp_orig, "ndim") and shp_orig.ndim == 0:
                shp_list = [shp_orig]
            for index in xrange(self.ndim):
                y = shp_list[index]
                y = as_tensor_variable(y)
                # Try to see if we can infer that y has a constant value of 1.
                # If so, that dimension should be broadcastable.
                try:
                    bcasts[index] = (
                        hasattr(y, 'get_scalar_constant_value') and
                        y.get_scalar_constant_value() == 1)
                except NotScalarConstantError:
                    pass
            return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcasts)])

    def perform(self, node, inp, out_):
        # Numeric evaluation: delegate to numpy.reshape.
        x, shp = inp
        out, = out_
        if (len(shp) != self.ndim):
            raise ValueError('shape argument to Reshape.perform has incorrect'
                             ' length %i'
                             ', should be %i' % (len(shp), self.ndim), shp)
        try:
            out[0] = numpy.reshape(x, shp)
        except Exception:
            raise ValueError('Cannot reshape input of shape %s to shape %s' %
                             (x.shape, shp))
        if not out[0].flags.aligned:
            raise RuntimeError("numpy.reshape returned a not aligned tensor."
                               " NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
                               " this problem for some input shape/new shape"
                               " combinations. Use another NumPy version."
                               " Input shape: %s, input stride: %s,"
                               " new_shape: %s, new_strides: %s." % (
                                   x.shape, x.strides, shp, out[0].strides))

    def connection_pattern(self, node):
        # Output depends on x (input 0) but not on the shape (input 1).
        return [[True], [False]]

    def grad(self, inp, grads):
        x, shp = inp
        g_out, = grads
        # Gradient wrt x is the output gradient reshaped back to x's shape;
        # the shape input is disconnected.
        return [reshape(g_out, shape(x), ndim=x.ndim),
                DisconnectedType()()]

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return [None]
        # The op is linear in x, so applying it to the eval point suffices.
        return self(eval_points[0], *inputs[1:], **dict(return_list=True))

    def infer_shape(self, node, ishapes):
        # inputs[1] can contain at most one value of '-1', meaning the actual
        # shape of the output will be automatically computed by reshape, so
        # that the total number of elements stays the same.
        # TODO: Maybe put that formula here?
        # It's not trivial, because we would have to check if the product of
        # all the non-minus-one shapes is a divisor of the product of the
        # original shapes.
        # The following expression leads to cycles in feature_shape,
        # because it tries to replace the Shape_i node by the switch
        # statement, which depends on Shape_i.
        # return [tuple([switch(eq(node.inputs[1][i], -1),
        #                      theano.tensor.opt.Shape_i(i)(node.outputs[0]),
        #                      node.inputs[1][i])
        #                    for i in xrange(self.ndim)]
        #    )]
        # Here, we only simplify if the shape (node.inputs[1]) is a constant,
        # ideally it would suffice to check that it is always non-negative.
        # If current variable is a scalar and its dimensionality should
        # change to self.ndim, then use size 1 for all new dimensions.
        if len(ishapes[0]) == 0:
            return [(1,) * self.ndim]
        requ = node.inputs[1]
        if isinstance(requ, theano.tensor.TensorConstant):
            requ = list(requ.data)
            requ_part = [ele for ele in requ if ele != -1]
            crit = len(requ) - len(requ_part)
            if crit == 1 and len(requ_part) > 0:
                # Infer the single -1 entry from the total number of
                # elements divided by the product of the known entries.
                missing = mul(*ishapes[0]) // mul(*requ_part)
                for i, ele in enumerate(requ):
                    if ele == -1:
                        requ[i] = missing
            elif crit == 1:  # we reshape to -1
                requ = [mul(*ishapes[0])] if ishapes[0] else [1]
            elif crit > 1:
                raise ValueError('shape argument to Reshape.perform'
                                 ' must have at most one entry equal to -1')
            return [requ]
        else:
            new_dims = [node.inputs[1][i] for i in xrange(self.ndim)]
            # since new_dims has one negative value (-1), the
            # multiplication of all values should be negated
            # to give a positive value.
            # To avoid optimization complexity, we avoid checking
            # for the case when there are two or more '-1' values.
            return [tuple([switch(eq(new_dims[i], -1),
                                  theano.tensor.mul(*ishapes[0]) //
                                  (-theano.tensor.mul(*new_dims)),
                                  new_dims[i])
                           for i in xrange(self.ndim)])]

    def c_code_cache_version(self):
        return (6,)

    def c_code(self, node, name, inputs, outputs, sub):
        if isinstance(node.inputs[0], TensorVariable):
            x, shp = inputs
            z, = outputs
            new_ndim = self.ndim
            sdtype = node.inputs[1].type.dtype_specs()[1]
            fail = sub['fail']
            return """
            assert (PyArray_NDIM(%(shp)s) == 1);
            npy_intp new_dims[%(new_ndim)s];
            PyArray_Dims newshape;
            newshape.ptr = new_dims;
            newshape.len = %(new_ndim)s;
            for (int ii = 0; ii < %(new_ndim)s; ++ii)
            {
                // -- We do not want an explicit cast here. the shp can be any
                // -- int* dtype. The compiler will explicitly upcast it, but
                // -- will err if this will downcast. This could happen if the
                // -- user pass an int64 dtype, but npy_intp endup being int32.
                new_dims[ii] = ((%(sdtype)s*)(
                        PyArray_BYTES(%(shp)s) +
                        ii * PyArray_STRIDES(%(shp)s)[0]))[0];
            }
            Py_XDECREF(%(z)s);
            %(z)s = (PyArrayObject *) PyArray_Newshape(%(x)s, &newshape,
                                                       NPY_CORDER);
            if (!%(z)s)
            {
                //The error message should have been set by PyArray_Newshape
                %(fail)s;
            }
            if (!PyArray_ISALIGNED(%(z)s)) {
                PyErr_Format(
                    PyExc_RuntimeError,
                    "PyArray_Newshape returned an object that isn't aligned!"
                    " NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
                    " this problem for some input shape/new shape"
                    " combinations. Use another NumPy version.");
                %(fail)s;
            }
            """ % locals()
        else:
            return Op.c_code(self, node, name, inputs, outputs, sub)
def reshape(x, newshape, ndim=None, name=None):
    """Reshape the tensor `x` to the shape `newshape`.

    When `ndim` is not given, it is inferred from `newshape`, which then
    must have a length that is known at graph-construction time.
    """
    if ndim is None:
        newshape = as_tensor_variable(newshape)
        if newshape.ndim != 1:
            raise TypeError(
                "New shape in reshape must be a vector or a list/tuple of"
                " scalar. Got %s after conversion to a vector." % newshape)
        try:
            ndim = get_vector_length(newshape)
        except ValueError:
            raise ValueError(
                "The length of the provided shape (%s) cannot "
                "be automatically determined, so Theano is not able "
                "to know what the number of dimensions of the reshaped "
                "variable will be. You can provide the 'ndim' keyword "
                "argument to 'reshape' to avoid this problem." % newshape)
    return Reshape(ndim, name)(x, newshape)
class Flatten(Op):
    """
    Flatten a tensor.

    Flattens a tensor to `outdim` dimensions by preserving the leading
    outdim - 1 shape components.

    .. note:: The interface Flatten(Op) is deprecated, you should use flatten.
    """
    view_map = {0: [0]}  # output may be a view of the input

    check_input = False
    __props__ = ("outdim",)

    def __init__(self, outdim=1):
        warnings.warn(
            "Flatten class is deprecated, "
            "please use flatten method instead.",
            DeprecationWarning,
            stacklevel=4)
        # outdim: rank of the flattened output.
        self.outdim = int(outdim)

    def __str__(self):
        return '%s{%s}' % (self.__class__.__name__, self.outdim)

    def make_node(self, x):
        t_x = as_tensor_variable(x)
        if self.outdim < 1 or (x.ndim and self.outdim > x.ndim):
            raise ValueError('invalid output ndimensions (%i) for tensor of '
                             'rank %i' % (self.outdim, t_x.ndim))

        # Infer the broadcastable pattern of the output. For every dimension
        # unaffected by the flatten, the broadcast flag should be unchanged.
        # For the dimension resulting from the collapse of other dimensions,
        # it should be broadcastable iff all the collapsed dimensions were
        # broadcastable.
        bcast_kept_dims = x.broadcastable[:self.outdim - 1]
        bcast_new_dim = python_all(x.broadcastable[self.outdim - 1:])
        broadcastable = bcast_kept_dims + (bcast_new_dim,)

        return gof.Apply(self, [t_x], [tensor(x.type.dtype,
                                              broadcastable)])

    def perform(self, node, inp, out_):
        # Numeric evaluation via ndarray.reshape (may return a view, which
        # is consistent with view_map above).
        x, = inp
        out, = out_
        outdim = self.outdim
        if outdim == 1:
            try:
                out[0] = x.reshape(x.size)
            except AttributeError:
                # x may not expose `.size` — fall back to prod(shape).
                out[0] = x.reshape((numpy.prod(x.shape),))
        elif outdim == len(x.shape):
            out[0] = x
        else:
            # Collapse all trailing dims (from outdim-1 on) into one.
            newshape = (x.shape[:outdim - 1] +
                        (numpy.prod(x.shape[outdim - 1:]),))
            out[0] = x.reshape(newshape)

    def infer_shape(self, node, in_shapes):
        in_shp, = in_shapes
        part1 = in_shp[:self.outdim - 1]
        part2 = in_shp[self.outdim - 1:]

        if len(part2) > 1:
            part2 = (prod(part2, dtype='int64'),)
        elif len(part2) == 1:
            # We do not want to force an upcast of part2 if its length is 1
            pass
        else:
            if len(in_shp) == 0 and self.outdim == 1:
                part2 = (1,)
            else:
                raise ValueError('invalid output ndimensions (%i) for tensor '
                                 'of rank %i' % (self.outdim, len(in_shp)))

        out_shape = (part1 + part2)
        return [out_shape]

    def grad(self, inp, grads):
        x, = inp
        g_out, = grads
        # Gradient is the output gradient reshaped back to x's shape.
        return [reshape(g_out, shape(x), x.ndim)]

    def R_op(self, inputs, eval_points):
        if None in eval_points:
            return [None]
        # Flatten is linear, so applying it to the eval points suffices.
        return self.make_node(*eval_points).outputs

    def c_code_cache_version(self):
        return (1, 1)

    def c_code(self, node, name, inputs, outputs, sub):
        x, = inputs
        out, = outputs
        outdim = self.outdim
        fail = sub['fail']
        return """
        if (%(outdim)s == PyArray_NDIM(%(x)s))
        {
            // Already the right rank: return the input itself (a view).
            Py_XDECREF(%(out)s);
            Py_XINCREF(%(x)s);
            %(out)s = %(x)s;
        }
        else
        {
            Py_XDECREF(%(out)s);

            if (%(outdim)s == 1)
            {
                npy_intp size = PyArray_SIZE(%(x)s);
                PyArray_Dims newshape;
                newshape.ptr = &size;
                newshape.len = 1;
                %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
                                                           &newshape,
                                                           NPY_CORDER);
            }
            else
            {
                npy_intp *oldshape = PyArray_DIMS(%(x)s);
                npy_intp newshape_dims[%(outdim)s];

                int i;
                for (i = 0; i < %(outdim)s - 1; ++i)
                    newshape_dims[i] = oldshape[i];

                newshape_dims[i] = 1;

                for (int j = %(outdim)s - 1; j < PyArray_NDIM(%(x)s); ++j)
                    newshape_dims[i] *= oldshape[j];

                PyArray_Dims newshape;
                newshape.ptr = newshape_dims;
                newshape.len = %(outdim)s;
                %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
                                                           &newshape,
                                                           NPY_CORDER);
            }
        }
        if (!%(out)s)
        {
            //The error message should have been set by
            // PyArray_Newshape
            %(fail)s;
        }
        if (!PyArray_ISALIGNED(%(out)s)) {
            PyErr_Format(
                PyExc_RuntimeError,
                "PyArray_Newshape returned an object that isn't"
                " aligned! NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
                " this problem for some input shape/new shape"
                " combinations. Use another NumPy version.");
            %(fail)s;
        }
        """ % locals()
def is_flat(var, outdim=1):
    """
    Return whether `var` has exactly `outdim` dimensions.

    This method is usually called after the flatten method on a
    variable, where the first outdim-1 dimension size(s) of the variable
    is kept intact, and the last dimension size of the variable is made
    equal to the multiplication of its remaining dimension size(s), such
    that the variable would end up with as many dimensions as outdim.

    Parameters
    ----------
    var : theano.tensor.var.TensorVariable
        The theano var on which the dimensionality is checked.
    outdim : int
        The expected dimensionality of var.

    Returns
    -------
    bool
        True iff var's dimensionality equals outdim.
    """
    return outdim == var.ndim
def flatten(x, outdim=1):
    """
    Reshape the variable x by keeping
    the first outdim-1 dimension size(s) of x the same,
    and making the last dimension size of x equal to
    the multiplication of its remaining dimension size(s).

    Parameters
    ----------
    x : theano.tensor.var.TensorVariable
        The variable that should be reshaped.
    outdim : int
        The number of dimensions of the returned variable.

    Returns
    -------
    theano.tensor.var.TensorVariable
        The flattened variable with dimensionality of outdim.
    """
    # outdim == 1 is always legal (even for scalars); otherwise it must be
    # positive and no larger than x.ndim.
    if outdim < 1 or (outdim > 1 and outdim > x.ndim):
        raise ValueError('outdim %s out of bound [1, %d)'
                         % (outdim, x.ndim + 1))
    if outdim == 1:
        target_shape = (-1,)
    else:
        target_shape = tuple(x.shape[:outdim - 1]) + (-1,)
    reshaped = x.reshape(target_shape)
    # Broadcast pattern: leading dims keep their flags; the collapsed dim is
    # broadcastable only if every collapsed input dim was broadcastable.
    bcast = x.broadcastable[:outdim - 1] + (
        python_all(x.broadcastable[outdim - 1:]),)
    flagged_axes = [i for i in range(outdim) if bcast[i]]
    return theano.tensor.addbroadcast(reshaped, *flagged_axes)
# class TileGrad(Op):
# """
# Calculates the gradient of the Tile Op.
# """
# # this is so weird, I can't think of how to make this a general thing.
# def make_node(self, x, reps, g_out):
# return gof.Apply(self, [x, reps, g_out], [x.type()])
#
# def perform(self, node, inp, out):
# x, reps, g_out = inp
# gx, = out
# xsh = x.shape
# if len(reps) == 2 and reps[1] == 1 and len(x.shape) == 1:
# gx[0] = numpy.sum(g_out, axis=0)
# else:
# raise NotImplementedError('x.shape, reps combination not '
# 'supported', (x.shape, reps))
#
# tilegrad = TileGrad()
class Tile(Op):
    """
    Construct an array by repeating the input x according to reps pattern.

    .. note:: Deprecated
              Use tile() instead.

    Tiles its input according to reps. The length of reps is the number of
    dimension of x and contains the number of times to tile x in each
    dimension.

    See Also
    --------
    numpy.tile : http://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html
    """
    __props__ = ("ndim",)

    def __init__(self, ndim):
        # Number of dimensions of the output; fixed per Op instance.
        self.ndim = ndim

    def __str__(self):
        return self.__class__.__name__ + "{ndim=%d}" % self.ndim

    def make_node(self, x, reps):
        warnings.warn((
            "Tile op is deprecated, use tile function instead."), stacklevel=3)
        x = as_tensor_variable(x)
        reps = as_tensor_variable(reps)
        # Output has self.ndim non-broadcastable dimensions and x's dtype.
        out_type = tensor(x.type.dtype, [False] * self.ndim)
        return gof.Apply(self, [x, reps], [out_type])

    def perform(self, node, inp, out_):
        x, reps = inp
        out, = out_
        tiled = numpy.tile(x, reps)
        if tiled.ndim != self.ndim:
            raise ValueError(
                'Tile.perform produced incorrect number of dimensions')
        if (numpy.asarray(reps) == 1).all():
            # When every rep is 1, some NumPy versions return a view of x.
            # This op is not declared inplace, so copy in that case.
            if numpy.may_share_memory(tiled, x):
                tiled = tiled.copy()
        out[0] = tiled

    def infer_shape(self, node, in_shapes):
        # x.shape and reps are assumed to have equal length here; the tile()
        # wrapper below enforces that before constructing this Op.
        x, reps = node.inputs
        shp = in_shapes[0]
        tiled_shp = shp * reps
        return [[tiled_shp[i] for i in xrange(self.ndim)]]

    def grad(self, inp, grads):
        x, reps = inp
        g_out, = grads
        # A general gradient (see the commented-out TileGrad above) has
        # never been implemented.
        raise NotImplementedError()
def tile(x, reps, ndim=None):
    """
    Tile input array `x` according to `reps`.

    See the docstring of `numpy.tile` for details.

    'reps' can be constant integer (e.g. 3), constant vector(e.g. [2 3]),
    symbolic scalar (e.g. tensor.iscalar()), symbolic vector (e.g. tensor.ivector())
    or a list of symbolic scalar (e.g. [tensor.iscalar(), tensor.iscalar()]).

    ndim is the number of the dimensions of the output, if it is provided, ndim
    should be equal or larger than x.ndim and len(reps), otherwise, we will use
    max(x.ndim, len(reps)) as ndim. If reps is symbolic vector, the ndim has to
    be provided.
    """
    if ndim is not None and ndim < x.ndim:
        raise ValueError("ndim should be equal or larger than x.ndim")
    # if reps is tensor.scalar, integer or tensor.vector, we convert it to a list.
    if not isinstance(reps, (list, tuple)):
        reps_astensor = as_tensor_variable(reps)
        ndim_check = reps_astensor.ndim
        if reps_astensor.dtype not in theano.tensor.discrete_dtypes:
            raise ValueError("elements of reps must be integer dtype")
        # tensor.scalar/integer case
        if ndim_check == 0:
            reps = [reps]
        # tensor.vector case
        elif ndim_check == 1:
            if ndim is None:
                raise ValueError("if reps is tensor.vector, you should specify "
                                 "the ndim")
            else:
                offset = ndim - reps.shape[0]
                # assert that reps.shape[0] does not exceed ndim
                offset = theano.tensor.opt.assert_(offset, ge(offset, 0))
                # if reps.ndim is less than x.ndim, we pad the reps with
                # "1" so that reps will have the same ndim as x.
                reps_ = [switch(i < offset, 1, reps[i - offset]) for i in range(ndim)]
                reps = reps_
        # other raise error
        else:
            raise ValueError("the dimension of reps should not exceed 1")
    else:
        if ndim is not None and len(reps) > ndim:
            raise ValueError("len(reps) should be equal or less than ndim")
        if not numpy.all([isinstance(r, integer_types) or
                          (isinstance(r, TensorVariable) and
                          r.dtype in theano.tensor.discrete_dtypes) for r in reps]):
            raise ValueError("elements of reps must be scalars of integer dtype")
        # if reps.ndim is less than x.ndim, we pad the reps with
        # "1" so that reps will have the same ndim as x.
        reps = list(reps)
    if ndim is None:
        # builtins.max: the module-level `max` name may be shadowed elsewhere.
        ndim = builtins.max(len(reps), x.ndim)
    if len(reps) < ndim:
        reps = [1] * (ndim - len(reps)) + reps
    # Left-pad x's shape with 1s so it has `ndim` entries, matching reps.
    shape = [1] * (ndim - x.ndim) + [x.shape[i] for i in xrange(x.ndim)]
    # Tiling trick: allocate x broadcast to shape (reps + shape), i.e. with
    # one extra "repeat" axis in front of each original axis, ...
    alloc_shape = reps + shape
    y = alloc(x, *alloc_shape)
    # ... then interleave the repeat axes with the data axes
    # (r0, r1, ..., d0, d1, ...) -> (r0, d0, r1, d1, ...) ...
    shuffle_ind = numpy.arange(ndim * 2).reshape(2, ndim)
    shuffle_ind = shuffle_ind.transpose().flatten()
    y = y.dimshuffle(*shuffle_ind)
    # ... and collapse each (repeat, data) pair into a single dimension of
    # size reps[i] * shape[i].
    new_shapes = [sh * reps[i] for i, sh in enumerate(shape)]
    y = y.reshape(new_shapes)
    return y
class ARange(Op):
    """Create an array containing evenly spaced values within a given interval.

    Parameters and behaviour are the same as numpy.arange().
    """
    __props__ = ("dtype",)

    def __init__(self, dtype):
        # dtype of the output vector; fixed per Op instance.
        self.dtype = dtype

    def make_node(self, start, stop, step):
        # All three inputs must be scalars; output is a 1-d tensor.
        start, stop, step = map(as_tensor_variable, (start, stop, step))
        assert start.ndim == 0
        assert stop.ndim == 0
        assert step.ndim == 0
        inputs = [start, stop, step]
        outputs = [tensor(self.dtype, (False,))]
        return Apply(self, inputs, outputs)

    # The intermediate int64/float64 casts below would otherwise trigger
    # float64 warnings; silence them while building the shape graph.
    @theano.configparser.change_flags(warn_float64='ignore')
    def infer_shape(self, node, i_shapes):
        # Note start, stop and step can be float numbers.
        start, stop, step = node.inputs

        def is_constant_value(var, value):
            # True iff `var` is a compile-time constant equal to `value`.
            try:
                v = get_scalar_constant_value(var)
                return numpy.all(v == value)
            except NotScalarConstantError:
                pass
            return False

        def upcast(var):
            if ('int' in var.dtype and
                    # We do not want to cast uint64 to int64 as this can
                    # loose information. If we upcast uint64 with int64,
                    # this give float64. This is safer then checking for
                    # uint64 in case we support [u]int128 or other in the
                    # future.
                    scal.upcast(var.dtype, 'int64') == 'int64'):
                return cast(var, 'int64')
            return var

        if is_constant_value(step, 1):
            if is_constant_value(start, 0):
                # arange(0, stop, 1) has exactly `stop` elements.
                return [(cast(stop, 'int64'),)]
            else:
                stop = upcast(stop)
                start = upcast(start)
                return [(maximum(cast(stop - start, 'int64'), 0),)]
        else:
            # General case: ceil((stop - start) / step), clipped at 0.
            stop = upcast(stop)
            start = upcast(start)
            return [(maximum(cast(ceil(cast((stop - start), 'float64') / step),
                                  'int64'), 0),)]

    def perform(self, node, inp, out_):
        start, stop, step = inp
        out, = out_
        # .item() extracts the Python scalar from each 0-d array.
        start = start.item()
        stop = stop.item()
        step = step.item()
        out[0] = numpy.arange(start, stop, step, dtype=self.dtype)

    def connection_pattern(self, node):
        # Output is connected to start and step, disconnected from stop
        # (stop only influences the output shape).
        return [[True], [False], [True]]

    def grad(self, inputs, grads):
        start, stop, step = inputs
        gz, = grads
        # start and step affect the output values
        # but the outputs are integers so there's
        # no gradient through them
        # stop does not affect the output values,
        # just the output shape, so it is disconnected
        return [start.zeros_like(),
                DisconnectedType()(),
                step.zeros_like()]

    def R_op(self, inputs, eval_points):
        return [None]
# Cache of ARange Op instances, keyed by output dtype, so that graphs using
# the same dtype share a single Op (helps merge optimizations).
_arange = {}
def arange(start, stop=None, step=1, dtype=None):
    """Return a symbolic vector of evenly spaced values, like numpy.arange."""
    # If only one argument is provided, it is in fact the "stop" argument,
    # and start is 0.
    if stop is None:
        start, stop = 0, start
    start, stop, step = map(as_tensor_variable, (start, stop, step))
    # If dtype is not provided, infer it from the other arguments
    if dtype is None:
        dtype = scal.upcast(start.type.dtype, stop.type.dtype, step.type.dtype)
        # don't try to be stingy and byte-optimize, this leads to
        # overflow problems.
        if dtype.startswith('int'):
            dtype = 'int64'
        if dtype.startswith('uint'):
            dtype = 'uint64'
        if config.cast_policy in ('numpy', 'numpy+floatX'):
            # We enforce numpy semantics, except in the special case where
            # `config.cast_policy` is 'numpy+floatX' and we want to use float32
            # rather than float64.
            # As an example, if `start`, `stop` and `step` are all int32,
            # `numpy.arange` returns an int64 array (on 64-bit platforms),
            # while the upcast above returns int32.
            numpy_dtype = numpy.arange(
                start=numpy.array(0, dtype=start.dtype),
                stop=numpy.array(1, dtype=stop.dtype),
                step=numpy.array(1, dtype=step.dtype)).dtype
            if numpy_dtype != dtype:
                if (config.cast_policy == 'numpy+floatX' and
                    config.floatX == 'float32' and
                    numpy_dtype == 'float64' and
                    # No explicit float64 in the three arguments?
                    python_all(
                        dt != 'float64'
                        for dt in [s.dtype for s in (start, stop, step)])):
                    # We use float32 instead.
                    assert dtype != 'float64'
                    dtype = 'float32'
                else:
                    # We use the same dtype as numpy instead of the result of
                    # the upcast.
                    dtype = str(numpy_dtype)
    # Reuse (or create) the per-dtype ARange Op.
    if dtype not in _arange:
        _arange[dtype] = ARange(dtype)
    return _arange[dtype](start, stop, step)
class _nd_grid(object):
    """Create a dense n-dimensional 'meshgrid' with equally spaced points.

    Used to create the instance ``mgrid`` and ``ogrid`` which act similarly
    to their numpy equivalents.

    Parameters
    ----------
    sparse : boolean, optional, default=True
        Specifying False leads to the equivalent of numpy's mgrid functionality.
        Specifying True leads to the equivalent of ogrid.

    Examples
    --------
    >>> a = T.mgrid[0:5, 0:3]
    >>> a[0].eval()
    array([[0, 0, 0],
           [1, 1, 1],
           [2, 2, 2],
           [3, 3, 3],
           [4, 4, 4]], dtype=int8)
    >>> a[1].eval()
    array([[0, 1, 2],
           [0, 1, 2],
           [0, 1, 2],
           [0, 1, 2],
           [0, 1, 2]], dtype=int8)
    >>> b = T.ogrid[0:5, 0:3]
    >>> b[0].eval()
    array([[0],
           [1],
           [2],
           [3],
           [4]], dtype=int8)
    >>> b[1].eval()
    array([[0, 1, 2, 3]], dtype=int8)
    """

    def __init__(self, sparse=False):
        # sparse=False -> mgrid behaviour; sparse=True -> ogrid behaviour.
        self.sparse = sparse

    def __getitem__(self, *args):
        # args[0] is the tuple of slices, e.g. mgrid[0:5, 0:3] -> two slices.
        ndim = len(args[0])
        for sl in args[0]:
            # numpy supports complex steps (number-of-points semantics);
            # that is not implemented here.
            if isinstance(sl.step, python_complex):
                raise NotImplementedError("Not implemented for slices "
                                          "whose step is complex")
        # One symbolic range per slice; missing start/step default to 0/1.
        ranges = [arange(sl.start or 0,
                         sl.stop,
                         sl.step or 1) for sl in args[0]]
        # Reshape each range so it extends along its own axis only
        # (length along axis j, size 1 everywhere else).
        shapes = [tuple([1] * j + [r.shape[0]] + [1] * (ndim - 1 - j))
                  for j, r in enumerate(ranges)]
        ranges = [r.reshape(shape) for r, shape in zip(ranges, shapes)]
        if self.sparse:
            # ogrid: return the open (broadcastable) ranges as-is.
            grids = ranges
        else:
            # mgrid: densify by multiplying each range with ones along all
            # the other axes.
            grids = []
            ones = [ones_like(r) for r in ranges]
            for i in range(ndim):
                grid = 1
                for j in range(ndim):
                    if j == i:
                        grid = grid * ranges[j]
                    else:
                        grid = grid * ones[j]
                grids.append(grid)
        return grids
# Module-level grid builders mirroring numpy.mgrid / numpy.ogrid.
mgrid = _nd_grid()  # dense grids
ogrid = _nd_grid(sparse=True)  # open (broadcastable) grids
class PermuteRowElements(Op):
    """Permute the elements of each row (inner-most dim) of a tensor.

    A permutation will be applied to every row (vector) of the input tensor x.
    Depending on the dimensionality of x and the permutation tensor y,
    different cases are possible.
    If y.ndim = 1, y is a single permutation, that will be applied to every
    vector of x. For instance, if x is a matrix, the same permutation will be
    applied to each row of x.
    If x.ndim = y.ndim, each row of x corresponds to a row of y, containing
    a permutation that will be applied to that row. For instance, if x and y
    are two matrices, a different permutation will be applied to each row of x.
    If x.ndim > y.ndim, y will be broadcasted to fit x, then each row (vector)
    of x will be reordered according to the corresponding row of y. (This is
    a generalization of the first case).
    If x.ndim = 1, every permutation in y will be applied to x, and the output
    will contain all the results.
    If x.ndim < y.ndim, x will be broadcasted to fit y, and different
    permutations contained in y will be applied to each vector in x. (This is
    a generalization of the previous case).
    If the "inverse" argument is True, the Op will perform the inverse
    permutation instead.
    """
    __props__ = ()

    def make_node(self, x, y, inverse):
        x = as_tensor_variable(x)
        y = as_tensor_variable(y)
        if inverse:  # as_tensor_variable does not accept booleans
            inverse = as_tensor_variable(1)
        else:
            inverse = as_tensor_variable(0)
        # y should contain integers
        assert (y.type.dtype.startswith('int') or
                y.type.dtype.startswith('uint'))
        # Inverse should be an integer scalar
        assert (inverse.type.ndim == 0 and
                (inverse.type.dtype.startswith('int') or
                 inverse.type.dtype.startswith('uint')))
        # Match shapes of x and y by left-padding the shorter one with
        # broadcastable dimensions.
        x_dim = x.type.ndim
        y_dim = y.type.ndim
        if x_dim > y_dim:
            y = shape_padleft(y, n_ones=(x_dim - y_dim))
        elif x_dim < y_dim:
            x = shape_padleft(x, n_ones=(y_dim - x_dim))
        # Compute the broadcastable pattern of the output: a dimension is
        # broadcastable only if it is broadcastable in both x and y.
        out_broadcastable = [xb and yb for xb, yb in
                             izip(x.type.broadcastable, y.type.broadcastable)]
        out_type = tensor(dtype=x.type.dtype, broadcastable=out_broadcastable)
        inputlist = [x, y, inverse]
        outputlist = [out_type]
        return Apply(self, inputlist, outputlist)

    def _rec_perform(self, node, x, y, inverse, out, curdim):
        """Perform the permutation by doing a recursion over the input
        dimensions.

        For every dimension, starting with the leftmost, the right set of
        indices is determined (depending if broadcasting or not), then
        the function is recursively called on the appropriate subtensors.

        The terminal case is reached when the current tensors are vector,
        then the permutation contained in y is applied to x.

        Parameters
        ----------
        x : tensor
            The input tensor, on which the permutation is applied.
        y : tensor
            Tensor containing the permutations to apply.
        inverse
            Whether to apply permutations or their inverse.
        out : tensor
            Tensor storing the output result.
        curdim : int
            Counter of the current depth of recursion.
        """
        if len(x.shape) == 1:
            # Numpy advanced indexing works in this case
            if inverse:
                out[y] = x[:]
            else:
                out[:] = x[y]
            if (numpy.__version__ <= '1.6.1' and
                    out.size != numpy.uint32(out.size)):
                warnings.warn(
                    'Numpy versions 1.6.1 and below have a bug preventing '
                    'advanced indexing from correctly filling arrays that '
                    'are too big (>= 2^32 elements). It is possible that '
                    'out (%s), with shape %s, is not correctly filled.'
                    % (out, out.shape))
        else:
            xs0 = x.shape[0]
            ys0 = y.shape[0]
            if xs0 == ys0:
                for i in xrange(xs0):
                    self._rec_perform(node, x[i], y[i], inverse, out[i],
                                      curdim + 1)
            elif ys0 == 1 and node.inputs[1].type.broadcastable[curdim]:
                # Broadcast y
                for i in xrange(xs0):
                    self._rec_perform(node, x[i], y[0], inverse, out[i],
                                      curdim + 1)
            elif xs0 == 1 and node.inputs[0].type.broadcastable[curdim]:
                # Broadcast x
                for i in xrange(ys0):
                    self._rec_perform(node, x[0], y[i], inverse, out[i],
                                      curdim + 1)
            else:
                raise ValueError('Dimension mismatch: %s, %s' % (xs0, ys0))

    def perform(self, node, inp, out):
        x, y, inverse = inp
        outs, = out
        x_s = x.shape
        y_s = y.shape
        assert len(x_s) == len(y_s)
        # Make sure the output is big enough
        out_s = []
        for xdim, ydim in izip(x_s, y_s):
            if xdim == ydim:
                outdim = xdim
            elif xdim == 1:
                outdim = ydim
            elif ydim == 1:
                outdim = xdim
            else:
                raise ValueError('Dimension mismatch: %s, %s' % (xdim, ydim))
            out_s.append(outdim)
        # BUG FIX: ndarray.shape is a tuple, so comparing it against the
        # list `out_s` was always unequal, defeating output reuse.
        # Compare against tuple(out_s) so a correctly-shaped preallocated
        # output is kept.
        if outs[0] is None or outs[0].shape != tuple(out_s):
            outs[0] = numpy.empty(out_s, dtype=x.dtype)
        self._rec_perform(node, x, y, inverse, outs[0], curdim=0)

    def infer_shape(self, node, in_shapes):
        # Each output dimension is the broadcasted (max) size of the
        # corresponding x and y dimensions.
        shp_x = in_shapes[0]
        shp_y = in_shapes[1]
        assert len(shp_x) == len(shp_y)
        out_shape = []
        for i in xrange(len(shp_x)):
            out_shape.append(maximum(shp_x[i], shp_y[i]))
        return [out_shape]

    def grad(self, inp, grads):
        x, y, inverse = inp
        gz, = grads
        # First, compute the gradient wrt the broadcasted x.
        # If 'inverse' is False (0), apply the inverse of y on gz.
        # Else, apply y on gz.
        gx = permute_row_elements(gz, y, eq(inverse, 0))
        # If x has been broadcasted along some axes, we need to sum
        # the gradient over these axes, but keep the dimension (as
        # broadcastable)
        broadcasted_dims = [dim for dim in xrange(gz.type.ndim)
                            if x.type.broadcastable[dim] and
                            not gz.type.broadcastable[dim]]
        gx = Sum(axis=broadcasted_dims)(gx)
        # Sum(...) removed the dimensions in broadcasted_dims,
        # so we need to put them back.
        newdims = []
        i = 0
        for dim in xrange(gz.type.ndim):
            if dim in broadcasted_dims:
                newdims.append('x')
            else:
                newdims.append(i)
                i += 1
        gx = DimShuffle(gx.type.broadcastable, newdims)(gx)
        assert gx.type.broadcastable == x.type.broadcastable
        # if x is an integer type, then so is the output.
        # this means f(x+eps) = f(x) so the gradient with respect
        # to x is zero
        if x.type.dtype.find('int') != -1:
            gx = x.zeros_like()
        # The elements of y and of inverse both affect the output,
        # so they are connected to the output,
        # and the transformation isn't defined if their values
        # are non-integer, so the gradient with respect to them is
        # undefined
        # BUG FIX: `inverse` is input index 2 (inputlist = [x, y, inverse]),
        # so report index 2, not 1, for its undefined gradient.
        return [gx, grad_undefined(self, 1, y),
                grad_undefined(self, 2, inverse)]
# Shared singleton: PermuteRowElements has no props, so one instance suffices.
_permute_row_elements = PermuteRowElements()
def permute_row_elements(x, y, inverse=0):
    """Permute the elements of each row (inner-most dim) of x according to y.

    See the PermuteRowElements docstring for the broadcasting rules between
    x and y, and the meaning of `inverse`.
    """
    return _permute_row_elements(x, y, inverse)
def inverse_permutation(perm):
    """Computes the inverse of permutations.

    Each row of input should contain a permutation of the first integers.
    """
    # Permuting arange(n) by perm with inverse=True yields, per row, the
    # permutation that undoes that row of perm.
    return permute_row_elements(
        arange(perm.shape[-1], dtype=perm.dtype),
        perm,
        inverse=True)
#########################
# Linalg : Dot
#########################
#
# For BLAS-related ops see blas.py
#
# TODO: Dotinv should go here, Eigs, Svd, etc.
class Dot(Op):
    """
    Computes the dot product of two variables. For two matrices, this is
    equivalent to matrix multiplication. For two vectors, this is the inner
    product.

    Notes
    -----
    Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops
    (see tensor.blas).
    Vector-vector products are sometimes optimized to Ger or CGer (see
    tensor.blas).
    Matrix-vector products are sometimes optimized to Gemv, CGemv (see
    tensor.blas).
    """
    __props__ = ()

    # the rationale for Dot22 is related to getting GEMM Ops into the
    # graph. See Dot22 in tensor.blas for details.

    def make_node(self, *inputs):
        # Only 1-d or 2-d inputs are accepted; higher-rank products go
        # through the `dot` wrapper / tensordot instead.
        inputs = list(map(as_tensor_variable, inputs))
        if len(inputs) != 2:
            raise TypeError(
                'theano.tensor.Dot: 2 arguments required, %d given ' %
                len(inputs))
        if inputs[0].ndim not in (1, 2):
            raise TypeError(
                'theano.tensor.Dot: input 0 (0-indexed) must have ndim of '
                '1 or 2, %d given. Consider calling theano.tensor.dot '
                'instead.' % inputs[0].ndim)
        if inputs[1].ndim not in (1, 2):
            raise TypeError(
                'theano.tensor.Dot: input 1 (0-indexed) must have ndim of '
                '1 or 2, %d given. Consider calling theano.tensor.dot '
                'instead.' % inputs[1].ndim)
        i_broadcastables = [input.type.broadcastable for input in inputs]
        bx, by = i_broadcastables
        # Output broadcastable pattern: leading dims of x + trailing dim of
        # y (matrix y), or just leading dims of x (vector y).
        if len(by) == 2:  # y is a matrix
            bz = bx[:-1] + by[-1:]
        elif len(by) == 1:  # y is vector
            bz = bx[:-1]
        i_dtypes = [input.type.dtype for input in inputs]
        outputs = [tensor(scal.upcast(*i_dtypes), bz)]
        return Apply(self, inputs, outputs)

    def perform(self, node, inp, out):
        x, y = inp
        z, = out
        # the asarray is here because dot between two vectors
        # gives a numpy float object but we need to return a 0d
        # ndarray
        z[0] = numpy.asarray(numpy.dot(x, y))

    def grad(self, inp, grads):
        x, y = inp
        gz, = grads
        xdim, ydim, gdim = x.type.ndim, y.type.ndim, gz.type.ndim
        # grad is scalar, so x is vector and y is vector
        if gdim == 0:
            xgrad = gz * y
            ygrad = gz * x
        # x is vector, y is matrix, grad is vector
        elif xdim == 1 and ydim == 2:
            xgrad = dot(gz, y.T)
            ygrad = outer(x.T, gz)
        # x is matrix, y is vector, grad is vector
        elif xdim == 2 and ydim == 1:
            xgrad = outer(gz, y.T)
            ygrad = dot(x.T, gz)
        # x is matrix, y is matrix, grad is matrix
        elif xdim == ydim == 2:
            xgrad = dot(gz, y.T)
            ygrad = dot(x.T, gz)
        # If x or y contain broadcastable dimensions but only one of
        # them know that a matching dimensions is broadcastable, the
        # above code don't always return the right broadcast pattern.
        # This cause problem down the road. See gh-1461.
        if xgrad.broadcastable != x.broadcastable:
            xgrad = patternbroadcast(xgrad, x.broadcastable)
        if ygrad.broadcastable != y.broadcastable:
            ygrad = patternbroadcast(ygrad, y.broadcastable)
        rval = xgrad, ygrad
        for elem in rval:
            assert elem.dtype.find('float') != -1
        return rval

    def R_op(self, inputs, eval_points):
        # R_op for a \dot b evaluted at c for a and d for b is
        # simply c \dot b + a \dot d
        assert len(inputs) == 2
        assert len(eval_points) == 2
        if eval_points[0] is None and eval_points[1] is None:
            return [None]
        # When test values are enabled, validate that eval points and
        # inputs have matching shapes before building the graph.
        debugger_available = config.compute_test_value != 'off'
        if debugger_available:
            try:
                iv0 = gof.op.get_test_value(inputs[0])
            except AttributeError:
                gof.op.missing_test_message(
                    'first input passed to Dot.R_op has no test value')
                debugger_available = False
            try:
                iv1 = gof.op.get_test_value(inputs[1])
            except AttributeError:
                gof.op.missing_test_message(
                    'second input passed to Dot.R_op has no test value')
                debugger_available = False
            if eval_points[0]:
                try:
                    ev0 = gof.op.get_test_value(eval_points[0])
                except AttributeError:
                    gof.op.missing_test_message(
                        'first eval point passed to Dot.R_op '
                        'has no test value')
                    debugger_available = False
            if eval_points[1]:
                try:
                    ev1 = gof.op.get_test_value(eval_points[1])
                except AttributeError:
                    gof.op.missing_test_message(
                        'second eval point passed to Dot.R_op '
                        'has no test value')
                    debugger_available = False
        if debugger_available:
            input_values = [iv0, iv1]
            eval_point_values = [ev0, ev1]
            for i in xrange(2):
                if eval_point_values[i] is not None and \
                   input_values[i].shape != eval_point_values[i].shape:
                    raise ValueError(
                        'input ' + str(i) + ' and eval_point ' + str(i) +
                        ' to Dot.R_op should have the same shape, but '
                        'their shapes are %s and %s, respectively' % (
                            str(input_values[i].shape),
                            str(eval_point_values[i].shape)))
        if eval_points[0]:
            t1 = self(eval_points[0], inputs[1])
        if eval_points[1]:
            t2 = self(inputs[0], eval_points[1])
        if eval_points[0] and eval_points[1]:
            return [t1 + t2]
        elif eval_points[0]:
            return [t1]
        else:
            return [t2]

    def infer_shape(self, node, shapes):
        xshp, yshp = shapes
        x, y = node.inputs
        # vector / vector
        if x.ndim == 1 and y.ndim == 1:
            return [()]
        # matrix / vector
        if x.ndim == 2 and y.ndim == 1:
            return [xshp[:-1]]
        # vector / matrix
        if x.ndim == 1 and y.ndim == 2:
            return [yshp[-1:]]
        # matrix / matrix
        if x.ndim == 2 and y.ndim == 2:
            return [xshp[:-1] + yshp[-1:]]
        raise NotImplementedError()

    def __str__(self):
        return "dot"
# Shared Dot instance used by the `dot` wrapper below.
_dot = Dot()
# Pretty-print Dot applications as an infix middle-dot operator.
pprint.assign(_dot, printing.OperatorPrinter(printing.special['middle_dot'],
                                             -1, 'left'))
def dot(a, b):
    """
    Computes the dot product of two variables.

    For two matrices, this is equivalent to matrix multiplication.
    For two vectors, this is the inner product.
    When one variable is a scalar, this is like elementwise multiplication.
    For N dimensions, this is a sum product over the last axis
    of the first array and the second-to-last axis of the second array:

        dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])

    This function does one of three things, in the following sequence:

    1. If either a or b is scalar, it returns the elementwise product
       without calling the Theano Dot op.
    2. If either a or b has more than 2 dimensions, it calls Theano's
       tensordot function with appropriate axes. The tensordot function
       expresses high-dimensional dot products in terms of 2D matrix
       multiplications, so it may be possible to further optimize for
       performance.
    3. If both a and b have either 1 or 2 dimensions, it calls Theano's
       Dot op on a and b.

    Notes
    -----
    Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops
    (see tensor.blas).
    Vector-vector products are sometimes optimized to Ger or CGer (see
    tensor.blas).
    Matrix-vector products are sometimes optimized to Gemv, CGemv (see
    tensor.blas).
    """
    a = as_tensor_variable(a)
    b = as_tensor_variable(b)
    # Scalar operand: plain elementwise product, no Dot op needed.
    if a.ndim == 0 or b.ndim == 0:
        return a * b
    # High-rank operand: reduce to 2D products via tensordot, contracting
    # a's last axis with b's second-to-last axis (or b's only axis).
    if a.ndim > 2 or b.ndim > 2:
        return tensordot(a, b, [[a.ndim - 1], [numpy.maximum(0, b.ndim - 2)]])
    # Both operands are 1-d or 2-d: the Dot op handles it directly.
    return _dot(a, b)
#########################
# Linalg : TensorDot
#########################
def _tensordot_as_dot(a, b, axes, dot, batched):
    """
    Reduces a tensor dot product to a matrix or vector dot product. Based
    on code from Tijmen Tieleman's gnumpy
    (http://www.cs.toronto.edu/~tijmen/gnumpy.html).

    Please see the documentation of tensordot for the meaning of the a, b
    and axes arguments.

    :param dot: a function that accepts two symbolic variables and computes
                the appropriate dot product (e.g. dot, batched_dot)
    :type dot: function

    :param batched: whether to treat the first axis of a and b as a batch
                    axis.  If so, this axis will be preserved in the output,
                    allowing this function to be used also for batched
                    tensor dot products.
    :type batched: boolean

    :returns: a tensor with shape equal to the concatenation of a's shape
              (less any dimensions that were summed over) and b's shape
              (less the first dimension and any dimensions that were summed
              over).
    :rtype: symbolic tensor
    """
    a, b = as_tensor_variable(a), as_tensor_variable(b)
    if not numpy.isscalar(axes) and len(axes) != 2:
        raise ValueError('Axes should be an integer or a '
                         'list/tuple of len 2 (%s was provided)'
                         % str(axes))
    # if 'axes' is a number of axes to multiply and sum over (trailing axes
    # of a, leading axes of b), we can just reshape and use dot.
    elif numpy.isscalar(axes):
        axes = int(axes)
        for operand_name, operand in (("a", a), ("b", b)):
            if axes > operand.ndim:
                raise ValueError(
                    'axes can not be larger than the dimension of %s '
                    '(%s.ndim=%i, axes=%i)'
                    % (operand_name, operand_name, operand.ndim, axes))
            if batched and axes == operand.ndim:
                raise ValueError(
                    'axes to sum over must not include the batch axis '
                    'of %s (%s.ndim=%i, axes=%i)'
                    % (operand_name, operand_name, operand.ndim, axes))
        batch_axes = 1 if batched else 0
        # Axes of a/b that survive into the output (not summed, not batch).
        a_outaxes = slice(0, a.ndim - axes)
        b_outaxes = slice(batch_axes + axes, b.ndim)
        outshape = concatenate([a.shape[a_outaxes], b.shape[b_outaxes]])
        outbcast = a.broadcastable[a_outaxes] + b.broadcastable[b_outaxes]
        outndim = len(outbcast)
        # Collapse a to (kept, summed) and b to (summed, kept) 2-d shapes.
        a_shape = [1] * 2
        b_shape = [1] * 2
        # compute total size of summed axes
        for i in xrange(0, axes):
            a_shape[1] *= a.shape[-(i + 1)]
            b_shape[0] *= b.shape[batch_axes + i]
        # compute total size of other axes
        for i in xrange(0, a.ndim - axes - batch_axes):
            a_shape[0] *= a.shape[batch_axes + i]
        for i in xrange(0, b.ndim - axes - batch_axes):
            b_shape[1] *= b.shape[-(i + 1)]
        if batched:
            a_shape.insert(0, a.shape[0])
            b_shape.insert(0, b.shape[0])
        a_reshaped = a.reshape(a_shape)
        b_reshaped = b.reshape(b_shape)
        out_reshaped = dot(a_reshaped, b_reshaped)
        out = out_reshaped.reshape(outshape, outndim)
        # Make sure the broadcastable pattern of the result is correct,
        # since some shape information can be lost in the reshapes.
        return patternbroadcast(out, outbcast)
    # if 'axes' is a list, transpose a and b such that the summed axes of a
    # are last and the summed axes of b are first.
    else:
        axes = [_pack(axes_) for axes_ in axes]
        if len(axes[0]) != len(axes[1]):
            raise ValueError('Axes elements must have the same length.')
        for i, (operand_name, operand) in enumerate((("a", a),
                                                     ("b", b))):
            if len(axes[i]) > operand.ndim:
                raise ValueError(
                    'axes[%i] should be array_like with length less than '
                    'the dimensions of %s (%s.ndim=%i, len(axes[0])=%i).' %
                    (i, operand_name, operand_name, operand.ndim,
                     len(axes[i])))
            if len(axes[i]) > 0 and numpy.max(axes[i]) >= operand.ndim:
                raise ValueError(
                    'axes[%i] contains dimensions greater than or equal '
                    'to %s.ndim (%s.ndim=%i, max(axes[0])=%i).' %
                    (i, operand_name, operand_name, operand.ndim,
                     numpy.max(numpy.array(axes[i]))))
            if batched and 0 in axes[i]:
                raise ValueError(
                    'axes to sum over must not contain the batch axis '
                    '(axes[%i]=%s)' %
                    (i, axes[i]))
        batch_axes = [0] if batched else []
        # Axes of each operand that are neither summed over nor the batch.
        other_axes = [[x for x in xrange(operand.ndim)
                       if x not in axes[i] and x not in batch_axes]
                      for i, operand in enumerate((a, b))]
        # Move summed axes of a to the end, summed axes of b to the front.
        a_shuffled = a.dimshuffle(batch_axes + other_axes[0] + axes[0])
        b_shuffled = b.dimshuffle(batch_axes + axes[1] + other_axes[1])
        # now that a and b are in the right order, recur with integer axes
        return _tensordot_as_dot(a_shuffled, b_shuffled, len(axes[0]),
                                 dot=dot, batched=batched)
def tensordot(a, b, axes=2):
    """
    Compute a generalized dot product over provided axes.

    Given two tensors a and b, tensordot computes a generalized dot product over
    the provided axes. Theano's implementation reduces all expressions to
    matrix or vector dot products and is based on code from Tijmen Tieleman's
    gnumpy (http://www.cs.toronto.edu/~tijmen/gnumpy.html).

    Parameters
    ----------
    a: symbolic tensor
        The first tensor variable.
    b: symbolic tensor
        The second tensor variable
    axes: int or array-like of length 2
        If an integer, the number of axes to sum over.
        If an array, it must have two array elements containing the axes
        to sum over in each tensor.

        Note that the default value of 2 is not guaranteed to work
        for all values of a and b, and an error will be raised if
        that is the case. The reason for keeping the default is to
        maintain the same signature as numpy's tensordot function
        (and np.tensordot raises analogous errors for non-compatible
        inputs).

        If an integer i, it is converted to an array containing
        the last i dimensions of the first tensor and the first
        i dimensions of the second tensor:
            axes = [list(range(a.ndim - i, a.ndim)), list(range(i))]

        If an array, its two elements must contain compatible axes
        of the two tensors. For example, [[1, 2], [2, 0]] means sum
        over the 2nd and 3rd axes of a and the 3rd and 1st axes of b.
        (Remember axes are zero-indexed!) The 2nd axis of a and the
        3rd axis of b must have the same shape; the same is true for
        the 3rd axis of a and the 1st axis of b.

    Returns
    -------
    symbolic tensor
        A tensor with shape equal to the concatenation of a's shape
        (less any dimensions that were summed over) and b's shape
        (less any dimensions that were summed over).

    Examples
    --------
    It may be helpful to consider an example to see what tensordot does.
    Theano's implementation is identical to NumPy's. Here a has shape (2, 3, 4)
    and b has shape (5, 6, 4, 3). The axes to sum over are [[1, 2], [3, 2]] --
    note that a.shape[1] == b.shape[3] and a.shape[2] == b.shape[2]; these axes
    are compatible. The resulting tensor will have shape (2, 5, 6) -- the
    dimensions that are not being summed:

    >>> a = np.random.random((2,3,4))
    >>> b = np.random.random((5,6,4,3))

    #tensordot
    >>> c = np.tensordot(a, b, [[1,2],[3,2]])

    #loop replicating tensordot
    >>> a0, a1, a2 = a.shape
    >>> b0, b1, _, _ = b.shape
    >>> cloop = np.zeros((a0,b0,b1))

    #loop over non-summed indices -- these exist
    #in the tensor product.
    >>> for i in range(a0):
    ...     for j in range(b0):
    ...         for k in range(b1):
    ...             #loop over summed indices -- these don't exist
    ...             #in the tensor product.
    ...             for l in range(a1):
    ...                 for m in range(a2):
    ...                     cloop[i,j,k] += a[i,l,m] * b[j,k,m,l]

    >>> np.allclose(c, cloop)
    True

    This specific implementation avoids a loop by transposing a and b such that
    the summed axes of a are last and the summed axes of b are first. The
    resulting arrays are reshaped to 2 dimensions (or left as vectors, if
    appropriate) and a matrix or vector dot product is taken. The result is
    reshaped back to the required output dimensions.

    In an extreme case, no axes may be specified. The resulting tensor
    will have shape equal to the concatenation of the shapes of a and b:

    >>> c = np.tensordot(a, b, 0)
    >>> print(a.shape)
    (2,3,4)
    >>> print(b.shape)
    (5,6,4,3)
    >>> print(c.shape)
    (2,3,4,5,6,4,3)

    See the documentation of numpy.tensordot for more examples.
    """
    return _tensordot_as_dot(a, b, axes, dot=dot, batched=False)
def outer(x, y):
    """Return vector-vector outer product.

    If an input isn't a vector, we flatten it first.
    """
    u = x if x.ndim == 1 else x.flatten()
    v = y if y.ndim == 1 else y.flatten()
    # Promote u to a column and v to a row, then take the matrix product.
    column = u.dimshuffle(0, 'x')
    row = v.dimshuffle('x', 0)
    return dot(column, row)
def any(x, axis=None, keepdims=False):
    """Reduce `x` along `axis` with elemwise.Any (logical or semantics)."""
    out = elemwise.Any(axis)(x)
    if keepdims:
        # Re-insert the reduced axes as broadcastable dimensions.
        out = makeKeepDims(x, out, axis)
    return out
def all(x, axis=None, keepdims=False):
    """Reduce `x` along `axis` with elemwise.All (logical and semantics)."""
    out = elemwise.All(axis)(x)
    if keepdims:
        # Re-insert the reduced axes as broadcastable dimensions.
        out = makeKeepDims(x, out, axis)
    return out
# Some NumPy version like 1.9.2 return a view for numpy.diagonal
# Probe once at import time; Diagonal uses this flag to declare a view_map
# when the installed NumPy returns a view.
x = numpy.zeros((4, 4))
numpy_diagonal_return_view = numpy.may_share_memory(numpy.diagonal(x), x)
del x
class Diagonal(Op):
    """Return specified diagonals.

    Parameters
    ----------
    x
        A tensor variable with x.ndim >= 2.

    Returns
    -------
    vector
        A vector representing the diagonal elements.
    """
    __props__ = ("offset", "axis1", "axis2")

    def __init__(self, offset=0, axis1=0, axis2=1):
        # When the installed NumPy's diagonal() returns a view, declare the
        # output as a view of the input so Theano tracks the aliasing.
        if numpy_diagonal_return_view:
            self.view_map = {0: [0]}
        self.offset = offset
        self.axis1 = axis1
        self.axis2 = axis2

    def make_node(self, x):
        x = as_tensor_variable(x)
        assert x.ndim >= 2
        # Extracting a diagonal removes one dimension.
        return Apply(self, [x], [tensor(dtype=x.dtype,
                                        broadcastable=[False] * (x.ndim - 1))])

    def perform(self, node, inputs, outputs):
        (x,) = inputs
        (z,) = outputs
        z[0] = x.diagonal(self.offset, self.axis1, self.axis2)

    def grad(self, inputs, gout):
        (x,) = inputs
        (gz,) = gout
        return [grad_not_implemented(self, 0, x)]

    def infer_shape(self, node, shapes):
        in_shape, = shapes
        dim1 = in_shape[self.axis1]
        dim2 = in_shape[self.axis2]
        # Output keeps every axis except axis1/axis2, plus the diagonal axis.
        out_shape = [d for i, d in enumerate(in_shape)
                     if i not in (self.axis1, self.axis2)]
        # The following logic is inspired by C code of PyArray_Diagonal().
        offset = self.offset
        if offset > 0:
            diag_size = clip(dim2 - offset, 0, dim1)
        elif offset < 0:
            diag_size = clip(dim1 + offset, 0, dim2)
        else:
            diag_size = minimum(dim1, dim2)
        out_shape.append(diag_size)
        return [tuple(out_shape)]
def diagonal(a, offset=0, axis1=0, axis2=1):
    """Return the diagonal of `a` selected by ``(offset, axis1, axis2)``."""
    if offset == 0 and axis1 == 0 and axis2 == 1:
        # Fast path: the main diagonal has a dedicated Op.
        return theano.tensor.nlinalg.extract_diag(a)
    return Diagonal(offset, axis1, axis2)(a)
class Diag(Op):
    """Op that builds a square matrix whose main diagonal is the input vector."""

    __props__ = ()

    def make_node(self, diag):
        v = as_tensor_variable(diag)
        if v.type.ndim != 1:
            raise TypeError('data argument must be a vector', v.type)
        return Apply(self, [v], [matrix(dtype=v.dtype)])

    def perform(self, node, inputs, outputs):
        (v,) = inputs
        (out,) = outputs
        out[0] = numpy.diag(v)

    def grad(self, inputs, gout):
        # The gradient of placing a vector on the diagonal is reading
        # the diagonal back out of the output gradient.
        gz = gout[0]
        return [diagonal(gz)]

    def infer_shape(self, nodes, shapes):
        n = shapes[0][0]
        return [(n, n)]
def diag(v, k=0):
    """Mirror ``numpy.diag``: vector -> diagonal matrix, matrix -> k-th diagonal."""
    ndim = v.ndim
    if ndim == 1:
        # Building a matrix: only the main diagonal is supported.
        assert k == 0, "diagonals other than main are not implemented"
        return Diag()(v)
    if ndim == 2:
        return diagonal(v, k)
    raise ValueError("Input must be 1- or 2-d.")
def stacklists(arg):
    """Recursively stack nested lists/tuples of tensors into one tensor.

    The nesting structure of `arg` becomes leading dimensions of the
    result: e.g. a 2x2 list of scalars becomes a 2x2 matrix, and a 2x2
    list of (4, 4) matrices becomes a (2, 2, 4, 4) tensor.  Non-sequence
    arguments are returned unchanged (recursion base case).
    """
    if not isinstance(arg, (tuple, list)):
        return arg
    return stack([stacklists(item) for item in arg])
def ptp(a, axis=None):
    """Range of values (maximum - minimum) along an axis.

    The name comes from the acronym for "peak to peak".

    Parameters
    ----------
    a
        Input tensor.
    axis
        Axis along which to find the peaks. By default, flatten the array.

    Returns
    -------
    array
        A new array holding the result.
    """
    tensor_a = as_tensor_variable(a)
    highest = max(tensor_a, axis)
    lowest = min(tensor_a, axis)
    return highest - lowest
def power(x, y):
    """Return `x` raised to the power `y`, i.e. ``x ** y`` (elementwise on tensors)."""
    base, exponent = x, y
    return base ** exponent
def swapaxes(y, axis1, axis2):
    """Return a view of tensor `y` with `axis1` and `axis2` interchanged."""
    y = as_tensor_variable(y)
    # Build the identity permutation, then transpose the two requested axes.
    perm = list(range(y.ndim))
    perm[axis1], perm[axis2] = perm[axis2], perm[axis1]
    return y.dimshuffle(perm)
def choose(a, choices, out=None, mode='raise'):
    """Construct an array from an index array and a set of arrays to choose from.

    Mirrors ``numpy.choose``: `a` is broadcast against each of the n arrays
    in `choices`, and position ``(j0, ..., jm)`` of the result takes the
    value of ``choices[a[j0, ..., jm]]`` at that same position.

    Parameters
    ----------
    a : int array
        Index array with values in [0, n-1] (any integers when `mode` is
        ``wrap`` or ``clip``).
    choices : sequence of arrays
        The arrays to choose from; all must be broadcastable with `a`.
        If an array is given, its outermost dimension defines the sequence.
    out : array, optional
        Unsupported; present only for NumPy signature compatibility and
        must be None.
    mode : {``raise`` (default), ``wrap``, ``clip``}, optional
        How out-of-range indices are treated: ``raise`` raises an
        exception, ``wrap`` takes the value modulo n, ``clip`` clamps to
        [0, n-1].

    Returns
    -------
    merged_array - array
        The merged result.

    Raises
    ------
    ValueError - shape mismatch
        If `a` and the choice arrays are not all broadcastable to one shape.
    """
    # This is done to keep the same function signature then NumPy.
    assert out is None
    return Choose(mode)(a, choices)
class Choose(Op):
    """Symbolic counterpart of ``numpy.choose`` (see the `choose` helper).

    `mode` controls how out-of-range indices are handled, exactly as in
    ``numpy.choose``.
    """

    __props__ = ('mode',)

    def __init__(self, mode):
        assert mode in ("raise", "wrap", "clip")
        self.mode = mode

    def infer_shape(self, node, shapes):
        if isinstance(node.inputs[1], TensorVariable):
            # We have padded node.inputs[0] to the right number of
            # dimensions for the output
            l = []
            for sh1, sh2, b1 in zip(shapes[0],
                                    shapes[1][1:],
                                    node.inputs[0].broadcastable):
                # Where the (padded) index is broadcastable, the output
                # size comes from the choice array; otherwise from the
                # index array.
                if b1:
                    l.append(sh2)
                else:
                    l.append(sh1)
            return [tuple(l)]
        else:
            import theano.typed_list
            assert isinstance(node.inputs[1],
                              theano.typed_list.TypedListVariable)
            raise ShapeError("Case not implemented")
            # NOTE(review): everything below this `raise` is unreachable
            # dead code, apparently a sketch for the typed-list case.
            shape = shapes[0]
            for i in xrange(len(shapes[0]) - 1):
                shape[i] = shapes[1][i]
            return [(shape)]

    def make_node(self, a, choices):
        # Import here as it isn't imported by default and we can't
        # import at the top as it would cause circular import.
        import theano.typed_list
        a = as_tensor_variable(a)
        if a.dtype not in theano.tensor.discrete_dtypes:
            raise TypeError(
                'choose first argument must have an [u]int* dtype. Got %s.'
                % a.dtype)
        # Choices may be a typed list of tensors or a single tensor whose
        # leading axis enumerates the choices.
        if isinstance(choices, (tuple, list,
                                theano.typed_list.TypedListVariable)):
            choice = theano.typed_list.make_list(choices)
            choice_ndim = choice.ttype.ndim
            choice_bcast = choice.ttype.broadcastable
        else:
            choice = as_tensor_variable(choices)
            # Leading axis indexes the choices, so it is excluded from the
            # per-choice ndim/broadcast pattern.
            choice_ndim = choice.ndim - 1
            choice_bcast = choice.broadcastable[1:]
        out_ndim = numpy.max([a.ndim, choice_ndim])
        # Make explicit all added broadcastable dimensions.
        a = shape_padleft(a, out_ndim - a.ndim)
        if len(choice_bcast) != out_ndim:
            if isinstance(choice.type, TensorType):
                # Pad the per-choice dims (after the leading choice axis)
                # with broadcastable dimensions up to out_ndim.
                choice = choice.dimshuffle(0,
                                           *(('x',) * (out_ndim - choice_ndim) +
                                             tuple(range(1, choice.ndim))))
                choice_ndim = choice.ndim - 1
                choice_bcast = choice.broadcastable[1:]
            else:
                raise NotImplementedError(
                    "We currently didn't implemented that case. "
                    "To make it work, explicitly add dimensions "
                    "of size one for dimensions that will be broadcasted")
        # A dim of the output is broadcastable only when both the index
        # and the choices are broadcastable there.
        bcast = [False] * out_ndim
        for idx, (b1, b2) in enumerate(
            zip(a.broadcastable,
                (True,) * (out_ndim - choice_ndim) + choice_bcast)):
            if b1 and b2:
                bcast[idx] = True
        o = TensorType(choice.dtype, bcast)
        return Apply(self, [a, choice], [o()])

    def perform(self, node, inputs, outputs):
        (z,) = outputs
        a = inputs[0]
        choice = inputs[1]
        # TODO reuse out?
        z[0] = numpy.choose(a, choice, mode=self.mode)
class AllocEmpty(gof.Op):
    """Implement Alloc on the cpu, but without initializing memory.

    Takes the desired shape as scalar integer inputs and returns an
    uninitialized ndarray of that shape with dtype ``self.dtype``.
    """

    __props__ = ("dtype",)

    # specify the type of the data
    def __init__(self, dtype):
        assert isinstance(dtype, str)
        self.dtype = dtype.lower()

    def validate_shape(self, shape):
        """Check shape arguments and build the (inputs, output) pair.

        Raises TypeError if any shape element is not an integer variable.
        """
        sh = [as_tensor_variable(s) for s in shape]
        bcast = []
        for s in sh:
            if s.type.dtype[:3] not in ('int', 'uin'):
                raise TypeError('Shape arguments must be integers', s)
            # if s is constant 1, then we're broadcastable in that dim
            try:
                const_shp = get_scalar_constant_value(s)
            except NotScalarConstantError:
                const_shp = None
            bcast.append(1 == const_shp)
        otype = TensorType(dtype=self.dtype, broadcastable=bcast)
        output = otype()
        return sh, output

    def make_node(self, *shape):
        shape, output = self.validate_shape(shape)
        output.tag.values_eq_approx = values_eq_approx_always_true
        # The output can contain nan/inf.  output.type is a new
        # instance, so we can do this only for that variable.
        output.type.filter_checks_isfinite = False
        # We can't reuse filter_checks_isfinite as by default it is
        # False and it is set to true only in DebugMode.
        # We can't set it in the type as other make_node can reuse the type.
        # We can't set it in the variable as it isn't copied when we copy
        # the variable. So we set it in the tag.
        output.tag.nan_guard_mode_check = False
        return Apply(self, shape, [output])

    def debug_perform(self, node, inputs, out_):
        self.perform(node, inputs, out_)
        # Fill with a recognizable garbage value so that reads of the
        # uninitialized buffer are easy to spot in DebugMode.
        out_[0][0].fill(-123456789)

    def perform(self, node, inputs, out_):
        out, = out_
        sh = tuple([int(i) for i in inputs])
        # Only reallocate when the cached output has the wrong shape.
        if out[0] is None or out[0].shape != sh:
            out[0] = numpy.empty(sh, dtype=self.dtype)

    def c_code(self, node, name, inputs, out_, sub):
        dtype = "NPY_" + self.dtype.upper()
        out, = out_
        fail = sub['fail']
        shps = inputs
        nd = len(shps)
        # NOTE: named `code` (was `str`) so the Python builtin is not
        # shadowed; the templates only reference nd/idx/sh/out/dtype/fail.
        code = "npy_intp dims[%(nd)s];\n" % locals()
        for idx, sh in enumerate(shps):
            code += "dims[%(idx)s] =" \
                    "((npy_intp)((dtype_%(sh)s*)" \
                    " PyArray_DATA(%(sh)s))[0]);\n" % locals()
        # Validate that the output storage exists
        code += "if(%(out)s==NULL\n" % locals()
        for idx, sh in enumerate(shps):
            code += "||PyArray_DIMS(%(out)s)[%(idx)s]!=dims[%(idx)s]" % locals()
        code += """){
            /* Reference received to invalid output variable.
               Decrease received reference's ref count and allocate new
               output variable */
            Py_XDECREF(%(out)s);
            %(out)s = (PyArrayObject*)PyArray_EMPTY(%(nd)s,
                                                    dims,
                                                    %(dtype)s,
                                                    0);
            if (!%(out)s)
            {
                PyErr_SetString(PyExc_MemoryError, "alloc failed");
                %(fail)s;
            }
        }
        """ % locals()
        return code

    def infer_shape(self, node, input_shapes):
        # The inputs themselves are the output's shape.
        return [node.inputs]

    def c_code_cache_version(self):
        return (3,)

    def do_constant_folding(self, node):
        # Folding would bake uninitialized memory into the graph.
        return False

    def connection_pattern(self, node):
        # The (uninitialized) output depends on no input value.
        return [[False] for i in node.inputs]

    def grad(self, inputs, grads):
        return [DisconnectedType()() for i in inputs]

    def R_op(self, inputs, eval_points):
        return [zeros(inputs, self.dtype)]
| {
"repo_name": "surgebiswas/poker",
"path": "PokerBots_2017/Johnny/theano/tensor/basic.py",
"copies": "3",
"size": "210812",
"license": "mit",
"hash": -2067400171024641000,
"line_mean": 32.3880266075,
"line_max": 121,
"alpha_frac": 0.5680653853,
"autogenerated": false,
"ratio": 3.853051377186409,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5921116762486408,
"avg_score": null,
"num_lines": null
} |
"""A `Type` and `Op` classes to work with numpy.ndarrays symbolically."""
from six.moves import builtins
import sys
import warnings
import numpy
from six.moves import xrange
import numbers
import theano
from theano.compat import izip
from theano.configparser import config
from theano import gof
from theano.gof import Apply, Constant, Op, Variable
from theano.tensor import elemwise
from theano.tensor.var import (AsTensorError, TensorVariable,
TensorConstant,
_tensor_py_operators)
from theano.tensor.type import TensorType, values_eq_approx_always_true
from theano.tensor.type_other import NoneConst
from theano import scalar as scal
from functools import partial
from six import integer_types
from theano import compile, printing
from theano.printing import pprint, min_informative_str
# For history
from theano.compile import Rebroadcast, Shape, shape
# We use these exceptions as well.
import theano.scalar.sharedvar
from theano.gradient import grad_undefined
from theano.gradient import grad_not_implemented
from theano.gradient import DisconnectedType
# set up the external interface
from theano.tensor.elemwise import Elemwise, DimShuffle, CAReduce, Sum
import logging
_logger = logging.getLogger("theano.tensor.basic")
__docformat__ = "restructuredtext en"
# This is needed as we will hide it later
# (this module later defines tensor-valued `complex`, `any` and `all`
# functions that shadow the Python builtins of the same name).
python_complex = complex
python_any = any
python_all = all

# Define common subsets of dtypes (as strings).
complex_dtypes = list(map(str, scal.complex_types))
continuous_dtypes = list(map(str, scal.continuous_types))
float_dtypes = list(map(str, scal.float_types))
discrete_dtypes = list(map(str, scal.discrete_types))
all_dtypes = list(map(str, scal.all_types))
int_dtypes = list(map(str, scal.int_types))
uint_dtypes = list(map(str, scal.uint_types))
class ShapeError(Exception):
    """Raised when the shape cannot be computed.

    Used, e.g., by `Choose.infer_shape` for unimplemented cases.
    """
    pass
def check_equal_numpy(x, y):
    """
    Return True iff x and y are equal.

    Checks the dtype and shape if x and y are numpy.ndarray instances;
    two ndarrays are considered equal when the dtypes and shapes match
    and every element pair differs by less than 1e-10.
    """
    if isinstance(x, numpy.ndarray) and isinstance(y, numpy.ndarray):
        # BUGFIX: was numpy.any(), which reported equality as soon as a
        # single element matched (and inequality for equal empty arrays).
        return (x.dtype == y.dtype and x.shape == y.shape and
                numpy.all(abs(x - y) < 1e-10))
    elif (isinstance(x, numpy.random.RandomState) and
          isinstance(y, numpy.random.RandomState)):
        # Compare the full generator state entry by entry.
        return python_all(numpy.all(a == b) for a, b in
                          izip(x.__getstate__(), y.__getstate__()))
    else:
        return x == y
# Make theano.compile use the NumPy-aware equality predicate above when
# comparing values.
compile.register_checker(check_equal_numpy)
# Populated by the `constructor` decorator below.
__oplist_constructor_list = []
"""List of functions to be listed as op constructors in the oplist
(`gen_oplist`, doc/oplist.txt)."""
def constructor(f):
    """Add `f` to :doc:`oplist`.

    Decorator: records `f` in the module-level constructor list so it
    appears as a constructor in the oplist (`gen_oplist`,
    doc/oplist.txt), then returns `f` unchanged.
    """
    __oplist_constructor_list.append(f)
    return f
def __oplist_tag(thing, tag):
    # Attach `tag` to `thing` by appending it to the object's
    # `__oplist_tags` list, creating the list when absent.
    existing_tags = getattr(thing, '__oplist_tags', [])
    existing_tags.append(tag)
    thing.__oplist_tags = existing_tags
# NOTE(review): deliberately disabled code (`if 0:`) kept for reference;
# see the author's comment below about enumerating types.
if 0:
    # this starts to feel like we're enumerating all the types
    # the one place where this is used we should also allow for sparse
    # variables
    # - JB 20100226
    def as_cuda_or_tensor_variable(x, name=None, ndim=None):
        """
        Do the same as_tensor_variable,
        but do not transfer the value on the gpu.
        """
        if hasattr(x, '_as_CudaNdarrayVariable'):
            # TODO: pass name and ndim arguments
            return x._as_CudaNdarrayVariable()
        return as_tensor_variable(x, name, ndim)
def as_tensor_variable(x, name=None, ndim=None):
    """Return `x`, transformed into a `TensorType`.

    This function is often used by `make_node` methods of `Op` subclasses
    to turn ndarrays, numbers, `Scalar` instances, `Apply` instances and
    `TensorType` instances into valid input list elements.

    Parameters
    ----------
    x : Apply instance, Variable instance, numpy.ndarray, or number
        This thing will be transformed into a `Variable` in a sensible way. An
        ndarray argument will not be copied, but a list of numbers will be
        copied to make an ndarray.
    name : str or None
        If a new `Variable` instance is created, it will be named with this
        string.
    ndim : None or integer
        Return a Variable with this many dimensions. Raise TypeError if it's
        not possible.

    Raises
    ------
    ValueError
        If an `Apply` with more than one output is fetched.
    AsTensorError
        If `x` cannot be converted to a TensorType Variable.

    """
    # Objects may provide their own conversion hook.
    if hasattr(x, '_as_TensorVariable'):
        return x._as_TensorVariable()  # TODO: pass name and ndim arguments

    if isinstance(x, gof.Apply):
        # use Apply's default output mechanism
        if (x.op.default_output is None) and (len(x.outputs) != 1):
            raise ValueError(
                "It is ambiguous which output of a multi-output Op has"
                " to be fetched.", x)
        x = x.default_output()
    if isinstance(x, Variable):
        # Scalars are promoted to 0-d tensors.
        if isinstance(x.type, scal.Scalar):
            x = tensor_from_scalar(x)
        if not isinstance(x.type, TensorType):
            raise AsTensorError(
                "Variable type field must be a TensorType.", x, x.type)
        if ndim is None:
            return x
        else:
            if (x.type.ndim > ndim):
                # strip off leading broadcastable dimensions
                first_non_broadcastable = [idx for idx in xrange(x.ndim)
                                           if not x.broadcastable[idx]][0]
                x = x.dimshuffle(list(range(x.ndim))[first_non_broadcastable:])
                if x.ndim > ndim:
                    raise ValueError(
                        'TensorType could not be cast to have %i dimensions'
                        % ndim, x.type
                    )
                return x
            elif (x.type.ndim < ndim):
                # Pad with broadcastable dimensions on the left.
                return shape_padleft(x, n_ones=(ndim - x.type.ndim))
            else:
                return x

    # A sequence containing Variables is stacked into one Variable;
    # fall through to `constant` when stacking fails.
    if isinstance(x, (tuple, list)) and python_any(isinstance(xi, Variable)
                                                   for xi in x):
        try:
            return stack(x)
        except (TypeError, ValueError):
            pass

    # Reject bools explicitly: they usually come from misuse of `==` on
    # Variables.
    if isinstance(x, bool):
        raise AsTensorError(
            "Cannot cast True or False as a tensor variable. Please use 1 or "
            "0. This error might be caused by using the == operator on "
            "Variables. v == w does not do what you think it does, "
            "use theano.tensor.eq(v, w) instead.")
    try:
        return constant(x, name=name, ndim=ndim)
    except TypeError:
        try:
            str_x = str(x)
        except Exception:
            str_x = repr(x)
        raise AsTensorError("Cannot convert %s to TensorType" % str_x, type(x))
# this has a different name, because _as_tensor_variable is the
# function which ops use to upcast their arguments... this
# internal-use function is a good place to put debugging stuff, better
# than the global astensor.
_as_tensor_variable = as_tensor_variable

# Shorter public alias.
as_tensor = as_tensor_variable
class NumpyAutocaster(object):
    """
    This class is used to cast python ints and floats to numpy arrays.

    The behavior when called on scalar `x` depends on `config.cast_policy`:
        - 'numpy' will simply use the same type as found by `numpy.asarray(x)`.
        - 'numpy+floatX' will do the same, except it will use float32 instead
          of float64 if `x` is a Python float and `config.floatX` is set to
          'float32' (note that if `x` is a numpy scalar whose data type is
          float64, it is not modified since we assume the user is purposedly
          using float64).
        - 'custom' lets one define a tuple of data types such that:
            - if `x` is already a numpy scalar and its data type is in this
              tuple, then it is returned unchanged;
            - otherwise, the first data type in this tuple that can represent
              `x` without loss of precision will be used, unless `x` is a float
              and 'float32' is in the tuple (in which case `x` is cast as a
              float32);
            - if no data type can represent `x` without loss of precision, then
              the last data type in the tuple will be used.

    Parameters
    ----------
    dtypes: tuple of strings
        The ordered list of preferred data types (only used when
        `config.cast_policy` is set to 'custom', see the `NumpyAutocaster`
        help for details).

    """

    def __init__(self, dtypes):
        self.dtypes = tuple(dtypes)

    def __call__(self, x):
        # Make sure we only deal with scalars.
        assert (isinstance(x, integer_types) or
                isinstance(x, float) or
                (isinstance(x, numpy.ndarray) and x.ndim == 0))

        if config.cast_policy == 'numpy':
            return numpy.asarray(x)
        elif config.cast_policy == 'numpy+floatX':
            rval = numpy.asarray(x)
            # Downcast plain Python floats to floatX; numpy scalars keep
            # their explicit dtype.
            if ((not hasattr(x, 'dtype') and
                 rval.dtype in ('float64', 'float32') and
                 rval.dtype != config.floatX)):
                rval = theano._asarray(rval, dtype=config.floatX)
            return rval

        # The following is the original code, corresponding to the 'custom'
        # option for `config.cast_policy`.
        assert config.cast_policy == 'custom'

        try:
            # Pass through numpy scalars, since they are already typed on
            # purpose typically.
            if str(x.dtype) in self.dtypes:
                # No need to cast `x` into a new dtype. Note that we still
                # need to convert it into an array, because it may not be
                # one already (e.g. if x == numpy.float64(1.1)).
                return numpy.asarray(x)
        except AttributeError:
            # Means `x` has no 'dtype' attribute.
            pass

        # unsafe downcast of float64 variables when config.floatX == 'float32'
        # recall: float is numpy.float
        if ((isinstance(x, float) and
             config.floatX in self.dtypes and
             config.floatX != 'float64')):
            return theano._asarray(x, dtype=config.floatX)

        # Don't autocast to float16 unless config.floatX is float16
        try_dtypes = [d for d in self.dtypes
                      if config.floatX == 'float16' or d != 'float16']

        # Pick the first candidate dtype that represents `x` exactly.
        for dtype in try_dtypes:
            x_ = theano._asarray(x, dtype=dtype)
            if numpy.all(x == x_):
                break
        # returns either an exact x_==x, or the last cast x_
        return x_
# Default autocasters for Python ints and floats (used by `constant_or_value`).
autocast_int = NumpyAutocaster(('int8', 'int16', 'int32', 'int64'))
autocast_float = NumpyAutocaster(('float16', 'float32', 'float64'))

# autocast_float dtypes might be manipulated in tensor.__init__
#
# Note: it's a bit weird for a compiler to automatically downcast
# literals like this, and it might have implications for efficiency
# when mixing types.  For example when you add 1.0 + dmatrix(), the
# 1.0 could be converted to float32, and require upcasting for the +
# operation at every position in the dmatrix.  using
# theano._asarray(1.0, dtype='float64') will circumvent this
# autocasting, and in future, our ops might be smarter about factoring
# out upcasts.   The advantage of this mechanism is to combine it with
# floatX so that 1.0 + xmatrix() will always have the same type as the
# xmatrix().
#
class autocast_float_as(object):
    """
    Temporarily adjust autocasting behavior.

    Context manager that swaps `autocast_float.dtypes` for the given
    dtypes on entry and restores the previous tuple on exit.  Only legal
    while `config.cast_policy` is 'custom'; an assertion fires otherwise.
    Mostly convenient for testing the autocasting mechanism.

    Examples
    --------
    >>> with autocast_float_as('float32'):
    ...    assert (fvector() + 1.1).dtype == 'float32'  # temporary downcasting
    >>> assert (fvector() + 1.1).dtype == 'float64' # back to default behaviour
    """

    def __init__(self, *dtypes):
        self.dtypes = dtypes
        assert config.cast_policy == 'custom'

    def __enter__(self):
        assert config.cast_policy == 'custom'
        # Remember the active dtypes so __exit__ can restore them.
        self.old_dtypes = autocast_float.dtypes
        autocast_float.dtypes = self.dtypes

    def __exit__(self, *args):
        assert config.cast_policy == 'custom'
        autocast_float.dtypes = self.old_dtypes
def constant_or_value(x, rtype, name=None, ndim=None, dtype=None):
    """Return a symbolic `Constant` with value `x`.

    Converts `x` to an ndarray (honoring `dtype` when given, otherwise
    using the autocasting rules above), infers a broadcast pattern from
    the shape, and wraps it in `rtype` (e.g. `TensorConstant`).

    Raises
    ------
    TypeError
        `x` could not be converted to a numpy.ndarray.
    ValueError
        `x` could not be expanded to have ndim dimensions.

    """
    if dtype is not None:
        # in this case, the semantics are that the caller is forcing the dtype
        x_ = theano._asarray(x, dtype=dtype)
    else:
        # In this case, this function should infer the dtype according to the
        # autocasting rules. See autocasting above.
        x_ = None
        if rtype is TensorConstant and isinstance(x, integer_types):
            try:
                x_ = autocast_int(x)
            except OverflowError:
                # This is to imitate numpy behavior which tries to fit
                # bigger numbers into a uint64.
                x_ = theano._asarray(x, dtype='uint64')
        elif rtype is TensorConstant and isinstance(x, float):
            x_ = autocast_float(x)
        elif isinstance(x, numpy.ndarray):
            x_ = x
            # Currently we do not have a bool dtype in Theano.
            # So we upcast it to uint8 to avoid breaking our interface for
            # constant.
            if x.dtype == 'bool':
                x_ = numpy.asarray(x_, dtype='uint8')
        else:
            # Here x is probably a list or a tuple. If it contains a long,
            # we will behave like the current NumPy version: 1.7 and below,
            # it will only work if the long fits in int64. For NumPy 1.7.1+,
            # it will work if the long fits in int64 or uint64.
            x_ = numpy.asarray(x)

    assert type(x_) in [numpy.ndarray, numpy.memmap]

    # Dimensions of size 1 are marked broadcastable.
    bcastable = [d == 1 for d in x_.shape]
    if ndim is not None:
        if len(bcastable) < ndim:
            bcastable = [True] * (ndim - len(bcastable)) + bcastable
        elif len(bcastable) > ndim:
            # TODO: strip off dimensions of size 1
            raise ValueError(
                'ndarray could not be cast to constant with %i dimensions' %
                ndim)
        assert len(bcastable) == ndim

    try:
        if rtype is TensorConstant:
            # Copy so the constant cannot be mutated through `x`.
            rval = rtype(
                TensorType(dtype=x_.dtype, broadcastable=bcastable),
                x_.copy(),
                name=name)
            return rval
        else:
            # leave the shape out of the type
            return rtype(TensorType(dtype=x_.dtype, broadcastable=bcastable),
                         x_, name=name)
    except Exception:
        raise TypeError("Could not convert %s to TensorType" % x, type(x))
def constant(x, name=None, ndim=None, dtype=None):
    """Build a `TensorConstant` from `x`, with a small cache for scalars.

    See `constant_or_value` for the conversion rules.
    """
    ret = constant_or_value(x, rtype=TensorConstant, name=name, ndim=ndim,
                            dtype=dtype)

    # We create a small cache of frequently used constant.
    # This speed up the Merge optimization for big graph.
    # We want to cache all scalar to don't merge as frequently constants.
    # But we don't want to cache too much stuff
    # So we cache integer with dtype [u]int and float where the value is
    # between -10 and 10
    # We want to cache all broadcast pattern for scalar.
    if not constant.enable:
        return ret
    sig = ret.signature()
    if (sig not in constant_cache and ret.data.size == 1 and
            (-10) <= ret.data <= 10 and
            (ret.dtype in int_dtypes or ret.dtype in uint_dtypes or
             (ret.dtype in float_dtypes and int(ret.data) == ret.data))):
        constant_cache[sig] = ret
        # This is needed to raise a good error to the user.
        ret.cached = True

    return constant_cache.get(sig, ret)
# Caching can be switched off globally (e.g. for debugging).
constant.enable = True
# Maps constant signatures to the cached TensorConstant instances.
constant_cache = {}
def _obj_is_wrappable_as_tensor(x):
    """Return True iff `constant` can wrap `x` into a TensorConstant."""
    try:
        constant(x)
    except TypeError:
        return False
    return True
# Per-dtype absolute/relative tolerances used by `_get_atol_rtol` and
# `_allclose`; which set applies is chosen once at import time from
# config.tensor.cmp_sloppy.
if int(config.tensor.cmp_sloppy) > 1:
    # This config variable is a quick-and-dirty way to get low-precision
    # comparisons.  For a more precise setting of these tolerances set
    # them explicitly in your user code by assigning, for example,
    # "theano.tensor.basic.float32_atol = ..."
    # When config.tensor.cmp_sloppy>1 we are even more sloppy. This is
    # useful to test the GPU as they don't use extended precision and
    # this cause some difference bigger then the normal sloppy.
    float16_atol = 5e-3
    float16_rtol = 1e-2

    float32_atol = 5e-4
    float32_rtol = 1e-3

    float64_rtol = 1e-4
    float64_atol = 1e-3
elif int(config.tensor.cmp_sloppy):
    # Moderately sloppy comparisons.
    float16_atol = 1e-3
    float16_rtol = 5e-3

    float32_atol = 1e-4
    float32_rtol = 1e-3

    float64_rtol = 1e-4
    float64_atol = 1e-3
else:
    # If you change those value in test don't forget to put them back
    # when the test end.  Don't forget the case when the test fail.
    float16_atol = 5e-4
    float16_rtol = 5e-4

    float32_atol = 1e-5
    float32_rtol = 1e-5

    # defaults in numpy.allclose
    # Don't be more strict then numpy rtol
    # It cause useless error.
    float64_rtol = 1.0000000000000001e-05
    float64_atol = 1e-8
def _get_atol_rtol(a, b):
    """Return the (atol, rtol) pair matching the least precise dtype of a, b."""
    dtype_a = str(a.dtype)
    dtype_b = str(b.dtype)
    # float16 is the least precise, then float32/complex64.
    if 'float16' in (dtype_a, dtype_b):
        return float16_atol, float16_rtol
    narrow = ('float32', 'complex64')
    if dtype_a in narrow or dtype_b in narrow:
        return float32_atol, float32_rtol
    return float64_atol, float64_rtol
def _allclose(a, b, rtol=None, atol=None):
    """``numpy.allclose`` with dtype-dependent default tolerances.

    When `rtol`/`atol` are None, defaults come from `_get_atol_rtol`.
    """
    a = numpy.asarray(a)
    b = numpy.asarray(b)

    default_atol, default_rtol = _get_atol_rtol(a, b)
    if rtol is None:
        rtol = default_rtol
    if atol is None:
        atol = default_atol

    # Work around bug in Numpy, see
    # http://projects.scipy.org/numpy/ticket/1684
    if str(b.dtype) in int_dtypes and (numpy.absolute(b) < 0).any():
        b = theano._asarray(b, dtype='float64')

    return numpy.allclose(a, b, atol=atol, rtol=rtol)
class NotScalarConstantError(Exception):
    """
    Raised by get_scalar_constant_value if called on something that is
    not a scalar constant.
    """
class EmptyConstantError(NotScalarConstantError):
    """
    Raised by get_scalar_constant_value if called on something that is a
    zero dimensional constant (i.e. an empty array).
    """
def numpy_scalar(data):
    """ Return a scalar stored in a numpy ndarray.

    Raises
    ------
    NotScalarConstantError
        If the numpy ndarray is not a scalar.
    EmptyConstantError
        If `data` is an empty array (e.g. numpy.array([])).

    """
    # handle case where data is numpy.array([])
    # NOTE: `builtins.max` is used (via six.moves at module top) because
    # this module later shadows `max` with a tensor function; the previous
    # `__builtins__['max']` breaks when __builtins__ is a module (__main__).
    if (data.ndim > 0 and
        (len(data.shape) == 0 or
         builtins.max(data.shape) == 0)):
        assert numpy.all(numpy.array([]) == data)
        raise EmptyConstantError()
    try:
        # Works for all numeric scalars; `builtins.complex` because this
        # module shadows `complex` too (numpy.complex was removed in
        # NumPy 1.24, so the plain builtin is used).
        builtins.complex(data)
        return data
    except Exception:
        raise NotScalarConstantError(
            'v.data is non-numeric, non-scalar, or has more than one'
            ' unique value', data)
# Scalar Ops that get_scalar_constant_value is willing to evaluate on
# constant inputs when digging through a graph.
get_scalar_constant_value_elemwises = (
    scal.Cast, scal.Switch,
    scal.NEQ, scal.EQ,
    scal.LT, scal.GT, scal.LE, scal.GE,
    scal.Sub, scal.Add, scal.Mod, scal.Mul,
    scal.IntDiv, scal.TrueDiv, scal.Minimum, scal.Maximum)
def get_scalar_constant_value(orig_v, elemwise=True,
only_process_constants=False):
"""Return the constant scalar(0-D) value underlying variable `v`.
If `v` is the output of dimshuffles, fills, allocs, rebroadcasts,
cast, OutputGuard, DeepCopyOp, ScalarFromTensor, ScalarOp, Elemwise
and some pattern with Subtensor, this function digs through them.
If `v` is not some view of constant scalar data, then raise a
NotScalarConstantError.
Parameters
----------
elemwise : bool
If False, we won't try to go into elemwise. So this call is faster.
only_process_constants : bool
If True, we only attempt to obtain the value of `orig_v` if it's
directly constant and don't try to dig through dimshuffles, fills,
allocs, and other to figure out its value.
Notes
-----
There may be another function similar to this one in the code,
but I'm not sure where it is.
"""
v = orig_v
while True:
if v is None:
# None is not a scalar (and many uses of this function seem
# to depend on passing it None)
raise NotScalarConstantError()
if isinstance(v, (numpy.integer, int, float)):
return numpy.asarray(v)
if isinstance(v, numpy.ndarray):
return numpy_scalar(v)
if isinstance(v, Constant):
if getattr(v.tag, 'unique_value', None) is not None:
data = v.tag.unique_value
else:
data = v.data
return numpy_scalar(data)
if not only_process_constants and getattr(v, 'owner', None):
if isinstance(v.owner.op, (Alloc, DimShuffle, Rebroadcast,
compile.ops.OutputGuard,
compile.DeepCopyOp)):
v = v.owner.inputs[0]
continue
elif isinstance(v.owner.op, theano.compile.ops.Shape_i):
if isinstance(v.owner.inputs[0], Constant):
return numpy.asarray(
v.owner.inputs[0].data.shape[v.owner.op.i])
# Don't act as the constant_folding optimization here as this
# fct is used too early in the optimization phase. This would
# mess with the stabilization optimization and be too slow.
# We put all the scalar Ops used by get_canonical_form_slice()
# to allow it to determine the broadcast pattern correctly.
elif isinstance(v.owner.op, (ScalarFromTensor, TensorFromScalar)):
return get_scalar_constant_value(v.owner.inputs[0])
elif isinstance(v.owner.op, scal.ScalarOp):
if isinstance(v.owner.op, scal.Second):
# We don't need both input to be constant for second
shp, val = v.owner.inputs
v = val
continue
if isinstance(v.owner.op, get_scalar_constant_value_elemwises):
const = [get_scalar_constant_value(i)
for i in v.owner.inputs]
ret = [[None]]
v.owner.op.perform(v.owner, const, ret)
return ret[0][0]
elif elemwise and isinstance(v.owner.op, Elemwise):
if isinstance(v.owner.op.scalar_op, scal.Second):
# We don't need both input to be constant for second
shp, val = v.owner.inputs
v = val
continue
elif isinstance(v.owner.op.scalar_op,
get_scalar_constant_value_elemwises):
const = [get_scalar_constant_value(i)
for i in v.owner.inputs]
ret = [[None]]
v.owner.op.perform(v.owner, const, ret)
return ret[0][0]
elif (isinstance(v.owner.op, theano.tensor.subtensor.Subtensor) and
v.ndim == 0):
if isinstance(v.owner.inputs[0], TensorConstant):
cdata = tuple(v.owner.op.get_constant_idx(v.owner.inputs))
try:
return v.owner.inputs[0].data.__getitem__(cdata)
except IndexError:
raise IndexError(
str(tuple(v.owner.op.idx_list)) +
" is not a valid index into " +
str(v.owner.inputs[0].data))
# The index list 'idx_list' should have length the same
# shape as the input.
# TODO: implement the case where we take a scalar in a matrix
assert len(v.owner.op.idx_list) == v.owner.inputs[0].ndim
# Needed to make better graph in this test in
# theano/tensor/tests/test_sharedvar.py:
# test_shared_options.test_specify_shape_partial
if ((v.owner.inputs[0].owner and
isinstance(v.owner.inputs[0].owner.op, Join) and
len(v.owner.op.idx_list) == 1)):
# Ensure the Join is joining only scalar variables (so that
# the constant value can be found at the same index as the
# one used in the sub-tensor).
if python_all(var.ndim == 0 for var in
v.owner.inputs[0].owner.inputs[1:]):
idx = v.owner.op.idx_list[0]
if isinstance(idx, gof.Type):
idx = get_scalar_constant_value(v.owner.inputs[1])
# Note the '+ 1' is because the first argument to Join
# is the axis.
ret = v.owner.inputs[0].owner.inputs[idx + 1]
ret = get_scalar_constant_value(ret)
# join can cast implicitly its input in some case.
return theano._asarray(ret, dtype=v.type.dtype)
if python_all(var.ndim == 1 for var in
v.owner.inputs[0].owner.inputs[1:]):
idx = v.owner.op.idx_list[0]
if isinstance(idx, gof.Type):
idx = get_scalar_constant_value(v.owner.inputs[1])
try:
# TODO: assert joined axis is 0.
length = 0
for joined in v.owner.inputs[0].owner.inputs[1:]:
ll = get_vector_length(joined)
if idx < length + ll:
return get_scalar_constant_value(
joined[idx - length])
length += ll
except TypeError:
pass
except ValueError:
pass
elif (v.owner.inputs[0].owner and
isinstance(v.owner.inputs[0].owner.op,
theano.tensor.opt.MakeVector) and
# MakeVector normally accept only scalar as input.
# We put this check in case there is change in the future
python_all(var.ndim == 0 for var in
v.owner.inputs[0].owner.inputs) and
len(v.owner.op.idx_list) == 1):
idx = v.owner.op.idx_list[0]
if isinstance(idx, gof.Type):
idx = get_scalar_constant_value(v.owner.inputs[1])
# Python 2.4 does not support indexing with numpy.integer
# So we cast it.
idx = int(idx)
ret = v.owner.inputs[0].owner.inputs[idx]
ret = get_scalar_constant_value(ret)
# MakeVector can cast implicitly its input in some case.
return theano._asarray(ret, dtype=v.type.dtype)
# This is needed when we take the grad as the Shape op
# are not already changed into MakeVector
owner = v.owner
leftmost_parent = owner.inputs[0]
if (leftmost_parent.owner and
isinstance(leftmost_parent.owner.op,
theano.tensor.Shape)):
op = owner.op
idx_list = op.idx_list
idx = idx_list[0]
if isinstance(idx, gof.Type):
idx = get_scalar_constant_value(owner.inputs[1])
grandparent = leftmost_parent.owner.inputs[0]
gp_broadcastable = grandparent.type.broadcastable
ndim = grandparent.type.ndim
if grandparent.owner and isinstance(grandparent.owner.op,
Rebroadcast):
ggp_broadcastable = grandparent.owner.inputs[0].broadcastable
l = [b1 or b2 for b1, b2 in zip(ggp_broadcastable,
gp_broadcastable)]
gp_broadcastable = tuple(l)
assert ndim == len(gp_broadcastable)
if not (idx < len(gp_broadcastable)):
msg = ("get_scalar_constant_value detected " +
"deterministic IndexError: x.shape[%d] " +
"when x.ndim=%d.") % (idx, ndim)
if config.exception_verbosity == 'high':
msg += ' x=%s' % min_informative_str(v)
else:
msg += ' x=%s' % str(v)
raise ValueError(msg)
if gp_broadcastable[idx]:
return numpy.asarray(1)
raise NotScalarConstantError(v)
# Easy constructors
def tensor(*args, **kwargs):
    """Build a variable of an arbitrary TensorType.

    All positional/keyword arguments except ``name`` are forwarded to the
    TensorType constructor; ``name`` (default None) is attached to the
    resulting variable.
    """
    var_name = kwargs.pop('name', None)
    ttype = TensorType(*args, **kwargs)
    return ttype(name=var_name)
def _multi(*fns):
    # Build "plural" constructors: given variable-making functions such as
    # ``scalar``, return callables that create several variables at once,
    # either from an integer count or from one-or-more names.
    def f2(f, *names):
        # NOTE(review): ``names`` is always a tuple here, so the
        # ``names == 1`` test below can never be true; a count of 1 still
        # yields a one-element list. Kept as-is to preserve behavior.
        if names and isinstance(names[0], int):
            if names == 1:
                return f()
            else:
                return [f() for i in xrange(names[0])]
        if isinstance(names, tuple):
            if len(names) == 1:
                names = names[0]
        if len(names) == 1:
            return f(names)
        else:
            return [f(name) for name in names]
    if len(fns) == 1:
        # NOTE(review): this passes the tuple ``fns`` rather than
        # ``fns[0]``; the single-function path appears unused in this file.
        return partial(f2, fns)
    else:
        return [partial(f2, f) for f in fns]
# Ready-made 0-d (scalar) TensorType instances, one per supported dtype.
# Prefix key: c/z = complex64/128, f/d = float32/64, b/w/i/l = int8/16/32/64.
cscalar = TensorType('complex64', ())
zscalar = TensorType('complex128', ())
fscalar = TensorType('float32', ())
dscalar = TensorType('float64', ())
bscalar = TensorType('int8', ())
wscalar = TensorType('int16', ())
iscalar = TensorType('int32', ())
lscalar = TensorType('int64', ())
def scalar(name=None, dtype=None):
    """Return a symbolic 0-d (scalar) variable.

    Parameters
    ----------
    dtype: numeric
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.
    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, ())(name)
# Plural constructors, e.g. ``scalars('x', 'y')`` makes two scalar variables.
scalars, fscalars, dscalars, iscalars, lscalars = _multi(
    scalar, fscalar, dscalar, iscalar, lscalar)
# Dtype groupings of the scalar types.
int_types = bscalar, wscalar, iscalar, lscalar
float_types = fscalar, dscalar
complex_types = cscalar, zscalar
int_scalar_types = int_types
float_scalar_types = float_types
complex_scalar_types = complex_types
# 1-d (vector) TensorType instances; the single dimension is not
# broadcastable.
cvector = TensorType('complex64', (False, ))
zvector = TensorType('complex128', (False, ))
fvector = TensorType('float32', (False, ))
dvector = TensorType('float64', (False, ))
bvector = TensorType('int8', (False,))
wvector = TensorType('int16', (False,))
ivector = TensorType('int32', (False, ))
lvector = TensorType('int64', (False, ))
def vector(name=None, dtype=None):
    """Return a symbolic 1-d (vector) variable.

    Parameters
    ----------
    dtype: numeric
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.
    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (False, ))(name)
# Plural vector constructors and dtype groupings.
vectors, fvectors, dvectors, ivectors, lvectors = _multi(
    vector, fvector, dvector, ivector, lvector)
int_vector_types = bvector, wvector, ivector, lvector
float_vector_types = fvector, dvector
complex_vector_types = cvector, zvector
# 2-d (matrix) TensorType instances; neither dimension broadcastable.
cmatrix = TensorType('complex64', (False, False))
zmatrix = TensorType('complex128', (False, False))
fmatrix = TensorType('float32', (False, False))
dmatrix = TensorType('float64', (False, False))
bmatrix = TensorType('int8', (False, False))
wmatrix = TensorType('int16', (False, False))
imatrix = TensorType('int32', (False, False))
lmatrix = TensorType('int64', (False, False))
def matrix(name=None, dtype=None):
    """Return a symbolic 2-d (matrix) variable.

    Parameters
    ----------
    dtype: numeric
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.
    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (False, False))(name)
# Plural matrix constructors and dtype groupings.
matrices, fmatrices, dmatrices, imatrices, lmatrices = _multi(
    matrix, fmatrix, dmatrix, imatrix, lmatrix)
int_matrix_types = bmatrix, wmatrix, imatrix, lmatrix
float_matrix_types = fmatrix, dmatrix
complex_matrix_types = cmatrix, zmatrix
# "Row" types: 2-d with the first dimension broadcastable (shape 1 x N).
crow = TensorType('complex64', (True, False))
zrow = TensorType('complex128', (True, False))
frow = TensorType('float32', (True, False))
drow = TensorType('float64', (True, False))
brow = TensorType('int8', (True, False))
wrow = TensorType('int16', (True, False))
irow = TensorType('int32', (True, False))
lrow = TensorType('int64', (True, False))
def row(name=None, dtype=None):
    """Return a symbolic row variable (ndim=2, broadcastable=[True, False]).

    Parameters
    ----------
    dtype: numeric type
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.
    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (True, False))(name)
# Plural row constructors.
rows, frows, drows, irows, lrows = _multi(row, frow, drow, irow, lrow)
# "Column" types: 2-d with the second dimension broadcastable (shape N x 1).
ccol = TensorType('complex64', (False, True))
zcol = TensorType('complex128', (False, True))
fcol = TensorType('float32', (False, True))
dcol = TensorType('float64', (False, True))
bcol = TensorType('int8', (False, True))
wcol = TensorType('int16', (False, True))
icol = TensorType('int32', (False, True))
lcol = TensorType('int64', (False, True))
def col(name=None, dtype=None):
    """Return a symbolic column variable (ndim=2, broadcastable=[False, True]).

    Parameters
    ----------
    dtype : numeric
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.
    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (False, True))(name)
# Plural column constructors.
cols, fcols, dcols, icols, lcols = _multi(col, fcol, dcol, icol, lcol)
# 3-d TensorType instances; no dimension broadcastable.
ctensor3 = TensorType('complex64', ((False,) * 3))
ztensor3 = TensorType('complex128', ((False,) * 3))
ftensor3 = TensorType('float32', ((False,) * 3))
dtensor3 = TensorType('float64', ((False,) * 3))
btensor3 = TensorType('int8', ((False,) * 3))
wtensor3 = TensorType('int16', ((False,) * 3))
itensor3 = TensorType('int32', ((False,) * 3))
ltensor3 = TensorType('int64', ((False,) * 3))
def tensor3(name=None, dtype=None):
    """Return a symbolic 3-D variable.

    Parameters
    ----------
    dtype: numeric type
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.
    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (False,) * 3)(name)
# Plural tensor3 constructors.
tensor3s, ftensor3s, dtensor3s, itensor3s, ltensor3s = _multi(
    tensor3, ftensor3, dtensor3, itensor3, ltensor3)
# 4-d TensorType instances; no dimension broadcastable.
ctensor4 = TensorType('complex64', ((False,) * 4))
ztensor4 = TensorType('complex128', ((False,) * 4))
ftensor4 = TensorType('float32', ((False,) * 4))
dtensor4 = TensorType('float64', ((False,) * 4))
btensor4 = TensorType('int8', ((False,) * 4))
wtensor4 = TensorType('int16', ((False,) * 4))
itensor4 = TensorType('int32', ((False,) * 4))
ltensor4 = TensorType('int64', ((False,) * 4))
def tensor4(name=None, dtype=None):
    """Return a symbolic 4-D variable.

    Parameters
    ----------
    dtype: numeric type
        None means to use theano.config.floatX.
    name
        A name to attach to this variable.
    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (False,) * 4)(name)
# Plural tensor4 constructors.
tensor4s, ftensor4s, dtensor4s, itensor4s, ltensor4s = _multi(
    tensor4, ftensor4, dtensor4, itensor4, ltensor4)
# Backwards-compatible alias.
Tensor = TensorType
# This bizarre push-import avoids a circular dependency.
elemwise.as_tensor_variable = as_tensor_variable
elemwise.TensorType = TensorType
elemwise.TensorVariable = TensorVariable
elemwise.TensorConstant = TensorConstant
#########################
# Utilities
#########################
def _scal_elemwise_with_nfunc(nfunc, nin, nout):
    """
    Replace a symbol definition with an elementwise version of the
    corresponding scalar Op. If it is not None, the nfunc argument
    should be a string such that getattr(numpy, nfunc) implements
    a vectorized version of the elemwise operation. nin is the number
    of inputs expected by that function, and nout is the number of
    **destination** inputs it takes. That is, the function should
    take nin+nout inputs. nout == 0 means that the numpy function
    does not take a numpy array argument to put its result in.
    """
    def construct(symbol):
        # Decorator body: the decorated stub contributes only its name and
        # docstring; the returned object is an Elemwise Op built from the
        # same-named Op in theano.scalar (``scal``).
        symbolname = symbol.__name__
        inplace = symbolname.endswith('_inplace')
        if inplace:
            msg = "inplace"
        else:
            msg = "no_inplace"
        n = "Elemwise{%s,%s}" % (symbolname, msg)
        if inplace:
            # Strip the suffix to find the scalar op, and rebuild it so
            # that output 0 reuses the storage of input 0 ({0: 0}).
            scalar_op = getattr(scal, symbolname[:-len('_inplace')])
            inplace_scalar_op = scalar_op.__class__(scal.transfer_type(0))
            rval = elemwise.Elemwise(inplace_scalar_op, {0: 0}, name=n,
                                     nfunc_spec=(nfunc and (nfunc, nin, nout)))
        else:
            scalar_op = getattr(scal, symbolname)
            rval = elemwise.Elemwise(scalar_op, name=n,
                                     nfunc_spec=(nfunc and (nfunc, nin, nout)))
        if getattr(symbol, '__doc__', False):
            # Prepend the stub's docstring to the Op's own.
            rval.__doc__ = symbol.__doc__ + '\n' + rval.__doc__
        # for the meaning of this see the ./epydoc script
        # it makes epydoc display rval as if it were a function, not an object
        rval.__epydoc_asRoutine = symbol
        rval.__module__ = 'tensor'
        pprint.assign(rval, printing.FunctionPrinter(symbolname))
        return rval
    return construct
# Variant with no registered numpy counterpart.
_scal_elemwise = _scal_elemwise_with_nfunc(None, None, None)
def _pack(x):
"""
Convert x to a list if it is an iterable, otherwise wrap it in a list.
"""
try:
return list(x)
except TypeError:
return [x]
#########################
# Casting Operations
#########################
class TensorFromScalar(Op):
    """Promote a theano.scalar value to a 0-d tensor of the same dtype."""
    __props__ = ()
    def make_node(self, s):
        # Input must be a scalar-typed variable; output is a 0-d tensor.
        assert isinstance(s.type, scal.Scalar)
        return Apply(self,
                     [s],
                     [tensor(dtype=s.type.dtype,
                             broadcastable=())])
    def perform(self, node, inp, out_):
        # Wrap the Python/numpy scalar into a 0-d ndarray.
        s, = inp
        out, = out_
        out[0] = numpy.asarray(s)
    def infer_shape(self, node, in_shapes):
        # Output is 0-d: its shape is the empty tuple.
        return [()]
    def grad(self, inp, grads):
        s, = inp
        dt, = grads
        if s.type.dtype in float_dtypes:
            assert dt.type.dtype in float_dtypes
            return [scalar_from_tensor(dt)]
        # If the input dtype is an integer, then so is the output dtype,
        # and the "zero" gradient can be represented in that int dtype.
        # Currently, theano.grad insists that the dtype of the returned
        # gradient has a float dtype, so we use floatX.
        if s.type.dtype in discrete_dtypes:
            return [s.zeros_like().astype(theano.config.floatX)]
        raise NotImplementedError("grad not implemented for complex dtypes")
# Singleton instance used throughout the file.
tensor_from_scalar = TensorFromScalar()
class ScalarFromTensor(Op):
    """Extract the theano.scalar value out of a 0-d tensor."""
    __props__ = ()
    def make_node(self, t):
        # Only 0-d tensors (empty broadcastable pattern) are accepted.
        assert isinstance(t.type, TensorType)
        assert t.type.broadcastable == ()
        return Apply(self,
                     [t],
                     [scal.get_scalar_type(dtype=t.type.dtype).make_variable()]
                     )
    def perform(self, node, inp, out_):
        # flatten()[0] pulls the single element out of the 0-d array.
        s, = inp
        out, = out_
        out[0] = s.flatten()[0]
    def infer_shape(self, node, in_shapes):
        return [()]
    def grad(self, inp, grads):
        # Gradient is just the reverse promotion.
        s, = inp
        dt, = grads
        return [tensor_from_scalar(dt)]
    def R_op(self, inputs, eval_points):
        if None in eval_points:
            return [None]
        return self.make_node(*eval_points).outputs
    def c_code(self, node, name, inputs, outputs, sub):
        # Read the first (only) element directly from the array's data.
        x, = inputs
        z, = outputs
        fail = sub['fail']
        return """
        %(z)s = ((dtype_%(x)s*)(PyArray_DATA(%(x)s)))[0];
        """ % locals()
    def c_code_cache_version(self):
        return (1,)
# Singleton instance used throughout the file.
scalar_from_tensor = ScalarFromTensor()
# to be removed as we get the epydoc routine-documenting thing going
# -JB 20080924
def _conversion(real_value, name):
    """Tag ``real_value`` as a casting op and register a pretty-printer
    for it under ``name``; returns ``real_value`` unchanged."""
    real_value.__module__ = 'tensor.basic'
    __oplist_tag(real_value, 'casting')
    pprint.assign(real_value, printing.FunctionPrinter(name))
    return real_value
# These _convert_to_<type> functions have leading underscores to indicate that
# they should not be called directly. They do not perform sanity checks about
# what types you are casting to what. That logic is implemented by the
# `cast()` function below.
# One Elemwise casting op per target dtype, plus a dtype-name -> op mapping
# used by cast() below.
_convert_to_int8 = _conversion(
    elemwise.Elemwise(scal.convert_to_int8), 'int8')
"""Cast to 8-bit integer"""
_convert_to_int16 = _conversion(
    elemwise.Elemwise(scal.convert_to_int16), 'int16')
"""Cast to 16-bit integer"""
_convert_to_int32 = _conversion(
    elemwise.Elemwise(scal.convert_to_int32), 'int32')
"""Cast to 32-bit integer"""
_convert_to_int64 = _conversion(
    elemwise.Elemwise(scal.convert_to_int64), 'int64')
"""Cast to 64-bit integer"""
_convert_to_uint8 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint8), 'uint8')
"""Cast to unsigned 8-bit integer"""
_convert_to_uint16 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint16), 'uint16')
"""Cast to unsigned 16-bit integer"""
_convert_to_uint32 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint32), 'uint32')
"""Cast to unsigned 32-bit integer"""
_convert_to_uint64 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint64), 'uint64')
"""Cast to unsigned 64-bit integer"""
_convert_to_float16 = _conversion(
    elemwise.Elemwise(scal.convert_to_float16), 'float16')
"""Cast to half-precision floating point"""
_convert_to_float32 = _conversion(
    elemwise.Elemwise(scal.convert_to_float32), 'float32')
"""Cast to single-precision floating point"""
_convert_to_float64 = _conversion(
    elemwise.Elemwise(scal.convert_to_float64), 'float64')
"""Cast to double-precision floating point"""
_convert_to_complex64 = _conversion(
    elemwise.Elemwise(scal.convert_to_complex64), 'complex64')
"""Cast to single-precision complex"""
_convert_to_complex128 = _conversion(
    elemwise.Elemwise(scal.convert_to_complex128), 'complex128')
"""Cast to double-precision complex"""
_cast_mapping = {
    'int8': _convert_to_int8,
    'int16': _convert_to_int16,
    'int32': _convert_to_int32,
    'int64': _convert_to_int64,
    'uint8': _convert_to_uint8,
    'uint16': _convert_to_uint16,
    'uint32': _convert_to_uint32,
    'uint64': _convert_to_uint64,
    'float16': _convert_to_float16,
    'float32': _convert_to_float32,
    'float64': _convert_to_float64,
    'complex64': _convert_to_complex64,
    'complex128': _convert_to_complex128}
@constructor
def cast(x, dtype):
    """Symbolically cast `x` to a Tensor of type `dtype`.

    'floatX' is resolved to theano.config.floatX. Casting complex to real
    is refused because the intended projection is ambiguous.
    """
    if dtype == 'floatX':
        dtype = config.floatX
    _x = as_tensor_variable(x)
    src_dtype = _x.type.dtype
    if src_dtype == dtype:
        # Already the requested dtype: no op needed.
        return _x
    if src_dtype.startswith('complex') and not dtype.startswith('complex'):
        raise TypeError('Casting from complex to real is ambiguous: '
                        'consider real(), imag(), angle() or abs()')
    return _cast_mapping[dtype](x)
##########################
# Unary Operations
##########################
class MaxAndArgmax(Op):
    """
    Calculate the max and argmax over a given axis or over all axes.
    """
    nin = 2  # tensor, axis
    nout = 2  # max val, max idx
    E_axis = 'invalid axis'
    __props__ = ()
    def make_node(self, x, axis=None):
        # Normalize ``axis`` into either NoneConst (reduce over all axes)
        # or a constant int64 vector of sorted, non-negative axes.
        x = _as_tensor_variable(x)
        if isinstance(axis, (int, numpy.integer)):
            axis = [int(axis)]
        elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
            # 0-d array: treat like a single integer axis.
            axis = [int(axis)]
        elif isinstance(axis, (tuple, list, numpy.ndarray)):
            axis = [int(a) for a in axis]
            if axis == list(range(x.type.ndim)):
                # Reducing over every axis is the same as axis=None.
                axis = None
        elif isinstance(axis, Variable):
            if NoneConst.equals(axis):
                axis = None
            elif not isinstance(axis, TensorConstant):
                raise TypeError(
                    "MaxAndArgmax needs a constant axis. Got %s" % axis)
            else:
                assert (axis.dtype.startswith("int") or
                        axis.dtype.startswith("uint"))
                if isinstance(axis.data, (int, numpy.integer)) or \
                   (isinstance(axis.data, numpy.ndarray) and
                        axis.data.ndim == 0):
                    axis = [int(axis.data)]
                elif isinstance(axis.data, (list, numpy.ndarray)):
                    axis = [int(i) for i in axis.data]
        # Make axis entries non-negative, and sort them
        if isinstance(axis, list):
            for idx in xrange(len(axis)):
                if axis[idx] < 0:
                    axis[idx] += x.type.ndim
            axis.sort()
        # Verify that axes are valid
        all_axes = []
        if isinstance(axis, list):
            for ax in axis:
                if ax < 0 or ax >= x.type.ndim:
                    raise ValueError(
                        'Invalid axis: %s (the number of dimensions of the '
                        'input is: %s)' % (ax, x.type.ndim))
                if ax not in all_axes:
                    all_axes.append(ax)
        else:
            all_axes = list(range(x.ndim))
        if axis is None or axis == list(range(x.type.ndim)):
            axis = NoneConst.clone()
        else:
            axis = _as_tensor_variable(all_axes)
            assert axis.ndim == 1
        inputs = [x, axis]
        # We keep the original broadcastable flags for dimensions on which
        # we do not perform the max / argmax.
        broadcastable = [b for i, b in enumerate(x.type.broadcastable)
                         if i not in all_axes]
        outputs = [tensor(x.type.dtype, broadcastable, name='max'),
                   tensor('int64', broadcastable, name='argmax')]
        return Apply(self, inputs, outputs)
    def perform(self, node, inp, outs):
        # Numpy implementation. Multi-axis argmax is emulated by moving
        # the reduced axes to the back, flattening them, and doing a
        # single argmax over the last axis.
        x, axes = inp
        max, max_idx = outs
        if axes is None:
            axes = tuple(range(x.ndim))
        else:
            axes = tuple(axes)
        max[0] = theano._asarray(numpy.max(x, axes),
                                 dtype=node.outputs[0].dtype)
        # Numpy does not support multiple axes for argmax
        # Work around
        keep_axes = numpy.array([i for i in range(x.ndim) if i not in axes])
        # Not-reduced axes in front
        transposed_x = numpy.transpose(x, numpy.concatenate((keep_axes, axes)))
        reshaped_x = transposed_x.reshape(transposed_x.shape[:len(keep_axes)] +
                                          (-1,))
        max_idx[0] = theano._asarray(numpy.argmax(reshaped_x, axis=-1),
                                     dtype='int64')
    def c_code(self, node, name, inp, out, sub):
        # C implementation handles only axis=None or a single axis;
        # the multi-axis case stays in perform().
        x, axis = inp
        max, argmax = out
        fail = sub["fail"]
        if NoneConst.equals(node.inputs[1]):
            axis_code = "axis = NPY_MAXDIMS;"
        else:
            assert node.inputs[1].ndim == 1
            # Fall back to perform() if there are multiple axes
            if len(node.inputs[1].data) > 1:
                raise NotImplementedError()
            axis_code = """
        axis = ((dtype_%(axis)s*)PyArray_DATA(%(axis)s))[0];
        if(axis > PyArray_NDIM(%(x)s)-1 || axis < -PyArray_NDIM(%(x)s)){
            PyErr_SetString(PyExc_ValueError,
            "MaxAndArgmax, bad axis argument");
            %(fail)s
        }
        """ % locals()
        ret = """
        int axis;
        Py_CLEAR(%(max)s);
        Py_CLEAR(%(argmax)s);//todo pass them as out parameter.
        %(axis_code)s
        %(max)s = (PyArrayObject*)PyArray_Max(%(x)s, axis, NULL);
        if(%(max)s == NULL){
            PyErr_SetString(PyExc_ValueError,
                         "MaxAndArgmax, max failed");
            %(fail)s;
        }
        if(!PyArray_CheckExact(%(max)s)){
            %(max)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(max)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);
            if(%(max)s == NULL){
                %(fail)s;
            }
        }
        %(argmax)s = (PyArrayObject*)PyArray_ArgMax(%(x)s, axis, NULL);
        if(%(argmax)s == NULL){
            PyErr_SetString(PyExc_ValueError, "MaxAndArgmax, argmax failed");
            Py_CLEAR(%(max)s);
            %(fail)s;
        }
        if(!PyArray_CheckExact(%(argmax)s)){
            %(argmax)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(argmax)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);
            if(%(argmax)s == NULL){
                %(fail)s;
            }
        }
        if(PyArray_TYPE(%(argmax)s) != NPY_INT64){
            PyObject * tmp = PyArray_Cast(%(argmax)s, NPY_INT64);
            if (NULL == tmp){
                %(fail)s;
            }
            Py_DECREF(%(argmax)s);
            %(argmax)s = (PyArrayObject*)tmp;
        }
        """
        return ret % locals()
    def c_code_cache_version(self):
        return (3,)
    def infer_shape(self, node, shapes):
        # Both outputs share the input shape with the reduced axes removed.
        ishape, axis_shape = shapes
        axis = node.inputs[1]
        if axis.data is None:
            return [(), ()]
        rval = tuple([ishape[i] for (i, b) in enumerate(
            node.inputs[0].type.broadcastable) if i not in axis.data])
        return [rval, rval]
    def R_op(self, inputs, eval_points):
        # Only supported for a matrix input with a constant axis of 0 or 1.
        if eval_points[0] is None:
            return [None, None]
        if not isinstance(inputs[1], theano.Constant):
            raise ValueError(('R_op supported for arg_max only for '
                              'constant axis!'))
        if inputs[1].data > 1:
            raise ValueError(('R_op supported for arg_max only when '
                              ' axis is 0 or 1'))
        if inputs[0].ndim != 2:
            raise ValueError(('R_op supported for arg_max only when '
                              ' input is a matrix'))
        max_vals, max_pos = self.make_node(*inputs).outputs
        if inputs[1].data == 0:
            return [eval_points[0][max_pos,
                                   arange(eval_points[0].shape[1])], None]
        else:
            return [eval_points[0][arange(eval_points[0].shape[0]),
                                   max_pos], None]
    def grad(self, inp, grads):
        # The strict sense mathematical gradient of the maximum function is
        # not calculated here for it is not defined at every point where some
        # coordinates are identical. However, since the latter set has null
        # Lebesgue measure, the result may be interpreted as weak gradient.
        # @note: This function should work correctly for L{vector}s.
        # (x, y), (gz, gw)
        # gz*dz/dx + gw*dw/dx, gz*dz/dy + gw*dw/dy
        # gMax * dMax/dx + gArgMax * dArgMax/dx,
        # gMax * dMax/daxis + gArgMax * dArgMax/daxis
        # g_max has one less dimension than x, so you need to complete
        # g_max to x's shape when axis=0 the broadcasting mechanism
        # does it automatically
        x, axis = inp
        g_max, g_max_idx = grads
        g_max_disconnected = isinstance(g_max.type, DisconnectedType)
        g_max_idx_disconnected = isinstance(g_max_idx.type, DisconnectedType)
        # if the op is totally disconnected, so are its inputs
        if g_max_disconnected and g_max_idx_disconnected:
            return [DisconnectedType()(), DisconnectedType()()]
        axis_grad = grad_undefined(
            self, 1, axis,
            "argmax is not defined for non-integer axes so"
            " argmax(x, axis+eps) is undefined")
        # if the max is disconnected but the argmax is not,
        # the gradient on its inputs is zero
        if g_max_disconnected:
            return [x.zeros_like(), axis_grad]
        if NoneConst.equals(axis):
            axis_ = list(range(x.ndim))
        else:
            axis_ = axis
        xmax = max(x, axis_)
        # Raise the g_max and xmax to the same number of dim as the input.
        pattern = []
        out_dim = 0
        if NoneConst.equals(axis):
            # We are taking the max/argmax over all dimensions.
            axis = None
        for i in xrange(x.ndim):
            if axis is None or i in axis.data:
                pattern.append('x')
            else:
                pattern.append(out_dim)
                out_dim += 1
        g_max_pad = DimShuffle(g_max.broadcastable, pattern)(g_max)
        xmax_pad = DimShuffle(xmax.broadcastable, pattern)(xmax)
        # Set the grad to the correct position.
        g_x = eq(xmax_pad, x) * g_max_pad
        return g_x, axis_grad
# Singleton instance used by max_and_argmax() below.
_max_and_argmax = MaxAndArgmax()
def makeKeepDims(x, y, axis):
    """
    Reintroduces in y with length one the axes of x which have been left out
    in a prior reduction of x. With this option, the resulting tensor will
    broadcast correctly against the original tensor x.

    Parameters
    ----------
    x : tensor
        The tensor that was reduced.
    y : tensor
        The result of the reduction.
    axis : None, int, numpy integer, 0-d numpy array, or sequence of ints
        The axis/axes over which x was reduced; None means all axes.

    Returns
    -------
    tensor
        y with broadcastable length-1 dimensions restored at `axis`.

    Raises
    ------
    ValueError
        If an axis entry is not a constant integer.
    """
    x = as_tensor_variable(x)
    y = as_tensor_variable(y)
    if axis is None:
        axis = list(range(x.type.ndim))
    elif isinstance(axis, (int, numpy.integer)):
        # Coerce to a builtin int: a bare numpy.integer would fail the
        # isinstance(a, int) constant-axis check below and raise spuriously.
        axis = [int(axis)]
    elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
        axis = [int(axis)]
    else:
        axis = [int(a) for a in axis]
    newaxis = []
    for a in axis:
        if not isinstance(a, int):
            raise ValueError(
                "keepdims option can be used only with constant axis")
        if a < 0:
            # Negative axes count from the end, as in numpy.
            a += x.type.ndim
        newaxis.append(a)
    i = 0
    new_dims = []
    # Build the DimShuffle pattern: 'x' inserts a broadcastable dim at each
    # reduced position; surviving dims keep their relative order.
    for j, _ in enumerate(x.type.broadcastable):
        if j in newaxis:
            new_dims.append('x')
        else:
            new_dims.append(i)
            i += 1
    return DimShuffle(y.type.broadcastable, new_dims)(y)
@constructor
def max_and_argmax(a, axis=None, keepdims=False):
    """
    Returns maximum elements and their indices obtained by iterating over
    given axis.

    When axis is None (the default value), the max is performed
    over the flattened tensor.

    Parameters
    ----------
    keepdims : bool
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.
    """
    max_out, arg_out = _max_and_argmax(a, axis)
    if keepdims:
        max_out = makeKeepDims(a, max_out, axis)
        arg_out = makeKeepDims(a, arg_out, axis)
    return [max_out, arg_out]
@constructor
def max(x, axis=None, keepdims=False):
    """
    Returns maximum elements obtained by iterating over given axis.

    When axis is None (the default value), the max is performed
    over the flattened tensor.

    Parameters
    ----------
    keepdims: bool
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.

    Notes
    -----
    We return an error as numpy when we reduce a dim with a shape of 0.
    """
    # We have a choice of implementing this call with the
    # CAReduce op or the MaxAndArgmax op.
    # MaxAndArgmax supports grad and Rop, so we prefer to use that.
    # CAReduce is faster, but optimizations will replace MaxAndArgmax[0]
    # with CAReduce at compile time, so at this stage the important
    # thing is supporting all user interface features, not speed.
    # Some cases can be implemented only with CAReduce.
    # We thus prefer to use MaxAndArgmax, if possible. It does not
    # support all axis arguments, so we may need to fall back to CAReduce.
    try:
        result = max_and_argmax(x, axis)[0]
    except Exception:
        result = CAReduce(scal.maximum, axis)(x)
    if keepdims:
        result = makeKeepDims(x, result, axis)
    return result
@constructor
def argmax(x, axis=None, keepdims=False):
    """
    Returns indices of maximum elements obtained by iterating over given axis.

    When axis is None (the default value), the argmax is performed
    over the flattened tensor.

    Parameters
    ----------
    keepdims : bool
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.
    """
    # In python (using MaxAndArgmax.perform()) this leads to a wasteful
    # implementation that goes through the data twice instead of once
    # but when Argmax.c_impl() is in place, it should be fine.
    _, indices = max_and_argmax(x, axis)
    if keepdims:
        indices = makeKeepDims(x, indices, axis)
    return indices
@constructor
def min(x, axis=None, keepdims=False):
    """
    Returns minimum elements obtained by iterating over given axis.

    When axis is None (the default value), the min is performed
    over the flattened tensor.

    Parameters
    ----------
    keepdims: bool
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.
    """
    x = as_tensor_variable(x)
    dtype_name = str(x.dtype)
    if not (dtype_name.startswith('float') or dtype_name in int_dtypes):
        # Be careful about unsigned integers, complex
        raise NotImplementedError()
    # min(x) == -max(-x) for signed ints and floats.
    return -max(-x, axis=axis, keepdims=keepdims)
@constructor
def argmin(x, axis=None, keepdims=False):
    """
    Returns indices of minimum elements obtained by iterating over given axis.

    When axis is None (the default value), the argmin is performed
    over the flattened tensor.

    Parameters
    ----------
    keepdims: bool
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.
    """
    x = as_tensor_variable(x)
    dtype_name = str(x.dtype)
    if not (dtype_name.startswith('float') or dtype_name in int_dtypes):
        # Be careful about unsigned integers, complex
        raise NotImplementedError()
    # argmin(x) == argmax(-x) for signed ints and floats.
    return argmax(-x, axis=axis, keepdims=keepdims)
@constructor
def smallest(*args):
    """
    Return the [elementwise] smallest of a variable number of arguments.

    Like python's min.
    """
    if len(args) != 2:
        return min(stack(args), axis=0)
    a, b = args
    return switch(a < b, a, b)
@constructor
def largest(*args):
    """
    Return the [elementwise] largest of a variable number of arguments.

    Like python's max.
    """
    if len(args) != 2:
        return max(stack(args), axis=0)
    a, b = args
    return switch(a > b, a, b)
##########################
# Comparison
##########################
# Elementwise comparison Ops. Each stub below is replaced by the
# corresponding Elemwise Op via the _scal_elemwise decorator; only the
# stub's name and docstring survive.
@_scal_elemwise
def lt(a, b):
    """a < b"""
@_scal_elemwise
def gt(a, b):
    """a > b"""
@_scal_elemwise
def le(a, b):
    """a <= b"""
@_scal_elemwise
def ge(a, b):
    """a >= b"""
@_scal_elemwise
def eq(a, b):
    """a == b"""
@_scal_elemwise
def neq(a, b):
    """a != b"""
@_scal_elemwise
def isnan(a):
    """isnan(a)"""
@_scal_elemwise
def isinf(a):
    """isinf(a)"""
def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
    """
    Implement Numpy's ``allclose`` on tensors.

    ``absolute(a - b) <= (atol + rtol * absolute(b))``

    Parameters
    ----------
    a : tensor
        Input to compare.
    b : tensor
        Input to compare.
    rtol : float
        The relative tolerance parameter.
    atol : float
        The absolute tolerance parameter.
    equal_nan: bool
        Whether to consider nan's in the same place to be close.

    Returns
    -------
    bool
        A boolean value (of type int8 returned by the tensor elementwise `all`
        function) whether all elements in a and b are in the tolerance range
        defined above.

    Notes
    -----
    Not a symmetric equation. See Numpy's documentation.
    """
    elementwise_close = isclose(a, b, rtol, atol, equal_nan)
    return all(elementwise_close)
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
    """
    Implements Numpy's ``isclose`` on tensors.

    The tolerance values are positive, typically very small numbers. The
    relative difference (`rtol` * abs(`b`)) and the absolute difference
    `atol` are added together to compare against the absolute difference
    between `a` and `b`.

    ``absolute(a - b) <= (atol + rtol * absolute(b))``

    Parameters
    ----------
    a : tensor
        Input to compare.
    b : tensor
        Input to compare.
    rtol : float
        The relative tolerance parameter.
    atol : float
        The absolute tolerance parameter.
    equal_nan : bool
        Whether to consider nan's in the same place to be close

    Returns
    -------
    int8
        A boolean (int8) array where two arrays are element-wise equal
        within a tolerance.

    Notes
    -----
    Not a symmetric equation. See Numpy's documentation.

    Examples
    --------
    >>> import theano
    >>> import numpy as np
    >>> a = theano._asarray([1e10, 1e-7], dtype="float64")
    >>> b = theano._asarray([1.00001e10, 1e-8], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 0], dtype=int8)
    >>> a = theano._asarray([1e10, 1e-8], dtype="float64")
    >>> b = theano._asarray([1.00001e10, 1e-9], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 1], dtype=int8)
    >>> a = theano._asarray([1e10, 1e-8], dtype="float64")
    >>> b = theano._asarray([1.0001e10, 1e-9], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([0, 1], dtype=int8)
    >>> a = theano._asarray([1.0, np.nan], dtype="float64")
    >>> b = theano._asarray([1.0, np.nan], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 0], dtype=int8)
    >>> a = theano._asarray([1.0, np.nan], dtype="float64")
    >>> b = theano._asarray([1.0, np.nan], dtype="float64")
    >>> theano.tensor.isclose(a, b, equal_nan=True).eval()
    array([1, 1], dtype=int8)
    >>> a = theano._asarray([1.0, np.inf], dtype="float64")
    >>> b = theano._asarray([1.0, -np.inf], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 0], dtype=int8)
    >>> a = theano._asarray([1.0, np.inf], dtype="float64")
    >>> b = theano._asarray([1.0, np.inf], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 1], dtype=int8)

    """
    # close will be an int8 array of 1 where within tolerance
    # and 0 where not within tolerance or there was a nan or inf value.
    diff = abs(a - b)
    tolerance = atol + rtol * abs(b)
    close_prelim = le(diff, tolerance)
    a_nan = isnan(a)
    b_nan = isnan(b)
    nans = bitwise_or(a_nan, b_nan)
    a_inf = isinf(a)
    b_inf = isinf(b)
    infs = bitwise_or(a_inf, b_inf)
    nans_or_infs = bitwise_or(nans, infs)
    # close is now an array of 0's except where elements are not nan or inf
    # and are within the tolerance.
    close = bitwise_and(close_prelim, bitwise_not(nans_or_infs))
    # deal with signed inf values. this will make an array inf_eq of 0's
    # except where inf values have the same sign.
    both_infs = bitwise_and(a_inf, b_inf)
    inf_signs_eq = eq(a_inf * sgn(a), b_inf * sgn(b))
    inf_eq = bitwise_and(both_infs, inf_signs_eq)
    # now create the potential result combining close and inf_eq
    close_with_infs = bitwise_or(close, inf_eq)
    # deal with comparing nan's.
    if equal_nan:
        both_nans = bitwise_and(a_nan, b_nan)
        return bitwise_or(close_with_infs, both_nans)
    # otherwise nan's aren't considered close.
    else:
        return close_with_infs
##########################
# Condition
##########################
@_scal_elemwise
def switch(cond, ift, iff):
"""if cond then ift else iff"""
where = switch
##########################
# Bit-wise
##########################
@_scal_elemwise
def and_(a, b):
"""bitwise a & b"""
bitwise_and = and_ # numpy name for it
@_scal_elemwise
def or_(a, b):
"""bitwise a | b"""
bitwise_or = or_ # numpy name for it
@_scal_elemwise
def xor(a, b):
"""bitwise a ^ b"""
bitwise_xor = xor # numpy name for it
@_scal_elemwise
def invert(a):
"""bitwise ~a"""
bitwise_not = invert # numpy alias for it
##########################
# Math
##########################
@_scal_elemwise
def abs_(a):
"""|`a`|
TensorVariable overloads the `TensorVariable.__abs__` operator so that
this function is called when you type abs(a).
"""
pprint.assign(abs_, printing.PatternPrinter(('|%(0)s|', -1000)))
@_scal_elemwise
def exp(a):
"""e^`a`"""
@_scal_elemwise
def exp2(a):
"""2^`a`"""
@_scal_elemwise
def expm1(a):
"""e^`a` - 1"""
@_scal_elemwise
def neg(a):
"""-a"""
# numpy.reciprocal does integer division on integer inputs
# (which is not very interesting)
@_scal_elemwise
def inv(a):
"""1.0/a"""
@_scal_elemwise
def log(a):
"""base e logarithm of a"""
@_scal_elemwise
def log2(a):
"""base 2 logarithm of a"""
@_scal_elemwise
def log10(a):
"""base 10 logarithm of a"""
@_scal_elemwise
def log1p(a):
"""log(1+a)"""
@_scal_elemwise
def sgn(a):
"""sign of a"""
@_scal_elemwise
def ceil(a):
"""ceiling of a"""
@_scal_elemwise
def floor(a):
"""floor of a"""
@_scal_elemwise
def trunc(a):
"""trunc of a"""
@constructor
def iround(a, mode="half_away_from_zero"):
    """cast(round(a,mode),'int64')"""
    # Round with the requested mode, then cast the (float) result to int64.
    return cast(round(a, mode), 'int64')
@constructor
def round(a, mode="half_away_from_zero"):
    """round_mode(a) with mode in [half_away_from_zero, half_to_even].

    Parameters
    ----------
    a : tensor
        Input to round elementwise.
    mode : str
        Either "half_away_from_zero" (default) or "half_to_even".

    Raises
    ------
    ValueError
        If `mode` is not one of the two supported rounding modes.
    """
    if mode == "half_away_from_zero":
        return round_half_away_from_zero(a)
    elif mode == "half_to_even":
        return round_half_to_even(a)
    else:
        # ValueError is the conventional type for a bad argument value; it
        # is a subclass of Exception, so existing callers that caught the
        # previous bare Exception still work.
        raise ValueError("round mode %s is not implemented." % mode)
@_scal_elemwise
def round_half_to_even(a):
"""round_half_to_even(a)"""
@_scal_elemwise
def round_half_away_from_zero(a):
"""round_half_away_from_zero(a)"""
@_scal_elemwise
def sqr(a):
"""square of a"""
# alias to sqr, included to maintain similarity with numpy interface
square = sqr
@_scal_elemwise
def sqrt(a):
"""square root of a"""
@_scal_elemwise
def deg2rad(a):
"""convert degree a to radian"""
@_scal_elemwise
def rad2deg(a):
"""convert radian a to degree"""
@_scal_elemwise
def cos(a):
"""cosine of a"""
@_scal_elemwise
def arccos(a):
"""arccosine of a"""
@_scal_elemwise
def sin(a):
"""sine of a"""
@_scal_elemwise
def arcsin(a):
"""arcsine of a"""
@_scal_elemwise
def tan(a):
"""tangent of a"""
@_scal_elemwise
def arctan(a):
"""arctangent of a"""
@_scal_elemwise
def arctan2(a, b):
"""arctangent of a / b"""
@_scal_elemwise
def cosh(a):
"""hyperbolic cosine of a"""
@_scal_elemwise
def arccosh(a):
"""hyperbolic arc cosine of a"""
@_scal_elemwise
def sinh(a):
"""hyperbolic sine of a"""
@_scal_elemwise
def arcsinh(a):
"""hyperbolic arc sine of a"""
@_scal_elemwise
def tanh(a):
"""hyperbolic tangent of a"""
@_scal_elemwise
def arctanh(a):
"""hyperbolic arc tangent of a"""
@_scal_elemwise
def erf(a):
"""error function"""
@_scal_elemwise
def erfc(a):
"""complementary error function"""
@_scal_elemwise
def erfcx(a):
"""scaled complementary error function"""
@_scal_elemwise
def erfinv(a):
"""inverse error function"""
@_scal_elemwise
def erfcinv(a):
"""inverse complementary error function"""
@_scal_elemwise
def gamma(a):
"""gamma function"""
@_scal_elemwise
def gammaln(a):
"""log gamma function"""
@_scal_elemwise
def psi(a):
"""derivative of log gamma function"""
@_scal_elemwise
def chi2sf(x, k):
"""chi squared survival function"""
@_scal_elemwise
def real(z):
"""Return real component of complex-valued tensor `z`"""
_tensor_py_operators.real = property(real)
@_scal_elemwise
def imag(z):
"""Return imaginary component of complex-valued tensor `z`"""
_tensor_py_operators.imag = property(imag)
@_scal_elemwise
def angle(z):
"""Return polar-coordinate angle of complex-valued tensor `z`"""
@_scal_elemwise # numpy.complex cannot build tensors
def complex(real, imag):
"""Return complex-valued tensor with `real` and `imag` components"""
@_scal_elemwise
def conj(z):
"""Return the complex conjugate of `z`."""
@_scal_elemwise
def complex_from_polar(abs, angle):
"""Return complex-valued tensor from polar coordinate specification."""
##########################
# Misc
##########################
# fill, _fill_inplace = _elemwise(scal.second, 'fill',
# """fill WRITEME (elemwise)""")
@_scal_elemwise
def second(a, b):
"""Create a matrix by filling the shape of a with b"""
fill = second
pprint.assign(fill, printing.FunctionPrinter('fill'))
@constructor
def ones_like(model, dtype=None):
    """Symbolic equivalent of numpy.ones_like.

    Returns a tensor with the shape of `model`, filled with ones; the dtype
    defaults to `model`'s dtype when not given.
    """
    fill_dtype = model.type.dtype if dtype is None else dtype
    return fill(model, constant(1.0, dtype=fill_dtype))
@constructor
def zeros_like(model, dtype=None):
    """Symbolic equivalent of numpy.zeros_like.

    Returns a tensor with the shape of `model`, filled with zeros; the dtype
    defaults to `model`'s dtype when not given.
    """
    fill_dtype = model.type.dtype if dtype is None else dtype
    return fill(model, constant(0.0, dtype=fill_dtype))
def zeros(shape, dtype=None):
    """
    Create a Tensor filled with zeros, closer to Numpy's syntax than ``alloc``.
    """
    if dtype is None:
        dtype = config.floatX
    # Accept a bare scalar shape by wrapping it into a one-element list.
    if not isinstance(shape, (list, tuple, TensorVariable)):
        shape = [shape]
    return alloc(numpy.array(0, dtype=dtype), *shape)
def ones(shape, dtype=None):
    """
    Create a Tensor filled with ones, closer to Numpy's syntax than ``alloc``.
    """
    if dtype is None:
        dtype = config.floatX
    # Accept a bare scalar shape by wrapping it into a one-element list.
    if not isinstance(shape, (list, tuple, TensorVariable)):
        shape = [shape]
    return alloc(numpy.array(1, dtype=dtype), *shape)
class Nonzero(gof.Op):
    """
    Return the indices of the elements that are non-zero.

    Returns a matrix of shape (ndim, number of nonzero elements) such that
    element (i,j) is the index in the ith dimension of the jth non-zero
    element.

    Note this is different than NumPy, which returns a tuple of arrays, one for
    each dimension of the input array.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    matrix
        Matrix containing the indices of the non-zero elements of a.

    See Also
    --------
    nonzero_values : Return the non-zero elements of the input array
    flatnonzero : Return the indices of the non-zero elements of the
        flattened input array.
    """

    __props__ = ()

    def make_node(self, a):
        a = as_tensor_variable(a)
        # Scalars have no indices to return.
        if a.ndim == 0:
            raise ValueError('Nonzero only supports non-scalar arrays.')
        # Output is always an int64 matrix: (ndim, n_nonzero).
        output = [TensorType(dtype='int64', broadcastable=(False, False))()]
        return gof.Apply(self, [a], output)

    def perform(self, node, inp, out_):
        a = inp[0]
        out, = out_
        # numpy.nonzero returns one index vector per dimension.
        result_tuple = numpy.nonzero(a)
        if len(result_tuple[0]) > 0:
            # Stack the per-dimension index vectors into the output matrix.
            result = numpy.vstack(result_tuple)
        else:
            # No nonzero elements: empty (ndim, 0) matrix.
            result = numpy.zeros((len(result_tuple), 0))
        out[0] = result.astype('int64')

    def grad(self, inp, grads):
        # Indices are integer-valued; the gradient is undefined.
        return [grad_undefined(self, 0, inp[0])]
_nonzero = Nonzero()
def nonzero(a, return_matrix=False):
    """
    Return the indices of the non-zero elements of `a`.

    If return_matrix is False (default, same as NumPy):
        A tuple of vector arrays such that the ith element of the jth array
        is the index of the ith non-zero element of the input array in the
        jth dimension.

    If return_matrix is True (same as Theano Op):
        Returns a matrix of shape (ndim, number of nonzero elements) such
        that element (i,j) is the index in the ith dimension of the jth
        non-zero element.

    Parameters
    ----------
    a : array_like
        Input array.
    return_matrix : bool
        If True, returns a symbolic matrix. If False, returns a tuple of
        arrays. Defaults to False.

    Returns
    -------
    tuple of vectors or matrix

    See Also
    --------
    nonzero_values : Return the non-zero elements of the input array
    flatnonzero : Return the indices of the non-zero elements of the
        flattened input array.
    """
    matrix_result = _nonzero(a)
    if return_matrix:
        return matrix_result
    # NumPy-style result: one index vector per dimension of `a`.
    if a.ndim > 0:
        return tuple(matrix_result[i] for i in xrange(a.ndim))
    return (matrix_result[0],)
def flatnonzero(a):
    """
    Return a vector of indices that are non-zero in the flattened version of a.

    This is equivalent to nonzero(a.flatten(), return_matrix=True)[0]

    Parameters
    ----------
    a : tensor
        Input tensor

    Returns
    -------
    vector
        Output vector, containing the indices of the elements of `a.flatten()`
        that are non-zero.

    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input array.
    nonzero_values : Return the non-zero elements of the input array
    """
    if a.ndim != 0:
        return nonzero(a.flatten(), return_matrix=True)[0]
    raise ValueError('Nonzero only supports non-scalar arrays.')
def nonzero_values(a):
    """
    Return a vector of non-zero elements contained in the input array.

    The following behavior works to extract non-zero elements from an array
    in NumPy but is *NOT* supported by Theano:

        a[numpy.nonzero(a)]

    Instead, the nonzero_values function or method should be used:

        tensor.nonzero_values(a)
        a.nonzero_values()

    This is equivalent to:

        a.flatten()[tensor.flatnonzero(a)]

    Parameters
    ----------
    a : tensor
        Input tensor

    Returns
    -------
    vector
        Output vector, containing the non-zero elements of a.

    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input array.
    flatnonzero : Return the indices of the non-zero elements of the
        flattened input array.
    """
    flat = a.flatten()
    return flat[flatnonzero(a)]
class Tri(gof.Op):
    """Op wrapping numpy.tri: matrix of ones at and below the k-th diagonal."""

    __props__ = ("dtype",)

    def __init__(self, dtype=None):
        if dtype is None:
            dtype = config.floatX
        self.dtype = dtype

    def make_node(self, N, M, k):
        # All three arguments become symbolic inputs of the node.
        N = as_tensor_variable(N)
        M = as_tensor_variable(M)
        k = as_tensor_variable(k)
        return gof.Apply(
            self,
            [N, M, k],
            [TensorType(dtype=self.dtype, broadcastable=(False, False))()])

    def perform(self, node, inp, out_):
        N, M, k = inp
        out, = out_
        # Delegate to numpy.tri for the actual computation.
        out[0] = numpy.tri(N, M, k, dtype=self.dtype)

    def infer_shape(self, node, in_shapes):
        # Output shape is (N, M); k does not affect the shape.
        out_shape = [node.inputs[0], node.inputs[1]]
        return [out_shape]

    def grad(self, inp, grads):
        # Integer-valued inputs select a pattern; gradient is undefined.
        return [grad_undefined(self, i, inp[i]) for i in xrange(3)]
def tri(N, M=None, k=0, dtype=None):
    """
    An array with ones at and below the given diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the array.
    M : int, optional
        Number of columns in the array.
        By default, `M` is taken equal to `N`.
    k : int, optional
        The sub-diagonal at and below which the array is filled.
        `k` = 0 is the main diagonal, while `k` < 0 is below it,
        and `k` > 0 is above. The default is 0.
    dtype : dtype, optional
        Data type of the returned array. The default is float.

    Returns
    -------
    Array of shape (N, M)
        Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
    """
    if M is None:
        M = N
    if dtype is None:
        dtype = config.floatX
    return Tri(dtype)(N, M, k)
def tril(m, k=0):
    """
    Lower triangle of an array.

    Return a copy of an array with elements above the `k`-th diagonal zeroed.

    Parameters
    ----------
    m : array_like, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements. `k = 0` (the default) is the
        main diagonal, `k < 0` is below it and `k > 0` is above.

    Returns
    -------
    array, shape (M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.

    See Also
    --------
    triu : Same thing, only for the upper triangle.
    """
    # Multiply by a lower-triangular mask of ones.
    mask = tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype)
    return m * mask
def triu(m, k=0):
    """
    Upper triangle of an array.

    Return a copy of a matrix with the elements below the `k`-th diagonal
    zeroed.

    Please refer to the documentation for `tril` for further details.

    See Also
    --------
    tril : Lower triangle of an array.
    """
    # Complement of the strictly-lower-triangular mask keeps the upper part.
    lower_mask = tri(m.shape[0], m.shape[1], k=k - 1, dtype=m.dtype)
    return m * (1 - lower_mask)
class Eye(gof.Op):
    """Op wrapping numpy.eye: matrix with ones on the k-th diagonal."""

    __props__ = ("dtype", )

    def __init__(self, dtype=None):
        if dtype is None:
            dtype = config.floatX
        self.dtype = dtype

    def make_node(self, n, m, k):
        n = as_tensor_variable(n)
        m = as_tensor_variable(m)
        k = as_tensor_variable(k)
        # All inputs must be scalar variables.
        assert n.ndim == 0
        assert m.ndim == 0
        assert k.ndim == 0
        return gof.Apply(
            self,
            [n, m, k],
            [TensorType(dtype=self.dtype, broadcastable=(False, False))()])

    def perform(self, node, inp, out_):
        n, m, k = inp
        out, = out_
        # Delegate to numpy.eye for the actual computation.
        out[0] = numpy.eye(n, m, k, dtype=self.dtype)

    def infer_shape(self, node, in_shapes):
        # Output shape is (n, m); k does not affect the shape.
        out_shape = [node.inputs[0], node.inputs[1]]
        return [out_shape]

    def grad(self, inp, grads):
        # Integer-valued inputs select a pattern; gradient is undefined.
        return [grad_undefined(self, i, inp[i]) for i in xrange(3)]
def eye(n, m=None, k=0, dtype=None):
    """Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    n : int
        Number of rows in the output.
    m : int, optional
        Number of columns in the output. If None, defaults to `n`.
    k : int, optional
        Index of the diagonal: 0 (the default) refers to the main diagonal,
        a positive value refers to an upper diagonal, and a negative value
        to a lower diagonal.
    dtype : data-type, optional
        Data-type of the returned array.

    Returns
    -------
    ndarray of shape (n, m)
        An array where all elements are equal to zero, except for the `k`-th
        diagonal, whose values are equal to one.
    """
    if m is None:
        m = n
    if dtype is None:
        dtype = config.floatX
    return Eye(dtype)(n, m, k)
def identity_like(x):
    """Return an identity-patterned matrix with the shape and dtype of `x`."""
    n_rows, n_cols = x.shape[0], x.shape[1]
    return eye(n_rows, n_cols, k=0, dtype=x.dtype)
class Alloc(gof.Op):
    """Create a Tensor from an initial value and a desired shape.

    alloc(value, shape0, shape1, ..., shapeN)

    Returns an N-dimensional tensor initialized by `value` using something
    equivalent to

        z = numpy.zeros(shape, value.dtype)
        z += value

    The result has N dimensions, has the dtype of `value` and is obtained by
    broadcasting value over the output ndarray.

    This Op is used to replace fill() during optimizations because after shapes
    are lifted, the first argument to fill can often be pruned from the graph.
    """

    __props__ = ()

    def validate_shape(self, shape):
        # Convert each shape entry to a tensor variable and infer, per
        # dimension, whether the output is broadcastable (constant length 1).
        sh = [as_tensor_variable(s) for s in shape]
        bcast = []
        for i, s in enumerate(sh):
            if config.exception_verbosity == 'high':
                s_as_str = '\n' + min_informative_str(s)
            else:
                s_as_str = str(s)
            # Shape entries must be integer scalars ('int*' or 'uint*').
            if s.type.dtype[:3] not in ('int', 'uin'):
                raise TypeError('Shape arguments to Alloc must be integers, '
                                'but argument %s is not for apply node: %s' %
                                (i, s_as_str))
            if s.ndim != 0:
                # NOTE(review): the trailing comma after the first string
                # makes this a two-argument TypeError instead of one
                # concatenated message — looks unintended; confirm.
                raise TypeError(
                    "Each shape dimension to Alloc must be a scalar, ",
                    'but dimension %s have %d dimensions for apply node: %s' %
                    (i, s.ndim, s_as_str))
            # if s is constant 1, then we're broadcastable in that dim
            try:
                const_shp = get_scalar_constant_value(s)
            except NotScalarConstantError:
                const_shp = None
            bcast.append(1 == const_shp)
        return sh, bcast

    def make_node(self, value, *shape):
        v = as_tensor_variable(value)
        sh, bcast = self.validate_shape(shape)
        # The value may have fewer dimensions than the target shape (it is
        # broadcast up), but never more.
        if v.ndim > len(sh):
            raise TypeError("The Alloc value to use has more dimensions"
                            " than the specified dimensions",
                            v.ndim, len(sh))
        otype = TensorType(dtype=v.dtype, broadcastable=bcast)
        return gof.Apply(self, [v] + sh, [otype()])

    def perform(self, node, inputs, out_):
        out, = out_
        v = inputs[0]
        sh = tuple([int(i) for i in inputs[1:]])
        if out[0] is None or out[0].shape != sh:
            if v.size == 1 and v.item() == 0:
                # Fast path: a scalar zero fill is just numpy.zeros.
                out[0] = numpy.zeros(sh, dtype=v.dtype)
            else:
                out[0] = numpy.empty(sh, dtype=v.dtype)
                out[0][...] = v  # broadcast v to fill us up
        else:
            # reuse the allocated memory.
            out[0][...] = v  # broadcast v to fill us up

    def c_code(self, node, name, inp, out, sub):
        vv = inp[0]
        ndim = len(inp[1:])
        zz, = out
        fail = sub['fail']
        code = """
            npy_intp shape[%(ndim)s];
            """ % dict(ndim=ndim)
        # Initialize shape: each shape input is a 0-d array whose single
        # element is read out.
        for i, shp_i in enumerate(inp[1:]):
            code += """
            shape[%(i)s] = ((dtype_%(shp_i)s*) PyArray_DATA(%(shp_i)s))[0];
            """ % dict(i=i, shp_i=shp_i)
        # Allocate a new output only when none exists or the shape changed;
        # otherwise the existing buffer is reused.
        code += """
            int need_new_out = (NULL == %(zz)s);
            for (int i = 0; i < %(ndim)s; i++)
                need_new_out = (need_new_out
                                || (PyArray_DIMS(%(zz)s)[i] != shape[i]));
            if (need_new_out)
            {
                Py_XDECREF(%(zz)s);
                %(zz)s = (PyArrayObject*) PyArray_SimpleNew(%(ndim)s,
                    shape, PyArray_TYPE((PyArrayObject*) py_%(vv)s));
                if (!%(zz)s)
                {
                    PyErr_SetString(PyExc_MemoryError, "alloc failed");
                    %(fail)s
                }
            }
            // This function takes care of broadcasting
            PyArray_CopyInto(%(zz)s, %(vv)s);
            """ % dict(vv=vv, ndim=ndim, zz=zz, fail=fail)
        return code

    def c_code_cache_version(self):
        return (1,)

    def infer_shape(self, node, input_shapes):
        # The output shape is exactly the shape inputs of the node.
        return [node.inputs[1:]]

    def connection_pattern(self, node):
        # Only the value input (index 0) is connected to the output;
        # the shape inputs are not.
        rval = [[True]]
        for ipt in node.inputs[1:]:
            rval.append([False])
        return rval

    def grad(self, inputs, grads):
        x = inputs[0]
        gz = grads[0]
        n_axes_to_sum = gz.ndim - x.ndim
        # The number of dimensions added
        axis = list(range(n_axes_to_sum))
        # The broadcasted dimensions
        axis_broadcasted = []
        axis_kept = []
        for i, (ib, gb) in enumerate(
                zip(inputs[0].broadcastable,
                    # We need the dimensions corresponding to x
                    grads[0].broadcastable[-inputs[0].ndim:])):
            if ib and not gb:
                axis_broadcasted.append(i + n_axes_to_sum)
            else:
                axis_kept.append(i)
        # Sum the gradient over the added and the broadcasted dimensions.
        gx = gz.sum(axis=axis + axis_broadcasted)
        if axis_broadcasted:
            # NOTE(review): the loop variable below shadows the outer
            # `axis` list — harmless here since `axis` is not used again,
            # but worth renaming at some point.
            new_order = ['x'] * x.ndim
            for idx, axis in enumerate(axis_kept):
                new_order[axis] = idx
            gx = gx.dimshuffle(new_order)
            # Dimshuffle to add back the broadcasted dims
        # The *elements* of the output are not connected to
        # the inputs that specify the shape. If you grow the
        # shape by epsilon, the existing elements do not
        # change.
        return [gx] + [DisconnectedType()() for i in inputs[1:]]

    def __call__(self, val, *shapes, **kwargs):
        """
        If the alloc would be useless, this function returns val.

        If this function is called outside of a graph optimization context
        (for instance, it is manually called by a user building a graph),
        then we always return an Alloc node, to allow for DebugMode to check
        for size mismatches.

        If you always want an Alloc node, call make_node.
        """
        ret = super(Alloc, self).__call__(val, *shapes, **kwargs)
        try:
            # It makes optimization difficult when useless allocs are thrown
            # into the graph at every stage of optimization. This little logic
            # tries to help at least in some cases.
            if hasattr(val, 'fgraph') and (val.type == ret.type):
                return val
        except AttributeError:
            pass
        return ret

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return [None]
        return self(eval_points[0], *inputs[1:], **dict(return_list=True))

    def do_constant_folding(self, node):
        if not getattr(node.outputs[0], 'clients', []):
            # If there are no clients then there is no point doing constant
            # folding.
            return False
        for client in node.outputs[0].clients:
            if client[0] == 'output':
                # If the output is a constant, it will have to be deepcopied
                # each time the function is called. So we do not fold.
                return False
            elif (
                # The following ops work inplace of their input id 0.
                client[1] == 0 and
                isinstance(client[0].op, (
                    # Ops that will work inplace on the Alloc. So if they
                    # get constant_folded, they would copy the
                    # constant and this is less efficients.
                    # Not doing the constant folding could also lower
                    # the peak memory usage, as we the "constant" won't
                    # always exists.
                    theano.tensor.subtensor.IncSubtensor,
                    theano.tensor.subtensor.AdvancedIncSubtensor1,
                    theano.tensor.subtensor.AdvancedIncSubtensor,
                    theano.tensor.blas.Gemv,
                    theano.tensor.blas_c.CGemv,
                    theano.tensor.blas.Ger,
                    theano.tensor.blas_c.CGer,
                    theano.tensor.blas_scipy.ScipyGer))):
                return False
            # If the clients is a transfer to the GPU, we don't want to
            # fold. We let the Alloc being moved to the GPU, then we
            # let the GPU algo decide if it need to fold it or not.
            elif client[0].op.__class__.__name__.lower().startswith("gpu"):
                return False
        return True
alloc = Alloc()
pprint.assign(alloc, printing.FunctionPrinter('alloc'))
def transfer(var, target):
    """
    Return a version of `var` transferred to `target`.

    `cpu` mean a TensorType (on the CPU). Other types may define
    additional targets.

    Parameters
    ----------
    var : variable
        A theano variable
    target : str
        The target of the transfer
    """
    if target == 'cpu':
        return as_tensor_variable(var)
    # Ask each registered handler in turn; the first non-None wins.
    for handler in transfer._others:
        transferred = handler(var, target)
        if transferred is not None:
            return transferred
    raise ValueError("Can't transfer to target %s" % (target,))
transfer._others = []
def register_transfer(fn):
    """
    Register a transfer function for alternative targets.

    The callable is consulted by `transfer` for any target other than 'cpu';
    it should return a transferred variable, or None to decline.

    Parameters
    ----------
    fn : callable
    """
    transfer._others.append(fn)
"""Create a duplicate of `a` (with duplicated storage)"""
tensor_copy = elemwise.Elemwise(scal.identity)
pprint.assign(tensor_copy, printing.IgnorePrinter())
@constructor
def sum(input, axis=None, dtype=None, keepdims=False, acc_dtype=None):
    """
    Computes the sum along the given axis(es) of a tensor `input`.

    When axis is None (the default value), the sum is performed
    over the flattened tensor.

    For full documentation see ``tensor.elemwise.Sum``.
    In particular please pay attention to the important warning when using
    a custom acc_dtype.

    Parameters
    ----------
    keepdims: bool
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.
    """
    # NOTE: intentionally shadows the `sum` builtin to mirror numpy's API.
    out = elemwise.Sum(axis=axis, dtype=dtype, acc_dtype=acc_dtype)(input)
    if keepdims:
        # Re-insert the reduced axes as size-1 dimensions.
        out = makeKeepDims(input, out, axis)
    return out
pprint.assign(Sum(), printing.FunctionPrinter('sum'))
@constructor
def prod(input, axis=None, dtype=None, keepdims=False, acc_dtype=None,
         no_zeros_in_input=False):
    """
    Computes the product along the given axis(es) of a tensor `input`.

    When axis is None (the default value), the product is performed
    over the flattened tensor.

    For full documentation see ``tensor.elemwise.Prod``.

    Parameters
    ----------
    keepdims: bool
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.
    """
    out = elemwise.Prod(axis, dtype=dtype, acc_dtype=acc_dtype,
                        no_zeros_in_input=no_zeros_in_input)(input)
    if keepdims:
        # Re-insert the reduced axes as size-1 dimensions.
        out = makeKeepDims(input, out, axis)
    return out
class Mean(elemwise.CAReduce):
    """CAReduce specialization computing the mean over all axes or one axis.

    Accumulates with scal.add; the output dtype is always float64 to
    protect against overflow.
    """

    def __init__(self, axis=None):
        elemwise.CAReduce.__init__(self, scal.add, axis)
        # Only full reduction (axis=None) or a single axis is supported.
        assert self.axis is None or len(self.axis) == 1

    def __str__(self):
        if self.axis is not None:
            return "Mean{%s}" % (", ".join(str(x) for x in self.axis))
        else:
            return "Mean"

    def _output_dtype(self, idtype):
        # we want to protect against overflow
        return 'float64'

    def perform(self, node, inp, out):
        input, = inp
        output, = out
        if self.axis is None:
            axis = None
        else:
            axis = self.axis[0]
        # numpy.asarray is needed as otherwise we can end up with a
        # numpy scalar.
        output[0] = numpy.asarray(numpy.mean(input, dtype='float64',
                                             axis=axis))

    def c_code(self, node, name, inames, onames, sub):
        if self.axis is not None:
            # NOTE(review): super(Op, self) deliberately skips past Op in the
            # MRO — presumably to reach the default (unimplemented) c_code
            # for the axis-specific case; confirm intent.
            return super(Op, self).c_code(node, name, inames, onames, sub)
        ret = elemwise.CAReduce.c_code(self, node, name, inames, onames, sub)
        # TODO: c_code perform support only axis is None
        # Divide the accumulated sum (in place) by the element count.
        return ret + """
        *((double *)PyArray_DATA(%s)) /= PyArray_SIZE(%s);
        """ % (onames[0], inames[0])

    # TODO: implement the grad. When done and tested, you can make this the
    # default version.
    # def grad(self, (x,), (gout,)):
    #   import pdb;pdb.set_trace()
    #   return grad(mean(x, self.axis, op=False),[x])
@constructor
def mean(input, axis=None, dtype=None, op=False, keepdims=False,
         acc_dtype=None):
    """
    Computes the mean value along the given axis(es) of a tensor `input`.

    Parameters
    ----------
    axis : None or int or (list of int) (see `Sum`)
        Compute the mean along this axis of the tensor.
        None means all axes (like numpy).
    dtype: None or string
        Dtype to cast the result of the inner summation into.
        For instance, by default, a sum of a float32 tensor will be
        done in float64 (acc_dtype would be float64 by default),
        but that result will be casted back in float32.
    op : bool
        If True, use the Mean Op (float64 only) instead of sum/div.
    keepdims: bool
        If this is set to True, the axes which are reduced are
        left in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original tensor.
    acc_dtype: None or string
        Dtype to use for the inner summation. This will not
        necessarily be the dtype of the output (in particular
        if it is a discrete (int/uint) dtype, the output will
        be in a float type). If None, then we use the same rules as `sum()`.

    Notes
    -----
    For gpu, if you specify dtype=float32, everything will be done on the gpu.
    """
    input = as_tensor_variable(input)
    if op:
        # The Mean op is hard-coded to accumulate and return float64.
        if dtype not in (None, 'float64'):
            raise NotImplementedError(
                'The Mean op does not support the dtype argument, '
                'and will always use float64. If you want to specify '
                'the dtype, call tensor.mean(..., op=False).',
                dtype)
        if acc_dtype not in (None, 'float64'):
            # BUGFIX: previously reported `dtype` instead of the
            # offending `acc_dtype` value.
            raise NotImplementedError(
                'The Mean op does not support the acc_dtype argument, '
                'and will always use float64. If you want to specify '
                'acc_dtype, call tensor.mean(..., op=False).',
                acc_dtype)
        out = Mean(axis)(input)
        if keepdims:
            out = makeKeepDims(input, out, axis)
        return out

    # The summation will be done with the specified dtype.
    # sum() will complain if it is not suitable.
    sum_dtype = dtype
    # float16 overflows way too fast for sum: accumulate in float32 and cast
    # back to float16 at the end, unless the caller explicitly asked for a
    # float16 accumulator.
    if ((sum_dtype == 'float16' or input.dtype == 'float16') and
            acc_dtype != 'float16'):
        # BUGFIX: this was `sum_dtype == 'float32'`, a no-op comparison, so
        # the float16 upcast guard never actually took effect.
        sum_dtype = 'float32'
    s = sum(input, axis=axis, dtype=sum_dtype, keepdims=keepdims,
            acc_dtype=acc_dtype)
    shp = shape(input)
    # Cast shp into a float type
    # TODO Once we have a consistent casting policy, we could simply
    # use true_div.
    if s.dtype in ('float16', 'float32', 'complex64'):
        shp = cast(shp, 'float32')
    else:
        shp = cast(shp, 'float64')
    # Normalize axis to a list of ints covering every reduced dimension.
    if axis is None:
        axis = list(range(input.ndim))
    elif isinstance(axis, (int, numpy.integer)):
        axis = [axis]
    elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
        axis = [int(axis)]
    else:
        axis = [int(a) for a in axis]
    # This sequential division will possibly be optimized by Theano:
    for i in axis:
        s = true_div(s, shp[i])
    # Cast the result back to float16 when that is the requested/implied
    # output dtype.
    if dtype == 'float16' or (dtype is None and input.dtype == 'float16'):
        s = cast(s, 'float16')
    s.name = 'mean'
    return s
@constructor
def var(input, axis=None, keepdims=False):
    """
    Computes the variance along the given axis(es) of a tensor `input`.

    Parameters
    ----------
    axis: None or int or (list of int) (see `Sum`)
        Compute the variance along this axis of the tensor.
        None means all axes (like numpy).
    keepdims : bool
        If this is set to True, the axes which are reduced are
        left in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original tensor.

    Notes
    -----
    It uses the two-pass algorithm for more stable results.
    https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
    There exist other implementations that are even more stable, but probably
    slower.
    """
    ndim = input.type.ndim
    # Normalize axis to a list of ints.
    if axis is None:
        axis = list(range(ndim))
    elif isinstance(axis, (int, numpy.integer)):
        axis = [axis]
    elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
        axis = [int(axis)]
    else:
        axis = [int(a) for a in axis]
    # First pass: axis-wise mean (kept broadcastable for the subtraction).
    avg = mean(input, axis, keepdims=True)
    # Second pass: mean of the squared deviations.
    centered = input - avg
    out = mean((centered ** 2), axis, keepdims=keepdims)
    out.name = 'var'
    return out
@constructor
def std(input, axis=None, keepdims=False):
    """
    Computes the standard deviation along the given axis(es) of a tensor
    `input`.

    Parameters
    ----------
    axis : None or int or (list of int) (see `Sum`)
        Compute the standard deviation along this axis of the tensor.
        None means all axes (like numpy).
    keepdims : bool
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result will
        broadcast correctly against the original tensor.

    Notes
    -----
    It calls `var()` and `var()` uses the two-pass algorithm for more stable
    results.
    https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
    There exist other implementations that are even more stable, but probably
    slower.
    """
    result = sqrt(var(input=input, axis=axis, keepdims=keepdims))
    result.name = 'std'
    return result
class Default(gof.Op):
    """
    Takes an input x and a default value.

    If the input is not None, a reference to it is returned.
    If the input is None, a copy of the default value is returned instead.

    The input and the default must have exactly the same type.
    """

    # Output aliases input 0 (x) when x is not None.
    view_map = {0: [0]}
    __props__ = ()

    def make_node(self, x, default):
        x, default = as_tensor_variable(x), as_tensor_variable(default)
        if x.type != default.type:
            raise TypeError('Both default() arguments must have same type',
                            x, default)
        return gof.Apply(self, [x, default], [default.type()])

    def perform(self, node, inp, out_):
        x, default = inp
        out, = out_
        if x is None:
            # why copy? Theano can't yet understand out[0] being a view of
            # either x or y, so we can be a view of x, but only a copy of y.
            out[0] = default.copy()
        else:
            out[0] = x
default = Default()
setdefault = default # legacy
##########################
# Arithmetics
##########################
@_scal_elemwise
def maximum(x, y):
"""elemwise maximum. See max for the maximum in one tensor"""
# see decorator for function body
@_scal_elemwise
def minimum(x, y):
"""elemwise minimum. See min for the minimum in one tensor"""
# see decorator for function body
def div_proxy(x, y):
    """Proxy for either true_div or int_div, depending on types of x, y.

    Integer / integer uses int_div; anything involving a non-discrete
    dtype uses true_div (the choice is delegated to
    scal.int_or_true_div).
    """
    # scal.int_or_true_div returns the name prefix of the division kind;
    # resolve '<prefix>_div' directly in the module namespace instead of
    # building and eval()-ing an expression (same lookup, no eval).
    div_kind = scal.int_or_true_div(
        as_tensor_variable(x).dtype in discrete_dtypes,
        as_tensor_variable(y).dtype in discrete_dtypes)
    f = globals()['%s_div' % div_kind]
    return f(x, y)
def divmod(x, y):
    """Elementwise divmod, built from floor_div and mod_check."""
    quotient = floor_div(x, y)
    remainder = mod_check(x, y)
    return quotient, remainder
@_scal_elemwise
def add(a, *other_terms):
"""elementwise addition"""
# see decorator for function body
@_scal_elemwise
def sub(a, b):
"""elementwise subtraction"""
# see decorator for function body
@_scal_elemwise
def mul(a, *other_terms):
"""elementwise multiplication"""
# see decorator for function body
@_scal_elemwise
def true_div(a, b):
"""elementwise [true] division (inverse of multiplication)"""
# see decorator for function body
@_scal_elemwise
def int_div(a, b):
"""elementwise [floor] division (inverse of multiplication)"""
# see decorator for function body
# floor_div and int_div are the same thing
floor_div = int_div
def ceil_intdiv(a, b):
    """
    Safely compute ceil(float_division(a, b)).

    Works for all dtypes, but mostly useful when a and b are int.
    """
    # If a and b are int with not many significant bits, we could
    # cast them to float to avoid doing the modulo. We do not know if this
    # is faster or not. But this is not safe for int64 as the cast will
    # lose precision.
    # e.g.: cast(cast(a, scalar.upcast(a, 'float32')) / b, scal.upcast(a, b))

    # Floor-divide, then add 1 wherever the division had a remainder.
    # We cast for the case when a and b are uint*. Otherwise neq will
    # force their upcast to int.
    quotient = int_div(a, b)
    result = cast(neq(a % b, 0), quotient.dtype) + quotient
    assert result.dtype == scal.upcast(quotient.owner.inputs[0],
                                       quotient.owner.inputs[1])
    return result
def mod_check(x, y):
    """Make sure we do not try to use complex numbers."""
    x_is_complex = as_tensor_variable(x).dtype in complex_dtypes
    y_is_complex = as_tensor_variable(y).dtype in complex_dtypes
    if x_is_complex or y_is_complex:
        # Currently forbidden.
        raise scal.Mod.complex_error
    return mod(x, y)
@_scal_elemwise
def mod(a, b):
"""elementwise modulo"""
# see decorator for function body
@_scal_elemwise
def pow(a, b):
"""elementwise power"""
# see decorator for function body
@_scal_elemwise
def clip(x, min, max):
"""
Clip x to be between min and max.
Notes
-----
When `x` is equal to the boundaries, the output is considered
to be `x`, so at these points, the gradient of the cost wrt the output
will be propagated to `x`, not to `min` nor `max`. In other words,
on these points, the gradient wrt `x` will be equal to the gradient wrt
the output, and the gradient wrt `min` and `max` will be zero.
"""
# see decorator for function body
# for grep: clamp, bound
pprint.assign(add, printing.OperatorPrinter('+', -2, 'either'))
pprint.assign(mul, printing.OperatorPrinter('*', -1, 'either'))
pprint.assign(sub, printing.OperatorPrinter('-', -2, 'left'))
pprint.assign(neg, printing.OperatorPrinter('-', 0, 'either'))
pprint.assign(true_div, printing.OperatorPrinter('/', -1, 'left'))
pprint.assign(int_div, printing.OperatorPrinter('//', -1, 'left'))
pprint.assign(pow, printing.OperatorPrinter('**', 1, 'right'))
##########################
# View Operations
##########################
def extract_constant(x, elemwise=True):
    """
    Like tensor.get_scalar_constant_value, but never raises.

    Where get_scalar_constant_value raises on failure, this function
    returns `x` instead, as a tensor if possible: a ScalarVariable coming
    from a scalar_from_tensor has that conversion stripped, and any other
    ScalarVariable is wrapped with tensor_from_scalar.
    """
    try:
        # Success: the constant value itself is returned.
        return get_scalar_constant_value(x, elemwise=elemwise)
    except NotScalarConstantError:
        pass
    scalar_types = (scal.ScalarVariable, scal.sharedvar.ScalarSharedVariable)
    if isinstance(x, scalar_types):
        owner = x.owner
        if owner is not None and isinstance(owner.op, ScalarFromTensor):
            # Undo the tensor -> scalar conversion.
            return owner.inputs[0]
        return tensor_from_scalar(x)
    return x
def transpose(x, axes=None):
    """
    Reorder the dimensions of x (default: reverse them).

    This is a macro around DimShuffle that matches the numpy.transpose
    function.
    """
    reversed_order = list(range(x.ndim - 1, -1, -1))
    if axes is None:
        axes = reversed_order
    result = DimShuffle(x.broadcastable, axes, inplace=False)(x)
    # A full reversal of a named variable gets the conventional '.T' name.
    if x.name and axes == reversed_order:
        result.name = x.name + '.T'
    return result
def batched_dot(x, y):
    """
    Compute a batch of dot products by scanning over the first dimension.

    Parameters
    ----------
    x : tensor
        A Tensor with sizes e.g.: for 3D (dim1, dim3, dim2).
    y : tensor
        A Tensor with sizes e.g.: for 3D (dim1, dim2, dim4).

    Returns
    -------
    tensor
        A tensor of size e.g. if it is 3D: (dim1, dim3, dim4).

    Notes
    -----
    This is a subset of numpy.einsum, but we do not provide it for now.
    But numpy einsum is slower than dot or tensordot:
    http://mail.scipy.org/pipermail/numpy-discussion/2012-October/064259.html

    Examples
    --------
    >>> first = tensor.tensor3('first')
    >>> second = tensor.tensor3('second')
    >>> result = batched_dot(first, second)
    """
    def step(x_mat, y_mat):
        # One ordinary dot product per element of the leading (batch) axis.
        return theano.tensor.dot(x_mat, y_mat)

    out, _updates = theano.scan(fn=step,
                                outputs_info=None,
                                sequences=[x, y],
                                non_sequences=None)
    return out
def batched_tensordot(x, y, axes=2):
    """
    Compute a batched tensordot product.

    A hybrid of batch_dot and tensordot: performs a tensordot between the
    two tensors for each index along the first (batch) dimension.

    Parameters
    ----------
    x : tensor
        A Tensor with sizes e.g.: for 3D (dim1, dim3, dim2)
    y : tensor
        A Tensor with sizes e.g.: for 3D (dim1, dim2, dim4)
    axes: int or array-like of length 2
        If an integer, the number of axes to sum over.
        If an array, it must have two array elements containing the axes to
        sum over in each tensor.

        If an integer i, it is converted to an array containing
        the last i dimensions of the first tensor and the first
        i dimensions of the second tensor (excluding the first
        (batch) dimension):
            axes = [list(range(a.ndim - i, b.ndim)), list(range(1,i+1))]

        If an array, its two elements must contain compatible axes
        of the two tensors. For example, [[1, 2], [2, 4]] means sum
        over the 2nd and 3rd axes of a and the 3rd and 5th axes of b.
        (Remember axes are zero-indexed!) The 2nd axis of a and the
        3rd axis of b must have the same shape; the same is true for
        the 3rd axis of a and the 5th axis of b.

    Like tensordot, this function uses a series of dimshuffles and
    reshapes to reduce the tensor dot product to a matrix or vector
    dot product. Finally, it calls batched_dot to compute the result.
    """
    # Delegate to the shared tensordot machinery, in batched mode, with
    # batched_dot as the base product.
    tensordot_kwargs = dict(dot=batched_dot, batched=True)
    return _tensordot_as_dot(x, y, axes, **tensordot_kwargs)
def split(x, splits_size, n_splits, axis=0):
    """Partition `x` along `axis` into `n_splits` pieces whose sizes are
    given by `splits_size` (see the `Split` Op)."""
    return Split(n_splits)(x, axis, splits_size)
class Split(Op):
    """Partition a `TensorVariable` along some axis.

    Examples
    --------
    >>> x = vector()
    >>> splits = lvector()

    You have to declare right away how many split_points there will be.

    >>> ra, rb, rc = split(x, splits, n_splits = 3, axis = 0)
    >>> f = function([x, splits], [ra, rb, rc])
    >>> a, b, c = f([0,1,2,3,4,5], [3, 2, 1])

    a == [0,1,2]
    b == [3, 4]
    c == [5]
    """

    # A Split instance will have this many outputs, and require that
    # the splits argument to `perform` have exactly this many elements.
    len_splits = None

    __props__ = ("len_splits",)

    def __init__(self, len_splits):
        self.len_splits = int(len_splits)

    def __str__(self):
        return self.__class__.__name__ + "{%s}" % self.len_splits

    def make_node(self, x, axis, splits):
        """Build the Apply node; validates the types of axis and splits."""
        x = as_tensor_variable(x)
        axis = as_tensor_variable(axis)
        splits = as_tensor_variable(splits)

        if splits.type not in int_vector_types:
            raise TypeError('splits must have type tensor.lvector',
                            splits.type)
        if axis.type not in int_types:
            raise TypeError('axis must have type lscalar', axis.type)

        # # The following lines are necessary if we allow splits of zero
        # if isinstance(axis, gof.Constant):
        #     x = unbroadcast(x, int(axis.data))
        # else:
        #     x = unbroadcast(x, *range(x.type.ndim))

        inputs = [x, axis, splits]
        outputs = [x.type() for i in xrange(self.len_splits)]
        return Apply(self, inputs, outputs)

    def perform(self, node, inputs, outputs):
        """Slice x into len_splits pieces along `axis`, sizes from `splits`."""
        x, axis, splits = inputs
        # in python 2.4, x.shape[numpy.asarray(1)] don't work.
        if sys.version_info[0:2] == (2, 4) and axis.size == 1:
            axis = int(axis)

        try:
            len_along_axis = x.shape[axis]
        except Exception:
            # Narrowed from a bare `except:` -- anything that makes the
            # indexing fail means the axis is invalid for this input.
            raise ValueError('Split.perform() with axis=(%s) is invalid'
                             ' for x.shape==(%s)'
                             % (axis, x.shape))
        if len(splits) != self.len_splits:
            raise ValueError('In Split.perform(), len(splits) != len_splits.',
                             (len(splits), self.len_splits))
        if numpy.sum(splits) != len_along_axis:
            raise ValueError('The splits sum to %s, expected %s' %
                             (numpy.sum(splits), len_along_axis))
        if python_any([nb < 0 for nb in splits]):
            raise ValueError('Split: you tried to make an ndarray with a '
                             'negative number of elements.')

        # Checking is done, let's roll the splitting algorithm!
        # Basically we step along the given axis of x, extracting
        # subtensors of size splits[i] as we go along.
        general_key = [slice(None, None, None) for s in x.shape]
        lower_idx = 0
        for i in xrange(self.len_splits):
            upper_idx = lower_idx + splits[i]
            general_key[axis] = slice(lower_idx, upper_idx, None)
            outputs[i][0] = x.__getitem__(tuple(general_key)).copy()
            lower_idx = upper_idx

    def infer_shape(self, node, in_shapes):
        """Each output shape equals x's shape with splits[i] along `axis`."""
        axis = node.inputs[1]
        splits = node.inputs[2]
        shp_x, shp_axis, shp_splits = in_shapes
        out_shapes = []
        for i in xrange(self.len_splits):
            temp = as_tensor_variable(shp_x)
            temp = theano.tensor.subtensor.set_subtensor(temp[axis], splits[i])
            temp = [temp[i] for i in xrange(len(shp_x))]
            out_shapes.append(temp)
        return out_shapes

    def grad(self, inputs, g_outputs):
        """Join the gradients along the axis that was used to split x."""
        x, axis, n = inputs
        outputs = self(*inputs, **dict(return_list=True))
        # If all the output gradients are disconnected, then so are the inputs
        if python_all([isinstance(g.type, DisconnectedType)
                       for g in g_outputs]):
            return [DisconnectedType()(),
                    grad_undefined(self, 1, axis),
                    grad_undefined(self, 2, n)]
        # Else, we have to make them zeros before joining them
        new_g_outputs = []
        for o, g in zip(outputs, g_outputs):
            if isinstance(g.type, DisconnectedType):
                new_g_outputs.append(o.zeros_like())
            else:
                new_g_outputs.append(g)

        return [join(axis, *new_g_outputs),
                grad_undefined(self, 1, axis),
                grad_undefined(self, 2, n)]

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            # Bug fix: the old code did `[None for i in self.len_splits]`,
            # which iterates over an int and raises TypeError.
            return [None] * self.len_splits
        return self.make_node(eval_points[0], *inputs[1:]).outputs
def addbroadcast(x, *axes):
    """
    Make the input broadcastable in the specified axes.

    For example, addbroadcast(x, 0) will make the first dimension of
    x broadcastable. When performing the function, if the length of
    x along that dimension is not 1, a ValueError will be raised.

    We apply the opt here not to pollute the graph especially during
    the gpu optimization

    Parameters
    ----------
    x : tensor_like
        Input theano tensor.
    axis : an int or an iterable object such as list or tuple of int values
        The dimension along which the tensor x should be broadcastable.
        If the length of x along these dimensions is not 1, a ValueError will
        be raised.

    Returns
    -------
    tensor
        A theano tensor, which is broadcastable along the specified dimensions.
    """
    # Each requested axis is flagged True (broadcastable).
    flags = [(axis, True) for axis in axes]
    rebroadcasted = Rebroadcast(*flags)(x)
    return theano.tensor.opt.apply_rebroadcast_opt(rebroadcasted)
def unbroadcast(x, *axes):
    """
    Make the input impossible to broadcast in the specified axes.

    For example, unbroadcast(x, 0) will make the first dimension
    of x unbroadcastable.

    We apply the opt here not to pollute the graph especially during
    the gpu optimization

    Parameters
    ----------
    x : tensor_like
        Input theano tensor.
    axis : an int or an iterable object such as list or tuple of int values
        The dimension along which the tensor x should be unbroadcastable.
        If the length of x along these dimensions is not 1, a ValueError will
        be raised.

    Returns
    -------
    tensor
        A theano tensor, which is unbroadcastable along the specified dimensions.
    """
    # Each requested axis is flagged False (not broadcastable); the
    # rebroadcast optimization is applied immediately to keep the graph clean.
    rval = Rebroadcast(*[(axis, False) for axis in axes])(x)
    return theano.tensor.opt.apply_rebroadcast_opt(rval)
def patternbroadcast(x, broadcastable):
    """
    Make the input adopt a specific broadcasting pattern.

    Broadcastable must be iterable. For example,
    patternbroadcast(x, (True, False)) will make the first
    dimension of x broadcastable and the second dimension
    not broadcastable, so x will now be a row.

    We apply the opt here not to pollute the graph especially during the gpu
    optimization.

    Parameters
    ----------
    x : tensor_like
        Input theano tensor.
    broadcastable : an iterable object such as list or tuple of bool values
        A set of boolean values indicating whether a dimension should be
        broadcastable or not. If the length of x along these dimensions is
        not 1, a ValueError will be raised.

    Returns
    -------
    tensor
        A theano tensor with the requested broadcasting pattern.
    """
    # Pair each dimension index with its requested broadcast flag.
    flags = [(dim, flag) for dim, flag in enumerate(broadcastable)]
    rebroadcasted = Rebroadcast(*flags)(x)
    return theano.tensor.opt.apply_rebroadcast_opt(rebroadcasted)
class Join(Op):
    """
    Concatenate multiple `TensorVariable`s along some axis.

    The axis must be given as first argument. All tensors must have the same
    shape along all dimensions other than this axis.
    Of course, TensorVariable instances do not have a shape, so this error
    cannot be caught until runtime. See `perform()`.

    See Also
    --------
    stack : For joins involving scalar values

    Examples
    --------
    >>> x, y, z = tensor.matrix(), tensor.matrix(), tensor.matrix()
    >>> u = tensor.vector()

    >>> r = join(0, x, y, z)
    >>> c = join(1, x, y, z)
    >>> join(2, x, y, z)    # WRONG: the axis has to be an index into the shape
    >>> join(0, x, u)       # WRONG: joined tensors must have the same rank
    """
    check_input = False
    __props__ = ()

    def make_node(self, *axis_and_tensors):
        """
        Parameters
        ----------
        axis: an Int or integer-valued Variable
        tensors
            A variable number (but not zero) of tensors to
            concatenate along the specified axis. These tensors must have
            the same shape along all dimensions other than this axis.

        Returns
        -------
        A symbolic Variable
            It has the same ndim as the input tensors, and the most inclusive
            dtype.
        """
        axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
        if not tensors:
            raise ValueError('Cannot join an empty list of tensors')
        as_tensor_variable_args = [as_tensor_variable(x) for x in tensors]
        # The output dtype is the most inclusive upcast of all input dtypes.
        dtypes = [x.type.dtype for x in as_tensor_variable_args]
        out_dtype = scal.upcast(*dtypes)

        def output_maker(bcastable):
            return tensor(dtype=out_dtype, broadcastable=bcastable)

        return self._make_node_internal(
            axis, tensors, as_tensor_variable_args, output_maker)

    def _make_node_internal(self, axis, tensors,
                            as_tensor_variable_args, output_maker):
        # Infers the output broadcastable pattern and builds the Apply node.
        if not python_all(targs.type.ndim for targs
                          in as_tensor_variable_args):
            raise TypeError('Join cannot handle arguments of dimension 0.'
                            ' For joining scalar values, see @stack')
        # Handle single-tensor joins immediately.
        if len(as_tensor_variable_args) == 1:
            bcastable = list(as_tensor_variable_args[0].type.broadcastable)
        else:
            # When the axis is fixed, a dimension should be
            # broadcastable if at least one of the inputs is
            # broadcastable on that dimension (see justification below),
            # except for the axis dimension.
            # Initialize bcastable all false, and then fill in some trues with
            # the loops.
            bcastable = [False] * len(
                as_tensor_variable_args[0].type.broadcastable)
            ndim = len(bcastable)
            # Axis can also be a constant
            if not isinstance(axis, int):
                try:
                    # Note : `get_scalar_constant_value` returns a ndarray not
                    # an int
                    axis = int(get_scalar_constant_value(axis))
                except NotScalarConstantError:
                    pass
            if isinstance(axis, int):
                # Basically, broadcastable -> length 1, but the
                # converse does not hold. So we permit e.g. T/F/T
                # joins, and if they fail at runtime they fail, but if
                # they don't then it means that the argument where
                # that broadcastable flag was False had length 1 along
                # this dimension, and therefore this dimension should
                # be broadcastable for the output.
                if axis < -ndim:
                    raise IndexError("Join axis %d out of bounds [0, %d)" %
                                     (axis, ndim))
                if axis < 0:
                    axis += ndim

                for x in as_tensor_variable_args:
                    for current_axis, bflag in enumerate(x.type.broadcastable):
                        # Constant negative axis can no longer be negative at
                        # this point. It safe to compare this way.
                        if current_axis == axis:
                            continue
                        if bflag:
                            bcastable[current_axis] = True
                try:
                    # The join axis itself is never broadcastable.
                    bcastable[axis] = False
                except IndexError:
                    raise ValueError('Join argument "axis" is out of range'
                                     ' (given input dimensions)')
            else:
                # When the axis may vary, no dimension can be guaranteed to be
                # broadcastable.
                bcastable = [False] * len(
                    as_tensor_variable_args[0].type.broadcastable)

        if not python_all([x.ndim == len(bcastable)
                           for x in as_tensor_variable_args[1:]]):
            raise TypeError("Join() can only join tensors with the same "
                            "number of dimensions.")

        inputs = [as_tensor_variable(axis)] + list(as_tensor_variable_args)
        if inputs[0].type not in int_types:
            raise TypeError('Axis could not be cast to an integer type',
                            axis, inputs[0].type, int_types)

        outputs = [output_maker(bcastable)]

        node = Apply(self, inputs, outputs)
        return node

    def perform(self, node, axis_and_tensors, out_):
        # Runtime concatenation via numpy, cast to the node's output dtype.
        out, = out_
        axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
        ndim = tensors[0].ndim
        if axis < -ndim:
            raise IndexError("Join axis %d out of bounds [0, %d)" %
                             (axis, ndim))

        out[0] = theano._asarray(numpy.concatenate(tensors, axis=axis),
                                 dtype=node.outputs[0].type.dtype)

    def c_code_cache_version(self):
        return (3,)

    def c_code(self, node, name, inputs, outputs, sub):
        # Builds a Python list of the inputs and calls PyArray_Concatenate.
        axis, tensors = inputs[0], inputs[1:]
        input_1 = tensors[0]
        l = len(tensors)
        out, = outputs
        fail = sub['fail']
        adtype = node.inputs[0].type.dtype_specs()[1]
        code = """
        PyObject* list = PyList_New(%(l)s);
        """ % locals()
        for i, inp in enumerate(tensors):
            code += """
            Py_INCREF(%(inp)s);
            PyList_SetItem(list, %(i)s, (PyObject*)%(inp)s);
            """ % locals()
        code += """
        //PyObject* PyArray_Concatenate(PyObject* obj, int axis)
        int axis = ((%(adtype)s *)PyArray_DATA(%(axis)s))[0];
        int ndim = PyArray_NDIM(%(input_1)s);
        if( axis < -ndim ){
            PyErr_Format(PyExc_IndexError,
                         "Join axis %%d out of bounds [0, %%d)", axis, ndim);
            %(fail)s
        }
        Py_XDECREF(%(out)s);
        %(out)s = (PyArrayObject *)PyArray_Concatenate(list, axis);
        Py_DECREF(list);
        if(!%(out)s){
            %(fail)s
        }
        """ % locals()
        return code

    def R_op(self, inputs, eval_points):
        # The R-operator of a join is the join of the evaluation points.
        if None in eval_points[1:]:
            return [None]
        return self.make_node(inputs[0], *eval_points[1:]).outputs

    def grad(self, axis_and_tensors, grads):
        """ The gradient wrt a join op is a `Split`, used to partition
        the gradient along the `axis` which was used for joining.
        """
        gz, = grads
        axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]

        rval = [grad_undefined(self, 0, axis)]

        dtypes = [as_tensor_variable(x).type.dtype for x in tensors]
        out_dtype = scal.upcast(*dtypes)

        if 'float' in out_dtype or 'complex' in out_dtype:
            # assume that this is differentiable
            split = Split(len(tensors))
            split_gz = split(gz, axis, stack([shape(x)[axis]
                                              for x in tensors]))
            # If there is only one split, it might not be in a list.
            if not isinstance(split_gz, list):
                split_gz = [split_gz]
            # Split.make_node isn't always able to infer the right
            # broadcast. As the grad need to keep the information,
            # read it if needed.
            split_gz = [patternbroadcast(g, t.broadcastable)
                        for t, g in zip(tensors, split_gz)]
            rval = rval + split_gz
        else:
            # the output has integer type, so the gradient through it
            # is 0
            # (note: the loop variable `tensor` shadows the module-level
            # `tensor` constructor inside this comprehension)
            rval = rval + [tensor.zeros_like(dtype=config.floatX)
                           for tensor in tensors]

        return rval

    def infer_shape(self, node, ishapes):
        # ishapes[0] contains the size of the axis on which we join
        # Join op should get at least one input to join
        assert len(ishapes) > 1
        n_dim = len(ishapes[1])
        for shp in ishapes[1:]:
            assert shp is not None
            assert len(shp) == n_dim

        # The joining dimension could be negative, but we need it to be
        # in [0, n_dim) in the loop below.
        # An axis < -n_dim or >= ndim would be invalid, but this is
        # not checked here. An Assert op would be a way of addressing that,
        # but it may disrupt optimizations.
        join_dim = switch(ge(node.inputs[0], 0),
                          node.inputs[0],
                          node.inputs[0] + n_dim)
        out_shapes = []
        for dim in xrange(n_dim):
            # we have to deal with 2 possible cases in here :
            #   a) we are dealing with the dimension for which we join
            #     (called t_side from true side of the if, where the if
            #     compares current dimension with the joining dimension)
            #   b) a non joining dimension ( in which maybe a symbolic
            #      assertion can be used to make sure all tensors have
            #      the same number of elements on this non-joined dimension
            #      this is f_side
            # initialize
            t_side = ishapes[1][dim]
            f_side = ishapes[1][dim]
            # loop over tensors and sum for the joining dimension
            for shp in ishapes[2:]:
                t_side = t_side + shp[dim]
            # return the dimensions found
            out_shapes.append(switch(eq(dim, join_dim),
                              t_side, f_side))

        return [tuple(out_shapes)]
"""
Convenience function to concatenate `TensorType`s along the given axis.
Parameters
----------
tensors : list of tensors (or list-like)
A list of tensors to be concatenated along the given axis.
The shapes of the tensors to be concatenated must be all
identical, except in the dimension (`axis`) on which they are to
be joined.
axis : int (symbolic or literal)
On which dimension should the tensors be joined? The `axis`
must be a valid index into the shape of the tensors to be
concatenated.
The `axis` parameter may either be an integer or an object that
can be converted to a scalar using `as_scalar`(`axis`). In the
former case, the axis is fixed at construction, while in the
latter it may vary over time depending on the value of the
`axis` variable.
"""
join = Join()
pprint.assign(lambda pstate, r: r.owner and isinstance(r.owner.op, Join),
printing.FunctionPrinter('join'))
def roll(x, shift, axis=None):
    """
    Convenience function to roll TensorTypes along the given axis.

    Syntax copies numpy.roll function.

    Parameters
    ----------
    x : tensor_like
        Input tensor.
    shift : int (symbolic or literal)
        The number of places by which elements are shifted.
    axis : int (symbolic or literal), optional
        The axis along which elements are shifted. By default, the array
        is flattened before shifting, after which the original
        shape is restored.

    Returns
    -------
    tensor
        Output tensor, with the same shape as ``x``.
    """
    if axis is None:
        if x.ndim > 1:
            # Flatten, roll along the only remaining axis, and restore
            # the original shape.
            flattened = x.flatten()
            return roll(flattened, shift, axis=0).reshape(x.shape)
        axis = 0

    def axis_key(dim_slice):
        # Full slices on every dimension except `axis`, where `dim_slice`
        # applies.
        full = slice(None)
        return tuple([full] * axis + [dim_slice] +
                     [full] * (x.ndim - axis - 1))

    # Joining [..., -shift:, ...] with [..., :-shift, ...] realizes the roll.
    front = x[axis_key(slice(-shift, None))]
    back = x[axis_key(slice(0, -shift))]
    return join(axis, front, back)
@constructor
def shape_padleft(t, n_ones=1):
    """Reshape `t` by left-padding the shape with `n_ones` 1s.

    See Also
    --------
    shape_padaxis
    shape_padright
    Dimshuffle
    """
    _t = as_tensor_variable(t)
    # Prepend `n_ones` broadcastable ('x') dimensions, keep the rest in order.
    pattern = ['x'] * n_ones + list(range(_t.type.ndim))
    return DimShuffle(_t.broadcastable, pattern)(_t)
@constructor
def shape_padright(t, n_ones=1):
    """Reshape `t` by right-padding the shape with `n_ones` 1s.

    See Also
    --------
    shape_padaxis
    shape_padleft
    Dimshuffle
    """
    _t = as_tensor_variable(t)
    # Keep existing dimensions in order, then append `n_ones`
    # broadcastable ('x') dimensions.
    pattern = list(range(_t.type.ndim)) + ['x'] * n_ones
    return DimShuffle(_t.broadcastable, pattern)(_t)
@constructor
def shape_padaxis(t, axis):
    """Reshape `t` by inserting 1 at the dimension `axis`.

    Example
    -------
    >>> tensor = theano.tensor.tensor3()
    >>> theano.tensor.shape_padaxis(tensor, axis=0)
    DimShuffle{x,0,1,2}.0
    >>> theano.tensor.shape_padaxis(tensor, axis=1)
    DimShuffle{0,x,1,2}.0
    >>> theano.tensor.shape_padaxis(tensor, axis=3)
    DimShuffle{0,1,2,x}.0
    >>> theano.tensor.shape_padaxis(tensor, axis=-1)
    DimShuffle{0,1,2,x}.0

    See Also
    --------
    shape_padleft
    shape_padright
    Dimshuffle
    """
    _t = as_tensor_variable(t)

    # The output has one more dimension than the input.
    ndim = _t.ndim + 1
    if axis < -ndim or axis >= ndim:
        raise IndexError(
            'axis {0} is out of bounds [-{1}, {1})'.format(axis, ndim))
    if axis < 0:
        axis += ndim

    pattern = list(range(_t.type.ndim))
    pattern.insert(axis, 'x')
    return DimShuffle(_t.broadcastable, pattern)(_t)
@constructor
def stack(*tensors, **kwargs):
    """Stack tensors in sequence on given axis (default is 0).

    Take a sequence of tensors and stack them on given axis to make a single
    tensor. The size in dimension `axis` of the result will be equal to the
    number of tensors passed.

    Note: The interface stack(*tensors) is deprecated, you should use
    stack(tensors, axis=0) instead.

    Parameters
    ----------
    tensors : list or tuple of tensors
        A list of tensors to be stacked.
    axis : int
        The index of the new axis. Default value is 0.

    Examples
    --------
    >>> a = theano.tensor.scalar()
    >>> b = theano.tensor.scalar()
    >>> c = theano.tensor.scalar()
    >>> x = theano.tensor.stack([a, b, c])
    >>> x.ndim # x is a vector of length 3.
    1
    >>> a = theano.tensor.tensor4()
    >>> b = theano.tensor.tensor4()
    >>> c = theano.tensor.tensor4()
    >>> x = theano.tensor.stack([a, b, c])
    >>> x.ndim # x is a 5d tensor.
    5
    >>> rval = x.eval(dict((t, np.zeros((2, 2, 2, 2))) for t in [a, b, c]))
    >>> rval.shape # 3 tensors are stacked on axis 0
    (3, 2, 2, 2, 2)
    >>> x = theano.tensor.stack([a, b, c], axis=3)
    >>> x.ndim
    5
    >>> rval = x.eval(dict((t, np.zeros((2, 2, 2, 2))) for t in [a, b, c]))
    >>> rval.shape # 3 tensors are stacked on axis 3
    (2, 2, 2, 3, 2)
    >>> x = theano.tensor.stack([a, b, c], axis=-2)
    >>> x.ndim
    5
    >>> rval = x.eval(dict((t, np.zeros((2, 2, 2, 2))) for t in [a, b, c]))
    >>> rval.shape # 3 tensors are stacked on axis -2
    (2, 2, 2, 3, 2)
    """
    # ---> Remove this when moving to the new interface:
    # This branch normalizes the two supported call styles -- the deprecated
    # stack(a, b, c, ...) and the new stack(tensors, axis) -- into a
    # `tensors` sequence and an `axis` value.
    if not tensors and not kwargs:
        raise Exception('theano.tensor.stack(tensors, axis) must have at least'
                        ' one parameter')

    if not kwargs and not isinstance(tensors[0], (list, tuple)):
        # Deprecated positional form: stack(a, b, c) with axis fixed at 0.
        warnings.warn('stack(*tensors) interface is deprecated, use'
                      ' stack(tensors, axis=0) instead.', DeprecationWarning,
                      stacklevel=3)
        axis = 0
    elif 'tensors' in kwargs:
        tensors = kwargs['tensors']
        if 'axis' in kwargs:
            axis = kwargs['axis']
        else:
            axis = 0
    else:
        if len(tensors) == 2:
            axis = tensors[1]
        elif 'axis' in kwargs:
            axis = kwargs['axis']
        else:
            axis = 0
        tensors = tensors[0]
    # <--- Until here.

    if len(tensors) == 0:
        raise Exception('tensors is empty. You should at least provide one'
                        ' tensor to theano.tensor.stack(tensors, axis).')

    # If all tensors are scalars of the same type, call make_vector.
    # It makes the graph simpler, by not adding DimShuffles and Rebroadcasts
    # This should be an optimization!
    # Doing it here make the graph less canonicalized
    # (more type need to be understood by all optimization)
    # And DebugMode can't detect error in this code as it is not in an
    # optimization.
    # See ticket #660
    if numpy.all(
            [  # in case there is direct int in tensors.
                isinstance(t, (numpy.number, float, integer_types,
                               python_complex)) or
                (isinstance(t, Variable) and
                 isinstance(t.type, TensorType) and
                 t.ndim == 0)
                for t in tensors]):
        # in case there is direct int
        tensors = list(map(as_tensor_variable, tensors))
        dtype = scal.upcast(*[i.dtype for i in tensors])
        return theano.tensor.opt.MakeVector(dtype)(*tensors)
    # General case: give every tensor a new length-1 axis, then join on it.
    return join(axis, *[shape_padaxis(t, axis) for t in tensors])
@constructor
def concatenate(tensor_list, axis=0):
    """Alias for `join`(axis, *tensor_list).

    This function is similar to `join`, but uses the signature of
    numpy's concatenate function.

    Raises
    ------
    TypeError
        The tensor_list must be a tuple or list.
    """
    # Guard against the common mistake of writing concatenate(x, y)
    # instead of concatenate((x, y)).
    if isinstance(tensor_list, (tuple, list)):
        return join(axis, *tensor_list)
    raise TypeError(
        "The 'tensors' argument must be either a tuple "
        "or a list, make sure you did not forget () or [] around "
        "arguments of concatenate.", tensor_list)
def get_vector_length(v):
    """Return the run-time length of a symbolic vector.

    Parameters
    ----------
    v
        A rank-1 TensorType variable.

    Raises
    ------
    TypeError
        `v` hasn't the proper type.
    ValueError
        No special case applies, the length is not known.

    In general this is not possible, but for a number of special cases
    the length can be determined at compile / graph-construction time.
    This function implements these special cases.
    """
    v = as_tensor_variable(v)
    if v.ndim != 1:
        raise TypeError('argument must be symbolic vector')
    if v.type.broadcastable[0]:
        # A broadcastable dimension has length 1 by definition.
        return 1
    if isinstance(v, gof.Constant) and v.type.ndim == 1:
        return len(v.data)
    if v.owner and isinstance(v.owner.op, theano.tensor.opt.MakeVector):
        return len(v.owner.inputs)
    if v.owner and isinstance(v.owner.op, Shape):
        # shape(x) has one entry per dimension of x.
        return v.owner.inputs[0].type.ndim
    # If we take a slice, we know how many elements it will result in
    if ((v.owner and
         isinstance(v.owner.op, theano.tensor.subtensor.Subtensor) and
         isinstance(v.owner.op.idx_list[0], slice))):
        this_slice = theano.tensor.subtensor.get_idx_list(
            v.owner.inputs, v.owner.op.idx_list)[0]
        start = extract_constant(this_slice.start)
        stop = extract_constant(this_slice.stop)
        if start is None:
            start = 0
        # Only the plain `v[start:stop]` case -- unit step, both bounds
        # constant and non-negative -- has a length computable here.
        # Bug fix: the previous code treated a missing `stop` as 0 and
        # accepted negative bounds, producing wrong (even negative)
        # lengths for e.g. `v[2:]`, `v[:]` or `v[:-1]`; those cases now
        # fall through to the ValueError below.
        if (this_slice.step is None and
                isinstance(start, numbers.Integral) and
                isinstance(stop, numbers.Integral) and
                start >= 0 and stop >= 0):
            # An empty slice (stop <= start) has length 0, never negative.
            return max(0, stop - start)
    if isinstance(v, Variable):
        msg = theano.printing.debugprint(v, file='str')
    else:
        msg = str(v)
    raise ValueError("length not known: %s" % msg)
@constructor
def horizontal_stack(*args):
    """
    Horizontally stack two L{TensorType}s.

    Stack two L{TensorType}s along the second axis (column wise). These
    L{TensorType}s must have the same shape along all dimensions but the
    second.
    """
    # Note: 'horizontal_stack' and 'vertical_stack' do not behave exactly like
    # Numpy's hstack and vstack functions. This is intended, because Numpy's
    # functions have potentially confusing/incoherent behavior (try them on 1D
    # arrays). If this is fixed in a future version of Numpy, it may be worth
    # trying to get closer to Numpy's way of doing things. In the meantime,
    # better keep different names to emphasize the implementation divergences.
    assert len(args) >= 2
    assert python_all(arg.type.ndim == 2 for arg in args)
    return concatenate(args, axis=1)
@constructor
def vertical_stack(*args):
    """Vertically stack two or more 2D tensors (row wise); all arguments
    must have matching shapes along every dimension but the first."""
    assert len(args) >= 2
    assert python_all(arg.type.ndim == 2 for arg in args)
    return concatenate(args, axis=0)
class Reshape(Op):
    """Perform a reshape operation of the input x to the new shape shp.

    The number of dimensions to which to reshape to (ndim) must be
    known at graph build time.
    """
    view_map = {0: [0]}  # output 0 is potentially aliased to inputs [0]
    _f16_ok = True
    check_input = False

    __props__ = ("ndim",)
    # name does not participate because it doesn't affect computations

    def __init__(self, ndim, name=None):
        self.ndim = ndim
        self.name = name

    def __str__(self):
        return '%s{%s}' % (self.__class__.__name__, self.ndim)

    def make_node(self, x, shp):
        """Build the Apply node; infers output broadcastability from `shp`."""
        x = as_tensor_variable(x)
        shp_orig = shp
        shp = as_tensor_variable(shp, ndim=1)
        if not (shp.dtype.startswith('int') or
                (isinstance(shp, TensorConstant) and shp.data.size == 0)):
            # It raises an error if shp is not of integer type,
            # except when shp is constant and empty
            # (in this case, shp.dtype does not matter anymore).
            raise TypeError("Shape must be integers", shp, shp.dtype)
        assert shp.ndim == 1
        if isinstance(shp, TensorConstant):
            # Constant shape: a dimension is broadcastable iff its length is 1.
            bcast = [s == 1 for s in shp.data]
            return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcast)])
        else:
            bcasts = [False] * self.ndim
            shp_list = shp_orig
            if hasattr(shp_orig, "ndim") and shp_orig.ndim == 0:
                shp_list = [shp_orig]
            for index in xrange(self.ndim):
                y = shp_list[index]
                y = as_tensor_variable(y)
                # Try to see if we can infer that y has a constant value of 1.
                # If so, that dimension should be broadcastable.
                try:
                    bcasts[index] = (
                        hasattr(y, 'get_scalar_constant_value') and
                        y.get_scalar_constant_value() == 1)
                except NotScalarConstantError:
                    pass
            return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcasts)])

    def perform(self, node, inp, out_):
        """Runtime reshape via numpy.reshape, with sanity checks."""
        x, shp = inp
        out, = out_
        if (len(shp) != self.ndim):
            raise ValueError('shape argument to Reshape.perform has incorrect'
                             ' length %i'
                             ', should be %i' % (len(shp), self.ndim), shp)
        try:
            out[0] = numpy.reshape(x, shp)
        except Exception:
            raise ValueError('Cannot reshape input of shape %s to shape %s' %
                             (x.shape, shp))
        if not out[0].flags.aligned:
            raise RuntimeError("numpy.reshape returned a not aligned tensor."
                               " NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
                               " this problem for some input shape/new shape"
                               " combinations. Use another NumPy version."
                               " Input shape: %s, input stride: %s,"
                               " new_shape: %s, new_strides: %s." % (
                                   x.shape, x.strides, shp, out[0].strides))

    def connection_pattern(self, node):
        # Output depends on x (input 0) but not on the shape (input 1).
        return [[True], [False]]

    def grad(self, inp, grads):
        # Gradient is the output gradient reshaped back to x's shape;
        # the shape input is disconnected.
        x, shp = inp
        g_out, = grads
        return [reshape(g_out, shape(x), ndim=x.ndim),
                DisconnectedType()()]

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return [None]
        return self(eval_points[0], *inputs[1:], **dict(return_list=True))

    def infer_shape(self, node, ishapes):
        # inputs[1] can contain at most one value of '-1', meaning the actual
        # shape of the output will be automatically computed by reshape, so
        # that the total number of elements stays the same.
        # TODO: Maybe put that formula here?
        # It's not trivial, because we would have to check if the product of
        # all the non-minus-one shapes is a divisor of the product of the
        # original shapes.

        # The following expression leads to cycles in feature_shape,
        # because it tries to replace the Shape_i node by the switch
        # statement, which depends on Shape_i.
        # return [tuple([switch(eq(node.inputs[1][i], -1),
        #                      theano.tensor.opt.Shape_i(i)(node.outputs[0]),
        #                      node.inputs[1][i])
        #                for i in xrange(self.ndim)]
        #    )]

        # Here, we only simplify if the shape (node.inputs[1]) is a constant,
        # ideally it would suffice to check that it is always non-negative.
        requ = node.inputs[1]
        if isinstance(requ, theano.tensor.TensorConstant):
            requ = list(requ.data)
            requ_part = [ele for ele in requ if ele != -1]
            crit = len(requ) - len(requ_part)
            if crit == 1 and len(requ_part) > 0:
                # One '-1' entry: its value is the number of remaining
                # elements after dividing out the known dimensions.
                missing = mul(*ishapes[0]) // mul(*requ_part)
                for i, ele in enumerate(requ):
                    if ele == -1:
                        requ[i] = missing
            elif crit == 1:  # we reshape to -1
                requ = [mul(*ishapes[0])]
            elif crit > 1:
                raise ValueError('shape argument to Reshape.perform'
                                 ' must have at most one entry equal to -1')
            return [requ]
        else:
            oshape = []
            for i in xrange(self.ndim):
                default_os_i = theano.tensor.opt.Shape_i(i)(node.outputs[0])
                try:
                    os_i = get_scalar_constant_value(node.inputs[1][i]).item()
                    if os_i == -1:
                        os_i = default_os_i
                except NotScalarConstantError:
                    os_i = default_os_i
                oshape.append(os_i)
            return [tuple(oshape)]

    def c_code_cache_version(self):
        return (6,)

    def c_code(self, node, name, inputs, outputs, sub):
        # C implementation via PyArray_Newshape; only for TensorVariable x.
        if isinstance(node.inputs[0], TensorVariable):
            x, shp = inputs
            z, = outputs
            new_ndim = self.ndim
            sdtype = node.inputs[1].type.dtype_specs()[1]
            fail = sub['fail']
            return """
            assert (PyArray_NDIM(%(shp)s) == 1);
            npy_intp new_dims[%(new_ndim)s];
            PyArray_Dims newshape;
            newshape.ptr = new_dims;
            newshape.len = %(new_ndim)s;
            for (int ii = 0; ii < %(new_ndim)s; ++ii)
            {
                // -- We do not want an explicit cast here. the shp can be any
                // -- int* dtype. The compiler will explicitly upcast it, but
                // -- will err if this will downcast. This could happen if the
                // -- user pass an int64 dtype, but npy_intp endup being int32.
                new_dims[ii] = ((%(sdtype)s*)(
                        PyArray_BYTES(%(shp)s) +
                        ii * PyArray_STRIDES(%(shp)s)[0]))[0];
            }
            Py_XDECREF(%(z)s);
            %(z)s = (PyArrayObject *) PyArray_Newshape(%(x)s, &newshape,
                NPY_CORDER);
            if (!%(z)s)
            {
                //The error message should have been set by PyArray_Newshape
                %(fail)s;
            }
            if (!PyArray_ISALIGNED(%(z)s)) {
                PyErr_Format(
                    PyExc_RuntimeError,
                    "PyArray_Newshape returned an object that isn't aligned!"
                    " NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
                    " this problem for some input shape/new shape"
                    " combinations. Use another NumPy version.");
                %(fail)s;
            }
            """ % locals()
        else:
            return Op.c_code(self, node, name, inputs, outputs, sub)
def reshape(x, newshape, ndim=None, name=None):
    """Reshape the tensor variable `x` to `newshape`.

    Parameters
    ----------
    x : TensorVariable
        The variable to reshape.
    newshape : symbolic vector or sequence of scalars
        Target shape.
    ndim : int, optional
        Rank of the result.  When omitted it is inferred from the length
        of `newshape`, which must then be statically known.
    name : str, optional
        Name given to the underlying Reshape Op.
    """
    if ndim is None:
        try:
            ndim = get_vector_length(newshape)
        except ValueError:
            # Length of newshape cannot be determined symbolically.
            raise ValueError(
                "The length of the provided shape (%s) cannot "
                "be automatically determined, so Theano is not able "
                "to know what the number of dimensions of the reshaped "
                "variable will be. You can provide the 'ndim' keyword "
                "argument to 'reshape' to avoid this problem." % newshape)
    return Reshape(ndim, name)(x, newshape)
class Flatten(Op):
    """
    Flatten a tensor.

    Flattens a tensor to `outdim` dimensions by preserving the leading
    outdim - 1 shape components.
    """

    # The output is a (reshaped) view of the input.
    view_map = {0: [0]}
    check_input = False
    __props__ = ("outdim",)

    def __init__(self, outdim=1):
        # outdim: rank of the flattened output (>= 1).
        self.outdim = int(outdim)

    def __str__(self):
        return '%s{%s}' % (self.__class__.__name__, self.outdim)

    def make_node(self, x):
        """Validate `outdim` against the input rank and build the Apply node."""
        t_x = as_tensor_variable(x)
        # Fix: use the converted variable `t_x` (not the raw argument `x`)
        # for ndim/broadcastable/dtype.  The original read `x.ndim` and
        # `x.broadcastable`, which fails when `x` is not already a Variable
        # (e.g. a raw numpy array, which has no `.broadcastable`).
        if self.outdim < 1 or (t_x.ndim and self.outdim > t_x.ndim):
            raise ValueError('invalid output ndimensions (%i) for tensor of '
                             'rank %i' % (self.outdim, t_x.ndim))

        # Infer the broadcastable pattern of the output. For every dimension
        # unaffected by the flatten, the broadcast flag should be unchanged.
        # For the dimension resulting from the collapse of other dimensions,
        # it should be broadcastable iff all the collapsed dimensions were
        # broadcastable.
        bcast_kept_dims = t_x.broadcastable[:self.outdim - 1]
        bcast_new_dim = python_all(t_x.broadcastable[self.outdim - 1:])
        broadcastable = bcast_kept_dims + (bcast_new_dim,)

        return gof.Apply(self, [t_x], [tensor(t_x.type.dtype,
                                              broadcastable)])

    def perform(self, node, inp, out_):
        """Flatten with numpy reshape, collapsing the trailing dimensions."""
        x, = inp
        out, = out_
        outdim = self.outdim
        if outdim == 1:
            try:
                out[0] = x.reshape(x.size)
            except AttributeError:
                # Inputs without a `.size` attribute.
                out[0] = x.reshape((numpy.prod(x.shape),))
        elif outdim == len(x.shape):
            # Nothing to collapse; pass through.
            out[0] = x
        else:
            # Keep the first outdim - 1 dims, collapse the rest into one.
            newshape = (x.shape[:outdim - 1] +
                        (numpy.prod(x.shape[outdim - 1:]),))
            out[0] = x.reshape(newshape)

    def infer_shape(self, node, in_shapes):
        """Output shape = kept leading dims + product of collapsed dims."""
        in_shp, = in_shapes
        part1 = in_shp[:self.outdim - 1]
        part2 = in_shp[self.outdim - 1:]

        if len(part2) > 1:
            part2 = (prod(part2, dtype='int64'),)
        elif len(part2) == 1:
            # We do not want to force an upcast of part2 if its length is 1
            pass
        else:
            if len(in_shp) == 0 and self.outdim == 1:
                # 0-d input flattened to a length-1 vector.
                part2 = (1,)
            else:
                raise ValueError('invalid output ndimensions (%i) for tensor '
                                 'of rank %i' % (self.outdim, len(in_shp)))

        out_shape = (part1 + part2)
        return [out_shape]

    def grad(self, inp, grads):
        # Gradient of a reshape is the incoming gradient reshaped back.
        x, = inp
        g_out, = grads
        return [reshape(g_out, shape(x), x.ndim)]

    def R_op(self, inputs, eval_points):
        if None in eval_points:
            return [None]
        return self.make_node(*eval_points).outputs

    def c_code_cache_version(self):
        """Version tag to invalidate cached C modules when c_code changes."""
        return (1, 1)

    def c_code(self, node, name, inputs, outputs, sub):
        """C implementation via PyArray_Newshape (view when possible)."""
        x, = inputs
        out, = outputs
        outdim = self.outdim
        fail = sub['fail']
        return """
        if (%(outdim)s == PyArray_NDIM(%(x)s))
        {
            Py_XDECREF(%(out)s);
            Py_XINCREF(%(x)s);
            %(out)s = %(x)s;
        }
        else
        {
            Py_XDECREF(%(out)s);

            if (%(outdim)s == 1)
            {
                npy_intp size = PyArray_SIZE(%(x)s);
                PyArray_Dims newshape;
                newshape.ptr = &size;
                newshape.len = 1;
                %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
                                                           &newshape,
                                                           NPY_CORDER);
            }
            else
            {
                npy_intp *oldshape = PyArray_DIMS(%(x)s);
                npy_intp newshape_dims[%(outdim)s];

                int i;
                for (i = 0; i < %(outdim)s - 1; ++i)
                    newshape_dims[i] = oldshape[i];

                newshape_dims[i] = 1;

                for (int j = %(outdim)s - 1; j < PyArray_NDIM(%(x)s); ++j)
                    newshape_dims[i] *= oldshape[j];

                PyArray_Dims newshape;
                newshape.ptr = newshape_dims;
                newshape.len = %(outdim)s;
                %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
                                                           &newshape,
                                                           NPY_CORDER);
            }
        }
        if (!%(out)s)
        {
            //The error message should have been set by
            // PyArray_Newshape
            %(fail)s;
        }
        if (!PyArray_ISALIGNED(%(out)s)) {
            PyErr_Format(
                PyExc_RuntimeError,
                "PyArray_Newshape returned an object that isn't"
                " aligned! NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
                " this problem for some input shape/new shape"
                " combinations. Use another NumPy version.");
            %(fail)s;
        }
        """ % locals()
def flatten(x, outdim=1):
    """Return `x` flattened to `outdim` dimensions (leading outdim - 1
    dimensions kept, trailing dimensions collapsed into the last one)."""
    op = Flatten(outdim)
    return op(x)
# class TileGrad(Op):
# """
# Calculates the gradient of the Tile Op.
# """
# # this is so weird, I can't think of how to make this a general thing.
# def make_node(self, x, reps, g_out):
# return gof.Apply(self, [x, reps, g_out], [x.type()])
#
# def perform(self, node, inp, out):
# x, reps, g_out = inp
# gx, = out
# xsh = x.shape
# if len(reps) == 2 and reps[1] == 1 and len(x.shape) == 1:
# gx[0] = numpy.sum(g_out, axis=0)
# else:
# raise NotImplementedError('x.shape, reps combination not '
# 'supported', (x.shape, reps))
#
# tilegrad = TileGrad()
class Tile(Op):
    """
    Construct an array by repeating the input x according to reps pattern.

    .. note:: Deprecated
        Use tile() instead.

    Tiles its input according to reps. The length of reps is the number of
    dimension of x and contains the number of times to tile x in each
    dimension.

    See Also
    --------
    numpy.tile : http://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html

    """

    __props__ = ("ndim",)

    def __init__(self, ndim):
        # ndim: rank of the tiled output.
        self.ndim = ndim

    def __str__(self):
        return self.__class__.__name__ + "{ndim=%d}" % self.ndim

    def make_node(self, x, reps):
        """Build the Apply node; warns because this Op is deprecated."""
        warnings.warn((
            "Tile op is deprecated, use tile function instead."), stacklevel=3)
        x = as_tensor_variable(x)
        reps = as_tensor_variable(reps)
        # Output dims are products of unknown reps, so none is broadcastable.
        return gof.Apply(self, [x, reps], [tensor(x.type.dtype, [False] *
                                                  self.ndim)])

    def perform(self, node, inp, out_):
        """Tile with numpy.tile, guarding against aliased results."""
        x, reps = inp
        out, = out_
        res = numpy.tile(x, reps)
        if res.ndim != self.ndim:
            raise ValueError(
                'Tile.perform produced incorrect number of dimensions')

        if (numpy.asarray(reps) == 1).all():
            # In that case, some NumPy version return a view!  As this
            # op isn't declared as inplace, we need to check that and
            # copy the data.
            if numpy.may_share_memory(res, x):
                res = res.copy()
        out[0] = res

    def infer_shape(self, node, in_shapes):
        """Output shape is the elementwise product of x's shape and reps."""
        # Note: in contrast with numpy, it is assumed that x.shape and reps
        # have equal length;  see also tile function below

        # Note: if reps were to be allowed not to be a constant and x.shape
        # and reps to be unequal, the following block of code could be used:
        # prepend 1 to x.shape if needed
        # if self.ndim > x.ndim:
        # shp = concatenate(ones(self.ndim - x.ndim), shp)
        # prepend 1 to reps if needed
        # reps = concatenate(ones(self.ndim - reps.shape[0]), reps)

        x, reps = node.inputs
        shp = in_shapes[0]
        tiled_shp = shp * reps
        out_shape = []
        for i in xrange(self.ndim):
            out_shape.append(tiled_shp[i])
        return [out_shape]

    def grad(self, inp, grads):
        # Gradient of Tile was never implemented (see the commented-out
        # TileGrad class above).
        x, reps = inp
        g_out, = grads
        # return [tilegrad(x, reps, g_out), None]
        raise NotImplementedError()
def tile(x, reps, ndim=None):
    """
    Tile input array `x` according to `reps`.

    See the docstring of `numpy.tile` for details.

    'reps' can be constant integer (e.g. 3), constant vector(e.g. [2 3]),
    symbolic scalar (e.g. tensor.iscalar()), symbolic vector (e.g. tensor.ivector())
    or a list of symbolic scalar (e.g. [tensor.iscalar(), tensor.iscalar()]).

    ndim is the number of the dimensions of the output, if it is provided, ndim
    should be equal or larger than x.ndim and len(reps), otherwise, we will use
    max(x.ndim, len(reps)) as ndim. If reps is symbolic vector, the ndim has to
    be provided.

    """
    if ndim is not None and ndim < x.ndim:
        raise ValueError("ndim should be equal or larger than x.ndim")

    # if reps is tensor.scalar, integer or tensor.vector, we convert it to a list.
    if not isinstance(reps, (list, tuple)):
        reps_astensor = as_tensor_variable(reps)
        ndim_check = reps_astensor.ndim
        if reps_astensor.dtype not in theano.tensor.discrete_dtypes:
            raise ValueError("elements of reps must be integer dtype")

        # tensor.scalar/integer case
        if ndim_check == 0:
            reps = [reps]

        # tensor.vector case
        elif ndim_check == 1:
            if ndim is None:
                raise ValueError("if reps is tensor.vector, you should specify "
                                 "the ndim")
            else:
                offset = ndim - reps.shape[0]

                # assert that reps.shape[0] does not exceed ndim
                offset = theano.tensor.opt.assert_(offset, ge(offset, 0))

                # if reps.ndim is less than x.ndim, we pad the reps with
                # "1" so that reps will have the same ndim as x.
                reps_ = [switch(i < offset, 1, reps[i - offset]) for i in range(ndim)]
                reps = reps_

        # other raise error
        else:
            raise ValueError("the dimension of reps should not exceed 1")
    else:
        if ndim is not None and len(reps) > ndim:
            raise ValueError("len(reps) should be equal or less than ndim")
        if not numpy.all([isinstance(r, integer_types) or
                          (isinstance(r, TensorVariable) and
                           r.dtype in theano.tensor.discrete_dtypes) for r in reps]):
            raise ValueError("elements of reps must be scalars of integer dtype")

    # if reps.ndim is less than x.ndim, we pad the reps with
    # "1" so that reps will have the same ndim as x.
    reps = list(reps)
    if ndim is None:
        ndim = builtins.max(len(reps), x.ndim)
    if len(reps) < ndim:
        reps = [1] * (ndim - len(reps)) + reps

    # Implement tile as alloc + dimshuffle + reshape: broadcast x into an
    # interleaved (reps[0], shape[0], reps[1], shape[1], ...) layout, then
    # collapse each (rep, dim) pair into a single output dimension.
    shape = [1] * (ndim - x.ndim) + [x.shape[i] for i in xrange(x.ndim)]
    alloc_shape = reps + shape
    y = alloc(x, *alloc_shape)
    shuffle_ind = numpy.arange(ndim * 2).reshape(2, ndim)
    shuffle_ind = shuffle_ind.transpose().flatten()
    y = y.dimshuffle(*shuffle_ind)
    new_shapes = [sh * reps[i] for i, sh in enumerate(shape)]
    y = y.reshape(new_shapes)

    return y
class ARange(Op):
    """Create an array containing evenly spaced values within a given interval.

    Parameters and behaviour are the same as numpy.arange().
    """

    __props__ = ("dtype",)

    def __init__(self, dtype):
        # dtype: dtype of the output vector.
        self.dtype = dtype

    def make_node(self, start, stop, step):
        """Build the Apply node; all three inputs must be scalars."""
        start, stop, step = map(as_tensor_variable, (start, stop, step))
        assert start.ndim == 0
        assert stop.ndim == 0
        assert step.ndim == 0

        inputs = [start, stop, step]
        outputs = [tensor(self.dtype, (False,))]

        return Apply(self, inputs, outputs)

    def infer_shape(self, node, i_shapes):
        """Symbolic output length: max(ceil((stop - start) / step), 0),
        with simpler expressions when start and/or step are constants."""
        # Note start, stop and step can be float numbers.
        start, stop, step = node.inputs

        def is_constant_value(var, value):
            # True iff `var` is a compile-time constant equal to `value`.
            try:
                v = get_scalar_constant_value(var)
                return numpy.all(v == value)
            except NotScalarConstantError:
                pass
            return False

        def upcast(var):
            # Promote small int dtypes to int64 so the subtraction below
            # cannot overflow; uint64 is deliberately left alone.
            if ('int' in var.dtype and
                    # We do not want to cast uint64 to int64 as this can
                    # loose information. If we upcast uint64 with int64,
                    # this give float64. This is safer then checking for
                    # uint64 in case we support [u]int128 or other in the
                    # future.
                    scal.upcast(var.dtype, 'int64') == 'int64'):
                return cast(var, 'int64')
            return var

        if is_constant_value(step, 1):
            if is_constant_value(start, 0):
                return [(cast(stop, 'int64'),)]
            else:
                stop = upcast(stop)
                start = upcast(start)
                return [(maximum(cast(stop - start, 'int64'), 0),)]
        else:
            stop = upcast(stop)
            start = upcast(start)
            return [(maximum(cast(ceil(cast((stop - start), 'float64') / step),
                                  'int64'), 0),)]

    def perform(self, node, inp, out_):
        """Delegate to numpy.arange on the scalar values."""
        start, stop, step = inp
        out, = out_
        start = start.item()
        stop = stop.item()
        step = step.item()
        out[0] = numpy.arange(start, stop, step, dtype=self.dtype)

    def connection_pattern(self, node):
        # start and step influence the output values; stop only influences
        # the output shape, hence is disconnected.
        return [[True], [False], [True]]

    def grad(self, inputs, grads):
        start, stop, step = inputs
        gz, = grads
        # start and step affect the output values
        # but the outputs are integers so there's
        # no gradient through them
        # stop does not affect the output values,
        # just the output shape, so it is disconnected
        return [start.zeros_like(),
                DisconnectedType()(),
                step.zeros_like()]

    def R_op(self, inputs, eval_points):
        # No meaningful R-operator for an integer-valued output.
        return [None]
# Cache mapping dtype name -> ARange Op instance, so that arange() reuses a
# single Op per output dtype (helps graph merging).
_arange = {}
def arange(start, stop=None, step=1, dtype=None):
    """Symbolic equivalent of numpy.arange.

    When `dtype` is not given it is inferred from the argument dtypes,
    following `config.cast_policy`.
    """
    # If only one argument is provided, it is in fact the "stop" argument,
    # and start is 0.
    if stop is None:
        start, stop = 0, start

    start, stop, step = map(as_tensor_variable, (start, stop, step))
    # If dtype is not provided, infer it from the other arguments
    if dtype is None:
        dtype = scal.upcast(start.type.dtype, stop.type.dtype, step.type.dtype)
        # don't try to be stingy and byte-optimize, this leads to
        # overflow problems.
        if dtype.startswith('int'):
            dtype = 'int64'
        if dtype.startswith('uint'):
            dtype = 'uint64'

        if config.cast_policy in ('numpy', 'numpy+floatX'):
            # We enforce numpy semantics, except in the special case where
            # `config.cast_policy` is 'numpy+floatX' and we want to use float32
            # rather than float64.
            # As an example, if `start`, `stop` and `step` are all int32,
            # `numpy.arange` returns an int64 array (on 64-bit platforms),
            # while the upcast above returns int32.
            numpy_dtype = numpy.arange(
                start=numpy.array(0, dtype=start.dtype),
                stop=numpy.array(1, dtype=stop.dtype),
                step=numpy.array(1, dtype=step.dtype)).dtype
            if numpy_dtype != dtype:
                if (config.cast_policy == 'numpy+floatX' and
                    config.floatX == 'float32' and
                    numpy_dtype == 'float64' and
                    # No explicit float64 in the three arguments?
                    python_all(
                        dt != 'float64'
                        for dt in [s.dtype for s in (start, stop, step)])):
                    # We use float32 instead.
                    assert dtype != 'float64'
                    dtype = 'float32'
                else:
                    # We use the same dtype as numpy instead of the result of
                    # the upcast.
                    dtype = str(numpy_dtype)

    # Reuse (or create) the cached ARange Op for this dtype.
    if dtype not in _arange:
        _arange[dtype] = ARange(dtype)
    return _arange[dtype](start, stop, step)
class _nd_grid(object):
    """Create a dense n-dimensional 'meshgrid' with equally spaced points.

    Used to create the instance ``mgrid`` and ``ogrid`` which act similarly
    to their numpy equivalents.

    Parameters
    ----------
    sparse : boolean, optional, default=True
        Specifying False leads to the equivalent of numpy's mgrid functionality.
        Specifying True leads to the equivalent of ogrid.

    Examples
    --------
    >>> a = T.mgrid[0:5, 0:3]
    >>> a[0].eval()
    array([[0, 0, 0],
           [1, 1, 1],
           [2, 2, 2],
           [3, 3, 3],
           [4, 4, 4]], dtype=int8)
    >>> a[1].eval()
    array([[0, 1, 2],
           [0, 1, 2],
           [0, 1, 2],
           [0, 1, 2],
           [0, 1, 2]], dtype=int8)
    >>> b = T.ogrid[0:5, 0:3]
    >>> b[0].eval()
    array([[0],
           [1],
           [2],
           [3],
           [4]], dtype=int8)
    >>> b[1].eval()
    array([[0, 1, 2]], dtype=int8)

    """

    def __init__(self, sparse=False):
        # sparse=True reproduces numpy.ogrid; False reproduces numpy.mgrid.
        self.sparse = sparse

    def __getitem__(self, *args):
        """Build one range per slice in `args[0]`; dense mode broadcasts
        every range against all the others."""
        # args[0] is the tuple of slices, one per output dimension.
        ndim = len(args[0])
        for sl in args[0]:
            if isinstance(sl.step, python_complex):
                raise NotImplementedError("Not implemented for slices "
                                          "whose step is complex")
        # numpy-like slice defaults: start=0, step=1.
        ranges = [arange(sl.start or 0,
                         sl.stop,
                         sl.step or 1) for sl in args[0]]
        # Reshape each range so it spans only its own axis.
        shapes = [tuple([1] * j + [r.shape[0]] + [1] * (ndim - 1 - j))
                  for j, r in enumerate(ranges)]
        ranges = [r.reshape(shape) for r, shape in zip(ranges, shapes)]
        if self.sparse:
            grids = ranges
        else:
            # Dense mode: multiply each range by ones along the other axes.
            grids = []
            ones = [ones_like(r) for r in ranges]
            for i in range(ndim):
                grid = 1
                for j in range(ndim):
                    if j == i:
                        grid = grid * ranges[j]
                    else:
                        grid = grid * ones[j]
                grids.append(grid)
        return grids
# Singleton index helpers mirroring numpy.mgrid (dense) / numpy.ogrid (sparse).
mgrid = _nd_grid()
ogrid = _nd_grid(sparse=True)
class PermuteRowElements(Op):
    """Permute the elements of each row (inner-most dim) of a tensor.

    A permutation will be applied to every row (vector) of the input tensor x.
    Depending on the dimensionality of x and the permutation tensor y,
    different cases are possible.

    If y.ndim = 1, y is a single permutation, that will be applied to every
    vector of x. For instance, if x is a matrix, the same permutation will be
    applied to each row of x.

    If x.ndim = y.ndim, each row of x corresponds to a row of y, containing
    a permutation that will be applied to that row. For instance, if x and y
    are two matrices, a different permutation will be applied to each row of x.

    If x.ndim > y.ndim, y will be broadcasted to fit x, then each row (vector)
    of x will be reordered according to the corresponding row of y. (This is
    a generalization of the first case).

    If x.ndim = 1, every permutation in y will be applied to x, and the output
    will contain all the results.

    If x.ndim < y.ndim, x will be broadcasted to fit y, and different
    permutations contained in y will be applied to each vector in x. (This is
    a generalization of the previous case).

    If the "inverse" argument is True, the Op will perform the inverse
    permutation instead.
    """

    __props__ = ()

    def make_node(self, x, y, inverse):
        """Build the Apply node, padding x or y on the left so both have
        the same rank."""
        x = as_tensor_variable(x)
        y = as_tensor_variable(y)
        if inverse:  # as_tensor_variable does not accept booleans
            inverse = as_tensor_variable(1)
        else:
            inverse = as_tensor_variable(0)

        # y should contain integers
        assert (y.type.dtype.startswith('int') or
                y.type.dtype.startswith('uint'))
        # Inverse should be an integer scalar
        assert (inverse.type.ndim == 0 and
                (inverse.type.dtype.startswith('int') or
                 inverse.type.dtype.startswith('uint')))

        # Match shapes of x and y
        x_dim = x.type.ndim
        y_dim = y.type.ndim

        if x_dim > y_dim:
            y = shape_padleft(y, n_ones=(x_dim - y_dim))
        elif x_dim < y_dim:
            x = shape_padleft(x, n_ones=(y_dim - x_dim))

        # Compute the broadcastable pattern of the output: a dimension is
        # broadcastable only if it is broadcastable in both x and y.
        out_broadcastable = [xb and yb for xb, yb in
                             izip(x.type.broadcastable, y.type.broadcastable)]
        out_type = tensor(dtype=x.type.dtype, broadcastable=out_broadcastable)

        inputlist = [x, y, inverse]
        outputlist = [out_type]
        return Apply(self, inputlist, outputlist)

    def _rec_perform(self, node, x, y, inverse, out, curdim):
        """Perform the permutation by doing a recursion over the input
        dimensions.

        For every dimension, starting with the leftmost, the right set of
        indices is determined (depending if broadcasting or not), then
        the function is recursively called on the appropriate subtensors.

        The terminal case is reached when the current tensors are vector,
        then the permutation contained in y is applied to x.

        Parameters
        ----------
        x : tensor
            The input tensor, on which the permutation is applied.
        y : tensor
            Tensor containing the permutations to apply.
        out : tensor
            Tensor storing the output result.
        curdim : int
            Counter of the current depth of recursion.
        inverse
            Wether to apply permutations or their inverse.
        """
        if len(x.shape) == 1:
            # Numpy advanced indexing works in this case
            if inverse:
                out[y] = x[:]
            else:
                out[:] = x[y]
            if (numpy.__version__ <= '1.6.1' and
                    out.size != numpy.uint32(out.size)):
                warnings.warn(
                    'Numpy versions 1.6.1 and below have a bug preventing '
                    'advanced indexing from correctly filling arrays that '
                    'are too big (>= 2^32 elements). It is possible that '
                    'out (%s), with shape %s, is not correctly filled.'
                    % (out, out.shape))
        else:
            xs0 = x.shape[0]
            ys0 = y.shape[0]
            if xs0 == ys0:
                for i in xrange(xs0):
                    self._rec_perform(node, x[i], y[i], inverse, out[i],
                                      curdim + 1)
            elif ys0 == 1 and node.inputs[1].type.broadcastable[curdim]:
                # Broadcast y
                for i in xrange(xs0):
                    self._rec_perform(node, x[i], y[0], inverse, out[i],
                                      curdim + 1)
            elif xs0 == 1 and node.inputs[0].type.broadcastable[curdim]:
                # Broadcast x
                for i in xrange(ys0):
                    self._rec_perform(node, x[0], y[i], inverse, out[i],
                                      curdim + 1)
            else:
                raise ValueError('Dimension mismatch: %s, %s' % (xs0, ys0))

    def perform(self, node, inp, out):
        """Allocate (or reuse) the output buffer and fill it recursively."""
        x, y, inverse = inp
        outs, = out
        x_s = x.shape
        y_s = y.shape
        assert len(x_s) == len(y_s)

        # Make sure the output is big enough
        out_s = []
        for xdim, ydim in izip(x_s, y_s):
            if xdim == ydim:
                outdim = xdim
            elif xdim == 1:
                outdim = ydim
            elif ydim == 1:
                outdim = xdim
            else:
                raise ValueError('Dimension mismatch: %s, %s' % (xdim, ydim))
            out_s.append(outdim)

        # Fix: `outs[0].shape` is a tuple while `out_s` was a list, so the
        # original comparison was always True and a fresh buffer was
        # allocated on every call, defeating output reuse.
        out_s = tuple(out_s)
        if outs[0] is None or outs[0].shape != out_s:
            outs[0] = numpy.empty(out_s, dtype=x.dtype)

        self._rec_perform(node, x, y, inverse, outs[0], curdim=0)

    def infer_shape(self, node, in_shapes):
        """Output shape is the elementwise maximum of x's and y's shapes
        (i.e. the broadcasted shape)."""
        shp_x = in_shapes[0]
        shp_y = in_shapes[1]
        assert len(shp_x) == len(shp_y)
        out_shape = []
        for i in xrange(len(shp_x)):
            out_shape.append(maximum(shp_x[i], shp_y[i]))
        return [out_shape]

    def grad(self, inp, grads):
        """Gradient wrt x is the inverse permutation of the output gradient;
        y and inverse have undefined gradients."""
        x, y, inverse = inp
        gz, = grads
        # First, compute the gradient wrt the broadcasted x.
        # If 'inverse' is False (0), apply the inverse of y on gz.
        # Else, apply y on gz.
        gx = permute_row_elements(gz, y, eq(inverse, 0))

        # If x has been broadcasted along some axes, we need to sum
        # the gradient over these axes, but keep the dimension (as
        # broadcastable)
        broadcasted_dims = [dim for dim in xrange(gz.type.ndim)
                            if x.type.broadcastable[dim] and
                            not gz.type.broadcastable[dim]]
        gx = Sum(axis=broadcasted_dims)(gx)

        # Sum(...) removed the dimensions in broadcasted_dims,
        # so we need to put them back.
        newdims = []
        i = 0
        for dim in xrange(gz.type.ndim):
            if dim in broadcasted_dims:
                newdims.append('x')
            else:
                newdims.append(i)
                i += 1

        gx = DimShuffle(gx.type.broadcastable, newdims)(gx)
        assert gx.type.broadcastable == x.type.broadcastable

        # if x is an integer type, then so is the output.
        # this means f(x+eps) = f(x) so the gradient with respect
        # to x is zero
        if x.type.dtype.find('int') != -1:
            gx = x.zeros_like()

        # The elements of y and of inverse both affect the output,
        # so they are connected to the output,
        # and the transformation isn't defined if their values
        # are non-integer, so the gradient with respect to them is
        # undefined.
        # Fix: `inverse` is input 2 of this Op; the original code wrongly
        # reported it as input position 1 in grad_undefined.
        return [gx, grad_undefined(self, 1, y),
                grad_undefined(self, 2, inverse)]
# Shared singleton instance of PermuteRowElements used by the helper
# functions permute_row_elements() and inverse_permutation() below.
_permute_row_elements = PermuteRowElements()
def permute_row_elements(x, y, inverse=0):
    """Apply the permutation(s) in `y` to the rows of `x`.

    See PermuteRowElements for the broadcasting semantics between
    `x` and `y`; `inverse` selects the inverse permutation.
    """
    result = _permute_row_elements(x, y, inverse)
    return result
def inverse_permutation(perm):
    """Computes the inverse of permutations.

    Each row of input should contain a permutation of the first integers.
    """
    # Inversely permuting the identity [0, 1, ..., n-1] yields the inverse.
    identity = arange(perm.shape[-1], dtype=perm.dtype)
    return permute_row_elements(identity, perm, inverse=True)
#########################
# Linalg : Dot
#########################
#
# For BLAS-related ops see blas.py
#
# TODO: Dotinv should go here, Eigs, Svd, etc.
class Dot(Op):
    """
    Computes the dot product of two variables. For two matrices, this is
    equivalent to matrix multiplication. For two vectors, this is the inner
    product.

    Notes
    -----
    Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops
    (see tensor.blas).
    Vector-vector products are sometimes optimized to Ger or CGer (see
    tensor.blas).
    Matrix-vector products are sometimes optimized to Gemv, CGemv (see
    tensor.blas).

    """

    __props__ = ()

    # the rationale for Dot22 is related to getting GEMM Ops into the
    # graph. See Dot22 in tensor.blas for details.

    def make_node(self, *inputs):
        """Check that both operands are vectors or matrices and build the
        Apply node; the output dtype is the upcast of the input dtypes."""
        inputs = list(map(as_tensor_variable, inputs))

        if len(inputs) != 2:
            raise TypeError(
                'theano.tensor.Dot: 2 arguments required, %d given ' %
                len(inputs))
        if inputs[0].ndim not in (1, 2):
            raise TypeError(
                'theano.tensor.Dot: input 0 (0-indexed) must have ndim of '
                '1 or 2, %d given. Consider calling theano.tensor.dot '
                'instead.' % inputs[0].ndim)
        if inputs[1].ndim not in (1, 2):
            raise TypeError(
                'theano.tensor.Dot: input 1 (0-indexed) must have ndim of '
                '1 or 2, %d given. Consider calling theano.tensor.dot '
                'instead.' % inputs[1].ndim)

        i_broadcastables = [input.type.broadcastable for input in inputs]
        bx, by = i_broadcastables
        if len(by) == 2:  # y is a matrix
            bz = bx[:-1] + by[-1:]
        elif len(by) == 1:  # y is vector
            bz = bx[:-1]

        i_dtypes = [input.type.dtype for input in inputs]
        outputs = [tensor(scal.upcast(*i_dtypes), bz)]
        return Apply(self, inputs, outputs)

    def perform(self, node, inp, out):
        x, y = inp
        z, = out

        # the asarray is here because dot between two vectors
        # gives a numpy float object but we need to return a 0d
        # ndarray
        z[0] = numpy.asarray(numpy.dot(x, y))

    def grad(self, inp, grads):
        """Gradient of the dot product wrt both operands; the formula
        depends on which operands are vectors and which are matrices."""
        x, y = inp
        gz, = grads
        xdim, ydim, gdim = x.type.ndim, y.type.ndim, gz.type.ndim

        # grad is scalar, so x is vector and y is vector
        if gdim == 0:
            xgrad = gz * y
            ygrad = gz * x

        # x is vector, y is matrix, grad is vector
        elif xdim == 1 and ydim == 2:
            xgrad = dot(gz, y.T)
            ygrad = outer(x.T, gz)

        # x is matrix, y is vector, grad is vector
        elif xdim == 2 and ydim == 1:
            xgrad = outer(gz, y.T)
            ygrad = dot(x.T, gz)

        # x is matrix, y is matrix, grad is matrix
        elif xdim == ydim == 2:
            xgrad = dot(gz, y.T)
            ygrad = dot(x.T, gz)

        # If x or y contain broadcastable dimensions but only one of
        # them know that a matching dimensions is broadcastable, the
        # above code don't always return the right broadcast pattern.
        # This cause problem down the road. See gh-1461.
        if xgrad.broadcastable != x.broadcastable:
            xgrad = patternbroadcast(xgrad, x.broadcastable)
        if ygrad.broadcastable != y.broadcastable:
            ygrad = patternbroadcast(ygrad, y.broadcastable)

        rval = xgrad, ygrad

        for elem in rval:
            assert elem.dtype.find('float') != -1

        return rval

    def R_op(self, inputs, eval_points):
        # R_op for a \dot b evaluted at c for a and d for b is
        # simply c \dot b + a \dot d

        assert len(inputs) == 2
        assert len(eval_points) == 2
        if eval_points[0] is None and eval_points[1] is None:
            return [None]

        # When compute_test_value is enabled, check that the eval points'
        # test values have shapes matching the inputs.
        debugger_available = config.compute_test_value != 'off'

        if debugger_available:
            try:
                iv0 = gof.op.get_test_value(inputs[0])
            except AttributeError:
                gof.op.missing_test_message(
                    'first input passed to Dot.R_op has no test value')
                debugger_available = False

            try:
                iv1 = gof.op.get_test_value(inputs[1])
            except AttributeError:
                gof.op.missing_test_message(
                    'second input passed to Dot.R_op has no test value')
                debugger_available = False

        # NOTE(review): if exactly one eval_point is None while the debugger
        # is available, ev0 or ev1 below may be referenced before assignment
        # in eval_point_values — preserved as-is, verify against callers.
        if eval_points[0]:
            try:
                ev0 = gof.op.get_test_value(eval_points[0])
            except AttributeError:
                gof.op.missing_test_message(
                    'first eval point passed to Dot.R_op '
                    'has no test value')
                debugger_available = False
        if eval_points[1]:
            try:
                ev1 = gof.op.get_test_value(eval_points[1])
            except AttributeError:
                gof.op.missing_test_message(
                    'second eval point passed to Dot.R_op '
                    'has no test value')
                debugger_available = False

        if debugger_available:
            input_values = [iv0, iv1]
            eval_point_values = [ev0, ev1]

            for i in xrange(2):
                if eval_point_values[i] is not None and \
                   input_values[i].shape != eval_point_values[i].shape:
                    raise ValueError(
                        'input ' + str(i) + ' and eval_point ' + str(i) +
                        ' to Dot.R_op should have the same shape, but '
                        'their shapes are %s and %s, respectively' % (
                            str(input_values[i].shape),
                            str(eval_point_values[i].shape)))

        if eval_points[0]:
            t1 = self(eval_points[0], inputs[1])
        if eval_points[1]:
            t2 = self(inputs[0], eval_points[1])

        if eval_points[0] and eval_points[1]:
            return [t1 + t2]
        elif eval_points[0]:
            return [t1]
        else:
            return [t2]

    def infer_shape(self, node, shapes):
        """Symbolic output shape for each vector/matrix combination."""
        xshp, yshp = shapes
        x, y = node.inputs

        # vector / vector
        if x.ndim == 1 and y.ndim == 1:
            return [()]
        # matrix / vector
        if x.ndim == 2 and y.ndim == 1:
            return [xshp[:-1]]
        # vector / matrix
        if x.ndim == 1 and y.ndim == 2:
            return [yshp[-1:]]
        # matrix / matrix
        if x.ndim == 2 and y.ndim == 2:
            return [xshp[:-1] + yshp[-1:]]
        raise NotImplementedError()

    def __str__(self):
        return "dot"
# Shared Dot Op instance, with pretty-printing registered so that dot
# expressions display using the middle-dot operator.
_dot = Dot()
pprint.assign(_dot, printing.OperatorPrinter(printing.special['middle_dot'],
                                             -1, 'left'))
def dot(a, b):
    """
    Computes the dot product of two variables.

    For two matrices, this is equivalent to matrix multiplication.
    For two vectors, this is the inner product.
    When one variable is a scalar, this is like elementwise multiplication.
    For N dimensions, this is a sum product over the last axis
    of the first array and the second-to-last axis of the second array:

        dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])

    This function does one of three things, in the following sequence:

    1. If either a or b is scalar, it returns the elementwise product
       without calling the Theano Dot op.
    2. If either a or b has more than 2 dimensions, it calls Theano's
       tensordot function with appropriate axes. The tensordot function
       expresses high-dimensional dot products in terms of 2D matrix
       multiplications, so it may be possible to further optimize for
       performance.
    3. If both a and b have either 1 or 2 dimensions, it calls Theano's
       Dot op on a and b.

    Notes
    -----
    Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops
    (see tensor.blas).
    Vector-vector products are sometimes optimized to Ger or CGer (see
    tensor.blas).
    Matrix-vector products are sometimes optimized to Gemv, CGemv (see
    tensor.blas).
    """
    a = as_tensor_variable(a)
    b = as_tensor_variable(b)

    # Scalar operand: plain elementwise multiplication.
    if a.ndim == 0 or b.ndim == 0:
        return a * b

    # High-dimensional operand: delegate to tensordot, summing over a's
    # last axis and b's second-to-last (or only) axis.
    if a.ndim > 2 or b.ndim > 2:
        axes = [[a.ndim - 1], [numpy.maximum(0, b.ndim - 2)]]
        return tensordot(a, b, axes)

    # Both operands are vectors or matrices: use the Dot Op directly.
    return _dot(a, b)
#########################
# Linalg : TensorDot
#########################
def _tensordot_as_dot(a, b, axes, dot, batched):
    """
    Reduces a tensor dot product to a matrix or vector dot product. Based
    on code from Tijmen Tieleman's gnumpy
    (http://www.cs.toronto.edu/~tijmen/gnumpy.html).

    Please see the documentation of tensordot for the meaning of the a, b
    and axes arguments.

    :param dot: a function that accepts two symbolic variables and computes
                the appropriate dot product (e.g. dot, batched_dot)
    :type dot: function

    :param batched: whether to treat the first axis of a and b as a batch
                    axis.  If so, this axis will be preserved in the output,
                    allowing this function to be used also for batched
                    tensor dot products.
    :type batched: boolean

    :returns: a tensor with shape equal to the concatenation of a's shape
              (less any dimensions that were summed over) and b's shape
              (less the first dimension and any dimensions that were summed
              over).
    :rtype: symbolic tensor
    """
    a, b = as_tensor_variable(a), as_tensor_variable(b)

    if not numpy.isscalar(axes) and len(axes) != 2:
        raise ValueError('Axes should be an integer or a '
                         'list/tuple of len 2 (%s was provided)'
                         % str(axes))

    # if 'axes' is a number of axes to multiply and sum over (trailing axes
    # of a, leading axes of b), we can just reshape and use dot.
    elif numpy.isscalar(axes):
        axes = int(axes)

        for operand_name, operand in (("a", a), ("b", b)):
            if axes > operand.ndim:
                raise ValueError(
                    'axes can not be larger than the dimension of %s '
                    '(%s.ndim=%i, axes=%i)'
                    % (operand_name, operand_name, operand.ndim, axes))
            if batched and axes == operand.ndim:
                raise ValueError(
                    'axes to sum over must not include the batch axis '
                    'of %s (%s.ndim=%i, axes=%i)'
                    % (operand_name, operand_name, operand.ndim, axes))

        batch_axes = 1 if batched else 0
        # Slices selecting the axes that survive into the output.
        a_outaxes = slice(0, a.ndim - axes)
        b_outaxes = slice(batch_axes + axes, b.ndim)
        outshape = concatenate([a.shape[a_outaxes], b.shape[b_outaxes]])
        outbcast = a.broadcastable[a_outaxes] + b.broadcastable[b_outaxes]
        outndim = len(outbcast)

        # Collapse each operand to a 2-D (or 3-D if batched) shape so a
        # single matrix dot computes the whole contraction.
        a_shape = [1] * 2
        b_shape = [1] * 2

        # compute total size of summed axes
        for i in xrange(0, axes):
            a_shape[1] *= a.shape[-(i + 1)]
            b_shape[0] *= b.shape[batch_axes + i]
        # compute total size of other axes
        for i in xrange(0, a.ndim - axes - batch_axes):
            a_shape[0] *= a.shape[batch_axes + i]
        for i in xrange(0, b.ndim - axes - batch_axes):
            b_shape[1] *= b.shape[-(i + 1)]

        if batched:
            a_shape.insert(0, a.shape[0])
            b_shape.insert(0, b.shape[0])

        a_reshaped = a.reshape(a_shape)
        b_reshaped = b.reshape(b_shape)

        out_reshaped = dot(a_reshaped, b_reshaped)
        out = out_reshaped.reshape(outshape, outndim)
        # Make sure the broadcastable pattern of the result is correct,
        # since some shape information can be lost in the reshapes.
        return patternbroadcast(out, outbcast)

    # if 'axes' is a list, transpose a and b such that the summed axes of a
    # are last and the summed axes of b are first.
    else:
        axes = [_pack(axes_) for axes_ in axes]

        if len(axes[0]) != len(axes[1]):
            raise ValueError('Axes elements must have the same length.')

        for i, (operand_name, operand) in enumerate((("a", a),
                                                     ("b", b))):
            if len(axes[i]) > operand.ndim:
                raise ValueError(
                    'axes[%i] should be array_like with length less than '
                    'the dimensions of %s (%s.ndim=%i, len(axes[0])=%i).' %
                    (i, operand_name, operand_name, operand.ndim,
                     len(axes[i])))
            if len(axes[i]) > 0 and numpy.max(axes[i]) >= operand.ndim:
                raise ValueError(
                    'axes[%i] contains dimensions greater than or equal '
                    'to %s.ndim (%s.ndim=%i, max(axes[0])=%i).' %
                    (i, operand_name, operand_name, operand.ndim,
                     numpy.max(numpy.array(axes[i]))))
            if batched and 0 in axes[i]:
                raise ValueError(
                    'axes to sum over must not contain the batch axis '
                    '(axes[%i]=%s)' %
                    (i, axes[i]))

        batch_axes = [0] if batched else []
        other_axes = [[x for x in xrange(operand.ndim)
                       if x not in axes[i] and x not in batch_axes]
                      for i, operand in enumerate((a, b))]

        a_shuffled = a.dimshuffle(batch_axes + other_axes[0] + axes[0])
        b_shuffled = b.dimshuffle(batch_axes + axes[1] + other_axes[1])

        # now that a and b are in the right order, recur with integer axes
        return _tensordot_as_dot(a_shuffled, b_shuffled, len(axes[0]),
                                 dot=dot, batched=batched)
def tensordot(a, b, axes=2):
    """
    Compute a generalized dot product over provided axes.

    Given two tensors a and b, tensordot computes a generalized dot product over
    the provided axes. Theano's implementation reduces all expressions to
    matrix or vector dot products and is based on code from Tijmen Tieleman's
    gnumpy (http://www.cs.toronto.edu/~tijmen/gnumpy.html).

    Parameters
    ----------
    a: symbolic tensor
        The first tensor variable.
    b: symbolic tensor
        The second tensor variable
    axes: int or array-like of length 2
        If an integer, the number of axes to sum over.
        If an array, it must have two array elements containing the axes
        to sum over in each tensor.

        Note that the default value of 2 is not guaranteed to work
        for all values of a and b, and an error will be raised if
        that is the case. The reason for keeping the default is to
        maintain the same signature as numpy's tensordot function
        (and np.tensordot raises analogous errors for non-compatible
        inputs).

        If an integer i, it is converted to an array containing
        the last i dimensions of the first tensor and the first
        i dimensions of the second tensor:
            axes = [list(range(a.ndim - i, a.ndim)), list(range(i))]

        If an array, its two elements must contain compatible axes
        of the two tensors. For example, [[1, 2], [2, 0]] means sum
        over the 2nd and 3rd axes of a and the 3rd and 1st axes of b.
        (Remember axes are zero-indexed!) The 2nd axis of a and the
        3rd axis of b must have the same shape; the same is true for
        the 3rd axis of a and the 1st axis of b.

    Returns
    -------
    symbolic tensor
        A tensor with shape equal to the concatenation of a's shape
        (less any dimensions that were summed over) and b's shape
        (less any dimensions that were summed over).

    Examples
    --------
    It may be helpful to consider an example to see what tensordot does.
    Theano's implementation is identical to NumPy's. Here a has shape (2, 3, 4)
    and b has shape (5, 6, 4, 3). The axes to sum over are [[1, 2], [3, 2]] --
    note that a.shape[1] == b.shape[3] and a.shape[2] == b.shape[2]; these axes
    are compatible. The resulting tensor will have shape (2, 5, 6) -- the
    dimensions that are not being summed:

    >>> a = np.random.random((2,3,4))
    >>> b = np.random.random((5,6,4,3))

    #tensordot
    >>> c = np.tensordot(a, b, [[1,2],[3,2]])

    #loop replicating tensordot
    >>> a0, a1, a2 = a.shape
    >>> b0, b1, _, _ = b.shape
    >>> cloop = np.zeros((a0,b0,b1))

    #loop over non-summed indices -- these exist
    #in the tensor product.
    >>> for i in range(a0):
    ...     for j in range(b0):
    ...         for k in range(b1):
    ...             #loop over summed indices -- these don't exist
    ...             #in the tensor product.
    ...             for l in range(a1):
    ...                 for m in range(a2):
    ...                     cloop[i,j,k] += a[i,l,m] * b[j,k,m,l]

    >>> np.allclose(c, cloop)
    True

    This specific implementation avoids a loop by transposing a and b such that
    the summed axes of a are last and the summed axes of b are first. The
    resulting arrays are reshaped to 2 dimensions (or left as vectors, if
    appropriate) and a matrix or vector dot product is taken. The result is
    reshaped back to the required output dimensions.

    In an extreme case, no axes may be specified. The resulting tensor
    will have shape equal to the concatenation of the shapes of a and b:

    >>> c = np.tensordot(a, b, 0)
    >>> print(a.shape)
    (2, 3, 4)
    >>> print(b.shape)
    (5, 6, 4, 3)
    >>> print(c.shape)
    (2, 3, 4, 5, 6, 4, 3)

    See the documentation of numpy.tensordot for more examples.

    """
    return _tensordot_as_dot(a, b, axes, dot=dot, batched=False)
def outer(x, y):
    """Compute the vector-vector outer product of `x` and `y`.

    Arguments with more than one dimension are flattened to vectors
    before the product is taken.
    """
    u = x if x.ndim == 1 else x.flatten()
    v = y if y.ndim == 1 else y.flatten()
    # Promote to a column and a row, then take an ordinary dot product.
    return dot(u.dimshuffle(0, 'x'), v.dimshuffle('x', 0))
def any(x, axis=None, keepdims=False):
    """Logical OR reduction of `x` along `axis` (all axes when None)."""
    result = elemwise.Any(axis)(x)
    if keepdims:
        # Re-insert the reduced axes with size 1.
        result = makeKeepDims(x, result, axis)
    return result
def all(x, axis=None, keepdims=False):
    """Logical AND reduction of `x` along `axis` (all axes when None)."""
    result = elemwise.All(axis)(x)
    if keepdims:
        # Re-insert the reduced axes with size 1.
        result = makeKeepDims(x, result, axis)
    return result
# Some NumPy versions (e.g. 1.9.2) return a view from numpy.diagonal;
# probe the installed NumPy once so Diagonal can declare a view_map.
_probe = numpy.zeros((4, 4))
numpy_diagonal_return_view = numpy.may_share_memory(
    numpy.diagonal(_probe), _probe)
del _probe
class Diagonal(Op):
    """Extract the diagonal of a tensor along two of its axes.

    Parameters
    ----------
    x
        A tensor variable with x.ndim >= 2.

    Returns
    -------
    vector
        A tensor holding the requested diagonal elements.
    """

    __props__ = ("offset", "axis1", "axis2")

    def __init__(self, offset=0, axis1=0, axis2=1):
        # When numpy.diagonal returns a view, the output aliases the input.
        if numpy_diagonal_return_view:
            self.view_map = {0: [0]}
        self.offset = offset
        self.axis1 = axis1
        self.axis2 = axis2

    def make_node(self, x):
        x = as_tensor_variable(x)
        assert x.ndim >= 2
        # The output drops one dimension relative to the input.
        out = tensor(dtype=x.dtype,
                     broadcastable=[False] * (x.ndim - 1))
        return Apply(self, [x], [out])

    def perform(self, node, inputs, outputs):
        outputs[0][0] = inputs[0].diagonal(self.offset, self.axis1,
                                           self.axis2)

    def grad(self, inputs, gout):
        # The gradient is not implemented for this op.
        return [grad_not_implemented(self, 0, inputs[0])]

    def infer_shape(self, node, shapes):
        in_shape, = shapes
        dim1 = in_shape[self.axis1]
        dim2 = in_shape[self.axis2]
        # All non-diagonal axes keep their extent.
        out_shape = [extent for axis, extent in enumerate(in_shape)
                     if axis not in (self.axis1, self.axis2)]
        # The diagonal length mirrors the C logic of PyArray_Diagonal().
        offset = self.offset
        if offset > 0:
            diag_size = clip(dim2 - offset, 0, dim1)
        elif offset < 0:
            diag_size = clip(dim1 + offset, 0, dim2)
        else:
            diag_size = minimum(dim1, dim2)
        out_shape.append(diag_size)
        return [tuple(out_shape)]
def diagonal(a, offset=0, axis1=0, axis2=1):
    """Return the diagonal of `a` over the given pair of axes."""
    is_default = (offset == 0 and axis1 == 0 and axis2 == 1)
    if is_default:
        # Main diagonal over the first two axes: use the specialized op.
        return theano.tensor.nlinalg.extract_diag(a)
    return Diagonal(offset, axis1, axis2)(a)
class Diag(Op):
    """Build a square matrix whose main diagonal is the given vector."""

    __props__ = ()

    def make_node(self, diag):
        diag = as_tensor_variable(diag)
        if diag.type.ndim != 1:
            raise TypeError('data argument must be a vector', diag.type)
        return Apply(self, [diag], [matrix(dtype=diag.dtype)])

    def perform(self, node, inputs, outputs):
        outputs[0][0] = numpy.diag(inputs[0])

    def grad(self, inputs, gout):
        # d/dv diag(v) pushes the output gradient's diagonal back.
        (gz,) = gout
        return [diagonal(gz)]

    def infer_shape(self, nodes, shapes):
        n = shapes[0][0]
        return [(n, n)]
def diag(v, k=0):
    """numpy.diag analogue: a vector becomes a diagonal matrix, a
    matrix yields its k-th diagonal."""
    ndim = v.ndim
    if ndim == 1:
        assert k == 0, "diagonals other than main are not implemented"
        return Diag()(v)
    if ndim == 2:
        return diagonal(v, k)
    raise ValueError("Input must be 1- or 2-d.")
def stacklists(arg):
    """
    Recursively stack nested lists/tuples of tensors, preserving the
    nesting structure as leading dimensions of the result.

    A shaped list of scalars becomes a tensor:

    Examples
    --------
    >>> from theano.tensor import stacklists, scalars, matrices
    >>> from theano import function
    >>> a, b, c, d = scalars('abcd')
    >>> X = stacklists([[a, b], [c, d]])
    >>> f = function([a, b, c, d], X)
    >>> f(1, 2, 3, 4)
    array([[ 1.,  2.],
           [ 3.,  4.]], dtype=float32)

    Arbitrarily shaped tensors stack the same way; here matrices form
    a 2 by 2 grid:

    >>> from numpy import ones
    >>> a, b, c, d = matrices('abcd')
    >>> X = stacklists([[a, b], [c, d]])
    >>> f = function([a, b, c, d], X)
    >>> x = ones((4, 4), 'float32')
    >>> f(x, x, x, x).shape
    (2, 2, 4, 4)

    """
    # Leaves (non-sequences) are returned unchanged.
    if not isinstance(arg, (tuple, list)):
        return arg
    return stack([stacklists(item) for item in arg])
def ptp(a, axis=None):
    """
    Range of values (maximum - minimum) along an axis.

    The name of the function comes from the acronym for peak to peak.

    Parameters
    ----------
    a
        Input tensor.
    axis
        Axis along which to find the peaks. By default, flatten the array.

    Returns
    -------
    array
        A new array holding the result.
    """
    tensor_a = as_tensor_variable(a)
    return max(tensor_a, axis) - min(tensor_a, axis)
def power(x, y):
    """Return `x` raised to the power `y`, elementwise."""
    return x ** y
def swapaxes(y, axis1, axis2):
    """Return `y` with the two given axes interchanged."""
    y = as_tensor_variable(y)
    order = list(range(y.ndim))
    order[axis1], order[axis2] = order[axis2], order[axis1]
    return y.dimshuffle(order)
def choose(a, choices, out=None, mode='raise'):
    """
    Construct an array from an index array and a set of arrays to choose from.

    First of all, if confused or uncertain, definitely look at the Examples -
    in its full generality, this function is less simple than it might seem
    from the following code description (below ndi = numpy.lib.index_tricks):

    np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)]).

    But this omits some subtleties. Here is a fully general summary:

    Given an ``index`` array (a) of integers and a sequence of n arrays
    (choices), a and each choice array are first broadcast, as necessary,
    to arrays of a common shape; calling these Ba and
    Bchoices[i], i = 0,...,n-1 we have that, necessarily,
    Ba.shape == Bchoices[i].shape for each i.

    Then, a new array with shape Ba.shape is created as follows:

    - if mode=raise (the default), then, first of all, each element of a
      (and thus Ba) must be in the range [0, n-1]; now, suppose that
      i (in that range) is the value at the (j0, j1, ..., jm) position in Ba -
      then the value at the same position in the new array is the value in
      Bchoices[i] at that same position;

    - if mode=wrap, values in a (and thus Ba) may be any (signed) integer;
      modular arithmetic is used to map integers outside the range [0, n-1]
      back into that range; and then the new array is constructed as above;

    - if mode=clip, values in a (and thus Ba) may be any (signed) integer;
      negative integers are mapped to 0; values greater than n-1 are mapped
      to n-1; and then the new array is constructed as above.

    Parameters
    ----------
    a : int array
        This array must contain integers in [0, n-1], where n is the number of
        choices, unless mode=wrap or mode=clip, in which cases any integers
        are permissible.
    choices : sequence of arrays
        Choice arrays. a and all of the choices must be broadcastable to
        the same shape. If choices is itself an array (not recommended),
        then its outermost dimension (i.e., the one corresponding to
        choices.shape[0]) is taken as defining the ``sequence``.
    out : array, optional
        Kept only to match NumPy's signature; Theano does not support it
        and raises TypeError when it is given.
    mode : {``raise`` (default), ``wrap``, ``clip``}, optional
        Specifies how indices outside [0, n-1] will be treated:
        ``raise`` : an exception is raised
        ``wrap`` : value becomes value mod n
        ``clip`` : values < 0 are mapped to 0, values > n-1 are mapped to n-1

    Returns
    -------
    merged_array - array
        The merged result.

    Raises
    ------
    TypeError
        If the unsupported `out` argument is given.
    ValueError - shape mismatch
        If a and each choice array are not all broadcastable to the same shape.

    """
    # `out` exists only to keep the same function signature as NumPy.
    # Raise explicitly rather than via `assert`, which `python -O`
    # strips, silently ignoring the caller's `out` argument.
    if out is not None:
        raise TypeError("choose does not support the out argument")
    return Choose(mode)(a, choices)
class Choose(Op):
    """Elementwise choose: build an output by selecting, at each
    position, a value from one of the `choices` tensors according to
    the index tensor `a` (see `theano.tensor.choose`).

    Parameters
    ----------
    mode : {'raise', 'wrap', 'clip'}
        How out-of-range indices in `a` are handled, as in numpy.choose.
    """

    __props__ = ('mode',)

    def __init__(self, mode):
        assert mode in ("raise", "wrap", "clip")
        self.mode = mode

    def infer_shape(self, node, shapes):
        if isinstance(node.inputs[1], TensorVariable):
            # We have padded node.inputs[0] to the right number of
            # dimensions for the output, so pick each output extent from
            # the choice tensor where `a` is broadcastable and from `a`
            # otherwise.
            l = []
            for sh1, sh2, b1 in zip(shapes[0],
                                    shapes[1][1:],
                                    node.inputs[0].broadcastable):
                if b1:
                    l.append(sh2)
                else:
                    l.append(sh1)
            return [tuple(l)]
        else:
            import theano.typed_list
            assert isinstance(node.inputs[1],
                              theano.typed_list.TypedListVariable)
            # Typed lists of choices are accepted by make_node but their
            # shape inference is not supported.  (Dead code that used to
            # follow this raise has been removed.)
            raise ShapeError("Case not implemented")

    def make_node(self, a, choices):
        # Import here as it isn't imported by default and we can't
        # import at the top as it would cause circular import.
        import theano.typed_list
        a = as_tensor_variable(a)
        if a.dtype not in theano.tensor.discrete_dtypes:
            raise TypeError(
                'choose first argument must have an [u]int* dtype. Got %s.'
                % a.dtype)
        if isinstance(choices, (tuple, list,
                                theano.typed_list.TypedListVariable)):
            choice = theano.typed_list.make_list(choices)
            choice_ndim = choice.ttype.ndim
            choice_bcast = choice.ttype.broadcastable
        else:
            choice = as_tensor_variable(choices)
            # The leading dimension of `choice` indexes the choices, so
            # the output rank is one less.
            choice_ndim = choice.ndim - 1
            choice_bcast = choice.broadcastable[1:]
        out_ndim = numpy.max([a.ndim, choice_ndim])
        # Make explicit all added broadcastable dimensions.
        a = shape_padleft(a, out_ndim - a.ndim)
        if len(choice_bcast) != out_ndim:
            if isinstance(choice.type, TensorType):
                choice = choice.dimshuffle(0,
                                           *(('x',) * (out_ndim -
                                                       choice_ndim) +
                                             tuple(range(1, choice.ndim))))
                choice_ndim = choice.ndim - 1
                choice_bcast = choice.broadcastable[1:]
            else:
                raise NotImplementedError(
                    "We currently didn't implemented that case. "
                    "To make it work, explicitly add dimensions "
                    "of size one for dimensions that will be broadcasted")
        # A dimension of the output is broadcastable only when both `a`
        # and the (padded) choice tensor are broadcastable there.
        bcast = [False] * out_ndim
        for idx, (b1, b2) in enumerate(
            zip(a.broadcastable,
                (True,) * (out_ndim - choice_ndim) + choice_bcast)):
            if b1 and b2:
                bcast[idx] = True
        o = TensorType(choice.dtype, bcast)
        return Apply(self, [a, choice], [o()])

    def perform(self, node, inputs, outputs):
        (z,) = outputs
        a = inputs[0]
        choice = inputs[1]
        # TODO reuse out?
        z[0] = numpy.choose(a, choice, mode=self.mode)
class AllocEmpty(gof.Op):
    """Implement Alloc on the cpu, but without initializing memory.

    Like numpy.empty: the output has the requested shape and dtype but
    arbitrary (uninitialized) contents.
    """

    __props__ = ("dtype",)

    # specify the type of the data
    def __init__(self, dtype):
        assert isinstance(dtype, str)
        self.dtype = dtype.lower()

    def validate_shape(self, shape):
        """Check the shape arguments and build the (inputs, output) pair.

        Each element of `shape` must be an integer scalar; a dimension
        known to be the constant 1 is marked broadcastable.
        """
        sh = [as_tensor_variable(s) for s in shape]
        bcast = []
        for s in sh:
            if s.type.dtype[:3] not in ('int', 'uin'):
                raise TypeError('Shape arguments must be integers', s)
            # if s is constant 1, then we're broadcastable in that dim
            try:
                const_shp = get_scalar_constant_value(s)
            except NotScalarConstantError:
                const_shp = None
            bcast.append(1 == const_shp)
        otype = TensorType(dtype=self.dtype, broadcastable=bcast)
        output = otype()
        return sh, output

    def make_node(self, *shape):
        shape, output = self.validate_shape(shape)
        output.tag.values_eq_approx = values_eq_approx_always_true
        # The output can contain nan/inf.  output.type is a new
        # instance, so we can do this only for that variable.
        output.type.filter_checks_isfinite = False
        return Apply(self, shape, [output])

    def perform(self, node, inputs, out_):
        out, = out_
        sh = tuple([int(i) for i in inputs])
        # Reuse the previous output buffer when its shape still matches.
        if out[0] is None or out[0].shape != sh:
            out[0] = numpy.empty(sh, dtype=self.dtype)

    def c_code(self, node, name, inputs, out_, sub):
        dtype = "NPY_" + self.dtype.upper()
        out, = out_
        fail = sub['fail']
        shps = inputs
        nd = len(shps)
        # NOTE: this local was previously named `str`, shadowing the
        # builtin; renamed to `code`.
        code = "npy_intp dims[%(nd)s];\n" % locals()
        for idx, sh in enumerate(shps):
            code += "dims[%(idx)s] =" \
                    "((npy_intp)((dtype_%(sh)s*)" \
                    " PyArray_DATA(%(sh)s))[0]);\n" % locals()
        # Validate that the output storage exists
        code += "if(%(out)s==NULL\n" % locals()
        for idx, sh in enumerate(shps):
            code += "||PyArray_DIMS(%(out)s)[%(idx)s]!=dims[%(idx)s]" % locals()
        code += """){
            /* Reference received to invalid output variable.
            Decrease received reference's ref count and allocate new
            output variable */
            Py_XDECREF(%(out)s);
            %(out)s = (PyArrayObject*)PyArray_EMPTY(%(nd)s,
                                                    dims,
                                                    %(dtype)s,
                                                    0);
            if (!%(out)s)
            {
                PyErr_SetString(PyExc_MemoryError, "alloc failed");
                %(fail)s;
            }
        }
        """ % locals()
        return code

    def infer_shape(self, node, input_shapes):
        return [node.inputs]

    def c_code_cache_version(self):
        return (3,)

    def do_constant_folding(self, node):
        # Folding would materialize uninitialized memory as a constant.
        return False

    def connection_pattern(self, node):
        # The output is disconnected from every (shape) input.
        return [[False] for i in node.inputs]

    def grad(self, inputs, grads):
        return [DisconnectedType()() for i in inputs]

    def R_op(self, inputs, eval_points):
        return [zeros(inputs, self.dtype)]
| {
"repo_name": "cmdunkers/DeeperMind",
"path": "PythonEnv/lib/python2.7/site-packages/theano/tensor/basic.py",
"copies": "1",
"size": "205429",
"license": "bsd-3-clause",
"hash": 8055024834781171000,
"line_mean": 32.2140662894,
"line_max": 121,
"alpha_frac": 0.5674028496,
"autogenerated": false,
"ratio": 3.846985018726592,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9913254187047181,
"avg_score": 0.00022673625588210563,
"num_lines": 6185
} |
"""A `Type` and `Op` classes to work with numpy.ndarrays symbolically."""
import sys
import warnings
import numpy
from six.moves import xrange
import numbers
import theano
from theano.compat import izip
from theano.configparser import config
from theano import gof
from theano.gof import Apply, Constant, Op, Variable
from theano.tensor import elemwise
from theano.tensor.var import (AsTensorError, TensorVariable,
TensorConstant,
_tensor_py_operators)
from theano.tensor.type import TensorType, values_eq_approx_always_true
from theano.tensor.type_other import NoneConst
from theano import scalar as scal
from functools import partial
from six import integer_types
from theano import compile, printing
from theano.printing import pprint, min_informative_str
# For history
from theano.compile import Rebroadcast, Shape, shape
# We use these exceptions as well.
import theano.scalar.sharedvar
from theano.gradient import grad_undefined
from theano.gradient import grad_not_implemented
from theano.gradient import DisconnectedType
# set up the external interface
from theano.tensor.elemwise import Elemwise, DimShuffle, CAReduce, Sum
import logging
_logger = logging.getLogger("theano.tensor.basic")
__docformat__ = "restructuredtext en"
# This is needed as we will hide it later
# (this module defines its own symbolic `complex`, `any` and `all`,
# shadowing the Python builtins; keep references to the originals).
python_complex = complex
python_any = any
python_all = all
# Define common subsets of dtypes (as strings).
complex_dtypes = list(map(str, scal.complex_types))
continuous_dtypes = list(map(str, scal.continuous_types))
float_dtypes = list(map(str, scal.float_types))
discrete_dtypes = list(map(str, scal.discrete_types))
all_dtypes = list(map(str, scal.all_types))
int_dtypes = list(map(str, scal.int_types))
uint_dtypes = list(map(str, scal.uint_types))
class ShapeError(Exception):
    """Exception raised when a shape cannot be computed."""
def check_equal_numpy(x, y):
    """
    Return True iff x and y are equal.

    Checks the dtype and shape if x and y are numpy.ndarray instances,
    and compares all elements with a small absolute tolerance.
    """
    if isinstance(x, numpy.ndarray) and isinstance(y, numpy.ndarray):
        # Every element must be close (numpy.any here was a bug: a
        # single close element made unequal arrays compare equal).
        return (x.dtype == y.dtype and x.shape == y.shape and
                numpy.all(abs(x - y) < 1e-10))
    elif (isinstance(x, numpy.random.RandomState) and
          isinstance(y, numpy.random.RandomState)):
        return python_all(numpy.all(a == b) for a, b in
                          izip(x.__getstate__(), y.__getstate__()))
    else:
        return x == y
# Register check_equal_numpy as a value-equality checker for
# theano.compile (used e.g. by DebugMode comparisons).
compile.register_checker(check_equal_numpy)
__oplist_constructor_list = []
"""List of functions to be listed as op constructors in the oplist
(`gen_oplist`, doc/oplist.txt)."""
def constructor(f):
    """Register `f` as an op constructor for :doc:`oplist` generation
    (`gen_oplist`, doc/oplist.txt).

    Returns `f` unchanged, so this can be used as a decorator.
    """
    __oplist_constructor_list.append(f)
    return f
def __oplist_tag(thing, tag):
    """Append `tag` to `thing`'s ``__oplist_tags`` list, creating the
    attribute if it does not exist yet."""
    tag_list = getattr(thing, '__oplist_tags', [])
    tag_list.append(tag)
    thing.__oplist_tags = tag_list
# NOTE: deliberately disabled block, kept for history.
if 0:
    # this starts to feel like we're enumerating all the types
    # the one place where this is used we should also allow for sparse
    # variables
    # - JB 20100226
    def as_cuda_or_tensor_variable(x, name=None, ndim=None):
        """
        Same as `as_tensor_variable`, but does not transfer the value
        to the GPU.
        """
        if hasattr(x, '_as_CudaNdarrayVariable'):
            # TODO: pass name and ndim arguments
            return x._as_CudaNdarrayVariable()
        return as_tensor_variable(x, name, ndim)
def as_tensor_variable(x, name=None, ndim=None):
    """Return `x`, transformed into a `TensorType` variable.

    This function is often used by `make_node` methods of `Op`
    subclasses to turn ndarrays, numbers, `Scalar` instances, `Apply`
    instances and `TensorType` instances into valid input list
    elements.

    :Parameters:
     - `x`: Apply instance, Variable instance, numpy.ndarray, or number
       This thing will be transformed into a `Variable` in a sensible way. An
       ndarray argument will not be copied, but a list of numbers will be
       copied to make an ndarray.
     - `name`: str or None
       If a new `Variable` instance is created, it will be named with this
       string.
     - `ndim`: None or integer
       Return a Variable with this many dimensions. Raise TypeError if it's
       not possible.

    :Exceptions:
     - `ValueError`: raised if an `Apply` with more then one output is fetched
     - `AsTensorError`: raised if `x` cannot be converted to a TensorType
        Variable

    """
    # Objects that know how to convert themselves take precedence.
    if hasattr(x, '_as_TensorVariable'):
        return x._as_TensorVariable()  # TODO: pass name and ndim arguments
    if isinstance(x, gof.Apply):
        # use Apply's default output mechanism
        if (x.op.default_output is None) and (len(x.outputs) != 1):
            raise ValueError(
                "It is ambiguous which output of a multi-output Op has"
                " to be fetched.", x)
        x = x.default_output()
    if isinstance(x, Variable):
        # Promote scalar variables to 0-d tensors.
        if isinstance(x.type, scal.Scalar):
            x = tensor_from_scalar(x)
        if not isinstance(x.type, TensorType):
            raise AsTensorError(
                "Variable type field must be a TensorType.", x, x.type)
        if ndim is None:
            return x
        else:
            if (x.type.ndim > ndim):
                # strip off leading broadcastable dimensions
                first_non_broadcastable = [idx for idx in xrange(x.ndim)
                                           if not x.broadcastable[idx]][0]
                x = x.dimshuffle(list(range(x.ndim))[first_non_broadcastable:])
                if x.ndim > ndim:
                    raise ValueError(
                        'TensorType could not be cast to have %i dimensions'
                        % ndim, x.type
                    )
                return x
            elif (x.type.ndim < ndim):
                # Pad with broadcastable dimensions on the left.
                return shape_padleft(x, n_ones=(ndim - x.type.ndim))
            else:
                return x
    # A sequence that contains at least one Variable: try to stack it
    # into a single tensor.
    if isinstance(x, (tuple, list)) and python_any(isinstance(xi, Variable)
                                                   for xi in x):
        try:
            return stack(*x)
        except (TypeError, ValueError):
            pass
    if isinstance(x, bool):
        raise AsTensorError(
            "Cannot cast True or False as a tensor variable. Please use 1 or "
            "0. This error might be caused by using the == operator on "
            "Variables. v == w does not do what you think it does, "
            "use theano.tensor.eq(v, w) instead.")
    # Fall back to wrapping the raw value as a constant.
    try:
        return constant(x, name=name, ndim=ndim)
    except TypeError:
        try:
            str_x = str(x)
        except Exception:
            str_x = repr(x)
        raise AsTensorError("Cannot convert %s to TensorType" % str_x, type(x))
# this has a different name, because _as_tensor_variable is the
# function which ops use to upcast their arguments... this
# internal-use function is a good place to put debugging stuff, better
# than the global astensor.
_as_tensor_variable = as_tensor_variable
# Short public alias kept for backwards compatibility.
as_tensor = as_tensor_variable
class NumpyAutocaster(object):
    """Callable that casts Python ints/floats (and 0-d arrays) to numpy
    values according to `config.cast_policy`:

    - 'numpy': simply use the type found by `numpy.asarray(x)`.
    - 'numpy+floatX': like 'numpy', except a Python float becomes
      `config.floatX` when that is 'float32' (a numpy float64 scalar is
      left untouched, since the user chose that dtype on purpose).
    - 'custom': use `self.dtypes`, an ordered preference tuple:
      a numpy scalar whose dtype is already listed passes through;
      otherwise the first listed dtype representing `x` without loss of
      precision is used (a Python float prefers 'float32' when listed);
      if none fits exactly, the last dtype tried wins.
    """

    def __init__(self, dtypes):
        """
        :type dtypes: Tuple of strings.
        :param dtypes: The ordered list of preferred data types (only
            used when `config.cast_policy` is 'custom'; see the class
            docstring for details).
        """
        self.dtypes = tuple(dtypes)

    def __call__(self, x):
        # Only scalar values are handled here.
        assert (isinstance(x, integer_types) or
                isinstance(x, float) or
                (isinstance(x, numpy.ndarray) and x.ndim == 0))
        if config.cast_policy == 'numpy':
            return numpy.asarray(x)
        elif config.cast_policy == 'numpy+floatX':
            converted = numpy.asarray(x)
            needs_downcast = (not hasattr(x, 'dtype') and
                              converted.dtype in ('float64', 'float32') and
                              converted.dtype != config.floatX)
            if needs_downcast:
                converted = theano._asarray(converted, dtype=config.floatX)
            return converted
        # Everything below implements the 'custom' policy.
        assert config.cast_policy == 'custom'
        try:
            # Numpy scalars are typically typed on purpose: pass them
            # through (still converted to an array, since x may be e.g.
            # numpy.float64(1.1) rather than an ndarray).
            if str(x.dtype) in self.dtypes:
                return numpy.asarray(x)
        except AttributeError:
            # `x` has no 'dtype' attribute.
            pass
        # Unsafe downcast of float64 values when config.floatX == 'float32'.
        if (isinstance(x, float) and
                config.floatX in self.dtypes and
                config.floatX != 'float64'):
            return theano._asarray(x, dtype=config.floatX)
        # Never autocast to float16 unless config.floatX is float16.
        candidates = [d for d in self.dtypes
                      if config.floatX == 'float16' or d != 'float16']
        for dtype in candidates:
            cast = theano._asarray(x, dtype=dtype)
            if numpy.all(x == cast):
                break
        # Either an exact representation, or the last cast attempted.
        return cast
# Default autocasters used for Python int and float literals.
autocast_int = NumpyAutocaster(('int8', 'int16', 'int32', 'int64'))
autocast_float = NumpyAutocaster(('float16', 'float32', 'float64'))
# autocast_float dtypes might be manipulated in tensor.__init__
#
# Note: it's a bit weird for a compiler to automatically downcast
# literals like this, and it might have implications for efficiency
# when mixing types. For example when you add 1.0 + dmatrix(), the
# 1.0 could be converted to float32, and require upcasting for the +
# operation at every position in the dmatrix. using
# theano._asarray(1.0, dtype='float64') will circumvent this
# autocasting, and in future, our ops might be smarter about factoring
# out upcasts. The advantage of this mechanism is to combine it with
# floatX so that 1.0 + xmatrix() will always have the same type as the
# xmatrix().
#
class autocast_float_as(object):
    """Context manager that temporarily overrides the dtypes Python
    floats are autocast to.

    Only valid while `config.cast_policy` is 'custom'; otherwise an
    AssertionError is raised.

    For example:

    >>> with autocast_float_as('float32'):
    ...     assert (fvector() + 1.1).dtype == 'float32'  # temporary downcasting
    >>> assert (fvector() + 1.1).dtype == 'float64' # back to default behaviour

    This class might be convenient in some code, but it definitely
    helps to test the autocasting mechanism.
    """

    def __init__(self, *dtypes):
        self.dtypes = dtypes
        assert config.cast_policy == 'custom'

    def __enter__(self):
        assert config.cast_policy == 'custom'
        # Save the global preference so __exit__ can restore it.
        self.old_dtypes = autocast_float.dtypes
        autocast_float.dtypes = self.dtypes

    def __exit__(self, *args):
        assert config.cast_policy == 'custom'
        autocast_float.dtypes = self.old_dtypes
def constant_or_value(x, rtype, name=None, ndim=None, dtype=None):
    """Wrap the value `x` in a symbolic `Constant` of class `rtype`.

    :Exceptions:
     - `TypeError`: `x` could not be converted to a numpy.ndarray
     - `ValueError`: `x` could not be expanded to have ndim dimensions

    """
    if dtype is not None:
        # The caller is explicitly forcing the dtype.
        data = theano._asarray(x, dtype=dtype)
    else:
        # Infer the dtype through the autocasting rules (see above).
        data = None
        if rtype is TensorConstant and isinstance(x, integer_types):
            try:
                data = autocast_int(x)
            except OverflowError:
                # Imitate numpy, which tries to fit bigger numbers
                # into a uint64.
                data = theano._asarray(x, dtype='uint64')
        elif rtype is TensorConstant and isinstance(x, float):
            data = autocast_float(x)
        elif isinstance(x, numpy.ndarray):
            data = x
            # Theano has no bool dtype yet; upcast to uint8 so the
            # interface of `constant` does not break.
            if x.dtype == 'bool':
                data = numpy.asarray(data, dtype='uint8')
        else:
            # `x` is probably a list or a tuple.  A long only works if
            # it fits in int64 (NumPy <= 1.7) or int64/uint64 (1.7.1+).
            data = numpy.asarray(x)
    assert type(data) in [numpy.ndarray, numpy.memmap]
    # Size-1 dimensions are declared broadcastable.
    bcastable = [dim == 1 for dim in data.shape]
    if ndim is not None:
        if len(bcastable) < ndim:
            # Pad with broadcastable dimensions on the left.
            bcastable = [True] * (ndim - len(bcastable)) + bcastable
        elif len(bcastable) > ndim:
            # TODO: strip off dimensions of size 1
            raise ValueError(
                'ndarray could not be cast to constant with %i dimensions' %
                ndim)
        assert len(bcastable) == ndim
    try:
        ttype = TensorType(dtype=data.dtype, broadcastable=bcastable)
        if rtype is TensorConstant:
            # Constants own a private copy of their data.
            return rtype(ttype, data.copy(), name=name)
        else:
            # leave the shape out of the type
            return rtype(ttype, data, name=name)
    except Exception:
        raise TypeError("Could not convert %s to TensorType" % x, type(x))
def constant(x, name=None, ndim=None, dtype=None):
    """Return a `TensorConstant` wrapping `x`, caching small scalars."""
    const = constant_or_value(x, rtype=TensorConstant, name=name, ndim=ndim,
                              dtype=dtype)
    # A small cache of frequently-used constants speeds up the Merge
    # optimization on big graphs.  Only scalars are cached, and only
    # values in [-10, 10]: any [u]int dtype, plus floats holding an
    # integral value.  All broadcast patterns of a scalar are cached.
    if not constant.enable:
        return const
    sig = const.signature()
    cacheable = (sig not in constant_cache and
                 const.data.size == 1 and
                 const.data <= 10 and
                 const.data >= -10 and
                 (const.dtype in int_dtypes or
                  const.dtype in uint_dtypes or
                  (const.dtype in float_dtypes and
                   int(const.data) == const.data)))
    if cacheable:
        constant_cache[sig] = const
        # This is needed to raise a good error to the user.
        const.cached = True
    return constant_cache.get(sig, const)
constant.enable = True
constant_cache = {}
def _obj_is_wrappable_as_tensor(x):
    """Return True when `constant` can wrap `x` as a tensor constant."""
    try:
        constant(x)
    except TypeError:
        return False
    return True
# Default absolute/relative tolerances used by _allclose below, chosen
# by the config.tensor.cmp_sloppy setting.
if int(config.tensor.cmp_sloppy) > 1:
    # This config variable is a quick-and-dirty way to get low-precision
    # comparisons. For a more precise setting of these tolerances set
    # them explicitly in your user code by assigning, for example,
    # "theano.tensor.basic.float32_atol = ..."
    # When config.tensor.cmp_sloppy>1 we are even more sloppy. This is
    # useful to test the GPU as they don't use extended precision and
    # this cause some difference bigger then the normal sloppy.
    float32_atol = 5e-4
    float32_rtol = 1e-3
    float64_rtol = 1e-4
    float64_atol = 1e-3
elif int(config.tensor.cmp_sloppy):
    float32_atol = 1e-4
    float32_rtol = 1e-3
    float64_rtol = 1e-4
    float64_atol = 1e-3
else:
    # If you change those value in test don't forget to put them back
    # when the test end. Don't forget the case when the test fail.
    float32_atol = 1e-5
    float32_rtol = 1e-5
    # defaults in numpy.allclose
    float64_rtol = 1.0000000000000001e-05
    float64_atol = 1e-8
    # more strict. Atleast float32 precision.
    # NOTE: this deliberately overrides the numpy default assigned just
    # above; the previous value is kept only for reference.
    float64_rtol = 1.0000000000000001e-06
def _allclose(a, b, rtol=None, atol=None):
    """numpy.allclose with dtype-dependent default tolerances.

    float32/complex64 inputs get the looser float32 tolerances; `rtol`
    and `atol`, when given, override the defaults.
    """
    a = numpy.asarray(a)
    b = numpy.asarray(b)
    narrow = 'float32', 'complex64'
    if (str(a.dtype) in narrow) or (str(b.dtype) in narrow):
        abs_tol, rel_tol = float32_atol, float32_rtol
    else:
        abs_tol, rel_tol = float64_atol, float64_rtol
    if rtol is not None:
        rel_tol = rtol
    if atol is not None:
        abs_tol = atol
    # Work around bug in Numpy, see http://projects.scipy.org/numpy/ticket/1684
    if str(b.dtype) in int_dtypes and (numpy.absolute(b) < 0).any():
        b = theano._asarray(b, dtype='float64')
    return numpy.allclose(a, b, atol=abs_tol, rtol=rel_tol)
class NotScalarConstantError(Exception):
    """Raised by get_scalar_constant_value when the variable it was
    given does not hold a scalar constant."""
class EmptyConstantError(NotScalarConstantError):
    """Raised by get_scalar_const_value when its argument is a
    zero-dimensional (empty) constant."""
def numpy_scalar(data):
    """Return the scalar stored in the numpy ndarray `data`.

    Raises
    ------
    EmptyConstantError
        If `data` is an empty array.
    NotScalarConstantError
        If `data` is non-numeric, non-scalar, or has more than one
        unique value.
    """
    # handle case where data is numpy.array([])
    if (data.ndim > 0 and
        (len(data.shape) == 0 or
         __builtins__['max'](data.shape) == 0)):
        # `max` is fetched through __builtins__ on purpose: this module
        # shadows the builtin with the symbolic max.
        assert numpy.all(numpy.array([]) == data)
        raise EmptyConstantError()
    try:
        # Use the saved Python builtin `complex` (python_complex); the
        # old numpy.complex alias was deprecated and removed in NumPy
        # 1.24, where it raised AttributeError and turned every call
        # into a spurious NotScalarConstantError.
        python_complex(data)  # works for all numeric scalars
        return data
    except Exception:
        raise NotScalarConstantError(
            'v.data is non-numeric, non-scalar, or has more than one'
            ' unique value', data)
# Scalar ops that get_scalar_constant_value is allowed to dig through
# (kept to the ops needed by get_canonical_form_slice(); this function
# runs early in optimization, before constant folding).
get_scalar_constant_value_elemwises = (
    scal.Cast, scal.Switch,
    scal.NEQ, scal.EQ,
    scal.LT, scal.GT, scal.LE, scal.GE,
    scal.Sub, scal.Add, scal.Mod, scal.Mul,
    scal.IntDiv, scal.TrueDiv, scal.Minimum, scal.Maximum)
def get_scalar_constant_value(orig_v, elemwise=True,
                              only_process_constants=False):
    """return the constant scalar(0-D) value underlying variable `v`
    If v is the output of dimshuffles, fills, allocs, rebroadcasts,
    cast, OutputGuard, DeepCopyOp, ScalarFromTensor, ScalarOp,
    Elemwise and some pattern with Subtensor,
    this function digs through them.
    If `v` is not some view of constant scalar data, then raise a
    NotScalarConstantError.
    :param elemwise: If False, we won't try to go into elemwise.
        So this call is faster.
    :param only_process_constants: If True, we only attempt to obtain
        the value of `orig_v` if it's directly constant and don't
        try to dig through dimshuffles, fills, allocs, and other to figure
        out its value.
    :note: There may be another function similar to this one in the
        code, but I'm not sure where it is.
    """
    # Walk down the graph, peeling off value-preserving ops until we hit
    # raw constant data (or run out of patterns we understand).
    v = orig_v
    while True:
        if v is None:
            # None is not a scalar (and many uses of this function seem
            # to depend on passing it None)
            raise NotScalarConstantError()

        # Raw Python / numpy values: done.
        if isinstance(v, (numpy.integer, int, float)):
            return numpy.asarray(v)

        if isinstance(v, numpy.ndarray):
            return numpy_scalar(v)

        if isinstance(v, Constant):
            # unique_value, when set, caches the single value shared by
            # every element of the constant.
            if getattr(v.tag, 'unique_value', None) is not None:
                data = v.tag.unique_value
            else:
                data = v.data
            return numpy_scalar(data)

        if not only_process_constants and getattr(v, 'owner', None):
            # Ops that merely re-shape / copy / guard their input:
            # step through to the underlying value.
            if isinstance(v.owner.op, (Alloc, DimShuffle, Rebroadcast,
                                       compile.ops.OutputGuard,
                                       compile.DeepCopyOp)):
                v = v.owner.inputs[0]
                continue
            elif isinstance(v.owner.op, theano.compile.ops.Shape_i):
                # shape of a constant is itself a constant
                if isinstance(v.owner.inputs[0], Constant):
                    return numpy.asarray(
                        v.owner.inputs[0].data.shape[v.owner.op.i])
            # Don't act as the constant_folding optimization here as this
            # fct is used too early in the optimization phase. This would
            # mess with the stabilization optimization and be too slow.
            # We put all the scalar Ops used by get_canonical_form_slice()
            # to allow it to determine the broadcast pattern correctly.
            elif isinstance(v.owner.op, (ScalarFromTensor, TensorFromScalar)):
                return get_scalar_constant_value(v.owner.inputs[0])
            elif isinstance(v.owner.op, scal.ScalarOp):
                if isinstance(v.owner.op, scal.Second):
                    # We don't need both input to be constant for second
                    shp, val = v.owner.inputs
                    v = val
                    continue
                if isinstance(v.owner.op, get_scalar_constant_value_elemwises):
                    # Fold the op by evaluating it on its (constant) inputs.
                    const = [get_scalar_constant_value(i)
                             for i in v.owner.inputs]
                    ret = [[None]]
                    v.owner.op.perform(v.owner, const, ret)
                    return ret[0][0]
            elif elemwise and isinstance(v.owner.op, Elemwise):
                if isinstance(v.owner.op.scalar_op, scal.Second):
                    # We don't need both input to be constant for second
                    shp, val = v.owner.inputs
                    v = val
                    continue
                elif isinstance(v.owner.op.scalar_op,
                                get_scalar_constant_value_elemwises):
                    const = [get_scalar_constant_value(i)
                             for i in v.owner.inputs]
                    ret = [[None]]
                    v.owner.op.perform(v.owner, const, ret)
                    return ret[0][0]
            elif (isinstance(v.owner.op, theano.tensor.subtensor.Subtensor) and
                  v.ndim == 0):
                if isinstance(v.owner.inputs[0], TensorConstant):
                    cdata = tuple(v.owner.op.get_constant_idx(v.owner.inputs))
                    try:
                        return v.owner.inputs[0].data.__getitem__(cdata)
                    except IndexError:
                        raise IndexError(
                            str(tuple(v.owner.op.idx_list)) +
                            " is not a valid index into " +
                            str(v.owner.inputs[0].data))

                # The index list 'idx_list' should have length the same
                # shape as the input.
                # TODO: implement the case where we take a scalar in a matrix
                assert len(v.owner.op.idx_list) == v.owner.inputs[0].ndim

                # Needed to make better graph in this test in
                # theano/tensor/tests/test_sharedvar.py:
                # test_shared_options.test_specify_shape_partial
                if ((v.owner.inputs[0].owner and
                     isinstance(v.owner.inputs[0].owner.op, Join) and
                     len(v.owner.op.idx_list) == 1)):
                    # Ensure the Join is joining only scalar variables (so that
                    # the constant value can be found at the same index as the
                    # one used in the sub-tensor).
                    if python_all(var.ndim == 0 for var in
                                  v.owner.inputs[0].owner.inputs[1:]):
                        idx = v.owner.op.idx_list[0]
                        if isinstance(idx, gof.Type):
                            idx = get_scalar_constant_value(v.owner.inputs[1])
                        # Note the '+ 1' is because the first argument to Join
                        # is the axis.
                        ret = v.owner.inputs[0].owner.inputs[idx + 1]
                        ret = get_scalar_constant_value(ret)
                        # join can cast implicitly its input in some case.
                        return theano._asarray(ret, dtype=v.type.dtype)
                    if python_all(var.ndim == 1 for var in
                                  v.owner.inputs[0].owner.inputs[1:]):
                        idx = v.owner.op.idx_list[0]
                        if isinstance(idx, gof.Type):
                            idx = get_scalar_constant_value(v.owner.inputs[1])
                        try:
                            # TODO: assert joined axis is 0.
                            # Locate which joined vector the index falls in.
                            length = 0
                            for joined in v.owner.inputs[0].owner.inputs[1:]:
                                ll = get_vector_length(joined)
                                if idx < length + ll:
                                    return get_scalar_constant_value(
                                        joined[idx - length])
                                length += ll
                        except TypeError:
                            pass
                        except ValueError:
                            pass

                elif (v.owner.inputs[0].owner and
                      isinstance(v.owner.inputs[0].owner.op,
                                 theano.tensor.opt.MakeVector) and
                      # MakeVector normally accept only scalar as input.
                      # We put this check in case there is change in the future
                      python_all(var.ndim == 0 for var in
                                 v.owner.inputs[0].owner.inputs) and
                      len(v.owner.op.idx_list) == 1):
                    idx = v.owner.op.idx_list[0]
                    if isinstance(idx, gof.Type):
                        idx = get_scalar_constant_value(v.owner.inputs[1])
                    # Python 2.4 does not support indexing with numpy.integer
                    # So we cast it.
                    idx = int(idx)
                    ret = v.owner.inputs[0].owner.inputs[idx]
                    ret = get_scalar_constant_value(ret)
                    # MakeVector can cast implicitly its input in some case.
                    return theano._asarray(ret, dtype=v.type.dtype)

                # This is needed when we take the grad as the Shape op
                # are not already changed into MakeVector
                owner = v.owner
                leftmost_parent = owner.inputs[0]
                if (leftmost_parent.owner and
                    isinstance(leftmost_parent.owner.op,
                               theano.tensor.Shape)):
                    op = owner.op
                    idx_list = op.idx_list
                    idx = idx_list[0]
                    if isinstance(idx, gof.Type):
                        idx = get_scalar_constant_value(owner.inputs[1])
                    grandparent = leftmost_parent.owner.inputs[0]
                    gp_broadcastable = grandparent.type.broadcastable
                    ndim = grandparent.type.ndim
                    if grandparent.owner and isinstance(grandparent.owner.op,
                                                        Rebroadcast):
                        # Combine the broadcast flags of the Rebroadcast's
                        # input with its output flags.
                        l = []
                        for idx, (b1, b2) in enumerate(
                                zip(grandparent.owner.inputs[0].broadcastable,
                                    gp_broadcastable)):
                            l.append(b1 or b2)
                        gp_broadcastable = tuple(l)

                    assert ndim == len(gp_broadcastable)

                    if not (idx < len(gp_broadcastable)):
                        msg = ("get_scalar_constant_value detected " +
                               "deterministic IndexError: x.shape[%d] " +
                               "when x.ndim=%d.") % (ndim, idx)
                        if config.exception_verbosity == 'high':
                            msg += 'x=%s' % min_informative_str(v)
                        else:
                            msg += 'x=%s' % str(v)
                        raise ValueError(msg)

                    # A broadcastable dimension is known to have length 1.
                    if gp_broadcastable[idx]:
                        return numpy.asarray(1)

        raise NotScalarConstantError(v)
# Easy constructors
def tensor(*args, **kwargs):
    """Build a TensorType from `args`/`kwargs` and return a variable of
    that type.  A 'name' keyword, when given, is attached to the
    variable instead of being forwarded to the type constructor.
    """
    var_name = kwargs.pop('name', None)
    ttype = TensorType(*args, **kwargs)
    return ttype(name=var_name)
def _multi(*fns):
def f2(f, *names):
if names and isinstance(names[0], int):
if names == 1:
return f()
else:
return [f() for i in xrange(names[0])]
if isinstance(names, tuple):
if len(names) == 1:
names = names[0]
if len(names) == 1:
return f(names)
else:
return [f(name) for name in names]
if len(fns) == 1:
return partial(f2, fns)
else:
return [partial(f2, f) for f in fns]
# 0-d (scalar) tensor types, one per dtype.  Prefix key: c/z =
# complex64/128, f/d = float32/64, b/w/i/l = int8/16/32/64.
cscalar = TensorType('complex64', ())
zscalar = TensorType('complex128', ())
fscalar = TensorType('float32', ())
dscalar = TensorType('float64', ())
bscalar = TensorType('int8', ())
wscalar = TensorType('int16', ())
iscalar = TensorType('int32', ())
lscalar = TensorType('int64', ())
def scalar(name=None, dtype=None):
    """Return a symbolic scalar (0-d) variable.

    :param dtype: numeric type (None means to use theano.config.floatX)
    :param name: a name to attach to this variable
    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, ())(name)
# Plural constructors (build several scalars at once) and dtype
# groupings of the scalar types.
scalars, fscalars, dscalars, iscalars, lscalars = _multi(
    scalar, fscalar, dscalar, iscalar, lscalar)

int_types = bscalar, wscalar, iscalar, lscalar
float_types = fscalar, dscalar
complex_types = cscalar, zscalar
int_scalar_types = int_types
float_scalar_types = float_types
complex_scalar_types = complex_types
# 1-d (vector) tensor types, one per dtype (same prefix key as above).
cvector = TensorType('complex64', (False, ))
zvector = TensorType('complex128', (False, ))
fvector = TensorType('float32', (False, ))
dvector = TensorType('float64', (False, ))
bvector = TensorType('int8', (False,))
wvector = TensorType('int16', (False,))
ivector = TensorType('int32', (False, ))
lvector = TensorType('int64', (False, ))
def vector(name=None, dtype=None):
    """Return a symbolic 1-d vector variable.

    :param dtype: numeric type (None means to use theano.config.floatX)
    :param name: a name to attach to this variable
    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (False, ))(name)
# Plural constructors and dtype groupings for the vector types.
vectors, fvectors, dvectors, ivectors, lvectors = _multi(
    vector, fvector, dvector, ivector, lvector)

int_vector_types = bvector, wvector, ivector, lvector
float_vector_types = fvector, dvector
complex_vector_types = cvector, zvector
# 2-d (matrix) tensor types, one per dtype.
cmatrix = TensorType('complex64', (False, False))
zmatrix = TensorType('complex128', (False, False))
fmatrix = TensorType('float32', (False, False))
dmatrix = TensorType('float64', (False, False))
bmatrix = TensorType('int8', (False, False))
wmatrix = TensorType('int16', (False, False))
imatrix = TensorType('int32', (False, False))
lmatrix = TensorType('int64', (False, False))
def matrix(name=None, dtype=None):
    """Return a symbolic 2-d matrix variable.

    :param dtype: numeric type (None means to use theano.config.floatX)
    :param name: a name to attach to this variable
    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (False, False))(name)
# Plural constructors and dtype groupings for the matrix types.
matrices, fmatrices, dmatrices, imatrices, lmatrices = _multi(
    matrix, fmatrix, dmatrix, imatrix, lmatrix)

int_matrix_types = bmatrix, wmatrix, imatrix, lmatrix
float_matrix_types = fmatrix, dmatrix
complex_matrix_types = cmatrix, zmatrix
# Row types: 2-d with the first dimension broadcastable (length 1).
crow = TensorType('complex64', (True, False))
zrow = TensorType('complex128', (True, False))
frow = TensorType('float32', (True, False))
drow = TensorType('float64', (True, False))
brow = TensorType('int8', (True, False))
wrow = TensorType('int16', (True, False))
irow = TensorType('int32', (True, False))
lrow = TensorType('int64', (True, False))
def row(name=None, dtype=None):
    """Return a symbolic row variable (ndim=2, broadcastable=[True, False]).

    :param dtype: numeric type (None means to use theano.config.floatX)
    :param name: a name to attach to this variable
    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (True, False))(name)
# Plural row constructors.
rows, frows, drows, irows, lrows = _multi(row, frow, drow, irow, lrow)

# Column types: 2-d with the second dimension broadcastable (length 1).
ccol = TensorType('complex64', (False, True))
zcol = TensorType('complex128', (False, True))
fcol = TensorType('float32', (False, True))
dcol = TensorType('float64', (False, True))
bcol = TensorType('int8', (False, True))
wcol = TensorType('int16', (False, True))
icol = TensorType('int32', (False, True))
lcol = TensorType('int64', (False, True))
def col(name=None, dtype=None):
    """Return a symbolic column variable (ndim=2, broadcastable=[False, True]).

    :param dtype: numeric type (None means to use theano.config.floatX)
    :param name: a name to attach to this variable
    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (False, True))(name)
# Plural column constructors.
cols, fcols, dcols, icols, lcols = _multi(col, fcol, dcol, icol, lcol)

# 3-d tensor types, one per dtype.
ctensor3 = TensorType('complex64', ((False,) * 3))
ztensor3 = TensorType('complex128', ((False,) * 3))
ftensor3 = TensorType('float32', ((False,) * 3))
dtensor3 = TensorType('float64', ((False,) * 3))
btensor3 = TensorType('int8', ((False,) * 3))
wtensor3 = TensorType('int16', ((False,) * 3))
itensor3 = TensorType('int32', ((False,) * 3))
ltensor3 = TensorType('int64', ((False,) * 3))
def tensor3(name=None, dtype=None):
    """Return a symbolic 3-D variable.

    :param dtype: numeric type (None means to use theano.config.floatX)
    :param name: a name to attach to this variable
    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (False, False, False))(name)
# Plural 3-d constructors.
tensor3s, ftensor3s, dtensor3s, itensor3s, ltensor3s = _multi(
    tensor3, ftensor3, dtensor3, itensor3, ltensor3)

# 4-d tensor types, one per dtype.
ctensor4 = TensorType('complex64', ((False,) * 4))
ztensor4 = TensorType('complex128', ((False,) * 4))
ftensor4 = TensorType('float32', ((False,) * 4))
dtensor4 = TensorType('float64', ((False,) * 4))
btensor4 = TensorType('int8', ((False,) * 4))
wtensor4 = TensorType('int16', ((False,) * 4))
itensor4 = TensorType('int32', ((False,) * 4))
ltensor4 = TensorType('int64', ((False,) * 4))
def tensor4(name=None, dtype=None):
    """Return a symbolic 4-D variable.

    :param dtype: numeric type (None means to use theano.config.floatX)
    :param name: a name to attach to this variable
    """
    if dtype is None:
        dtype = config.floatX
    return TensorType(dtype, (False, False, False, False))(name)
# Plural 4-d constructors.
tensor4s, ftensor4s, dtensor4s, itensor4s, ltensor4s = _multi(
    tensor4, ftensor4, dtensor4, itensor4, ltensor4)

# Backward-compatible alias for the type class.
Tensor = TensorType

# This bizarre push-import avoids a circular dependency.
elemwise.as_tensor_variable = as_tensor_variable
elemwise.TensorType = TensorType
elemwise.TensorVariable = TensorVariable
elemwise.TensorConstant = TensorConstant
#########################
# Utilities
#########################
def _scal_elemwise_with_nfunc(nfunc, nin, nout):
    """
    Replace a symbol definition with an elementwise version of the
    corresponding scalar Op. If it is not None, the nfunc argument
    should be a string such that getattr(numpy, nfunc) implements
    a vectorized version of the elemwise operation. nin is the number
    of inputs expected by that function, and nout is the number of
    **destination** inputs it takes. That is, the function should
    take nin+nout inputs. nout == 0 means that the numpy function
    does not take a numpy array argument to put its result in.
    """
    def construct(symbol):
        # The decorated stub's name selects the scalar op; an
        # '_inplace' suffix requests the destructive (output-aliased)
        # variant of that op.
        symbolname = symbol.__name__
        inplace = symbolname.endswith('_inplace')
        if inplace:
            msg = "inplace"
        else:
            msg = "no_inplace"
        n = "Elemwise{%s,%s}" % (symbolname, msg)

        if inplace:
            scalar_op = getattr(scal, symbolname[:-len('_inplace')])
            inplace_scalar_op = scalar_op.__class__(scal.transfer_type(0))
            rval = elemwise.Elemwise(inplace_scalar_op, {0: 0}, name=n,
                                     nfunc_spec=(nfunc and (nfunc, nin, nout)))
        else:
            scalar_op = getattr(scal, symbolname)
            rval = elemwise.Elemwise(scalar_op, name=n,
                                     nfunc_spec=(nfunc and (nfunc, nin, nout)))

        # Prepend the stub's docstring to the generated Op's docstring.
        if getattr(symbol, '__doc__', False):
            rval.__doc__ = symbol.__doc__ + '\n' + rval.__doc__

        # for the meaning of this see the ./epydoc script
        # it makes epydoc display rval as if it were a function, not an object
        rval.__epydoc_asRoutine = symbol
        rval.__module__ = 'tensor'

        pprint.assign(rval, printing.FunctionPrinter(symbolname))

        return rval
    return construct
# Variant of the decorator used when no numpy nfunc corresponds.
_scal_elemwise = _scal_elemwise_with_nfunc(None, None, None)
#########################
# Casting Operations
#########################
class TensorFromScalar(Op):
    """Lift a theano scalar (scal.Scalar) into a 0-d tensor."""
    __props__ = ()

    def make_node(self, s):
        assert isinstance(s.type, scal.Scalar)
        return Apply(self,
                     [s],
                     [tensor(dtype=s.type.dtype,
                             broadcastable=())])

    def perform(self, node, inp, out_):
        s, = inp
        out, = out_
        out[0] = numpy.asarray(s)

    def infer_shape(self, node, in_shapes):
        # Output is always 0-d.
        return [()]

    def grad(self, inp, grads):
        s, = inp
        dt, = grads
        if s.type.dtype in float_dtypes:
            assert dt.type.dtype in float_dtypes
            return [scalar_from_tensor(dt)]

        # If the input dtype is an integer, then so is the output dtype,
        # and the "zero" gradient can be represented in that int dtype.
        # Currently, theano.grad insists that the dtype of the returned
        # gradient has a float dtype, so we use floatX.
        if s.type.dtype in discrete_dtypes:
            return [s.zeros_like().astype(theano.config.floatX)]

        raise NotImplementedError("grad not implemented for complex dtypes")
# Module-level singleton instance of the Op.
tensor_from_scalar = TensorFromScalar()
class ScalarFromTensor(Op):
    """Extract the value of a 0-d tensor as a theano scalar (scal.Scalar)."""
    __props__ = ()

    def make_node(self, t):
        assert isinstance(t.type, TensorType)
        assert t.type.broadcastable == ()
        return Apply(self,
                     [t],
                     [scal.get_scalar_type(dtype=t.type.dtype).make_variable()]
                     )

    def perform(self, node, inp, out_):
        s, = inp
        out, = out_
        # flatten()[0] extracts the single element whatever the layout.
        out[0] = s.flatten()[0]

    def infer_shape(self, node, in_shapes):
        return [()]

    def grad(self, inp, grads):
        s, = inp
        dt, = grads
        return [tensor_from_scalar(dt)]

    def R_op(self, inputs, eval_points):
        if None in eval_points:
            return [None]
        return self.make_node(*eval_points).outputs

    def c_code(self, node, name, inputs, outputs, sub):
        x, = inputs
        z, = outputs
        fail = sub['fail']
        return """
        %(z)s = ((dtype_%(x)s*)(PyArray_DATA(%(x)s)))[0];
        """ % locals()

    def c_code_cache_version(self):
        return (1,)
# Module-level singleton instance of the Op.
scalar_from_tensor = ScalarFromTensor()
# to be removed as we get the epydoc routine-documenting thing going
# -JB 20080924
def _conversion(real_value, name):
    """Register `real_value` (an Elemwise cast Op) for oplist
    documentation and pretty-printing, then hand it back."""
    __oplist_tag(real_value, 'casting')
    real_value.__module__ = 'tensor.basic'
    printer = printing.FunctionPrinter(name)
    pprint.assign(real_value, printer)
    return real_value
# These _convert_to_<type> functions have leading underscores to indicate that
# they should not be called directly. They do not perform sanity checks about
# what types you are casting to what. That logic is implemented by the
# `cast()` function below.
_convert_to_int8 = _conversion(
    elemwise.Elemwise(scal.convert_to_int8), 'int8')
"""Cast to 8-bit integer"""

_convert_to_int16 = _conversion(
    elemwise.Elemwise(scal.convert_to_int16), 'int16')
"""Cast to 16-bit integer"""

_convert_to_int32 = _conversion(
    elemwise.Elemwise(scal.convert_to_int32), 'int32')
"""Cast to 32-bit integer"""

_convert_to_int64 = _conversion(
    elemwise.Elemwise(scal.convert_to_int64), 'int64')
"""Cast to 64-bit integer"""

_convert_to_uint8 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint8), 'uint8')
"""Cast to unsigned 8-bit integer"""

_convert_to_uint16 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint16), 'uint16')
"""Cast to unsigned 16-bit integer"""

_convert_to_uint32 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint32), 'uint32')
"""Cast to unsigned 32-bit integer"""

_convert_to_uint64 = _conversion(
    elemwise.Elemwise(scal.convert_to_uint64), 'uint64')
"""Cast to unsigned 64-bit integer"""

_convert_to_float16 = _conversion(
    elemwise.Elemwise(scal.convert_to_float16), 'float16')
"""Cast to half-precision floating point"""

_convert_to_float32 = _conversion(
    elemwise.Elemwise(scal.convert_to_float32), 'float32')
"""Cast to single-precision floating point"""

_convert_to_float64 = _conversion(
    elemwise.Elemwise(scal.convert_to_float64), 'float64')
"""Cast to double-precision floating point"""

_convert_to_complex64 = _conversion(
    elemwise.Elemwise(scal.convert_to_complex64), 'complex64')
"""Cast to single-precision complex"""

_convert_to_complex128 = _conversion(
    elemwise.Elemwise(scal.convert_to_complex128), 'complex128')
"""Cast to double-precision complex"""
# dtype name -> elementwise conversion Op, used by cast() below.
_cast_mapping = {
    'int8': _convert_to_int8,
    'int16': _convert_to_int16,
    'int32': _convert_to_int32,
    'int64': _convert_to_int64,
    'uint8': _convert_to_uint8,
    'uint16': _convert_to_uint16,
    'uint32': _convert_to_uint32,
    'uint64': _convert_to_uint64,
    'float16': _convert_to_float16,
    'float32': _convert_to_float32,
    'float64': _convert_to_float64,
    'complex64': _convert_to_complex64,
    'complex128': _convert_to_complex128}
@constructor
def cast(x, dtype):
    """Symbolically cast `x` to a Tensor of type `dtype`.

    The string 'floatX' is resolved to theano.config.floatX.  Casting a
    complex tensor to a real dtype is refused because it is ambiguous.
    """
    if dtype == 'floatX':
        dtype = config.floatX

    _x = as_tensor_variable(x)
    source_dtype = _x.type.dtype
    if source_dtype == dtype:
        # already the requested dtype; nothing to do
        return _x
    if source_dtype.startswith('complex') and not dtype.startswith('complex'):
        raise TypeError((
            'Casting from complex to real is ambiguous: consider real(), '
            'imag(), angle() or abs()'))
    return _cast_mapping[dtype](x)
##########################
# Unary Operations
##########################
class MaxAndArgmax(Op):
    """Calculate the max and argmax over a given axis or over all axes.
    """
    nin = 2  # tensor, axis
    nout = 2  # max val, max idx
    E_axis = 'invalid axis'
    __props__ = ()

    def make_node(self, x, axis=None):
        x = _as_tensor_variable(x)
        # Normalize `axis` (tuple/list, python or numpy int, 0-d array,
        # NoneConst or TensorConstant) down to a python int or None.
        if isinstance(axis, (tuple, list)):
            axis = [int(a) for a in axis]
            if len(axis) != 1:
                # Multiple axes are only accepted when they amount to
                # reducing over every dimension (i.e. axis=None).
                axis = list(axis)
                for idx in xrange(len(axis)):
                    if axis[idx] < 0:
                        axis[idx] += x.type.ndim
                axis.sort()
                if axis == list(range(-x.type.ndim, 0, 1)):
                    axis = list(range(x.type.ndim))
                assert axis == list(range(x.type.ndim)), (
                    "MaxAndArgmax does not support multiple"
                    " axes. the max fct supports it. Got %s" % axis)
                axis = None
            else:
                axis = axis[0]

        if isinstance(axis, (int, numpy.integer)):
            axis = int(axis)
        elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
            axis = int(axis)
        elif isinstance(axis, Variable):
            if NoneConst.equals(axis):
                axis = None
            elif not isinstance(axis, TensorConstant):
                raise TypeError(
                    "MaxAndArgmax needs a constant axis. Got %s" % axis)
            else:
                assert (axis.dtype.startswith("int") or
                        axis.dtype.startswith("uint"))
                axis = int(axis.data)
        # we make the axis all positive to make the infer_shape work
        # with negative axis
        if x.type.ndim > 0 and axis is not None:
            if axis < 0:
                if -axis > x.type.ndim:
                    raise ValueError('axis out of range')
                axis = x.type.ndim + axis
        # Verify that the axis is valid.
        all_axes = set()
        if axis is not None:
            if axis < 0 or axis >= x.type.ndim:
                raise ValueError(
                    'Invalid axis: %s (the number of dimensions of the '
                    'input is: %s)' % (axis, x.type.ndim))
            all_axes.add(axis)
        else:
            all_axes = list(range(x.ndim))
        if axis is None:
            axis = NoneConst.clone()
        else:
            axis = _as_tensor_variable(axis)
            assert axis.ndim == 0
        inputs = [x, axis]
        # We keep the original broadcastable flags for dimensions on which
        # we do not perform the max / argmax.
        broadcastable = [b for i, b in enumerate(x.type.broadcastable)
                         if i not in all_axes]
        outputs = [tensor(x.type.dtype, broadcastable, name='max'),
                   tensor('int64', broadcastable, name='argmax')]
        return Apply(self, inputs, outputs)

    def perform(self, node, inp, outs):
        x, axis = inp
        max, max_idx = outs
        max[0] = theano._asarray(numpy.max(x, axis),
                                 dtype=node.outputs[0].dtype)
        max_idx[0] = theano._asarray(numpy.argmax(x, axis), dtype='int64')

    def c_code(self, node, name, inp, out, sub):
        x, axis = inp
        max, argmax = out
        fail = sub["fail"]
        # NPY_MAXDIMS tells PyArray_Max/ArgMax to reduce over all axes.
        if NoneConst.equals(node.inputs[1]):
            axis_code = "axis = NPY_MAXDIMS;"
        else:
            assert node.inputs[1].ndim == 0
            axis_code = """
            axis = ((dtype_%(axis)s*)PyArray_DATA(%(axis)s))[0];
            if(axis > PyArray_NDIM(%(x)s)-1 || axis < -PyArray_NDIM(%(x)s)){
                PyErr_SetString(PyExc_ValueError, "MaxAndArgmax, bad axis argument");
                %(fail)s
            }
            """ % locals()
        ret = """
        int axis;

        Py_CLEAR(%(max)s);
        Py_CLEAR(%(argmax)s);//todo pass them as out parameter.
        %(axis_code)s

        %(max)s = (PyArrayObject*)PyArray_Max(%(x)s, axis, NULL);
        if(%(max)s == NULL){
            PyErr_SetString(PyExc_ValueError,
                            "MaxAndArgmax, max failed");
            %(fail)s;
        }
        if(!PyArray_CheckExact(%(max)s)){
            %(max)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(max)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);
            if(%(max)s == NULL){
                %(fail)s;
            }
        }

        %(argmax)s = (PyArrayObject*)PyArray_ArgMax(%(x)s, axis, NULL);
        if(%(argmax)s == NULL){
            PyErr_SetString(PyExc_ValueError, "MaxAndArgmax, argmax failed");
            Py_CLEAR(%(max)s);
            %(fail)s;
        }
        if(!PyArray_CheckExact(%(argmax)s)){
            %(argmax)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(argmax)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);
            if(%(argmax)s == NULL){
                %(fail)s;
            }
        }
        if(PyArray_TYPE(%(argmax)s) != NPY_INT64){
            PyObject * tmp = PyArray_Cast(%(argmax)s, NPY_INT64);
            if (NULL == tmp){
                %(fail)s;
            }
            Py_DECREF(%(argmax)s);
            %(argmax)s = (PyArrayObject*)tmp;
        }
        """
        return ret % locals()

    def c_code_cache_version(self):
        return (3,)

    def infer_shape(self, node, shapes):
        ishape, axis_shape = shapes
        axis = node.inputs[1]
        if node.inputs[1].data is None:
            # reduction over all axes: both outputs are scalars
            return [(), ()]
        rval = tuple([ishape[i] for (i, b) in enumerate(
            node.inputs[0].type.broadcastable) if i != axis.data])
        return [rval, rval]

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return [None, None]
        if not isinstance(inputs[1], theano.Constant):
            raise ValueError(('R_op supported for arg_max only for '
                              'constant axis!'))
        if inputs[1].data > 1:
            raise ValueError(('R_op supported for arg_max only when '
                              ' axis is 0 or 1'))
        if inputs[0].ndim != 2:
            raise ValueError(('R_op supported for arg_max only when '
                              ' input is a matrix'))
        max_vals, max_pos = self.make_node(*inputs).outputs
        if inputs[1].data == 0:
            return [eval_points[0][max_pos,
                                   arange(eval_points[0].shape[1])], None]
        else:
            return [eval_points[0][arange(eval_points[0].shape[0]),
                                   max_pos], None]

    def grad(self, inp, grads):
        # The strict sense mathematical gradient of the maximum function is
        # not calculated here for it is not defined at every point where some
        # coordinates are identical. However, since the latter set has null
        # Lebesgue measure, the result may be interpreted as weak gradient.

        # @note: This function should work correctly for L{vector}s.
        # (x, y), (gz, gw)
        # gz*dz/dx + gw*dw/dx, gz*dz/dy + gw*dw/dy
        # gMax * dMax/dx + gArgMax * dArgMax/dx,
        # gMax * dMax/daxis + gArgMax * dArgMax/daxis
        # g_max has one less dimension than x, so you need to complete
        # g_max to x's shape when axis=0 the broadcasting mechanism
        # does it automatically
        x, axis = inp
        g_max, g_max_idx = grads

        g_max_disconnected = isinstance(g_max.type, DisconnectedType)
        g_max_idx_disconnected = isinstance(g_max_idx.type, DisconnectedType)

        # if the op is totally disconnected, so are its inputs
        if g_max_disconnected and g_max_idx_disconnected:
            return [DisconnectedType()(), DisconnectedType()()]

        axis_grad = grad_undefined(
            self, 1, axis,
            "argmax is not defined for non-integer axes so"
            " argmax(x, axis+eps) is undefined")

        # if the max is disconnected but the argmax is not,
        # the gradient on its inputs is zero
        if g_max_disconnected:
            return [x.zeros_like(), axis_grad]
        if NoneConst.equals(axis):
            axis_ = list(range(x.ndim))
        else:
            axis_ = axis
        xmax = max(x, axis_)

        # Raise the g_max and xmax to the same number of dim as the input.
        pattern = []
        out_dim = 0
        if NoneConst.equals(axis):
            # We are taking the max/argmax over all dimensions.
            axis = None
        for i in xrange(x.ndim):
            if axis is None or i == axis.data:
                pattern.append('x')
            else:
                pattern.append(out_dim)
                out_dim += 1
        g_max_pad = DimShuffle(g_max.broadcastable, pattern)(g_max)
        xmax_pad = DimShuffle(xmax.broadcastable, pattern)(xmax)

        # Set the grad to the correct position.
        g_x = eq(xmax_pad, x) * g_max_pad
        return g_x, axis_grad
# Module-level singleton used by max_and_argmax / max / argmax below.
_max_and_argmax = MaxAndArgmax()
def makeKeepDims(x, y, axis):
    """
    Reintroduces in y with length one the axes of x which have been left out
    in a prior reduction of x. With this option, the resulting tensor will
    broadcast correctly against the original tensor x.

    :param x: the tensor that was reduced
    :param y: the reduced tensor
    :param axis: the axis/axes that were reduced (None, int, numpy int,
        0-d ndarray, or a sequence of those)
    :raises ValueError: if any axis entry is not a constant integer.
    """
    x = as_tensor_variable(x)
    y = as_tensor_variable(y)

    if axis is None:
        axis = list(range(x.type.ndim))
    elif isinstance(axis, (int, numpy.integer)):
        # Normalize with int() so a numpy integer scalar also passes the
        # isinstance(a, int) constant-axis check below (numpy ints do not
        # subclass int on all platforms).
        axis = [int(axis)]
    elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
        axis = [int(axis)]
    else:
        axis = [int(a) for a in axis]
    newaxis = []
    for a in axis:
        if not isinstance(a, int):
            raise ValueError(
                "keepdims option can be used only with constant axis")
        if a < 0:
            a += x.type.ndim
        newaxis.append(a)
    i = 0
    new_dims = []
    # Build a DimShuffle pattern that inserts a broadcastable ('x') axis
    # at every reduced position and keeps the remaining axes in order.
    for j, _ in enumerate(x.type.broadcastable):
        if j in newaxis:
            new_dims.append('x')
        else:
            new_dims.append(i)
            i += 1
    return DimShuffle(y.type.broadcastable, new_dims)(y)
@constructor
def max_and_argmax(a, axis=None, keepdims=False):
    """Return the maximum elements and their indices along `axis`.

    When axis is None (the default value), the max is performed over
    the flattened tensor.

    keepdims: If this is set to True, the axes which are reduced are
    left in the result as dimensions with size one. With this option,
    the result will broadcast correctly against the original tensor.
    """
    max_out, arg_out = _max_and_argmax(a, axis)
    if not keepdims:
        return [max_out, arg_out]
    return [makeKeepDims(a, max_out, axis),
            makeKeepDims(a, arg_out, axis)]
@constructor
def max(x, axis=None, keepdims=False):
    """Return the maximum elements along `axis` (over the flattened
    tensor when axis is None).

    keepdims: If this is set to True, the axes which are reduced are
    left in the result as dimensions with size one. With this option,
    the result will broadcast correctly against the original tensor.

    :note: we return an error as numpy when we reduce a dim with a shape of 0
    """
    # Two possible implementations: MaxAndArgmax (supports grad and
    # R_op) or CAReduce (faster, and handles axis arguments the former
    # rejects).  Prefer MaxAndArgmax for the richer interface --
    # optimizations replace MaxAndArgmax[0] with CAReduce at compile
    # time anyway -- and fall back to CAReduce when it raises.
    try:
        result = max_and_argmax(x, axis)[0]
    except Exception:
        result = CAReduce(scal.maximum, axis)(x)

    if keepdims:
        result = makeKeepDims(x, result, axis)
    return result
@constructor
def argmax(x, axis=None, keepdims=False):
    """Return indices of the maximum elements along `axis` (over the
    flattened tensor when axis is None).

    keepdims: If this is set to True, the axes which are reduced are
    left in the result as dimensions with size one. With this option,
    the result will broadcast correctly against the original tensor.
    """
    # Goes through MaxAndArgmax, which also computes the max: wasteful
    # in the pure-Python implementation (two passes over the data), but
    # fine once Argmax.c_impl() is in place.
    result = max_and_argmax(x, axis)[1]
    if not keepdims:
        return result
    return makeKeepDims(x, result, axis)
@constructor
def min(x, axis=None, keepdims=False):
    """Return the minimum elements along `axis` (over the flattened
    tensor when axis is None).

    keepdims: If this is set to True, the axes which are reduced are
    left in the result as dimensions with size one. With this option,
    the result will broadcast correctly against the original tensor.
    """
    x = as_tensor_variable(x)
    dtype_str = str(x.dtype)
    if not (dtype_str.startswith('float') or dtype_str in int_dtypes):
        # Be careful about unsigned integers, complex
        raise NotImplementedError()
    # min(x) == -max(-x)
    return -max(-x, axis=axis, keepdims=keepdims)
@constructor
def argmin(x, axis=None, keepdims=False):
    """Return indices of the minimum elements along `axis` (over the
    flattened tensor when axis is None).

    keepdims: If this is set to True, the axes which are reduced are
    left in the result as dimensions with size one. With this option,
    the result will broadcast correctly against the original tensor.
    """
    x = as_tensor_variable(x)
    dtype_str = str(x.dtype)
    if not (dtype_str.startswith('float') or dtype_str in int_dtypes):
        # Be careful about unsigned integers, complex
        raise NotImplementedError()
    # argmin(x) == argmax(-x)
    return argmax(-x, axis=axis, keepdims=keepdims)
@constructor
def smallest(*args):
    """
    Return the [elementwise] smallest of a variable number of arguments.

    Like python's min.
    """
    if len(args) == 2:
        # elementwise select between the two inputs
        first, second = args
        return switch(first < second, first, second)
    return min(stack(*args), axis=0)
@constructor
def largest(*args):
    """
    Return the [elementwise] largest of a variable number of arguments.

    Like python's max.
    """
    if len(args) == 2:
        # elementwise select between the two inputs
        first, second = args
        return switch(first > second, first, second)
    return max(stack(*args), axis=0)
##########################
# Comparison
##########################
# Each comparison stub below is replaced by the decorator with an
# Elemwise Op wrapping the scalar op of the same name; the nfunc
# argument names the equivalent numpy function.
@_scal_elemwise_with_nfunc('less', 2, 1)
def lt(a, b):
    """a < b"""
@_scal_elemwise_with_nfunc('greater', 2, 1)
def gt(a, b):
    """a > b"""
@_scal_elemwise_with_nfunc('less_equal', 2, 1)
def le(a, b):
    """a <= b"""
@_scal_elemwise_with_nfunc('greater_equal', 2, 1)
def ge(a, b):
    """a >= b"""
@_scal_elemwise_with_nfunc('equal', 2, 1)
def eq(a, b):
    """a == b"""
@_scal_elemwise_with_nfunc('not_equal', 2, 1)
def neq(a, b):
    """a != b"""
@_scal_elemwise_with_nfunc('isnan', 1, 1)
def isnan(a):
    """isnan(a)"""
@_scal_elemwise_with_nfunc('isinf', 1, 1)
def isinf(a):
    """isinf(a)"""
def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
    """
    Implements Numpy's ``allclose`` on tensors.

    ``absolute(a - b) <= (atol + rtol * absolute(b))``

    :note: Not a symmetric equation. See Numpy's documentation.

    :param a: input to compare
    :type a: tensor
    :param b: input to compare
    :type b: tensor
    :param rtol: the relative tolerance parameter
    :type rtol: float
    :param atol: the absolute tolerance parameter
    :type atol: float
    :param equal_nan: whether to consider nan's in the same place to be close
    :type equal_nan: bool
    :returns: a boolean value (of type int8 returned by the tensor
        elementwise `all` function) whether all elements in a and b are in
        the tolerance range defined above.
    :rtype: int8
    """
    # Reduce the elementwise comparison down to a single boolean.
    elementwise_close = isclose(a, b, rtol, atol, equal_nan)
    return all(elementwise_close)
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
    """
    Implements Numpy's ``isclose`` on tensors.

    The tolerance values are positive, typically very small numbers. The
    relative difference (`rtol` * abs(`b`)) and the absolute difference
    `atol` are added together to compare against the absolute difference
    between `a` and `b`.

    ``absolute(a - b) <= (atol + rtol * absolute(b))``

    :note: Not a symmetric equation. See Numpy's documentation.

    :param a: input to compare
    :type a: tensor
    :param b: input to compare
    :type b: tensor
    :param rtol: the relative tolerance parameter
    :type rtol: float
    :param atol: the absolute tolerance parameter
    :type atol: float
    :param equal_nan: whether to consider nan's in the same place to be close
    :type equal_nan: bool
    :returns: returns a boolean (int8) array where two arrays are element-wise
        equal within a tolerance.
    :rtype: int8

    >>> import theano
    >>> import numpy as np
    >>> a = theano._asarray([1e10, 1e-7], dtype="float64")
    >>> b = theano._asarray([1.00001e10, 1e-8], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 0], dtype=int8)
    >>> a = theano._asarray([1e10, 1e-8], dtype="float64")
    >>> b = theano._asarray([1.00001e10, 1e-9], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 1], dtype=int8)
    >>> a = theano._asarray([1e10, 1e-8], dtype="float64")
    >>> b = theano._asarray([1.0001e10, 1e-9], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([0, 1], dtype=int8)
    >>> a = theano._asarray([1.0, np.nan], dtype="float64")
    >>> b = theano._asarray([1.0, np.nan], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 0], dtype=int8)
    >>> a = theano._asarray([1.0, np.nan], dtype="float64")
    >>> b = theano._asarray([1.0, np.nan], dtype="float64")
    >>> theano.tensor.isclose(a, b, equal_nan=True).eval()
    array([1, 1], dtype=int8)
    >>> a = theano._asarray([1.0, np.inf], dtype="float64")
    >>> b = theano._asarray([1.0, -np.inf], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 0], dtype=int8)
    >>> a = theano._asarray([1.0, np.inf], dtype="float64")
    >>> b = theano._asarray([1.0, np.inf], dtype="float64")
    >>> theano.tensor.isclose(a, b).eval()
    array([1, 1], dtype=int8)
    """
    # BUG FIX (docs only): several doctest outputs above previously read
    # ``dtype==int8``, which can never match numpy's repr and made the
    # doctests fail; they now read ``dtype=int8`` like the first example.
    #
    # close will be an int8 array of 1 where within tolerance
    # and 0 where not within tolerance or there was a nan or inf value.
    diff = abs(a - b)
    tolerance = atol + rtol * abs(b)
    close_prelim = le(diff, tolerance)
    a_nan = isnan(a)
    b_nan = isnan(b)
    nans = bitwise_or(a_nan, b_nan)
    a_inf = isinf(a)
    b_inf = isinf(b)
    infs = bitwise_or(a_inf, b_inf)
    nans_or_infs = bitwise_or(nans, infs)
    # close is now an array of 0's except where elements are not nan or inf
    # and are within the tolerance.
    close = bitwise_and(close_prelim, bitwise_not(nans_or_infs))
    # deal with signed inf values. this will make an array inf_eq of 0's
    # except where inf values have the same sign.
    both_infs = bitwise_and(a_inf, b_inf)
    inf_signs_eq = eq(a_inf * sgn(a), b_inf * sgn(b))
    inf_eq = bitwise_and(both_infs, inf_signs_eq)
    # now create the potential result combining close and inf_eq
    close_with_infs = bitwise_or(close, inf_eq)
    # deal with comparing nan's.
    if equal_nan:
        both_nans = bitwise_and(a_nan, b_nan)
        return bitwise_or(close_with_infs, both_nans)
    # otherwise nan's aren't considered close.
    else:
        return close_with_infs
##########################
# Condition
##########################


@_scal_elemwise_with_nfunc('where', 3, 1)
def switch(cond, ift, iff):
    """if cond then ift else iff"""

# numpy-compatible alias: numpy.where(cond, ift, iff)
where = switch
##########################
# Bit-wise
##########################
# The decorator generates the Elemwise Op; each `def` only provides the
# name and docstring.


@_scal_elemwise_with_nfunc('bitwise_and', 2, 1)
def and_(a, b):
    """bitwise a & b"""

bitwise_and = and_  # numpy name for it


@_scal_elemwise_with_nfunc('bitwise_or', 2, 1)
def or_(a, b):
    """bitwise a | b"""

bitwise_or = or_  # numpy name for it


@_scal_elemwise_with_nfunc('bitwise_xor', 2, 1)
def xor(a, b):
    """bitwise a ^ b"""

bitwise_xor = xor  # numpy name for it


@_scal_elemwise_with_nfunc('invert', 1, 1)
def invert(a):
    """bitwise ~a"""

bitwise_not = invert  # numpy alias for it


##########################
# Math
##########################


@_scal_elemwise_with_nfunc('abs', 1, 1)
def abs_(a):
    """|`a`|

    TensorVariable overloads the `TensorVariable.__abs__` operator so that
    this function is called when you type abs(a).
    """

# Pretty-print abs_ using the |...| notation.
pprint.assign(abs_, printing.PatternPrinter(('|%(0)s|', -1000)))
@_scal_elemwise_with_nfunc('exp', 1, 1)
def exp(a):
    """e^`a`"""


@_scal_elemwise_with_nfunc('exp2', 1, 1)
def exp2(a):
    """2^`a`"""


@_scal_elemwise_with_nfunc('expm1', 1, 1)
def expm1(a):
    """e^`a` - 1"""


@_scal_elemwise_with_nfunc('negative', 1, 1)
def neg(a):
    """-a"""


# numpy.reciprocal does integer division on integer inputs
# (which is not very interesting)
@_scal_elemwise
def inv(a):
    """1.0/a"""


@_scal_elemwise_with_nfunc('log', 1, 1)
def log(a):
    """base e logarithm of a"""


@_scal_elemwise_with_nfunc('log2', 1, 1)
def log2(a):
    """base 2 logarithm of a"""


@_scal_elemwise_with_nfunc('log10', 1, 1)
def log10(a):
    """base 10 logarithm of a"""


@_scal_elemwise_with_nfunc('log1p', 1, 1)
def log1p(a):
    """log(1+a)"""


@_scal_elemwise_with_nfunc('sign', 1, 1)
def sgn(a):
    """sign of a"""


@_scal_elemwise_with_nfunc('ceil', 1, 1)
def ceil(a):
    """ceiling of a"""


@_scal_elemwise_with_nfunc('floor', 1, 1)
def floor(a):
    """floor of a"""


@_scal_elemwise_with_nfunc('trunc', 1, 1)
def trunc(a):
    """trunc of a"""


@constructor
def iround(a, mode="half_away_from_zero"):
    """cast(round(a,mode),'int64')"""
    # Convenience wrapper: round then cast to a 64-bit integer tensor.
    return cast(round(a, mode), 'int64')
@constructor
def round(a, mode="half_away_from_zero"):
    """round_mode(a) with mode in [half_away_from_zero, half_to_even]"""
    # Dispatch on the requested rounding mode; unknown modes are an error.
    if mode == "half_to_even":
        return round_half_to_even(a)
    if mode == "half_away_from_zero":
        return round_half_away_from_zero(a)
    raise Exception("round mode %s is not implemented." % mode)
@_scal_elemwise_with_nfunc('around', 1, 1)
def round_half_to_even(a):
    """round_half_to_even(a)"""


@_scal_elemwise
def round_half_away_from_zero(a):
    """round_half_away_from_zero(a)"""


@_scal_elemwise_with_nfunc('square', 1, 1)
def sqr(a):
    """square of a"""

# alias to sqr, included to maintain similarity with numpy interface
square = sqr


@_scal_elemwise_with_nfunc('sqrt', 1, 1)
def sqrt(a):
    """square root of a"""


@_scal_elemwise_with_nfunc('deg2rad', 1, 1)
def deg2rad(a):
    """convert degree a to radian"""


@_scal_elemwise_with_nfunc('rad2deg', 1, 1)
def rad2deg(a):
    """convert radian a to degree"""


@_scal_elemwise_with_nfunc('cos', 1, 1)
def cos(a):
    """cosine of a"""


@_scal_elemwise_with_nfunc('arccos', 1, 1)
def arccos(a):
    """arccosine of a"""


@_scal_elemwise_with_nfunc('sin', 1, 1)
def sin(a):
    """sine of a"""


@_scal_elemwise_with_nfunc('arcsin', 1, 1)
def arcsin(a):
    """arcsine of a"""


@_scal_elemwise_with_nfunc('tan', 1, 1)
def tan(a):
    """tangent of a"""


@_scal_elemwise_with_nfunc('arctan', 1, 1)
def arctan(a):
    """arctangent of a"""


# NOTE(review): nin=1 here looks inconsistent with the two-argument
# signature (compare 'add'/'subtract' which use 2) -- confirm whether the
# nfunc spec should be ('arctan2', 2, 1).
@_scal_elemwise_with_nfunc('arctan2', 1, 1)
def arctan2(a, b):
    """arctangent of a / b"""


@_scal_elemwise_with_nfunc('cosh', 1, 1)
def cosh(a):
    """hyperbolic cosine of a"""


@_scal_elemwise_with_nfunc('arccosh', 1, 1)
def arccosh(a):
    """hyperbolic arc cosine of a"""


@_scal_elemwise_with_nfunc('sinh', 1, 1)
def sinh(a):
    """hyperbolic sine of a"""


@_scal_elemwise_with_nfunc('arcsinh', 1, 1)
def arcsinh(a):
    """hyperbolic arc sine of a"""


@_scal_elemwise_with_nfunc('tanh', 1, 1)
def tanh(a):
    """hyperbolic tangent of a"""


@_scal_elemwise_with_nfunc('arctanh', 1, 1)
def arctanh(a):
    """hyperbolic arc tangent of a"""


# The error-function family has no numpy ufunc counterpart, so these use
# the plain @_scal_elemwise decorator.
@_scal_elemwise
def erf(a):
    """error function"""


@_scal_elemwise
def erfc(a):
    """complementary error function"""


@_scal_elemwise
def erfcx(a):
    """scaled complementary error function"""


@_scal_elemwise
def erfinv(a):
    """inverse error function"""


@_scal_elemwise
def erfcinv(a):
    """inverse complementary error function"""


@_scal_elemwise
def gamma(a):
    """gamma function"""


@_scal_elemwise
def gammaln(a):
    """log gamma function"""


@_scal_elemwise
def psi(a):
    """derivative of log gamma function"""


@_scal_elemwise
def chi2sf(x, k):
    """chi squared survival function"""


# numpy.real(float32) return a view on the inputs.
# @_scal_elemwise_with_nfunc('real', 1, 1)
@_scal_elemwise
def real(z):
    """Return real component of complex-valued tensor `z`"""

# Expose as a .real property on tensor variables, mirroring numpy.
_tensor_py_operators.real = property(real)


@_scal_elemwise_with_nfunc('imag', 1, 1)
def imag(z):
    """Return imaginary component of complex-valued tensor `z`"""

# Expose as a .imag property on tensor variables, mirroring numpy.
_tensor_py_operators.imag = property(imag)


@_scal_elemwise_with_nfunc('angle', 1, 1)
def angle(z):
    """Return polar-coordinate angle of complex-valued tensor `z`"""


@_scal_elemwise  # numpy.complex cannot build tensors
def complex(real, imag):
    """Return complex-valued tensor with `real` and `imag` components"""


@_scal_elemwise_with_nfunc('conj', 1, 1)
def conj(z):
    """Return the complex conjugate of `z`."""


@_scal_elemwise
def complex_from_polar(abs, angle):
    """Return complex-valued tensor from polar coordinate specification."""
##########################
# Misc
##########################


# fill, _fill_inplace = _elemwise(scal.second, 'fill',
#    """fill WRITEME (elemwise)""")
@_scal_elemwise
def second(a, b):
    """Create a matrix by filling the shape of a with b"""

# numpy-style alias; `fill` is also what the optimizer replaces with Alloc.
fill = second
pprint.assign(fill, printing.FunctionPrinter('fill'))
@constructor
def ones_like(model, dtype=None):
    """equivalent of numpy.ones_like"""
    # Default to the model's own dtype when none is requested.
    out_dtype = model.type.dtype if dtype is None else dtype
    return fill(model, constant(1.0, dtype=out_dtype))
@constructor
def zeros_like(model, dtype=None):
    """equivalent of numpy.zeros_like"""
    # Default to the model's own dtype when none is requested.
    out_dtype = model.type.dtype if dtype is None else dtype
    return fill(model, constant(0.0, dtype=out_dtype))
def zeros(shape, dtype=None):
    """
    Create a Tensor filled with zeros, closer to Numpy's syntax than ``alloc``.
    """
    if dtype is None:
        dtype = config.floatX
    # A bare scalar shape is promoted to a 1-element shape list.
    if not isinstance(shape, (list, tuple, TensorVariable)):
        shape = [shape]
    return alloc(numpy.array(0, dtype=dtype), *shape)
def ones(shape, dtype=None):
    """
    Create a Tensor filled with ones, closer to Numpy's syntax than ``alloc``.
    """
    if dtype is None:
        dtype = config.floatX
    # A bare scalar shape is promoted to a 1-element shape list.
    if not isinstance(shape, (list, tuple, TensorVariable)):
        shape = [shape]
    return alloc(numpy.array(1, dtype=dtype), *shape)
class Nonzero(gof.Op):
    """
    Return the indices of the elements that are non-zero.

    Returns a matrix of shape (ndim, number of nonzero elements) such that
    element (i,j) is the index in the ith dimension of the jth non-zero
    element.

    Note this is different than NumPy, which returns a tuple of arrays, one for
    each dimension of the input array.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    result : matrix
        matrix containing the indices of the non-zero elements of a.

    See Also
    --------
    nonzero_values : Return the non-zero elements of the input array
    flatnonzero : Return the indices of the non-zero elements of the
        flattened input array.
    """
    __props__ = ()

    def make_node(self, a):
        a = as_tensor_variable(a)
        if a.ndim == 0:
            raise ValueError('Nonzero only supports non-scalar arrays.')
        # Output is always an int64 matrix of shape (ndim, n_nonzero).
        output = [TensorType(dtype='int64', broadcastable=(False, False))()]
        return gof.Apply(self, [a], output)

    def perform(self, node, inp, out_):
        a = inp[0]
        out, = out_
        result_tuple = numpy.nonzero(a)
        if len(result_tuple[0]) > 0:
            result = numpy.vstack(result_tuple)
        else:
            # No non-zero elements: emit an empty (ndim, 0) matrix
            # (len(result_tuple) == a.ndim by numpy.nonzero's contract).
            result = numpy.zeros((len(result_tuple), 0))
        out[0] = result.astype('int64')

    def grad(self, inp, grads):
        # Indices are not differentiable with respect to the input values.
        return [grad_undefined(self, 0, inp[0])]
# Single module-level instance of the Nonzero Op, reused by nonzero() below.
_nonzero = Nonzero()
def nonzero(a, return_matrix=False):
    """
    Returns one of the following:

        If return_matrix is False (default, same as NumPy):
            A tuple of vector arrays such that the ith element of the jth array
            is the index of the ith non-zero element of the input array in the
            jth dimension.

        If return_matrix is True (same as Theano Op):
            Returns a matrix of shape (ndim, number of nonzero elements) such
            that element (i,j) is the index in the ith dimension of the jth
            non-zero element.

    Parameters
    ----------
    a : array_like
        Input array.
    return_matrix : bool
        If True, returns a symbolic matrix. If False, returns a tuple of
        arrays. Defaults to False.

    Returns
    -------
    result : tuple of vectors or matrix

    See Also
    --------
    nonzero_values : Return the non-zero elements of the input array
    flatnonzero : Return the indices of the non-zero elements of the
        flattened input array.
    """
    matrix_result = _nonzero(a)
    if return_matrix:
        return matrix_result
    # NumPy-style output: one index vector per dimension.
    ndim = a.ndim
    if ndim == 0:
        return (matrix_result[0],)
    return tuple(matrix_result[i] for i in xrange(ndim))
def flatnonzero(a):
    """
    Return a vector of indices that are non-zero in the flattened version of a.

    This is equivalent to nonzero(a.flatten(), return_matrix=True)[0]

    Parameters
    ----------
    a : tensor
        Input tensor

    Returns
    -------
    res : vector
        Output vector, containing the indices of the elements of `a.flatten()`
        that are non-zero.

    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input array.
    nonzero_values : Return the non-zero elements of the input array
    """
    if a.ndim == 0:
        raise ValueError('Nonzero only supports non-scalar arrays.')
    flat = a.flatten()
    return nonzero(flat, return_matrix=True)[0]
def nonzero_values(a):
    """
    Return a vector of non-zero elements contained in the input array.

    The following behavior works to extract non-zero elements from an array
    in NumPy but is *NOT* supported by Theano:

        a[numpy.nonzero(a)]

    Instead, the nonzero_values function or method should be used:

        tensor.nonzero_values(a)
        a.nonzero_values()

    This is equivalent to the following:

        a.flatten()[tensor.flatnonzero(a)]

    Parameters
    ----------
    a : tensor
        Input tensor

    Returns
    -------
    res : vector
        Output vector, containing the non-zero elements of a.

    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input array.
    flatnonzero : Return the indices of the non-zero elements of the
        flattened input array.
    """
    flat = a.flatten()
    return flat[flatnonzero(a)]
class Tri(gof.Op):
    """Op wrapping numpy.tri: lower-triangular matrix of ones.

    See the `tri` helper below for the user-facing interface.
    """
    __props__ = ("dtype",)

    def __init__(self, dtype=None):
        if dtype is None:
            dtype = config.floatX
        self.dtype = dtype

    def make_node(self, N, M, k):
        N = as_tensor_variable(N)
        M = as_tensor_variable(M)
        k = as_tensor_variable(k)
        # Output is a matrix (2-D, non-broadcastable) of self.dtype.
        return gof.Apply(
            self,
            [N, M, k],
            [TensorType(dtype=self.dtype, broadcastable=(False, False))()])

    def perform(self, node, inp, out_):
        N, M, k = inp
        out, = out_
        out[0] = numpy.tri(N, M, k, dtype=self.dtype)

    def infer_shape(self, node, in_shapes):
        # The output shape is exactly (N, M), i.e. the first two inputs.
        out_shape = [node.inputs[0], node.inputs[1]]
        return [out_shape]

    def grad(self, inp, grads):
        # A constant-valued output: gradient w.r.t. shape/offset is undefined.
        return [grad_undefined(self, i, inp[i]) for i in xrange(3)]
def tri(N, M=None, k=0, dtype=None):
    """
    An array with ones at and below the given diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the array.
    M : int, optional
        Number of columns in the array.
        By default, `M` is taken equal to `N`.
    k : int, optional
        The sub-diagonal at and below which the array is filled.
        `k` = 0 is the main diagonal, while `k` < 0 is below it,
        and `k` > 0 is above. The default is 0.
    dtype : dtype, optional
        Data type of the returned array. The default is float.

    Returns
    -------
    tri : Array of shape (N, M)
        Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
    """
    # DOC FIX: the condition above previously read ``i <= j + k``, which
    # describes the *upper* triangle; numpy.tri documents ``j <= i + k``.
    if dtype is None:
        dtype = config.floatX
    if M is None:
        M = N
    op = Tri(dtype)
    return op(N, M, k)
def tril(m, k=0):
    """
    Lower triangle of an array.

    Return a copy of an array with elements above the `k`-th diagonal zeroed.

    Parameters
    ----------
    m : array_like, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements. `k = 0` (the default) is the
        main diagonal, `k < 0` is below it and `k > 0` is above.

    Returns
    -------
    tril : array, shape (M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.

    See Also
    --------
    triu : same thing, only for the upper triangle
    """
    # Multiply by a 0/1 lower-triangular mask of matching shape and dtype.
    mask = tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype)
    return m * mask
def triu(m, k=0):
    """
    Upper triangle of an array.

    Return a copy of a matrix with the elements below the `k`-th diagonal
    zeroed.

    Please refer to the documentation for `tril` for further details.

    See Also
    --------
    tril : lower triangle of an array
    """
    # The upper triangle is the complement of the strictly-lower mask.
    lower_mask = tri(m.shape[0], m.shape[1], k=k - 1, dtype=m.dtype)
    return m * (1 - lower_mask)
class Eye(gof.Op):
    """Op wrapping numpy.eye: 2-D array with ones on a diagonal.

    See the `eye` helper below for the user-facing interface.
    """
    __props__ = ("dtype", )

    def __init__(self, dtype=None):
        if dtype is None:
            dtype = config.floatX
        self.dtype = dtype

    def make_node(self, n, m, k):
        n = as_tensor_variable(n)
        m = as_tensor_variable(m)
        k = as_tensor_variable(k)
        # All three inputs must be scalars.
        assert n.ndim == 0
        assert m.ndim == 0
        assert k.ndim == 0
        # Output is a matrix (2-D, non-broadcastable) of self.dtype.
        return gof.Apply(
            self,
            [n, m, k],
            [TensorType(dtype=self.dtype, broadcastable=(False, False))()])

    def perform(self, node, inp, out_):
        n, m, k = inp
        out, = out_
        out[0] = numpy.eye(n, m, k, dtype=self.dtype)

    def infer_shape(self, node, in_shapes):
        # The output shape is exactly (n, m), i.e. the first two inputs.
        out_shape = [node.inputs[0], node.inputs[1]]
        return [out_shape]

    def grad(self, inp, grads):
        # A constant-valued output: gradient w.r.t. shape/offset is undefined.
        return [grad_undefined(self, i, inp[i]) for i in xrange(3)]
def eye(n, m=None, k=0, dtype=None):
    """Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    n : int
        Number of rows in the output.
    m : int, optional
        Number of columns in the output. If None, defaults to `n`.
    k : int, optional
        Index of the diagonal: 0 (the default) refers to the main diagonal,
        a positive value refers to an upper diagonal, and a negative value
        to a lower diagonal.
    dtype : data-type, optional
        Data-type of the returned array.

    Returns
    -------
    I : ndarray of shape (n, m)
        An array where all elements are equal to zero, except for the `k`-th
        diagonal, whose values are equal to one.
    """
    if m is None:
        m = n
    if dtype is None:
        dtype = config.floatX
    return Eye(dtype)(n, m, k)
def identity_like(x):
    """Return an identity matrix with the same shape and dtype as `x`.

    Uses the (possibly symbolic) first two dimensions of `x`; presumably
    `x` is expected to be at least 2-D -- TODO confirm with callers.
    """
    return eye(x.shape[0], x.shape[1], k=0, dtype=x.dtype)
class Alloc(gof.Op):
    """Create a Tensor from an initial value and a desired shape

    alloc(value, shape0, shape1, ..., shapeN)

    Returns an N-dimensional tensor initialized by `value` using something
    equivalent to

        z = numpy.zeros(shape, value.dtype)
        z += value

    The result has N dimensions, has the dtype of `value` and is obtained by
    broadcasting value over the output ndarray.

    This Op is used to replace fill() during optimizations because after shapes
    are lifted, the first argument to fill can often be pruned from the graph.
    """
    __props__ = ()

    def validate_shape(self, shape):
        # Coerce each shape entry to a tensor variable and reject any
        # non-integer dtype; also derive the broadcastable pattern.
        sh = [as_tensor_variable(s) for s in shape]
        bcast = []
        for i, s in enumerate(sh):
            if s.type.dtype[:3] not in ('int', 'uin'):
                if config.exception_verbosity == 'high':
                    s_as_str = '\n' + min_informative_str(s)
                else:
                    s_as_str = str(s)
                raise TypeError('Shape arguments to Alloc must be integers, '
                                'but argument %s is not for apply node: %s' %
                                (i, s_as_str))
            # if s is constant 1, then we're broadcastable in that dim
            try:
                const_shp = get_scalar_constant_value(s)
            except NotScalarConstantError:
                const_shp = None
            bcast.append(numpy.all(1 == const_shp))
        return sh, bcast

    def make_node(self, value, *shape):
        v = as_tensor_variable(value)
        sh, bcast = self.validate_shape(shape)
        # The value may have fewer dims than the target shape (it is then
        # broadcast), but never more.
        if v.ndim > len(sh):
            raise TypeError("The Alloc value to use has more dimensions"
                            " than the specified dimensions",
                            v.ndim, len(sh))
        otype = TensorType(dtype=v.dtype, broadcastable=bcast)
        return gof.Apply(self, [v] + sh, [otype()])

    def perform(self, node, inputs, out_):
        out, = out_
        v = inputs[0]
        sh = tuple([int(i) for i in inputs[1:]])
        if out[0] is None or out[0].shape != sh:
            if v.size == 1 and v.item() == 0:
                # Fast path: numpy.zeros avoids a separate fill pass.
                out[0] = numpy.zeros(sh, dtype=v.dtype)
            else:
                out[0] = numpy.empty(sh, dtype=v.dtype)
                out[0][...] = v  # broadcast v to fill us up
        else:
            # reuse the allocated memory.
            out[0][...] = v  # broadcast v to fill us up

    def c_code(self, node, name, inp, out, sub):
        vv = inp[0]
        ndim = len(inp[1:])
        zz, = out
        fail = sub['fail']
        code = """
            npy_intp shape[%(ndim)s];
            """ % dict(ndim=ndim)
        # Initialize shape
        for i, shp_i in enumerate(inp[1:]):
            code += """
                shape[%(i)s] = ((dtype_%(shp_i)s*) PyArray_DATA(%(shp_i)s))[0];
                """ % dict(i=i, shp_i=shp_i)
        # Allocate the output only when missing or wrongly shaped, then
        # let PyArray_CopyInto broadcast the value into it.
        code += """
            int need_new_out = (NULL == %(zz)s);
            for (int i = 0; i < %(ndim)s; i++)
                need_new_out = (need_new_out
                                || (PyArray_DIMS(%(zz)s)[i] != shape[i]));
            if (need_new_out)
            {
                Py_XDECREF(%(zz)s);
                %(zz)s = (PyArrayObject*) PyArray_SimpleNew(%(ndim)s,
                    shape, PyArray_TYPE((PyArrayObject*) py_%(vv)s));
                if (!%(zz)s)
                {
                    PyErr_SetString(PyExc_MemoryError, "alloc failed");
                    %(fail)s
                }
            }
            // This function takes care of broadcasting
            PyArray_CopyInto(%(zz)s, %(vv)s);
            """ % dict(vv=vv, ndim=ndim, zz=zz, fail=fail)
        return code

    def c_code_cache_version(self):
        return (1,)

    def infer_shape(self, node, input_shapes):
        # The output shape is exactly the shape inputs.
        return [node.inputs[1:]]

    def connection_pattern(self, node):
        # Only the value input (index 0) is connected to the output.
        rval = [[True]]
        for ipt in node.inputs[1:]:
            rval.append([False])
        return rval

    def grad(self, inputs, grads):
        x = inputs[0]
        gz = grads[0]
        n_axes_to_sum = gz.ndim - x.ndim
        # The number of dimensions added
        axis = list(range(n_axes_to_sum))
        # The broadcasted dimensions
        axis_broadcasted = []
        axis_kept = []
        for i, (ib, gb) in enumerate(
                zip(inputs[0].broadcastable,
                    # We need the dimensions corresponding to x
                    grads[0].broadcastable[-inputs[0].ndim:])):
            if ib and not gb:
                axis_broadcasted.append(i + n_axes_to_sum)
            else:
                axis_kept.append(i)
        gx = gz.sum(axis=axis + axis_broadcasted)
        if axis_broadcasted:
            # Dimshuffle to add back the broadcasted dims
            # (NOTE: the loop variable below shadows the outer `axis` list,
            # which is not used again afterwards.)
            new_order = ['x'] * x.ndim
            for idx, axis in enumerate(axis_kept):
                new_order[axis] = idx
            gx = gx.dimshuffle(new_order)
        # The *elements* of the output are not connected to
        # the inputs that specify the shape. If you grow the
        # shape by epsilon, the existing elements do not
        # change.
        return [gx] + [DisconnectedType()() for i in inputs[1:]]

    def __call__(self, val, *shapes, **kwargs):
        """
        If the alloc would be useless, this function returns val.

        If this function is called outside of a graph optimization context
        (for instance, it is manually called by a user building a graph),
        then we always return an Alloc node, to allow for DebugMode to check
        for size mismatches.

        If you always want an Alloc node, call make_node.
        """
        ret = super(Alloc, self).__call__(val, *shapes, **kwargs)
        try:
            # It makes optimization difficult when useless allocs are thrown
            # into the graph at every stage of optimization. This little logic
            # tries to help at least in some cases.
            if hasattr(val, 'fgraph') and (val.type == ret.type):
                return val
        except AttributeError:
            pass
        return ret

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return [None]
        return self(eval_points[0], *inputs[1:], **dict(return_list=True))

    def do_constant_folding(self, node):
        if not getattr(node.outputs[0], 'clients', []):
            # If there are no clients then there is no point doing constant
            # folding.
            return False
        for client in node.outputs[0].clients:
            if client[0] == 'output':
                # If the output is a constant, it will have to be deepcopied
                # each time the function is called. So we do not fold.
                return False
            elif (
                    # The following ops work inplace of their input id 0.
                    client[1] == 0 and
                    isinstance(client[0].op, (
                        # Ops that will work inplace on the Alloc. So if they
                        # get constant_folded, they would copy the
                        # constant and this is less efficient.
                        # Not doing the constant folding could also lower
                        # the peak memory usage, as the "constant" won't
                        # always exist.
                        theano.tensor.subtensor.IncSubtensor,
                        theano.tensor.subtensor.AdvancedIncSubtensor1,
                        theano.tensor.subtensor.AdvancedIncSubtensor,
                        theano.tensor.blas.Gemv,
                        theano.tensor.blas_c.CGemv,
                        theano.tensor.blas.Ger,
                        theano.tensor.blas_c.CGer,
                        theano.tensor.blas_scipy.ScipyGer))):
                return False
            # If the clients is a transfer to the GPU, we don't want to
            # fold. We let the Alloc being moved to the GPU, then we
            # let the GPU algo decide if it need to fold it or not.
            elif client[0].op.__class__.__name__.lower().startswith("gpu"):
                return False
        return True
# Module-level Alloc instance and its pretty-printer.
alloc = Alloc()
pprint.assign(alloc, printing.FunctionPrinter('alloc'))


# NOTE: the string below is a free-standing expression statement serving
# as informal documentation for tensor_copy; it is not attached to it.
"""Create a duplicate of `a` (with duplicated storage)"""
tensor_copy = elemwise.Elemwise(scal.identity)
pprint.assign(tensor_copy, printing.IgnorePrinter())
@constructor
def sum(input, axis=None, dtype=None, keepdims=False, acc_dtype=None):
    """
    Computes the sum along the given axis(es) of a tensor `input`

    When axis is None (the default value), the sum is performed
    over the flattened tensor.

    keepdims: If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.

    For full documentation see ``tensor.elemwise.Sum``.
    In particular please pay attention to the important warning when using
    a custom acc_dtype.
    """
    result = elemwise.Sum(axis=axis, dtype=dtype, acc_dtype=acc_dtype)(input)
    if not keepdims:
        return result
    return makeKeepDims(input, result, axis)

pprint.assign(Sum(), printing.FunctionPrinter('sum'))
@constructor
def prod(input, axis=None, dtype=None, keepdims=False, acc_dtype=None,
         no_zeros_in_input=False):
    """
    Computes the product along the given axis(es) of a tensor `input`

    When axis is None (the default value), the product is performed
    over the flattened tensor.

    keepdims: If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the result
        will broadcast correctly against the original tensor.

    For full documentation see ``tensor.elemwise.Prod``.
    """
    result = elemwise.Prod(axis, dtype=dtype, acc_dtype=acc_dtype,
                           no_zeros_in_input=no_zeros_in_input)(input)
    if not keepdims:
        return result
    return makeKeepDims(input, result, axis)
class Mean(elemwise.CAReduce):
    """Mean reduction Op: accumulates with scalar add, then divides by size.

    Only supports reduction over all axes (axis=None) or a single axis.
    """
    def __init__(self, axis=None):
        elemwise.CAReduce.__init__(self, scal.add, axis)
        # CAReduce normalizes axis to None or a tuple; Mean only handles
        # the all-axes and single-axis cases.
        assert self.axis is None or len(self.axis) == 1

    def __str__(self):
        if self.axis is not None:
            return "Mean{%s}" % (", ".join(str(x) for x in self.axis))
        else:
            return "Mean"

    def _output_dtype(self, idtype):
        # we want to protect against overflow
        return 'float64'

    def perform(self, node, inp, out):
        input, = inp
        output, = out
        if self.axis is None:
            axis = None
        else:
            axis = self.axis[0]
        # numpy.asarray is needed as otherwise we can end up with a
        # numpy scalar.
        output[0] = numpy.asarray(numpy.mean(input, dtype='float64',
                                             axis=axis))

    def c_code(self, node, name, inames, onames, sub):
        if self.axis is not None:
            # NOTE(review): `super(Op, self)` dispatches past `Op` in the
            # MRO, skipping CAReduce's implementation -- presumably meant to
            # signal "no C implementation for axis reductions"; confirm.
            return super(Op, self).c_code(node, name, inames, onames, sub)
        ret = elemwise.CAReduce.c_code(self, node, name, inames, onames, sub)
        # TODO: c_code perform support only axis is None
        # Divide the accumulated (0-d double) sum by the input size.
        return ret + """
        *((double *)PyArray_DATA(%s)) /= PyArray_SIZE(%s);
        """ % (onames[0], inames[0])

    # TODO: implement the grad. When done and tested, you can make this the
    # default version.
    # def grad(self, (x,), (gout,)):
    #    import pdb;pdb.set_trace()
    #    return grad(mean(x, self.axis, op=False),[x])
@constructor
def mean(input, axis=None, dtype=None, op=False, keepdims=False,
         acc_dtype=None):
    """
    Computes the mean value along the given axis(es) of a tensor `input`

    :param axis: compute the mean along this axis of the tensor.
        None means all axes (like numpy).
    :type axis: None or int or (list of int) (see `Sum`)

    :param dtype: dtype to cast the result of the inner summation into.
        For instance, by default, a sum of a float32 tensor will be
        done in float64 (acc_dtype would be float64 by default),
        but that result will be casted back in float32.
    :type dtype: None or string

    :param keepdims: If this is set to True, the axes which are reduced are
        left in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original tensor.

    :param acc_dtype: dtype to use for the inner summation. This will not
        necessarily be the dtype of the output (in particular
        if it is a discrete (int/uint) dtype, the output will
        be in a float type). If None, then we use the same rules as `sum()`.
    :type acc_dtype: None or string

    :note: for gpu, if you specify dtype=float32, everything will be done
        on the gpu.
    """
    if op:
        # The Mean Op is hard-coded to accumulate and output float64.
        if dtype not in (None, 'float64'):
            raise NotImplementedError(
                'The Mean op does not support the dtype argument, '
                'and will always use float64. If you want to specify '
                'the dtype, call tensor.mean(..., op=False).',
                dtype)
        if acc_dtype not in (None, 'float64'):
            raise NotImplementedError(
                'The Mean op does not support the acc_dtype argument, '
                'and will always use float64. If you want to specify '
                'acc_dtype, call tensor.mean(..., op=False).',
                dtype)
        out = Mean(axis)(input)
        if keepdims:
            out = makeKeepDims(input, out, axis)
        return out

    if dtype is not None:
        # The summation will be done with the specified dtype.
        # sum() will complain if it is not suitable.
        sum_dtype = dtype
    else:
        sum_dtype = None
    # float16 overflows way too fast for sum
    if ((sum_dtype == 'float16' or input.dtype == 'float16') and
            acc_dtype != 'float16'):
        # BUG FIX: this was previously written `sum_dtype == 'float32'`,
        # a no-op comparison, so float16 inputs silently kept summing in
        # float16 and could overflow.
        sum_dtype = 'float32'

    s = sum(input, axis=axis, dtype=sum_dtype, keepdims=keepdims,
            acc_dtype=acc_dtype)
    shp = shape(input)

    # Cast shp into a float type
    # TODO Once we have a consistent casting policy, we could simply
    # use true_div.
    if s.dtype in ('float16', 'float32', 'complex64'):
        shp = cast(shp, 'float32')
    else:
        shp = cast(shp, 'float64')

    if axis is None:
        axis = list(range(input.ndim))
    elif isinstance(axis, (int, numpy.integer)):
        axis = [axis]
    elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
        axis = [int(axis)]
    else:
        axis = [int(a) for a in axis]

    # This sequential division will possibly be optimized by Theano:
    for i in axis:
        s = true_div(s, shp[i])

    # Cast the result back to float16 when that is the requested/implied
    # output dtype (the division above was done in a wider float type).
    if dtype == 'float16' or (dtype is None and input.dtype == 'float16'):
        s = cast(s, 'float16')
    return s
@constructor
def var(input, axis=None, keepdims=False):
    """
    Computes the variance along the given axis(es) of a tensor `input`.

    :param axis: Compute the variance along this axis of the tensor.
        None means all axes (like numpy).
    :type axis: None or int or (list of int) (see `Sum`)

    :param keepdims: If this is set to True, the axes which are reduced are
        left in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original tensor.

    :note: It uses the two-pass algorithm for more stable results.
        https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
        There exist other implementations that are even more stable, but
        probably slower.
    """
    # Normalize `axis` to a list of plain ints.
    ndim = input.type.ndim
    if axis is None:
        axis = list(range(ndim))
    elif isinstance(axis, (int, numpy.integer)):
        axis = [axis]
    elif isinstance(axis, numpy.ndarray) and axis.ndim == 0:
        axis = [int(axis)]
    else:
        axis = [int(a) for a in axis]
    # Two-pass algorithm: first center the input, then average the squares.
    centered = input - mean(input, axis, keepdims=True)
    return mean((centered ** 2), axis, keepdims=keepdims)
@constructor
def std(input, axis=None, keepdims=False):
    """
    Computes the standard deviation along the given axis(es)
    of a tensor `input`.

    :param axis: Compute the standard deviation along this axis of the
        tensor. None means all axes (like numpy).
    :type axis: None or int or (list of int) (see `Sum`)

    :param keepdims: If this is set to True, the axes which are reduced are
        left in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original tensor.

    :note: It calls `var()` and `var()` uses the two-pass algorithm for more
        stable results.
        https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
        There exist other implementations that are even more stable, but
        probably slower.
    """
    variance = var(input=input, axis=axis, keepdims=keepdims)
    return sqrt(variance)
class Default(gof.Op):
    """
    Takes an input x and a default value. If the input is not None, a
    reference to it is returned. If the input is None, a copy of the
    default value is returned instead. The input and the default must
    have exactly the same type.
    """
    # Output 0 may be a view of input 0 (perform returns x itself when
    # x is not None).
    view_map = {0: [0]}
    # No parameters: all Default instances compare/hash equal.
    __props__ = ()
    def make_node(self, x, default):
        """Build the Apply node; raises TypeError if the two arguments do
        not have exactly the same type."""
        x, default = as_tensor_variable(x), as_tensor_variable(default)
        if x.type != default.type:
            raise TypeError('Both default() arguments must have same type',
                            x, default)
        return gof.Apply(self, [x, default], [default.type()])
    def perform(self, node, inp, out_):
        x, default = inp
        out, = out_
        if x is None:
            # why copy? Theano can't yet understand out[0] being a view of
            # either x or y, so we can be a view of x, but only a copy of y.
            out[0] = default.copy()
        else:
            out[0] = x
# Module-level singleton: ``default(x, d)`` returns x, or a copy of d
# when x is None (see the Default op above).
default = Default()
setdefault = default  # legacy alias, kept for backward compatibility
##########################
# Arithmetic operations
##########################
@_scal_elemwise_with_nfunc('maximum', 2, 1)
def maximum(x, y):
    """Elementwise maximum of `x` and `y`.
    See `max` for the maximum over the elements of one tensor.
    """
    # see decorator for function body (the decorator installs the real
    # implementation; this stub only carries the signature and docstring)
@_scal_elemwise_with_nfunc('minimum', 2, 1)
def minimum(x, y):
    """Elementwise minimum of `x` and `y`.
    See `min` for the minimum over the elements of one tensor.
    """
    # see decorator for function body (the decorator installs the real
    # implementation; this stub only carries the signature and docstring)
def div_proxy(x, y):
    """Proxy for either true_div or int_div, depending on types of x, y.

    When both operands have a discrete dtype, integer division is
    selected; otherwise true division is.  The decision is delegated to
    `scal.int_or_true_div`, which returns the name prefix of the
    division function to use.
    """
    # Look the function up in the module globals instead of `eval`:
    # for a plain identifier the two are equivalent, but the dict lookup
    # avoids the eval antipattern and is clearer.
    f = globals()['%s_div' % scal.int_or_true_div(
        as_tensor_variable(x).dtype in discrete_dtypes,
        as_tensor_variable(y).dtype in discrete_dtypes)]
    return f(x, y)
def divmod(x, y):
    """Elementwise divmod: returns ``(floor_div(x, y), mod_check(x, y))``.
    NOTE: this intentionally shadows the Python builtin `divmod` inside
    this module.
    """
    return floor_div(x, y), mod_check(x, y)
@_scal_elemwise_with_nfunc('add', 2, 1)
def add(a, *other_terms):
    """Elementwise addition of two or more terms."""
    # see decorator for function body (the decorator installs the real
    # implementation)
@_scal_elemwise_with_nfunc('subtract', 2, 1)
def sub(a, b):
    """Elementwise subtraction."""
    # see decorator for function body (the decorator installs the real
    # implementation)
@_scal_elemwise_with_nfunc('multiply', 2, 1)
def mul(a, *other_terms):
    """Elementwise multiplication of two or more terms."""
    # see decorator for function body (the decorator installs the real
    # implementation)
@_scal_elemwise_with_nfunc('true_divide', 2, 1)
def true_div(a, b):
    """Elementwise [true] division (inverse of multiplication)."""
    # see decorator for function body (the decorator installs the real
    # implementation)
@_scal_elemwise_with_nfunc('floor_divide', 2, 1)
def int_div(a, b):
    """Elementwise [floor] division (inverse of multiplication)."""
    # see decorator for function body (the decorator installs the real
    # implementation)
# floor_div and int_div are the same thing
floor_div = int_div
def ceil_intdiv(a, b):
    """
    Safely compute ceil(float_division(a, b)).
    Works for all dtypes, but mostly useful when a and b are int.
    """
    # If a and b are int with not many significant bits, we could
    # cast them to float to avoid doing the modulo. We do not know if this
    # is faster or not. But this is not safe for int64 as the cast will
    # lose precision.
    # e.g.: cast(cast(a, scalar.upcast(a, 'float32')) / b, scal.upcast(a, b))
    # We cast for the case when a and b are uint*. Otherwise neq will
    # force their upcast to int.
    div = int_div(a, b)
    # ceil(a / b) == floor(a / b) + (1 if a % b != 0 else 0)
    ret = cast(neq(a % b, 0), div.dtype) + div
    # NOTE(review): scal.upcast is handed the two input *variables* here
    # rather than their dtype strings -- confirm it accepts variables.
    assert ret.dtype == scal.upcast(div.owner.inputs[0], div.owner.inputs[1])
    return ret
def mod_check(x, y):
    """Apply `mod` after verifying neither operand is complex.

    The modulo operation is currently forbidden on complex dtypes, so
    this raises `scal.Mod.complex_error` in that case.
    """
    x_dtype = as_tensor_variable(x).dtype
    y_dtype = as_tensor_variable(y).dtype
    if x_dtype in complex_dtypes or y_dtype in complex_dtypes:
        # Currently forbidden.
        raise scal.Mod.complex_error
    return mod(x, y)
@_scal_elemwise_with_nfunc('mod', 2, 1)
def mod(a, b):
    """Elementwise modulo."""
    # see decorator for function body (the decorator installs the real
    # implementation)
@_scal_elemwise_with_nfunc('power', 2, 1)
def pow(a, b):
    """Elementwise power (``a ** b``)."""
    # see decorator for function body (the decorator installs the real
    # implementation)
# numpy.clip does not work correctly when min is bigger than max,
# so we do not use @_scal_elemwise_with_nfunc('clip', 3, 1)
@_scal_elemwise
def clip(x, min, max):
    """clip x to be between min and max.
    :note: When `x` is equal to the boundaries, the output is considered
    to be `x`, so at these points, the gradient of the cost wrt the output
    will be propagated to `x`, not to `min` nor `max`. In other words,
    on these points, the gradient wrt `x` will be equal to the gradient wrt
    the output, and the gradient wrt `min` and `max` will be zero.
    """
    # see decorator for function body (the decorator installs the real
    # implementation; parameter names `min`/`max` shadow the builtins but
    # are part of the public interface)
    # for grep: clamp, bound
# Register pretty-printers so these elemwise ops display with infix
# operator syntax (arguments: symbol, precedence, associativity).
pprint.assign(add, printing.OperatorPrinter('+', -2, 'either'))
pprint.assign(mul, printing.OperatorPrinter('*', -1, 'either'))
pprint.assign(sub, printing.OperatorPrinter('-', -2, 'left'))
pprint.assign(neg, printing.OperatorPrinter('-', 0, 'either'))
pprint.assign(true_div, printing.OperatorPrinter('/', -1, 'left'))
pprint.assign(int_div, printing.OperatorPrinter('//', -1, 'left'))
pprint.assign(pow, printing.OperatorPrinter('**', 1, 'right'))
##########################
# View Operations
##########################
def extract_constant(x, elemwise=True):
    '''
    This function is basically a call to tensor.get_scalar_constant_value.
    The main difference is the behaviour in case of failure.  While
    get_scalar_constant_value raises a TypeError, this function returns x,
    as a tensor if possible.  If x is a ScalarVariable from a
    scalar_from_tensor, we remove the conversion.  If x is just a
    ScalarVariable, we convert it to a tensor with tensor_from_scalar.
    '''
    # Happy path: x really is a scalar constant.
    try:
        return get_scalar_constant_value(x, elemwise=elemwise)
    except NotScalarConstantError:
        pass
    is_scalar_var = isinstance(
        x, (scal.ScalarVariable, scal.sharedvar.ScalarSharedVariable))
    if is_scalar_var:
        if x.owner and isinstance(x.owner.op, ScalarFromTensor):
            # Undo the tensor -> scalar conversion instead of stacking
            # another conversion on top.
            return x.owner.inputs[0]
        return tensor_from_scalar(x)
    return x
def transpose(x, axes=None):
    """
    Reorder the dimensions of x. (Default: reverse them)
    This is a macro around dimshuffle that matches the numpy.transpose
    function.
    """
    # Reversed dimension order, numpy's default for transpose.
    reversed_axes = list(range(x.ndim - 1, -1, -1))
    if axes is None:
        axes = reversed_axes
    result = DimShuffle(x.broadcastable, axes, inplace=False)(x)
    # Mimic numpy's `.T` naming when this is a plain reversal.
    if x.name and axes == reversed_axes:
        result.name = x.name + '.T'
    return result
def batched_dot(x, y):
    """Compute a batch of dot products by scanning over the first axis.
    :param x: A Tensor with sizes e.g.: for 3D (dim1, dim3, dim2)
    :param y: A Tensor with sizes e.g.: for 3D (dim1, dim2, dim4)
    The dot product is taken between corresponding sub-tensors of `x`
    and `y`, iterating over the first dimension using scan.
    Returns a tensor of size e.g. if it is 3D: (dim1, dim3, dim4)
    Example:
    >>> first = tensor.tensor3('first')
    >>> second = tensor.tensor3('second')
    >>> result = batched_dot(first, second)
    :note: This is a subset of numpy.einsum, but we do not provide it for now.
    But numpy einsum is slower than dot or tensordot:
    http://mail.scipy.org/pipermail/numpy-discussion/2012-October/064259.html
    """
    out, _ = theano.scan(
        fn=lambda x_sub, y_sub: theano.tensor.dot(x_sub, y_sub),
        outputs_info=None,
        sequences=[x, y],
        non_sequences=None)
    return out
def batched_tensordot(x, y, axes=2):
    """
    :param x: A Tensor with sizes e.g.: for 3D (dim1, dim3, dim2)
    :param y: A Tensor with sizes e.g.: for 3D (dim1, dim2, dim4)
    :param axes: an integer or array. If an integer, the number of axes
                 to sum over. If an array, it must have two array
                 elements containing the axes to sum over in each tensor.
                 If an integer i, it is converted to an array containing
                 the last i dimensions of the first tensor and the first
                 i dimensions of the second tensor (excluding the first
                 (batch) dimension):
                     axes = [list(range(a.ndim - i, b.ndim)), list(range(1,i+1))]
                 If an array, its two elements must contain compatible axes
                 of the two tensors. For example, [[1, 2], [2, 4]] means sum
                 over the 2nd and 3rd axes of a and the 3rd and 5th axes of b.
                 (Remember axes are zero-indexed!) The 2nd axis of a and the
                 3rd axis of b must have the same shape; the same is true for
                 the 3rd axis of a and the 5th axis of b.
    :type axes: int or array-like of length 2
    A hybrid of batch_dot and tensordot, this function computes the
    tensordot product between the two tensors, by iterating over the
    first dimension using scan to perform a sequence of tensordots.
    """
    if isinstance(axes, (list, numpy.ndarray)):
        # Work on a copy so the caller's `axes` is not mutated by the
        # in-place shift below.
        if isinstance(axes, list):
            axes = numpy.asarray(axes)
        else:
            axes = axes.copy()
        # Axis 0 is the batch axis consumed by scan, so only strictly
        # positive axes are valid here.  (The message used to say
        # "greater than one", which contradicted the `> 0` check.)
        assert numpy.greater(axes, 0).all(), (
            "All axes should be greater than zero, as the "
            "first axis is iterated over (batch-wise scan)")
        # Inside scan each element has lost its leading batch dimension,
        # so shift the requested axes down by one.
        axes -= 1
    result, updates = theano.scan(
        fn=lambda x_mat, y_mat:
        theano.tensor.tensordot(x_mat, y_mat, axes),
        outputs_info=None,
        sequences=[x, y],
        non_sequences=None)
    return result
def split(x, splits_size, n_splits, axis=0):
    """Convenience wrapper around the `Split` op: partition `x` along
    `axis` into `n_splits` pieces of sizes `splits_size`."""
    return Split(n_splits)(x, axis, splits_size)
class Split(Op):
    """Partition a `TensorVariable` along some axis.
    .. python::
        x = vector()
        splits = lvector()
        # you have to declare right away how many split_points there will be.
        ra, rb, rc = split(x, splits, n_splits = 3, axis = 0)
        f = function([x, splits], [ra, rb, rc])
        a, b, c = f([0,1,2,3,4,5], [3, 2, 1])
        #a == [0,1,2]
        #b == [3, 4]
        #c == [5]
    """
    len_splits = None
    """A Split instance will have this many outputs, and require that
    the splits argument to `perform` have exactly this many elements.
    """
    __props__ = ("len_splits",)

    def __init__(self, len_splits):
        self.len_splits = int(len_splits)

    def __str__(self):
        return self.__class__.__name__ + "{%s}" % self.len_splits

    def make_node(self, x, axis, splits):
        """Build an Apply with `self.len_splits` outputs of x's type."""
        x = as_tensor_variable(x)
        axis = as_tensor_variable(axis)
        splits = as_tensor_variable(splits)
        if splits.type not in int_vector_types:
            raise TypeError('splits must have type tensor.lvector',
                            splits.type)
        if axis.type not in int_types:
            raise TypeError('axis must have type lscalar', axis.type)
        # # The following lines are necessary if we allow splits of zero
        # if isinstance(axis, gof.Constant):
        #     x = unbroadcast(x, int(axis.data))
        # else:
        #     x = unbroadcast(x, *range(x.type.ndim))
        inputs = [x, axis, splits]
        outputs = [x.type() for i in xrange(self.len_splits)]
        return Apply(self, inputs, outputs)

    def perform(self, node, inputs, outputs):
        """Slice x into `len_splits` contiguous pieces along `axis`."""
        x, axis, splits = inputs
        # in python 2.4, x.shape[numpy.asarray(1)] don't work.
        if sys.version_info[0:2] == (2, 4) and axis.size == 1:
            axis = int(axis)
        try:
            len_along_axis = x.shape[axis]
        # Catch a specific exception class instead of a bare `except:` so
        # KeyboardInterrupt/SystemExit still propagate.
        except Exception:
            raise ValueError('Split.perform() with axis=(%s) is invalid'
                             ' for x.shape==(%s)'
                             % (axis, x.shape))
        if len(splits) != self.len_splits:
            raise ValueError('In Split.perform(), len(splits) != len_splits.',
                             (len(splits), self.len_splits))
        if numpy.sum(splits) != len_along_axis:
            raise ValueError('The splits sum to %s, expected %s' %
                             (numpy.sum(splits), len_along_axis))
        if python_any([nb < 0 for nb in splits]):
            raise ValueError('Split: you tried to make an ndarray with a '
                             'negative number of elements.')
        # Checking is done, let's roll the splitting algorithm!
        # Basically we step along the given axis of x, extracting
        # subtensors of size splits[i] as we go along.
        general_key = [slice(None, None, None) for s in x.shape]
        lower_idx = 0
        for i in xrange(self.len_splits):
            upper_idx = lower_idx + splits[i]
            general_key[axis] = slice(lower_idx, upper_idx, None)
            outputs[i][0] = x.__getitem__(tuple(general_key)).copy()
            lower_idx = upper_idx

    def infer_shape(self, node, in_shapes):
        # Each output has x's shape, with the `axis` entry replaced by
        # the corresponding split size.
        axis = node.inputs[1]
        splits = node.inputs[2]
        shp_x, shp_axis, shp_splits = in_shapes
        out_shapes = []
        for i in xrange(self.len_splits):
            temp = as_tensor_variable(shp_x)
            temp = theano.tensor.subtensor.set_subtensor(temp[axis], splits[i])
            temp = [temp[i] for i in xrange(len(shp_x))]
            out_shapes.append(temp)
        return out_shapes

    def grad(self, inputs, g_outputs):
        """Join the gradients along the axis that was used to split x."""
        x, axis, n = inputs
        outputs = self(*inputs, **dict(return_list=True))
        # If all the output gradients are disconnected, then so are the inputs
        if python_all([isinstance(g.type, DisconnectedType)
                       for g in g_outputs]):
            return [DisconnectedType()(),
                    grad_undefined(self, 1, axis),
                    grad_undefined(self, 2, n)]
        # Else, we have to make them zeros before joining them
        new_g_outputs = []
        for o, g in zip(outputs, g_outputs):
            if isinstance(g.type, DisconnectedType):
                new_g_outputs.append(o.zeros_like())
            else:
                new_g_outputs.append(g)
        return [join(axis, *new_g_outputs),
                grad_undefined(self, 1, axis),
                grad_undefined(self, 2, n)]

    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            # BUG FIX: the original iterated over the integer
            # `self.len_splits` itself, which raises TypeError at
            # runtime; iterate over the range instead.
            return [None for i in xrange(self.len_splits)]
        return self.make_node(eval_points[0], *inputs[1:]).outputs
def addbroadcast(x, *axes):
    """
    Make the input broadcastable in the specified axes.
    For example, addbroadcast(x, 0) will make the first dimension of
    x broadcastable. When performing the function, if the length of
    x along that dimension is not 1, a ValueError will be raised.
    We apply the opt here not to pollute the graph especially during
    the gpu optimization.
    Parameters:
    ------------
        x : tensor_like
            Input theano tensor.
        axis : an int or an iterable object such as list or tuple
               of int values
            The dimension along which the tensor x should be
            broadcastable. if the length of x along these
            dimensions is not 1, a ValueError will be raised.
    returns:
    ----------
        a theano tensor, which is broadcastable along the specified
        dimensions.
    """
    pattern = [(axis, True) for axis in axes]
    rebroadcasted = Rebroadcast(*pattern)(x)
    return theano.tensor.opt.apply_rebroadcast_opt(rebroadcasted)
def unbroadcast(x, *axes):
    """
    Make the input impossible to broadcast in the specified axes.
    For example, unbroadcast(x, 0) will make the first dimension
    of x unbroadcastable.
    We apply the opt here not to pollute the graph especially during
    the gpu optimization.
    Parameters:
    ------------
        x : tensor_like
            Input theano tensor.
        axis : an int or an iterable object such as list or tuple
               of int values
            The dimension along which the tensor x should be
            unbroadcastable. if the length of x along these
            dimensions is not 1, a ValueError will be raised.
    returns:
    ----------
        a theano tensor, which is unbroadcastable along the specified
        dimensions.
    """
    pattern = [(axis, False) for axis in axes]
    rebroadcasted = Rebroadcast(*pattern)(x)
    return theano.tensor.opt.apply_rebroadcast_opt(rebroadcasted)
def patternbroadcast(x, broadcastable):
    """
    Make the input adopt a specific broadcasting pattern.
    broadcastable must be iterable. For example,
    patternbroadcast(x, (True, False)) will make the first
    dimension of x broadcastable and the second dimension
    not broadcastable, so x will now be a row.
    We apply the opt here not to pollute the graph especially during the gpu
    optimization.
    Parameters:
    ------------
        x : tensor_like
            Input theano tensor.
        broadcastable : an iterable object such as list or tuple
                        of bool values
            a set of boolean values indicating whether a dimension
            should be broadcastable or not.
            if the length of x along these dimensions is not 1,
            a ValueError will be raised.
    returns:
    ----------
        a theano tensor, which is unbroadcastable along the specified
        dimensions.
    """
    pattern = [(dim, flag) for dim, flag in enumerate(broadcastable)]
    rebroadcasted = Rebroadcast(*pattern)(x)
    return theano.tensor.opt.apply_rebroadcast_opt(rebroadcasted)
class Join(Op):
    """
    Concatenate multiple `TensorVariable`s along some axis.
    The axis must be given as first argument. All tensors must have the same
    shape along all dimensions other than this axis.
    Of course, TensorVariable instances do not have a shape, so this error
    cannot be caught until runtime. See `perform()`.
    For joins involving scalar values, see @stack.
    .. python::
        x, y, z = tensor.matrix(), tensor.matrix(), tensor.matrix()
        u = tensor.vector()
        r = join(0, x, y, z)
        c = join(1, x, y, z)
        join(2, x, y, z)    # WRONG: the axis has to be an index into the shape
        join(0, x, u)       # WRONG: joined tensors must have the same rank
    """
    check_input = False
    # No parameters: all Join instances compare/hash equal.
    __props__ = ()
    def make_node(self, *axis_and_tensors):
        """
        :param axis: an Int or integer-valued Variable
        :param tensors: a variable number (but not zero) of tensors to
          concatenate along the specified axis.  These tensors must have
          the same shape along all dimensions other than this axis.
        :returns: a symbolic Variable.  It has the same ndim as the
            input tensors, and the most inclusive dtype.
        """
        axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
        if not tensors:
            raise ValueError('Cannot join an empty list of tensors')
        as_tensor_variable_args = [as_tensor_variable(x) for x in tensors]
        dtypes = [x.type.dtype for x in as_tensor_variable_args]
        # The output dtype is the upcast of all the input dtypes.
        out_dtype = scal.upcast(*dtypes)
        def output_maker(bcastable):
            return tensor(dtype=out_dtype, broadcastable=bcastable)
        return self._make_node_internal(
            axis, tensors, as_tensor_variable_args, output_maker)
    def _make_node_internal(self, axis, tensors,
                            as_tensor_variable_args, output_maker):
        # Infer the broadcastable pattern of the output, then build the
        # Apply node via `output_maker`.
        if not python_all(targs.type.ndim for targs
                          in as_tensor_variable_args):
            raise TypeError('Join cannot handle arguments of dimension 0.'
                            ' For joining scalar values, see @stack')
        # Handle single-tensor joins immediately.
        if len(as_tensor_variable_args) == 1:
            bcastable = list(as_tensor_variable_args[0].type.broadcastable)
        else:
            # When the axis is fixed, a dimension should be
            # broadcastable if at least one of the inputs is
            # broadcastable on that dimension (see justification below),
            # except for the axis dimension.
            # Initialize bcastable all false, and then fill in some trues with
            # the loops.
            bcastable = [False] * len(
                as_tensor_variable_args[0].type.broadcastable)
            ndim = len(bcastable)
            # Axis can also be a constant
            if not isinstance(axis, int):
                try:
                    # Note : `get_scalar_constant_value` returns a ndarray not
                    # an int
                    axis = int(get_scalar_constant_value(axis))
                except NotScalarConstantError:
                    pass
            if isinstance(axis, int):
                # Basically, broadcastable -> length 1, but the
                # converse does not hold. So we permit e.g. T/F/T
                # joins, and if they fail at runtime they fail, but if
                # they don't then it means that the argument where
                # that broadcastable flag was False had length 1 along
                # this dimension, and therefore this dimension should
                # be broadcastable for the output.
                if axis < -ndim:
                    raise IndexError("Join axis %d out of bounds [0, %d)" %
                                     (axis, ndim))
                if axis < 0:
                    axis += ndim
                for x in as_tensor_variable_args:
                    for current_axis, bflag in enumerate(x.type.broadcastable):
                        # Constant negative axis can no longer be negative at
                        # this point. It safe to compare this way.
                        if current_axis == axis:
                            continue
                        if bflag:
                            bcastable[current_axis] = True
                try:
                    # The join dimension itself is never broadcastable.
                    bcastable[axis] = False
                except IndexError:
                    raise ValueError('Join argument "axis" is out of range'
                                     ' (given input dimensions)')
            else:
                # When the axis may vary, no dimension can be guaranteed to be
                # broadcastable.
                bcastable = [False] * len(
                    as_tensor_variable_args[0].type.broadcastable)
        if not python_all([x.ndim == len(bcastable)
                           for x in as_tensor_variable_args[1:]]):
            raise TypeError("Join() can only join tensors with the same "
                            "number of dimensions.")
        inputs = [as_tensor_variable(axis)] + list(as_tensor_variable_args)
        if inputs[0].type not in int_types:
            raise TypeError('Axis could not be cast to an integer type',
                            axis, inputs[0].type, int_types)
        outputs = [output_maker(bcastable)]
        node = Apply(self, inputs, outputs)
        return node
    def perform(self, node, axis_and_tensors, out_):
        """Concatenate with numpy and cast to the node's output dtype."""
        out, = out_
        axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
        ndim = tensors[0].ndim
        if axis < -ndim:
            raise IndexError("Join axis %d out of bounds [0, %d)" %
                             (axis, ndim))
        out[0] = theano._asarray(numpy.concatenate(tensors, axis=axis),
                                 dtype=node.outputs[0].type.dtype)
    def c_code_cache_version(self):
        return (3,)
    def c_code(self, node, name, inputs, outputs, sub):
        # Build a Python list of the input arrays at runtime and hand it
        # to PyArray_Concatenate.
        axis, tensors = inputs[0], inputs[1:]
        input_1 = tensors[0]
        l = len(tensors)
        out, = outputs
        fail = sub['fail']
        adtype = node.inputs[0].type.dtype_specs()[1]
        code = """
        PyObject* list = PyList_New(%(l)s);
        """ % locals()
        for i, inp in enumerate(tensors):
            code += """
            Py_INCREF(%(inp)s);
            PyList_SetItem(list, %(i)s, (PyObject*)%(inp)s);
            """ % locals()
        code += """
        //PyObject* PyArray_Concatenate(PyObject* obj, int axis)
        int axis = ((%(adtype)s *)PyArray_DATA(%(axis)s))[0];
        int ndim = PyArray_NDIM(%(input_1)s);
        if( axis < -ndim ){
            PyErr_Format(PyExc_IndexError,
                         "Join axis %%d out of bounds [0, %%d)", axis, ndim);
            %(fail)s
        }
        Py_XDECREF(%(out)s);
        %(out)s = (PyArrayObject *)PyArray_Concatenate(list, axis);
        Py_DECREF(list);
        if(!%(out)s){
            %(fail)s
        }
        """ % locals()
        return code
    def R_op(self, inputs, eval_points):
        if None in eval_points[1:]:
            return [None]
        return self.make_node(inputs[0], *eval_points[1:]).outputs
    def grad(self, axis_and_tensors, grads):
        """ The gradient wrt a join op is a `Split`, used to partition
        the gradient along the `axis` which was used for joining.
        """
        gz, = grads
        axis, tensors = axis_and_tensors[0], axis_and_tensors[1:]
        # The gradient wrt the axis (an index) is undefined.
        rval = [grad_undefined(self, 0, axis)]
        dtypes = [as_tensor_variable(x).type.dtype for x in tensors]
        out_dtype = scal.upcast(*dtypes)
        if 'float' in out_dtype or 'complex' in out_dtype:
            # assume that this is differentiable
            split = Split(len(tensors))
            split_gz = split(gz, axis, stack(*[shape(x)[axis]
                                               for x in tensors]))
            # If there is only one split, it might not be in a list.
            if not isinstance(split_gz, list):
                split_gz = [split_gz]
            # Split.make_node isn't always able to infer the right
            # broadcast. As the grad need to keep the information,
            # read it if needed.
            split_gz = [patternbroadcast(g, t.broadcastable)
                        for t, g in zip(tensors, split_gz)]
            rval = rval + split_gz
        else:
            # the output has integer type, so the gradient through it
            # is 0
            rval = rval + [tensor.zeros_like(dtype=config.floatX)
                           for tensor in tensors]
        return rval
    def infer_shape(self, node, ishapes):
        # ishapes[0] contains the size of the axis on which we join
        # Join op should get at least one input to join
        assert len(ishapes) > 1
        n_dim = len(ishapes[1])
        for shp in ishapes[1:]:
            assert shp is not None
            assert len(shp) == n_dim
        out_shapes = []
        for dim in xrange(n_dim):
            # we have to deal with 2 possible cases in here :
            #   a) we are dealing with the dimension for which we join
            #     (called t_side from true side of the if, where the if
            #     compares current dimension with the joining dimension)
            #   b) a non joining dimension ( in which maybe a symbolic
            #      assertion can be used to make sure all tensors have
            #      the same number of elements on this non-joined dimension
            #      this is f_side
            # initialize
            t_side = ishapes[1][dim]
            f_side = ishapes[1][dim]
            # loop over tensors and sum for the joining dimension
            for shp in ishapes[2:]:
                t_side = t_side + shp[dim]
            # return the dimensions found
            out_shapes.append(switch(eq(dim, node.inputs[0]),
                              t_side, f_side))
        return [tuple(out_shapes)]
"""
Convenience function to concatenate `TensorType`s along the given axis.
:Parameters:
 - `tensors` : list of tensors (or list-like)
   A list of tensors to be concatenated along the given axis.
 - `axis` : int (symbolic or literal)
   On which dimension should the tensors be joined? The `axis`
   must be a valid index into the shape of the tensors to be
   concatenated.
   The `axis` parameter may either be an integer or an object that
   can be converted to a scalar using `as_scalar`(`axis`). In the
   former case, the axis is fixed at construction, while in the
   latter it may vary over time depending on the value of the
   `axis` variable.
   The shapes of the tensors to be concatenated must be all
   identical, except in the dimension (`axis`) on which they are to
   be joined.
"""
# `join` is a module-level singleton instance of the Join op; the bare
# string above describes its call signature.
join = Join()
# Pretty-print applications of Join as the function call ``join(...)``.
pprint.assign(lambda pstate, r: r.owner and isinstance(r.owner.op, Join),
              printing.FunctionPrinter('join'))
def roll(x, shift, axis=None):
    """
    Convenience function to roll `TensorType`s along the given axis.
    Syntax copies numpy.roll function
    Parameters
    ----------
    x : tensor_like
        Input tensor.
    shift : int (symbolic or literal)
        The number of places by which elements are shifted.
    axis : int (symbolic or literal) (optional)
        The axis along which elements are shifted. By default, the array
        is flattened before shifting, after which the original
        shape is restored.
    Returns
    -------
    res : tensor
        Output tensor, with the same shape as `x`.

    :note: NOTE(review): unlike numpy.roll, the slices below clamp rather
        than wrap, so the result looks correct only when ``abs(shift)``
        does not exceed the length of the rolled axis -- confirm; a
        ``shift % x.shape[axis]`` reduction would generalize this.
    """
    if axis is None:
        if x.ndim > 1:
            # Flatten, roll the 1-d vector, then restore the shape.
            y = x.flatten()
            return roll(y, shift, axis=0).reshape(x.shape)
        else:
            axis = 0
    # A slice of all elements in a dimension ':'
    allslice = slice(None)
    # List of slices describing the front half [:, :, shift:, :]
    front_slice = slice(-shift, None)
    front_list = ([allslice] * axis + [front_slice] +
                  [allslice] * (x.ndim - axis - 1))
    # List of slices describing the back half [:, :, :shift, :]
    end_slice = slice(0, -shift)
    end_list = ([allslice] * axis + [end_slice] +
                [allslice] * (x.ndim - axis - 1))
    # The rolled tensor is the last `shift` elements followed by the rest.
    return join(axis,
                x.__getitem__(tuple(front_list)),
                x.__getitem__(tuple(end_list)))
@constructor
def shape_padleft(t, n_ones=1):
    """Reshape `t` by left-padding its shape with `n_ones` broadcastable 1s.
    See also: `shape_padright` and `Dimshuffle`.
    """
    _t = as_tensor_variable(t)
    # 'x' entries in a dimshuffle pattern insert broadcastable dimensions.
    pattern = ['x'] * n_ones + list(range(_t.type.ndim))
    return DimShuffle(_t.broadcastable, pattern)(_t)
@constructor
def shape_padright(t, n_ones=1):
    """Reshape `t` by right-padding its shape with `n_ones` broadcastable 1s.
    See also: `shape_padleft` and `Dimshuffle`.
    """
    _t = as_tensor_variable(t)
    # 'x' entries in a dimshuffle pattern insert broadcastable dimensions.
    pattern = list(range(_t.type.ndim)) + ['x'] * n_ones
    return DimShuffle(_t.broadcastable, pattern)(_t)
@constructor
def stack(*tensors):
    """Insert the arguments as slices into a tensor of 1 rank greater.
    The size in dimension 0 of the result will be equal to the number
    of tensors passed.
    """
    if not tensors:
        raise Exception('theano.tensor.stack(*tensors) must have at least'
                        ' one parameter')
    # If all tensors are scalars of the same type, call make_vector.
    # It makes the graph simpler, by not adding DimShuffles and Rebroadcasts
    # This should be an optimization!
    # Doing it here make the graph less canonicalized
    # (more type need to be understood by all optimization)
    # And DebugMode can't detect error in this code as it is not in an
    # optimization.
    # See ticket #660
    def _is_scalar_like(t):
        # Plain Python / numpy scalars count, as do 0-d theano tensors.
        if isinstance(t, (numpy.number, float, integer_types,
                          python_complex)):
            return True
        return (isinstance(t, Variable) and
                isinstance(t.type, TensorType) and
                t.ndim == 0)
    if python_all(_is_scalar_like(t) for t in tensors):
        # in case there is direct int
        scalar_args = list(map(as_tensor_variable, tensors))
        dtype = scal.upcast(*[s.dtype for s in scalar_args])
        return theano.tensor.opt.MakeVector(dtype)(*scalar_args)
    return join(0, *[shape_padleft(t, 1) for t in tensors])
@constructor
def concatenate(tensor_list, axis=0):
    """Alias for `join`(axis, *tensor_list).
    This function is similar to `join`, but uses the signature of
    numpy's concatenate function.
    :Exceptions:
     - `TypeError` : the tensor_list must be a tuple or list
    """
    # Guard against the common mistake of writing concatenate(x, y)
    # instead of concatenate((x, y)).
    if isinstance(tensor_list, (tuple, list)):
        return join(axis, *tensor_list)
    raise TypeError(
        "The 'tensors' argument must be either a tuple "
        "or a list, make sure you did not forget () or [] around "
        "arguments of concatenate.", tensor_list)
def get_vector_length(v):
    """Return the run-time length of a symbolic vector.

    :Parameters:
     - `v` : A rank-1 TensorType variable.
    :Exceptions:
     - `TypeError` : `v` hasn't the proper type.
     - `ValueError` : No special case applies, the length is not known.
    In general this is not possible, but for a number of special cases
    the length can be determined at compile / graph-construction time.
    This function implements these special cases.
    """
    v = as_tensor_variable(v)
    if v.ndim != 1:
        raise TypeError('argument must be symbolic vector')
    if v.type.broadcastable[0]:
        # A broadcastable dimension has length 1 by definition.
        return 1
    if isinstance(v, gof.Constant) and v.type.ndim == 1:
        return len(v.data)
    if v.owner and isinstance(v.owner.op, theano.tensor.opt.MakeVector):
        # MakeVector takes one input per element of the result.
        return len(v.owner.inputs)
    if v.owner and isinstance(v.owner.op, Shape):
        # The shape vector of an n-dimensional tensor has length n.
        return v.owner.inputs[0].type.ndim
    # If we take a slice, we know how many elements it will result in
    if ((v.owner and
         isinstance(v.owner.op, theano.tensor.subtensor.Subtensor) and
         isinstance(v.owner.op.idx_list[0], slice))):
        idx = theano.tensor.subtensor.get_idx_list(
            v.owner.inputs, v.owner.op.idx_list)[0]
        start = extract_constant(idx.start)
        stop = extract_constant(idx.stop)
        # A missing start means 0.  A missing stop means "to the end of
        # the vector", whose position is exactly the unknown length, so
        # we must fall through to the ValueError below.  (The previous
        # code substituted 0 for a missing stop, yielding nonsensical
        # non-positive lengths for slices such as v[k:].)
        if start is None:
            start = 0
        if ((stop is not None and
             isinstance(stop, numbers.Integral) and
             isinstance(start, numbers.Integral) and
             # With mixed-sign indices the length depends on the unknown
             # vector length; only same-sign pairs are decidable here.
             (start >= 0) == (stop >= 0))):
            # Clamp at 0, like numpy does for empty slices such as v[5:2].
            # NOTE(review): this is still an upper bound if the underlying
            # vector is shorter than `stop` -- same caveat as before.
            return max(0, stop - start)
    raise ValueError("length not known")
@constructor
def horizontal_stack(*args):
    """
    Horizontally stack two L{TensorType}s.
    Stack two L{TensorType}s along the second axis (column wise). These
    L{TensorType}s must have the same shape along all dimensions but the
    second.
    """
    # Note: 'horizontal_stack' and 'vertical_stack' do not behave exactly like
    # Numpy's hstack and vstack functions. This is intended, because Numpy's
    # functions have potentially confusing/incoherent behavior (try them on 1D
    # arrays). If this is fixed in a future version of Numpy, it may be worth
    # trying to get closer to Numpy's way of doing things. In the meantime,
    # better keep different names to emphasize the implementation divergences.
    assert len(args) >= 2
    for matrix in args:
        assert matrix.type.ndim == 2
    return concatenate(args, axis=1)
@constructor
def vertical_stack(*args):
    """Vertically stack two or more matrices (row wise, along axis 0).
    All arguments must be 2-dimensional and agree on every dimension
    except the first.  See the note in `horizontal_stack` about the
    deliberate divergence from numpy's vstack.
    """
    assert len(args) >= 2
    for matrix in args:
        assert matrix.type.ndim == 2
    return concatenate(args, axis=0)
class Reshape(Op):
"""Perform a reshape operation of the input x to the new shape shp.
The number of dimensions to which to reshape to (ndim) must be
known at graph build time."""
view_map = {0: [0]} # output 0 is potentially aliased to inputs [0]
_f16_ok = True
check_input = False
__props__ = ("ndim",)
# name does not participate because it doesn't affect computations
def __init__(self, ndim, name=None):
self.ndim = ndim
self.name = name
def __str__(self):
return '%s{%s}' % (self.__class__.__name__, self.ndim)
def make_node(self, x, shp):
x = as_tensor_variable(x)
shp_orig = shp
shp = as_tensor_variable(shp, ndim=1)
if not shp.dtype.startswith('int'):
raise TypeError("Shape must be integers", shp, shp.dtype)
assert shp.ndim == 1
if isinstance(shp, TensorConstant):
bcast = [s == 1 for s in shp.data]
return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcast)])
else:
bcasts = [False] * self.ndim
shp_list = shp_orig
if hasattr(shp_orig, "ndim") and shp_orig.ndim == 0:
shp_list = [shp_orig]
for index in xrange(self.ndim):
y = shp_list[index]
y = as_tensor_variable(y)
# Try to see if we can infer that y has a constant value of 1.
# If so, that dimension should be broadcastable.
try:
bcasts[index] = (
hasattr(y, 'get_scalar_constant_value') and
y.get_scalar_constant_value() == 1)
except NotScalarConstantError:
pass
return gof.Apply(self, [x, shp], [tensor(x.type.dtype, bcasts)])
def perform(self, node, inp, out_):
x, shp = inp
out, = out_
if (len(shp) != self.ndim):
raise ValueError('shape argument to Reshape.perform has incorrect'
' length %i'
', should be %i' % (len(shp), self.ndim), shp)
try:
out[0] = numpy.reshape(x, shp)
except Exception:
raise ValueError('Cannot reshape input of shape %s to shape %s' %
(x.shape, shp))
if not out[0].flags.aligned:
raise RuntimeError("numpy.reshape returned a not aligned tensor."
" NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
" this problem for some input shape/new shape"
" combinations. Use another NumPy version."
" Input shape: %s, input stride: %s,"
" new_shape: %s, new_strides: %s." % (
x.shape, x.strides, shp, out[0].strides))
def connection_pattern(self, node):
return [[True], [False]]
def grad(self, inp, grads):
x, shp = inp
g_out, = grads
return [reshape(g_out, shape(x), ndim=x.ndim),
DisconnectedType()()]
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return [None]
return self(eval_points[0], *inputs[1:], **dict(return_list=True))
    def infer_shape(self, node, ishapes):
        """Symbolic output shape, resolving '-1' entries when possible.

        When the requested shape (node.inputs[1]) is a constant, a single
        '-1' entry is replaced by the inferred missing dimension; more
        than one '-1' raises. Otherwise, each entry falls back to
        Shape_i of the output unless it is a known constant != -1.
        """
        # inputs[1] can contain at most one value of '-1', meaning the actual
        # shape of the output will be automatically computed by reshape, so
        # that the total number of elements stays the same.
        # TODO: Maybe put that formula here?
        # It's not trivial, because we would have to check if the product of
        # all the non-minus-one shapes is a divisor of the product of the
        # original shapes.
        # The following expression leads to cycles in feature_shape,
        # because it tries to replace the Shape_i node by the switch
        # statement, which depends on Shape_i.
        # return [tuple([switch(eq(node.inputs[1][i], -1),
        #                      theano.tensor.opt.Shape_i(i)(node.outputs[0]),
        #                      node.inputs[1][i])
        #                    for i in xrange(self.ndim)]
        #    )]
        # Here, we only simplify if the shape (node.inputs[1]) is a constant,
        # ideally it would suffice to check that it is always non-negative.
        requ = node.inputs[1]
        if isinstance(requ, theano.tensor.TensorConstant):
            requ = list(requ.data)
            requ_part = [ele for ele in requ if ele != -1]
            crit = len(requ) - len(requ_part)
            if crit == 1 and len(requ_part) > 0:
                # One '-1' among other entries: infer the missing dimension
                # from the total number of elements.
                missing = mul(*ishapes[0]) // mul(*requ_part)
                for i, ele in enumerate(requ):
                    if ele == -1:
                        requ[i] = missing
            elif crit == 1:  # we reshape to -1
                requ = [mul(*ishapes[0])]
            elif crit > 1:
                raise ValueError('shape argument to Reshape.perform'
                                 ' must have at most one entry equal to -1')
            return [requ]
        else:
            oshape = []
            for i in xrange(self.ndim):
                default_os_i = theano.tensor.opt.Shape_i(i)(node.outputs[0])
                try:
                    os_i = get_scalar_constant_value(node.inputs[1][i]).item()
                    if os_i == -1:
                        os_i = default_os_i
                except NotScalarConstantError:
                    os_i = default_os_i
                oshape.append(os_i)
            return [tuple(oshape)]
def c_code_cache_version(self):
return (6,)
    def c_code(self, node, name, inputs, outputs, sub):
        # Only generate C code for plain tensor inputs; for anything else,
        # defer to the base Op implementation.
        if isinstance(node.inputs[0], TensorVariable):
            x, shp = inputs
            z, = outputs
            new_ndim = self.ndim
            # C-level element type of the shape vector (e.g. npy_int64).
            sdtype = node.inputs[1].type.dtype_specs()[1]
            fail = sub['fail']
            return """
            assert (PyArray_NDIM(%(shp)s) == 1);
            npy_intp new_dims[%(new_ndim)s];
            PyArray_Dims newshape;
            newshape.ptr = new_dims;
            newshape.len = %(new_ndim)s;
            for (int ii = 0; ii < %(new_ndim)s; ++ii)
            {
                // -- We do not want an explicit cast here. the shp can be any
                // -- int* dtype. The compiler will explicitly upcast it, but
                // -- will err if this will downcast. This could happen if the
                // -- user pass an int64 dtype, but npy_intp endup being int32.
                new_dims[ii] = ((%(sdtype)s*)(
                        PyArray_BYTES(%(shp)s) +
                        ii * PyArray_STRIDES(%(shp)s)[0]))[0];
            }
            Py_XDECREF(%(z)s);
            %(z)s = (PyArrayObject *) PyArray_Newshape(%(x)s, &newshape,
                NPY_CORDER);
            if (!%(z)s)
            {
                //The error message should have been set by PyArray_Newshape
                %(fail)s;
            }
            if (!PyArray_ISALIGNED(%(z)s)) {
                PyErr_Format(
                    PyExc_RuntimeError,
                    "PyArray_Newshape returned an object that isn't aligned!"
                    " NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
                    " this problem for some input shape/new shape"
                    " combinations. Use another NumPy version.");
                %(fail)s;
            }
            """ % locals()
        else:
            return Op.c_code(self, node, name, inputs, outputs, sub)
def reshape(x, newshape, ndim=None, name=None):
    """Reshape `x` to `newshape`.

    :param x: tensor to reshape
    :param newshape: target shape (symbolic vector or sequence)
    :param ndim: number of output dimensions; inferred from `newshape`
        when None
    :param name: optional name passed to the underlying Reshape op
    :raises ValueError: when `ndim` is None and the length of `newshape`
        cannot be determined
    """
    if ndim is None:
        try:
            ndim = get_vector_length(newshape)
        except ValueError:
            raise ValueError(
                "The length of the provided shape (%s) cannot "
                "be automatically determined, so Theano is not able "
                "to know what the number of dimensions of the reshaped "
                "variable will be. You can provide the 'ndim' keyword "
                "argument to 'reshape' to avoid this problem." % newshape)
    return Reshape(ndim, name)(x, newshape)
class Flatten(Op):
    """
    Flattens a tensor to `outdim` dimensions by preserving the leading
    outdim - 1 shape components.
    """
    # Output may be a view of the input (reshape without copy).
    view_map = {0: [0]}
    check_input = False
    __props__ = ("outdim",)

    def __init__(self, outdim=1):
        self.outdim = int(outdim)

    def __str__(self):
        return '%s{%s}' % (self.__class__.__name__, self.outdim)

    def make_node(self, x):
        t_x = as_tensor_variable(x)
        # BUGFIX: consistently use the converted variable t_x below.
        # The previous code read `x.ndim` / `x.broadcastable` off the raw
        # argument, which fails with AttributeError when `x` is not
        # already a tensor variable (e.g. a list), even though
        # as_tensor_variable just converted it.
        if self.outdim < 1 or (t_x.ndim and self.outdim > t_x.ndim):
            raise ValueError('invalid output ndimensions (%i) for tensor of '
                             'rank %i' % (self.outdim, t_x.ndim))
        # Infer the broadcastable pattern of the output. For every dimension
        # unaffected by the flatten, the broadcast flag should be unchanged.
        # For the dimension resulting from the collapse of other dimensions,
        # it should be broadcastable iff all the collapsed dimensions were
        # broadcastable.
        bcast_kept_dims = t_x.broadcastable[:self.outdim - 1]
        bcast_new_dim = python_all(t_x.broadcastable[self.outdim - 1:])
        broadcastable = bcast_kept_dims + (bcast_new_dim,)
        return gof.Apply(self, [t_x], [tensor(t_x.type.dtype,
                                              broadcastable)])

    def perform(self, node, inp, out_):
        """Flatten `x` to `self.outdim` dimensions with numpy reshapes."""
        x, = inp
        out, = out_
        outdim = self.outdim
        if outdim == 1:
            try:
                out[0] = x.reshape(x.size)
            except AttributeError:
                # Objects without `.size` (e.g. sparse-like wrappers);
                # fall back to computing the length explicitly.
                out[0] = x.reshape((numpy.prod(x.shape),))
        elif outdim == len(x.shape):
            # Already the requested rank: pass through unchanged.
            out[0] = x
        else:
            # Keep the leading outdim-1 dims, collapse the rest into one.
            newshape = (x.shape[:outdim - 1] +
                        (numpy.prod(x.shape[outdim - 1:]),))
            out[0] = x.reshape(newshape)

    def infer_shape(self, node, in_shapes):
        in_shp, = in_shapes
        part1 = in_shp[:self.outdim - 1]
        part2 = in_shp[self.outdim - 1:]
        if len(part2) > 1:
            part2 = (prod(part2, dtype='int64'),)
        elif len(part2) == 1:
            # We do not want to force an upcast of part2 if its length is 1
            pass
        else:
            if len(in_shp) == 0 and self.outdim == 1:
                part2 = (1,)
            else:
                raise ValueError('invalid output ndimensions (%i) for tensor '
                                 'of rank %i' % (self.outdim, len(in_shp)))
        out_shape = (part1 + part2)
        return [out_shape]

    def grad(self, inp, grads):
        # Flatten is a reshape, so the gradient is the output gradient
        # reshaped back to the input's shape.
        x, = inp
        g_out, = grads
        return [reshape(g_out, shape(x), x.ndim)]

    def R_op(self, inputs, eval_points):
        if None in eval_points:
            return [None]
        return self.make_node(*eval_points).outputs

    def c_code_cache_version(self):
        return (1, 1)

    def c_code(self, node, name, inputs, outputs, sub):
        x, = inputs
        out, = outputs
        outdim = self.outdim
        fail = sub['fail']
        return """
        if (%(outdim)s == PyArray_NDIM(%(x)s))
        {
            Py_XDECREF(%(out)s);
            Py_XINCREF(%(x)s);
            %(out)s = %(x)s;
        }
        else
        {
            Py_XDECREF(%(out)s);

            if (%(outdim)s == 1)
            {
                npy_intp size = PyArray_SIZE(%(x)s);
                PyArray_Dims newshape;
                newshape.ptr = &size;
                newshape.len = 1;
                %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
                                                           &newshape,
                                                           NPY_CORDER);
            }
            else
            {
                npy_intp *oldshape = PyArray_DIMS(%(x)s);
                npy_intp newshape_dims[%(outdim)s];

                int i;
                for (i = 0; i < %(outdim)s - 1; ++i)
                    newshape_dims[i] = oldshape[i];

                newshape_dims[i] = 1;

                for (int j = %(outdim)s - 1; j < PyArray_NDIM(%(x)s); ++j)
                    newshape_dims[i] *= oldshape[j];

                PyArray_Dims newshape;
                newshape.ptr = newshape_dims;
                newshape.len = %(outdim)s;
                %(out)s = (PyArrayObject*)PyArray_Newshape(%(x)s,
                                                           &newshape,
                                                           NPY_CORDER);
            }
        }
        if (!%(out)s)
        {
            //The error message should have been set by
            // PyArray_Newshape
            %(fail)s;
        }
        if (!PyArray_ISALIGNED(%(out)s)) {
            PyErr_Format(
                PyExc_RuntimeError,
                "PyArray_Newshape returned an object that isn't"
                " aligned! NumPy versions 1.6.2, 1.7.0 and 1.7.1 have"
                " this problem for some input shape/new shape"
                " combinations. Use another NumPy version.");
            %(fail)s;
        }
        """ % locals()
def flatten(x, outdim=1):
    """Flatten `x` to `outdim` dimensions (see the Flatten Op)."""
    op = Flatten(outdim)
    return op(x)
# class TileGrad(Op):
# """
# Calculates the gradient of the Tile Op.
# """
# # this is so weird, I can't think of how to make this a general thing.
# def make_node(self, x, reps, g_out):
# return gof.Apply(self, [x, reps, g_out], [x.type()])
#
# def perform(self, node, inp, out):
# x, reps, g_out = inp
# gx, = out
# xsh = x.shape
# if len(reps) == 2 and reps[1] == 1 and len(x.shape) == 1:
# gx[0] = numpy.sum(g_out, axis=0)
# else:
# raise NotImplementedError('x.shape, reps combination not '
# 'supported', (x.shape, reps))
#
# tilegrad = TileGrad()
class Tile(Op):
    """Construct an array by repeating the input x according to reps pattern.

    DEPRECATED: use tile() instead.

    Tiles its input according to reps. The length of reps is the number of
    dimension of x and contains the number of times to tile x in each
    dimension.

    :see: `numpy.tile
    <http://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html>`_
    """
    __props__ = ("ndim",)

    def __init__(self, ndim):
        self.ndim = ndim

    def __str__(self):
        return "%s{ndim=%d}" % (self.__class__.__name__, self.ndim)

    def make_node(self, x, reps):
        warnings.warn((
            "Tile op is deprecated, use tile function instead."), stacklevel=3)
        x = as_tensor_variable(x)
        reps = as_tensor_variable(reps)
        out_type = tensor(x.type.dtype, [False] * self.ndim)
        return gof.Apply(self, [x, reps], [out_type])

    def perform(self, node, inp, out_):
        x, reps = inp
        out, = out_
        tiled = numpy.tile(x, reps)
        if tiled.ndim != self.ndim:
            raise ValueError(
                'Tile.perform produced incorrect number of dimensions')

        if (numpy.asarray(reps) == 1).all():
            # Some NumPy versions return a *view* when every rep is 1.
            # This op is not declared inplace, so force a copy then.
            if numpy.may_share_memory(tiled, x):
                tiled = tiled.copy()
        out[0] = tiled

    def infer_shape(self, node, in_shapes):
        # Note: in contrast with numpy, it is assumed that x.shape and reps
        # have equal length; see also tile function below

        # Note: if reps were to be allowed not to be a constant and x.shape
        # and reps to be unequal, the following block of code could be used:
        # # prepend 1 to x.shape if needed
        # if self.ndim > x.ndim:
        #     shp = concatenate(ones(self.ndim - x.ndim), shp)
        # # prepend 1 to reps if needed
        # reps = concatenate(ones(self.ndim - reps.shape[0]), reps)
        x, reps = node.inputs
        tiled_shp = in_shapes[0] * reps
        return [[tiled_shp[i] for i in xrange(self.ndim)]]

    def grad(self, inp, grads):
        # The gradient was never implemented (see the commented-out
        # TileGrad above); keep raising so callers fail loudly.
        raise NotImplementedError()
def tile(x, reps, ndim=None):
    """Tile input array `x` according to `reps`.

    See the docstring of `numpy.tile` for details. Currently, x.ndim and
    len(reps) must be equal, and, if specified, 'ndim' must be equal to
    both.

    TODO: expand this.
    """
    try:
        iter(reps)
    except TypeError:
        raise ValueError("reps must be iterable")
    int_dtypes = ["int8", "int16", "int32", "int64"]
    valid = [isinstance(r, integer_types) or
             (isinstance(r, TensorVariable) and r.dtype in int_dtypes)
             for r in reps]
    if not numpy.all(valid):
        raise ValueError("elements of reps must be scalars of integer dtype")
    elif len(reps) != x.ndim:
        raise ValueError("len(reps) != x.ndim not currently supported")
    elif (ndim is not None) and ndim != x.ndim:
        raise ValueError("if specified, ndim must be equal to both x.ndim and "
                         "len(reps)")

    if ndim is None:
        ndim = len(reps)

    reps = list(reps)
    shape = [x.shape[i] for i in xrange(ndim)]
    # Allocate (reps + shape), interleave each rep axis with its original
    # axis via dimshuffle, then collapse each (rep, dim) pair.
    y = alloc(x, *(reps + shape))
    interleave = numpy.arange(ndim * 2).reshape(2, ndim)
    y = y.dimshuffle(*interleave.transpose().flatten())
    return y.reshape([sh * reps[i] for i, sh in enumerate(shape)])
class ARange(Op):
    """Create an array containing evenly spaced values within a given interval.

    Parameters and behaviour are the same as numpy.arange().
    """
    __props__ = ("dtype",)

    def __init__(self, dtype):
        self.dtype = dtype

    def make_node(self, start, stop, step):
        start = as_tensor_variable(start)
        stop = as_tensor_variable(stop)
        step = as_tensor_variable(step)
        # All three bounds must be scalars.
        assert start.ndim == 0
        assert stop.ndim == 0
        assert step.ndim == 0
        return Apply(self, [start, stop, step],
                     [tensor(self.dtype, (False,))])

    def infer_shape(self, node, i_shapes):
        # Note start, stop and step can be float numbers.
        start, stop, step = node.inputs

        def is_constant_value(var, value):
            try:
                return numpy.all(get_scalar_constant_value(var) == value)
            except NotScalarConstantError:
                return False

        def upcast(var):
            if ('int' in var.dtype and
                    scal.upcast(var.dtype, 'int64') == 'int64'):
                # Do not cast uint64 to int64 as this can lose
                # information. If we upcast uint64 with int64, this gives
                # float64. This is safer than checking for uint64 in case
                # we support [u]int128 or others in the future.
                return cast(var, 'int64')
            return var

        if not is_constant_value(step, 1):
            # General case: ceil((stop - start) / step), clipped at 0.
            stop = upcast(stop)
            start = upcast(start)
            return [(maximum(cast(ceil(cast((stop - start), 'float64') / step),
                                  'int64'), 0),)]
        if is_constant_value(start, 0):
            return [(cast(stop, 'int64'),)]
        stop = upcast(stop)
        start = upcast(start)
        return [(maximum(cast(stop - start, 'int64'), 0),)]

    def perform(self, node, inp, out_):
        start, stop, step = inp
        out, = out_
        out[0] = numpy.arange(start.item(), stop.item(), step.item(),
                              dtype=self.dtype)

    def connection_pattern(self, node):
        return [[True], [False], [True]]

    def grad(self, inputs, grads):
        start, stop, step = inputs
        gz, = grads
        # start and step affect the output values, but the outputs are
        # integers so there's no gradient through them. stop does not
        # affect the output values, just the output shape, so it is
        # disconnected.
        return [start.zeros_like(),
                DisconnectedType()(),
                step.zeros_like()]

    def R_op(self, inputs, eval_points):
        return [None]
# Cache of ARange instances, keyed by dtype (the op's only property), so
# each dtype's op is built at most once.
_arange = {}
def arange(start, stop=None, step=1, dtype=None):
    """Return a symbolic vector of evenly spaced values, like numpy.arange.

    With a single argument, that argument is `stop` and `start` defaults
    to 0. When `dtype` is None it is inferred from the arguments,
    following `config.cast_policy`.
    """
    # If only one argument is provided, it is in fact the "stop" argument,
    # and start is 0.
    if stop is None:
        start, stop = 0, start

    start, stop, step = map(as_tensor_variable, (start, stop, step))
    # If dtype is not provided, infer it from the other arguments
    if dtype is None:
        dtype = scal.upcast(start.type.dtype, stop.type.dtype, step.type.dtype)
        if config.cast_policy in ('numpy', 'numpy+floatX'):
            # We enforce numpy semantics, except in the special case where
            # `config.cast_policy` is 'numpy+floatX' and we want to use float32
            # rather than float64.
            # As an example, if `start`, `stop` and `step` are all int32,
            # `numpy.arange` returns an int64 array (on 64-bit platforms),
            # while the upcast above returns int32.
            numpy_dtype = numpy.arange(
                start=numpy.array(0, dtype=start.dtype),
                stop=numpy.array(1, dtype=stop.dtype),
                step=numpy.array(1, dtype=step.dtype)).dtype
            if numpy_dtype != dtype:
                if (config.cast_policy == 'numpy+floatX' and
                    config.floatX == 'float32' and
                    numpy_dtype == 'float64' and
                    # No explicit float64 in the three arguments?
                    python_all(
                        dt != 'float64'
                        for dt in [s.dtype for s in (start, stop, step)])):
                    # We use float32 instead.
                    assert dtype != 'float64'
                    dtype = 'float32'
                else:
                    # We use the same dtype as numpy instead of the result of
                    # the upcast.
                    dtype = str(numpy_dtype)
    if dtype not in _arange:
        _arange[dtype] = ARange(dtype)
    return _arange[dtype](start, stop, step)
class _nd_grid(object):
    """Create a dense n-dimensional 'meshgrid' with equally spaced points.

    Used to create the instance ``mgrid`` and ``ogrid`` which act similarly
    to their numpy equivalents.

    Parameters
    ==========
    sparse : boolean, optional, default=True
        Specifying False leads to the equivalent of numpy's mgrid
        functionality. Specifying True leads to the equivalent of ogrid.

    Examples
    ========
    >>> a = T.mgrid[0:5, 0:3]
    >>> a[0].eval()
    array([[0, 0, 0],
           [1, 1, 1],
           [2, 2, 2],
           [3, 3, 3],
           [4, 4, 4]], dtype=int8)
    >>> a[1].eval()
    array([[0, 1, 2],
           [0, 1, 2],
           [0, 1, 2],
           [0, 1, 2],
           [0, 1, 2]], dtype=int8)
    >>> b = T.ogrid[0:5, 0:3]
    >>> b[0].eval()
    array([[0],
           [1],
           [2],
           [3],
           [4]], dtype=int8)
    >>> b[1].eval()
    array([[0, 1, 2, 3]], dtype=int8)
    """

    def __init__(self, sparse=False):
        self.sparse = sparse

    def __getitem__(self, *args):
        slices = args[0]
        ndim = len(slices)
        for sl in slices:
            if isinstance(sl.step, python_complex):
                raise NotImplementedError("Not implemented for slices "
                                          "whose step is complex")
        # One arange per slice, each reshaped so that its values vary
        # along its own axis only.
        ranges = [arange(sl.start or 0, sl.stop, sl.step or 1)
                  for sl in slices]
        shapes = [tuple([1] * j + [r.shape[0]] + [1] * (ndim - 1 - j))
                  for j, r in enumerate(ranges)]
        ranges = [r.reshape(shape) for r, shape in zip(ranges, shapes)]
        if self.sparse:
            return ranges
        # Dense variant: broadcast every range against ones along the
        # other axes.
        ones = [ones_like(r) for r in ranges]
        grids = []
        for i in range(ndim):
            grid = 1
            for j in range(ndim):
                grid = grid * (ranges[j] if j == i else ones[j])
            grids.append(grid)
        return grids
mgrid = _nd_grid()
ogrid = _nd_grid(sparse=True)
class PermuteRowElements(Op):
    """Permute the elements of each row (inner-most dim) of a tensor.

    A permutation will be applied to every row (vector) of the input tensor x.
    Depending on the dimensionality of x and the permutation tensor y,
    different cases are possible.
    If y.ndim = 1, y is a single permutation, that will be applied to every
    vector of x. For instance, if x is a matrix, the same permutation will be
    applied to each row of x.
    If x.ndim = y.ndim, each row of x corresponds to a row of y, containing
    a permutation that will be applied to that row. For instance, if x and y
    are two matrices, a different permutation will be applied to each row of x.
    If x.ndim > y.ndim, y will be broadcasted to fit x, then each row (vector)
    of x will be reordered according to the corresponding row of y. (This is
    a generalization of the first case).
    If x.ndim = 1, every permutation in y will be applied to x, and the output
    will contain all the results.
    If x.ndim < y.ndim, x will be broadcasted to fit y, and different
    permutations contained in y will be applied to each vector in x. (This is
    a generalization of the previous case).
    If the "inverse" argument is True, the Op will perform the inverse
    permutation instead.
    """
    __props__ = ()

    def make_node(self, x, y, inverse):
        x = as_tensor_variable(x)
        y = as_tensor_variable(y)
        if inverse:  # as_tensor_variable does not accept booleans
            inverse = as_tensor_variable(1)
        else:
            inverse = as_tensor_variable(0)

        # y should contain integers
        assert (y.type.dtype.startswith('int') or
                y.type.dtype.startswith('uint'))
        # Inverse should be an integer scalar
        assert (inverse.type.ndim == 0 and
                (inverse.type.dtype.startswith('int') or
                 inverse.type.dtype.startswith('uint')))

        # Match shapes of x and y
        x_dim = x.type.ndim
        y_dim = y.type.ndim

        if x_dim > y_dim:
            y = shape_padleft(y, n_ones=(x_dim - y_dim))
        elif x_dim < y_dim:
            x = shape_padleft(x, n_ones=(y_dim - x_dim))

        # Compute the broadcastable pattern of the output
        out_broadcastable = [xb and yb for xb, yb in
                             izip(x.type.broadcastable, y.type.broadcastable)]
        out_type = tensor(dtype=x.type.dtype, broadcastable=out_broadcastable)

        inputlist = [x, y, inverse]
        outputlist = [out_type]
        return Apply(self, inputlist, outputlist)

    def _rec_perform(self, node, x, y, inverse, out, curdim):
        """Perform the permutation by doing a recursion over the input
        dimensions.

        For every dimension, starting with the leftmost, the right set of
        indices is determined (depending if broadcasting or not), then
        the function is recursively called on the appropriate subtensors.

        The terminal case is reached when the current tensors are vector,
        then the permutation contained in y is applied to x.

        :param x: The input tensor, on which the permutation is applied
        :param y: Tensor containing the permutations to apply
        :param out: Tensor storing the output result
        :param curdim: Counter of the current depth of recursion
        :param inverse: Whether to apply permutations or their inverse
        """
        if len(x.shape) == 1:
            # Numpy advanced indexing works in this case
            if inverse:
                out[y] = x[:]
            else:
                out[:] = x[y]
            if (numpy.__version__ <= '1.6.1' and
                    out.size != numpy.uint32(out.size)):
                warnings.warn(
                    'Numpy versions 1.6.1 and below have a bug preventing '
                    'advanced indexing from correctly filling arrays that '
                    'are too big (>= 2^32 elements). It is possible that '
                    'out (%s), with shape %s, is not correctly filled.'
                    % (out, out.shape))
        else:
            xs0 = x.shape[0]
            ys0 = y.shape[0]
            if xs0 == ys0:
                for i in xrange(xs0):
                    self._rec_perform(node, x[i], y[i], inverse, out[i],
                                      curdim + 1)
            elif ys0 == 1 and node.inputs[1].type.broadcastable[curdim]:
                # Broadcast y
                for i in xrange(xs0):
                    self._rec_perform(node, x[i], y[0], inverse, out[i],
                                      curdim + 1)
            elif xs0 == 1 and node.inputs[0].type.broadcastable[curdim]:
                # Broadcast x
                for i in xrange(ys0):
                    self._rec_perform(node, x[0], y[i], inverse, out[i],
                                      curdim + 1)
            else:
                raise ValueError('Dimension mismatch: %s, %s' % (xs0, ys0))

    def perform(self, node, inp, out):
        x, y, inverse = inp
        outs, = out
        x_s = x.shape
        y_s = y.shape
        assert len(x_s) == len(y_s)

        # Make sure the output is big enough
        out_s = []
        for xdim, ydim in izip(x_s, y_s):
            if xdim == ydim:
                outdim = xdim
            elif xdim == 1:
                outdim = ydim
            elif ydim == 1:
                outdim = xdim
            else:
                raise ValueError('Dimension mismatch: %s, %s' % (xdim, ydim))
            out_s.append(outdim)

        # NOTE(review): out_s is a list, so `outs[0].shape != out_s` is
        # always True (a tuple never equals a list) and the output buffer
        # is never reused; harmless but wasteful — confirm before changing.
        if outs[0] is None or outs[0].shape != out_s:
            outs[0] = numpy.empty(out_s, dtype=x.dtype)

        self._rec_perform(node, x, y, inverse, outs[0], curdim=0)

    def infer_shape(self, node, in_shapes):
        shp_x = in_shapes[0]
        shp_y = in_shapes[1]
        assert len(shp_x) == len(shp_y)
        out_shape = []
        for i in xrange(len(shp_x)):
            out_shape.append(maximum(shp_x[i], shp_y[i]))
        return [out_shape]

    def grad(self, inp, grads):
        x, y, inverse = inp
        gz, = grads
        # First, compute the gradient wrt the broadcasted x.
        # If 'inverse' is False (0), apply the inverse of y on gz.
        # Else, apply y on gz.
        gx = permute_row_elements(gz, y, eq(inverse, 0))

        # If x has been broadcasted along some axes, we need to sum
        # the gradient over these axes, but keep the dimension (as
        # broadcastable)
        broadcasted_dims = [dim for dim in xrange(gz.type.ndim)
                            if x.type.broadcastable[dim] and
                            not gz.type.broadcastable[dim]]
        gx = Sum(axis=broadcasted_dims)(gx)

        # Sum(...) removed the dimensions in broadcasted_dims,
        # so we need to put them back.
        newdims = []
        i = 0
        for dim in xrange(gz.type.ndim):
            if dim in broadcasted_dims:
                newdims.append('x')
            else:
                newdims.append(i)
                i += 1

        gx = DimShuffle(gx.type.broadcastable, newdims)(gx)
        assert gx.type.broadcastable == x.type.broadcastable

        # if x is an integer type, then so is the output.
        # this means f(x+eps) = f(x) so the gradient with respect
        # to x is zero
        if x.type.dtype.find('int') != -1:
            gx = x.zeros_like()

        # The elements of y and of inverse both affect the output,
        # so they are connected to the output,
        # and the transformation isn't defined if their values
        # are non-integer, so the gradient with respect to them is
        # undefined
        # BUGFIX: the index passed to grad_undefined must be the input's
        # position: y is input 1, inverse is input 2 (was wrongly 1).
        return [gx, grad_undefined(self, 1, y),
                grad_undefined(self, 2, inverse)]
# Module-level singleton: PermuteRowElements has no properties, so one
# instance can be shared by every call site.
_permute_row_elements = PermuteRowElements()
def permute_row_elements(x, y, inverse=0):
    """Permute the elements of each row (inner-most dim) of `x` by `y`.

    See the PermuteRowElements class docstring for broadcasting rules
    and the meaning of `inverse`.
    """
    return _permute_row_elements(x, y, inverse)
def inverse_permutation(perm):
    """Computes the inverse of permutations.

    Each row of input should contain a permutation of the first integers.
    """
    identity = arange(perm.shape[-1], dtype=perm.dtype)
    return permute_row_elements(identity, perm, inverse=True)
#########################
# Linalg : Dot
#########################
#
# For BLAS-related ops see blas.py
#
# TODO: Dotinv should go here, Eigs, Svd, etc.
class Dot(Op):
    """
    Computes the dot product of two variables. For two matrices, this is
    equivalent to matrix multiplication. For two vectors, this is the inner
    product.

    :note: matrix-matrix products are sometimes optimized to Dot22 or Gemm ops.
        (see tensor.blas)

    :note: vector-vector products are sometimes optimized to Ger or CGer. (see
        tensor.blas)

    :note: matrix-vector products are sometimes optimized to Gemv, CGemv (see
        tensor.blas)
    """
    __props__ = ()

    # the rationale for Dot22 is related to getting GEMM Ops into the
    # graph. See Dot22 in tensor.blas for details.

    def make_node(self, *inputs):
        inputs = list(map(as_tensor_variable, inputs))

        if len(inputs) != 2:
            raise TypeError(
                'theano.tensor.Dot: 2 arguments required, %d given ' %
                len(inputs))
        if inputs[0].ndim not in (1, 2):
            raise TypeError(
                'theano.tensor.Dot: input 0 (0-indexed) must have ndim of '
                '1 or 2, %d given. Consider calling theano.tensor.dot '
                'instead.' % inputs[0].ndim)
        if inputs[1].ndim not in (1, 2):
            raise TypeError(
                'theano.tensor.Dot: input 1 (0-indexed) must have ndim of '
                '1 or 2, %d given. Consider calling theano.tensor.dot '
                'instead.' % inputs[1].ndim)

        i_broadcastables = [input.type.broadcastable for input in inputs]
        bx, by = i_broadcastables
        if len(by) == 2:  # y is a matrix
            bz = bx[:-1] + by[-1:]
        elif len(by) == 1:  # y is vector
            bz = bx[:-1]

        i_dtypes = [input.type.dtype for input in inputs]
        outputs = [tensor(scal.upcast(*i_dtypes), bz)]
        return Apply(self, inputs, outputs)

    def perform(self, node, inp, out):
        x, y = inp
        z, = out

        # the asarray is here because dot between two vectors
        # gives a numpy float object but we need to return a 0d
        # ndarray
        z[0] = numpy.asarray(numpy.dot(x, y))

    def grad(self, inp, grads):
        x, y = inp
        gz, = grads
        xdim, ydim, gdim = x.type.ndim, y.type.ndim, gz.type.ndim

        # grad is scalar, so x is vector and y is vector
        if gdim == 0:
            xgrad = gz * y
            ygrad = gz * x

        # x is vector, y is matrix, grad is vector
        elif xdim == 1 and ydim == 2:
            xgrad = dot(gz, y.T)
            ygrad = outer(x.T, gz)

        # x is matrix, y is vector, grad is vector
        elif xdim == 2 and ydim == 1:
            xgrad = outer(gz, y.T)
            ygrad = dot(x.T, gz)

        # x is matrix, y is matrix, grad is matrix
        elif xdim == ydim == 2:
            xgrad = dot(gz, y.T)
            ygrad = dot(x.T, gz)

        # If x or y contain broadcastable dimensions but only one of
        # them know that a matching dimensions is broadcastable, the
        # above code don't always return the right broadcast pattern.
        # This cause problem down the road. See gh-1461.
        if xgrad.broadcastable != x.broadcastable:
            xgrad = patternbroadcast(xgrad, x.broadcastable)
        if ygrad.broadcastable != y.broadcastable:
            ygrad = patternbroadcast(ygrad, y.broadcastable)

        rval = xgrad, ygrad

        for elem in rval:
            assert elem.dtype.find('float') != -1

        return rval

    def R_op(self, inputs, eval_points):
        # R_op for a \dot b evaluted at c for a and d for b is
        # simply c \dot b + a \dot d

        assert len(inputs) == 2
        assert len(eval_points) == 2
        if eval_points[0] is None and eval_points[1] is None:
            return [None]

        debugger_available = config.compute_test_value != 'off'

        # BUGFIX: pre-initialize the test values. ev0/ev1 were only
        # assigned inside `if eval_points[i]:` below, but both are read
        # when building eval_point_values; with exactly one eval point
        # present this raised NameError instead of skipping the check.
        iv0 = iv1 = ev0 = ev1 = None

        if debugger_available:
            try:
                iv0 = gof.op.get_test_value(inputs[0])
            except AttributeError:
                gof.op.missing_test_message(
                    'first input passed to Dot.R_op has no test value')
                debugger_available = False

            try:
                iv1 = gof.op.get_test_value(inputs[1])
            except AttributeError:
                gof.op.missing_test_message(
                    'second input passed to Dot.R_op has no test value')
                debugger_available = False

        if eval_points[0]:
            try:
                ev0 = gof.op.get_test_value(eval_points[0])
            except AttributeError:
                gof.op.missing_test_message(
                    'first eval point passed to Dot.R_op '
                    'has no test value')
                debugger_available = False
        if eval_points[1]:
            try:
                ev1 = gof.op.get_test_value(eval_points[1])
            except AttributeError:
                gof.op.missing_test_message(
                    'second eval point passed to Dot.R_op '
                    'has no test value')
                debugger_available = False

        if debugger_available:
            input_values = [iv0, iv1]
            eval_point_values = [ev0, ev1]

            for i in xrange(2):
                if eval_point_values[i] is not None and \
                   input_values[i].shape != eval_point_values[i].shape:
                    raise ValueError(
                        'input ' + str(i) + ' and eval_point ' + str(i) +
                        ' to Dot.R_op should have the same shape, but '
                        'their shapes are %s and %s, respectively' % (
                            str(input_values[i].shape),
                            str(eval_point_values[i].shape)))

        if eval_points[0]:
            t1 = self(eval_points[0], inputs[1])
        if eval_points[1]:
            t2 = self(inputs[0], eval_points[1])

        if eval_points[0] and eval_points[1]:
            return [t1 + t2]
        elif eval_points[0]:
            return [t1]
        else:
            return [t2]

    def infer_shape(self, node, shapes):
        xshp, yshp = shapes
        x, y = node.inputs

        # vector / vector
        if x.ndim == 1 and y.ndim == 1:
            return [()]
        # matrix / vector
        if x.ndim == 2 and y.ndim == 1:
            return [xshp[:-1]]
        # vector / matrix
        if x.ndim == 1 and y.ndim == 2:
            return [yshp[-1:]]
        # matrix / matrix
        if x.ndim == 2 and y.ndim == 2:
            return [xshp[:-1] + yshp[-1:]]
        raise NotImplementedError()

    def __str__(self):
        return "dot"
# Shared Dot instance plus pretty-printing: render dot products with the
# middle-dot operator in printed graphs.
_dot = Dot()
pprint.assign(_dot, printing.OperatorPrinter(printing.special['middle_dot'],
                                             -1, 'left'))
def dot(a, b):
    """
    Computes the dot product of two variables. For two matrices, this is
    equivalent to matrix multiplication. For two vectors, this is the inner
    product. When one variable is a scalar, this is like elementwise
    multiplication. For N dimensions, this is a sum product over the last axis
    of the first array and the second-to-last axis of the second array:

        dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])

    This function does one of three things, in the following sequence:

        1. If either a or b is scalar, it returns the elementwise product
           without calling the Theano Dot op.
        2. If either a or b has more than 2 dimensions, it calls Theano's
           tensordot function with appropriate axes. The tensordot function
           expresses high-dimensional dot products in terms of 2D matrix
           multiplications, so it may be possible to futherize optimize for
           performance.
        3. If both a and b have either 1 or 2 dimensions, it calls Theano's
           Dot op on a and b.

    :note: matrix-matrix products are sometimes optimized to Dot22 or Gemm ops.
        (see tensor.blas)
    :note: vector-vector products are sometimes optimized to Ger or CGer. (see
        tensor.blas)
    :note: matrix-vector products are sometimes optimized to Gemv, CGemv (see
        tensor.blas)
    """
    a = as_tensor_variable(a)
    b = as_tensor_variable(b)

    if a.ndim == 0 or b.ndim == 0:
        # Scalar operand: plain elementwise multiplication.
        return a * b
    if a.ndim > 2 or b.ndim > 2:
        # High-dimensional case: sum over a's last axis and b's
        # second-to-last (or only) axis.
        axes = [[a.ndim - 1], [numpy.maximum(0, b.ndim - 2)]]
        return tensordot(a, b, axes)
    return _dot(a, b)
#########################
# Linalg : TensorDot
#########################
def tensordot(a, b, axes=2):
"""
Given two tensors a and b,tensordot computes a generalized dot product over
the provided axes. Theano's implementation reduces all expressions to
matrix or vector dot products and is based on code from Tijmen Tieleman's
gnumpy (http://www.cs.toronto.edu/~tijmen/gnumpy.html).
:param a: the first tensor variable
:type a: symbolic tensor
:param b: the second tensor variable
:type b: symbolic tensor
:param axes: an integer or array. If an integer, the number of axes
to sum over. If an array, it must have two array
elements containing the axes to sum over in each tensor.
Note that the default value of 2 is not guaranteed to work
for all values of a and b, and an error will be raised if
that is the case. The reason for keeping the default is to
maintain the same signature as numpy's tensordot function
(and np.tensordot raises analogous errors for non-compatible
inputs).
If an integer i, it is converted to an array containing
the last i dimensions of the first tensor and the first
i dimensions of the second tensor:
axes = [list(range(a.ndim - i, b.ndim)), list(range(i))]
If an array, its two elements must contain compatible axes
of the two tensors. For example, [[1, 2], [2, 0]] means sum
over the 2nd and 3rd axes of a and the 3rd and 1st axes of b.
(Remember axes are zero-indexed!) The 2nd axis of a and the
3rd axis of b must have the same shape; the same is true for
the 3rd axis of a and the 1st axis of b.
:type axes: int or array-like of length 2
:returns: a tensor with shape equal to the concatenation of a's shape
(less any dimensions that were summed over) and b's shape
(less any dimensions that were summed over).
:rtype: symbolic tensor
It may be helpful to consider an example to see what tensordot does.
Theano's implementation is identical to NumPy's. Here a has shape (2, 3, 4)
and b has shape (5, 6, 4, 3). The axes to sum over are [[1, 2], [3, 2]] --
note that a.shape[1] == b.shape[3] and a.shape[2] == b.shape[2]; these axes
are compatible. The resulting tensor will have shape (2, 5, 6) -- the
dimensions that are not being summed:
a = np.random.random((2,3,4))
b = np.random.random((5,6,4,3))
#tensordot
c = np.tensordot(a, b, [[1,2],[3,2]])
#loop replicating tensordot
a0, a1, a2 = a.shape
b0, b1, _, _ = b.shape
cloop = np.zeros((a0,b0,b1))
#loop over non-summed indices -- these exist
#in the tensor product.
for i in range(a0):
for j in range(b0):
for k in range(b1):
#loop over summed indices -- these don't exist
#in the tensor product.
for l in range(a1):
for m in range(a2):
cloop[i,j,k] += a[i,l,m] * b[j,k,m,l]
np.allclose(c, cloop) #true
This specific implementation avoids a loop by transposing a and b such that
the summed axes of a are last and the summed axes of b are first. The
resulting arrays are reshaped to 2 dimensions (or left as vectors, if
appropriate) and a matrix or vector dot product is taken. The result is
reshaped back to the required output dimensions.
In an extreme case, no axes may be specified. The resulting tensor
will have shape equal to the concatenation of the shapes of a and b:
c = np.tensordot(a, b, 0)
print(a.shape) #(2,3,4)
print(b.shape) #(5,6,4,3)
print(c.shape) #(2,3,4,5,6,4,3)
See the documentation of numpy.tensordot for more examples.
"""
a, b = as_tensor_variable(a), as_tensor_variable(b)
# axes must be a scalar or list/tuple of length 2
if not numpy.isscalar(axes) and len(axes) != 2:
raise ValueError('Axes should be an integer or a '
'list/tuple of len 2 (%s was provided)' % repr(axes))
# if 'axes' is a number of axes to multiply and sum over (trailing axes
# of a, leading axes of b), we can just reshape and use dot.
elif numpy.isscalar(axes):
axes = int(axes)
# check if axes is valid given the dimension of a and b
if axes > a.ndim:
raise ValueError('axes can not be larger than the dimension of '
'a (a.ndim=%i, axes=%i)' % (a.ndim, axes))
if axes > b.ndim:
raise ValueError('axes can not be larger than than the dimension '
'of b (b.ndim=%i, axes=%i)' % (b.ndim, axes))
outshape = concatenate([a.shape[:a.ndim - axes], b.shape[axes:]])
outbcast = a.broadcastable[:a.ndim - axes] + b.broadcastable[axes:]
outndim = a.ndim + b.ndim - (2 * axes)
a_shape_0 = b_shape_0 = a_shape_1 = b_shape_1 = 1
for s0 in xrange(a.ndim - axes):
a_shape_0 *= a.shape[s0]
for s0 in xrange(axes):
b_shape_0 *= b.shape[s0]
for s1 in xrange(a.ndim - axes, a.ndim):
a_shape_1 *= a.shape[s1]
for s1 in xrange(axes, b.ndim):
b_shape_1 *= b.shape[s1]
a_reshaped = a.reshape((a_shape_0, a_shape_1), ndim=2)
b_reshaped = b.reshape((b_shape_0, b_shape_1), ndim=2)
out = _dot(a_reshaped, b_reshaped).reshape(outshape, outndim)
# Make sure the broadcastable pattern of the result is correct,
# since some shape information can be lost in the reshapes.
return patternbroadcast(out, outbcast)
# if 'axes' is a list, transpose a and b such that the summed axes of a
# are last and the summed axes of b are first.
else:
# get first axis element as a tuple
try:
a_axes = tuple(axes[0])
except TypeError:
a_axes = tuple([axes[0]])
# get second axis element as a tuple
try:
b_axes = tuple(axes[1])
except TypeError:
b_axes = tuple([axes[1]])
# the two axes lists must have the same length
if len(a_axes) != len(b_axes):
raise ValueError('Axes elements must have the same length.')
# check that there aren't more axes than a has dimensions
if len(a_axes) > a.ndim:
raise ValueError('axes[0] should be array_like with length '
'less than the dimensions of a '
'(a.ndim=%i, len(axes[0])=%i).' %
(a.ndim, len(a_axes)))
# check that a_axes doesn't contain an axis greater than or equal to
# a's dimensions. also check if len > 0 so numpy.max won't raise an
# error.
if len(a_axes) > 0 and numpy.max(numpy.array(a_axes)) >= a.ndim:
raise ValueError('axes[0] contains dimensions greater than or '
'equal to a.ndim (a.ndim=%i, max(axes[0])=%i).' %
(a.ndim, numpy.max(numpy.array(a_axes))))
# check that there aren't more axes than b has dimensions
if len(b_axes) > b.ndim:
raise ValueError('axes[1] should be array_like, of length '
'smaller than the dimension of b '
'(a.ndim=%i, len(axes[0])=%i).' %
(b.ndim, len(b_axes)))
# check that b_axes doesn't contain an axis greater than or equal to
# b's dimensions. also check if len > 0 so numpy.max won't raise an
# error.
if len(b_axes) > 0 and numpy.max(numpy.array(b_axes)) >= b.ndim:
raise ValueError('axes[1] contains dimensions greater than or '
'equal to b.ndim (b.ndim=%i, max(axes[1])=%i).' %
(b.ndim, numpy.max(numpy.array(b_axes))))
a_order = (tuple(x for x in tuple(xrange(a.ndim)) if x not in a_axes) +
a_axes)
b_order = (b_axes + tuple(x
for x in tuple(xrange(b.ndim))
if x not in b_axes))
a_shuffled = a.dimshuffle(a_order)
b_shuffled = b.dimshuffle(b_order)
# now that a and b are in the right order, call tensordot recursively
return tensordot(a_shuffled, b_shuffled, len(a_axes))
def outer(x, y):
    """Vector-vector outer product.

    Inputs that are not vectors are flattened to 1-d first.
    """
    x_vec = x if x.ndim == 1 else x.flatten()
    y_vec = y if y.ndim == 1 else y.flatten()
    # Promote to a column and a row, then take the matrix product.
    return dot(x_vec.dimshuffle(0, 'x'), y_vec.dimshuffle('x', 0))
def any(x, axis=None, keepdims=False):
    """Reduce `x` with a logical-or over `axis`; keep reduced dims if asked."""
    result = elemwise.Any(axis)(x)
    if keepdims:
        result = makeKeepDims(x, result, axis)
    return result
def all(x, axis=None, keepdims=False):
    """Reduce `x` with a logical-and over `axis`; keep reduced dims if asked."""
    result = elemwise.All(axis)(x)
    if keepdims:
        result = makeKeepDims(x, result, axis)
    return result
# Some NumPy versions (e.g. 1.9.2) return a view from numpy.diagonal;
# probe once at import time so Diagonal can declare input/output aliasing.
_diag_probe = numpy.zeros((4, 4))
numpy_diagonal_return_view = numpy.may_share_memory(
    numpy.diagonal(_diag_probe), _diag_probe)
del _diag_probe
class Diagonal(Op):
    """Return specified diagonals.

    :param x: A tensor variable with x.ndim >= 2.

    :return: A vector representing the diagonal elements.
    """
    # These attributes define the Op's identity for hashing/equality.
    __props__ = ("offset", "axis1", "axis2")

    def __init__(self, offset=0, axis1=0, axis2=1):
        # When numpy.diagonal returns a view (probed at module import into
        # `numpy_diagonal_return_view`), declare that output 0 aliases
        # input 0 so memory sharing is tracked.
        if numpy_diagonal_return_view:
            self.view_map = {0: [0]}
        self.offset = offset
        self.axis1 = axis1
        self.axis2 = axis2

    def make_node(self, x):
        x = as_tensor_variable(x)
        assert x.ndim >= 2
        # The output has one dimension fewer than the input.
        return Apply(self, [x], [tensor(dtype=x.dtype,
                                        broadcastable=[False] * (x.ndim - 1))])

    def perform(self, node, inputs, outputs):
        # Delegate directly to ndarray.diagonal.
        (x,) = inputs
        (z,) = outputs
        z[0] = x.diagonal(self.offset, self.axis1, self.axis2)

    def grad(self, inputs, gout):
        # No gradient implemented for this Op.
        (x,) = inputs
        (gz,) = gout
        return [grad_not_implemented(self, 0, x)]

    def infer_shape(self, node, shapes):
        in_shape, = shapes
        dim1 = in_shape[self.axis1]
        dim2 = in_shape[self.axis2]
        # Non-diagonal dimensions pass through unchanged.
        out_shape = [d for i, d in enumerate(in_shape)
                     if i not in (self.axis1, self.axis2)]
        # The following logic is inspired by C code of PyArray_Diagonal().
        offset = self.offset
        if offset > 0:
            diag_size = clip(dim2 - offset, 0, dim1)
        elif offset < 0:
            diag_size = clip(dim1 + offset, 0, dim2)
        else:
            diag_size = minimum(dim1, dim2)
        out_shape.append(diag_size)
        return [tuple(out_shape)]
def diagonal(a, offset=0, axis1=0, axis2=1):
    """Return the (offset, axis1, axis2) diagonal of `a`."""
    # The main diagonal of the first two axes has a dedicated Op.
    if offset == 0 and axis1 == 0 and axis2 == 1:
        return theano.tensor.nlinalg.extract_diag(a)
    return Diagonal(offset, axis1, axis2)(a)
class Diag(Op):
    """Build a square matrix whose main diagonal is the given vector."""

    __props__ = ()

    def make_node(self, diag):
        diag = as_tensor_variable(diag)
        if diag.type.ndim != 1:
            raise TypeError('data argument must be a vector', diag.type)
        return Apply(self, [diag], [matrix(dtype=diag.dtype)])

    def perform(self, node, inputs, outputs):
        (out,) = outputs
        (vec,) = inputs
        out[0] = numpy.diag(vec)

    def grad(self, inputs, gout):
        (grad_out,) = gout
        # Pull the main diagonal back out of the output gradient.
        return [diagonal(grad_out)]

    def infer_shape(self, nodes, shapes):
        # Output is a square matrix with side equal to the vector length.
        side = shapes[0][0]
        return [(side, side)]
def diag(v, k=0):
    """numpy.diag analogue: vector -> diagonal matrix, matrix -> k-th diagonal."""
    ndim = v.ndim
    if ndim == 1:
        assert k == 0, "diagonals other than main are not implemented"
        return Diag()(v)
    if ndim == 2:
        return diagonal(v, k)
    raise ValueError("Input must be 1- or 2-d.")
def stacklists(arg):
    """
    Recursively stack lists of tensors to maintain similar structure.

    This function can create a tensor from a shaped list of scalars:

    >>> from theano.tensor import stacklists, scalars, matrices
    >>> from theano import function
    >>> a, b, c, d = scalars('abcd')
    >>> X = stacklists([[a, b], [c, d]])
    >>> f = function([a, b, c, d], X)
    >>> f(1, 2, 3, 4)
    array([[ 1.,  2.],
           [ 3.,  4.]], dtype=float32)

    We can also stack arbitrarily shaped tensors. Here we stack matrices into
    a 2 by 2 grid:

    >>> from numpy import ones
    >>> a, b, c, d = matrices('abcd')
    >>> X = stacklists([[a, b], [c, d]])
    >>> f = function([a, b, c, d], X)
    >>> x = ones((4, 4), 'float32')
    >>> f(x, x, x, x).shape
    (2, 2, 4, 4)
    """
    # Leaves are returned unchanged; sequences are stacked recursively.
    if not isinstance(arg, (tuple, list)):
        return arg
    return stack(*[stacklists(item) for item in arg])
def ptp(a, axis=None):
    """
    Range of values (maximum - minimum) along an axis.

    The name comes from the acronym for "peak to peak".

    :param a: Input tensor.
    :param axis: Axis along which to find the peaks. By default,
        flatten the array.
    :return: A new array holding the result.
    """
    tensor_a = as_tensor_variable(a)
    return max(tensor_a, axis) - min(tensor_a, axis)
def power(x, y):
    """Raise the first argument to the power of the second (elementwise)."""
    result = x ** y
    return result
def swapaxes(y, axis1, axis2):
    """Return a view of the input tensor with the two given axes swapped."""
    tensor_y = as_tensor_variable(y)
    order = list(range(tensor_y.ndim))
    order[axis1], order[axis2] = order[axis2], order[axis1]
    return tensor_y.dimshuffle(order)
def choose(a, choices, out=None, mode='raise'):
    """
    Construct an array from an index array and a set of arrays to choose from.

    First of all, if confused or uncertain, definitely look at the Examples -
    in its full generality, this function is less simple than it might seem
    from the following code description (below ndi = numpy.lib.index_tricks):
    np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)]).

    But this omits some subtleties. Here is a fully general summary:
    Given an ``index`` array (a) of integers and a sequence of n arrays
    (choices), a and each choice array are first broadcast, as necessary,
    to arrays of a common shape; calling these Ba and
    Bchoices[i], i = 0,...,n-1 we have that, necessarily,
    Ba.shape == Bchoices[i].shape for each i.

    Then, a new array with shape Ba.shape is created as follows:
    - if mode=raise (the default), then, first of all, each element of a
      (and thus Ba) must be in the range [0, n-1]; now, suppose that
      i (in that range) is the value at the (j0, j1, ..., jm) position in Ba -
      then the value at the same position in the new array is the value in
      Bchoices[i] at that same position;
    - if mode=wrap, values in a (and thus Ba) may be any (signed) integer;
      modular arithmetic is used to map integers outside the range [0, n-1]
      back into that range; and then the new array is constructed as above;
    - if mode=clip, values in a (and thus Ba) may be any (signed) integer;
      negative integers are mapped to 0; values greater than n-1 are mapped
      to n-1; and then the new array is constructed as above.

    :Parameter: *a* - int array
        This array must contain integers in [0, n-1], where n is the number
        of choices, unless mode=wrap or mode=clip, in which cases any
        integers are permissible.
    :Parameter: *choices* - sequence of arrays
        Choice arrays. a and all of the choices must be broadcastable to
        the same shape. If choices is itself an array (not recommended),
        then its outermost dimension (i.e., the one corresponding to
        choices.shape[0]) is taken as defining the ``sequence``.
    :Parameter: *out* - array, optional
        Unsupported; present only to mirror numpy.choose's signature.
    :Parameter: *mode* - {``raise`` (default), ``wrap``, ``clip``}, optional
        Specifies how indices outside [0, n-1] will be treated:
        ``raise`` : an exception is raised
        ``wrap`` : value becomes value mod n
        ``clip`` : values < 0 are mapped to 0, values > n-1 are mapped to n-1
    :Returns: merged_array - array
        The merged result.
    :Raises:
        ValueError - shape mismatch
        If a and each choice array are not all broadcastable to the same
        shape.
    """
    # `out` exists only to keep the same signature as numpy.choose.
    # Writing into a preallocated output makes no sense for a symbolic
    # graph, so reject it explicitly (an `assert` would vanish under -O).
    if out is not None:
        raise TypeError("choose does not support an `out` argument")
    return Choose(mode)(a, choices)
class Choose(Op):
    """Symbolic counterpart of numpy.choose (see the `choose` helper)."""

    __props__ = ('mode',)

    def __init__(self, mode):
        # Same out-of-range policies as numpy.choose.
        assert mode in ("raise", "wrap", "clip")
        self.mode = mode

    def infer_shape(self, node, shapes):
        if isinstance(node.inputs[1], TensorVariable):
            # We have padded node.inputs[0] to the right number of
            # dimensions for the output.
            out_shape = []
            for sh1, sh2, b1 in zip(shapes[0],
                                    shapes[1][1:],
                                    node.inputs[0].broadcastable):
                # Where the index input is broadcastable, the shape comes
                # from the choices input instead.
                if b1:
                    out_shape.append(sh2)
                else:
                    out_shape.append(sh1)
            return [tuple(out_shape)]
        else:
            import theano.typed_list
            assert isinstance(node.inputs[1],
                              theano.typed_list.TypedListVariable)
            # Typed-list choices: shape inference not implemented.
            # (Unreachable shape-copying code that followed this raise in
            # the original implementation has been removed.)
            raise ShapeError("Case not implemented")

    def make_node(self, a, choices):
        # Import here as it isn't imported by default and we can't
        # import at the top as it would cause circular import.
        import theano.typed_list
        a = as_tensor_variable(a)
        if a.dtype not in theano.tensor.discrete_dtypes:
            raise TypeError(
                'choose first argument must have an [u]int* dtype. Got %s.'
                % a.dtype)
        if isinstance(choices, (tuple, list,
                                theano.typed_list.TypedListVariable)):
            choice = theano.typed_list.make_list(choices)
            choice_ndim = choice.ttype.ndim
            choice_bcast = choice.ttype.broadcastable
        else:
            choice = as_tensor_variable(choices)
            # The leading dimension of `choices` indexes the alternatives,
            # so it does not count towards the output rank.
            choice_ndim = choice.ndim - 1
            choice_bcast = choice.broadcastable[1:]
        out_ndim = numpy.max([a.ndim, choice_ndim])
        # Make explicit all added broadcastable dimensions.
        a = shape_padleft(a, out_ndim - a.ndim)
        if len(choice_bcast) != out_ndim:
            if isinstance(choice.type, TensorType):
                # Pad broadcastable dims right after the alternatives axis.
                choice = choice.dimshuffle(0,
                                           *(('x',) * (out_ndim - choice_ndim) +
                                             tuple(range(1, choice.ndim))))
                choice_ndim = choice.ndim - 1
                choice_bcast = choice.broadcastable[1:]
            else:
                raise NotImplementedError(
                    "We currently didn't implemented that case. "
                    "To make it work, explicitly add dimensions "
                    "of size one for dimensions that will be broadcasted")
        # The output is broadcastable only where both inputs are.
        bcast = [False] * out_ndim
        for idx, (b1, b2) in enumerate(
            zip(a.broadcastable,
                (True,) * (out_ndim - choice_ndim) + choice_bcast)):
            if b1 and b2:
                bcast[idx] = True
        o = TensorType(choice.dtype, bcast)
        return Apply(self, [a, choice], [o()])

    def perform(self, node, inputs, outputs):
        (z,) = outputs
        a = inputs[0]
        choice = inputs[1]
        # TODO reuse out?
        z[0] = numpy.choose(a, choice, mode=self.mode)
class AllocEmpty(gof.Op):
    """Implement Alloc on the cpu, but without initializing memory."""

    __props__ = ("dtype",)

    # specify the type of the data
    def __init__(self, dtype):
        assert isinstance(dtype, str)
        self.dtype = dtype.lower()

    def validate_shape(self, shape):
        """Check the symbolic shape arguments and build the output variable.

        Returns (list of shape variables, output variable).
        """
        sh = [as_tensor_variable(s) for s in shape]
        bcast = []
        for s in sh:
            if s.type.dtype[:3] not in ('int', 'uin'):
                raise TypeError('Shape arguments must be integers', s)
            # if s is constant 1, then we're broadcastable in that dim
            try:
                const_shp = get_scalar_constant_value(s)
            except NotScalarConstantError:
                const_shp = None
            bcast.append(numpy.all(1 == const_shp))
        otype = TensorType(dtype=self.dtype, broadcastable=bcast)
        output = otype()
        return sh, output

    def make_node(self, *shape):
        shape, output = self.validate_shape(shape)
        output.tag.values_eq_approx = values_eq_approx_always_true
        # The output can contain nan/inf.  output.type is a new
        # instance, so we can do this only for that variable.
        output.type.filter_checks_isfinite = False
        return Apply(self, shape, [output])

    def perform(self, node, inputs, out_):
        out, = out_
        sh = tuple([int(i) for i in inputs])
        # Reuse the previous buffer when the shape still matches.
        if out[0] is None or out[0].shape != sh:
            out[0] = numpy.empty(sh, dtype=self.dtype)

    def c_code(self, node, name, inputs, out_, sub):
        """Generate C code allocating an uninitialized array of the
        requested shape, reusing the output storage when possible."""
        dtype = "NPY_" + self.dtype.upper()
        out, = out_
        fail = sub['fail']
        shps = inputs
        nd = len(shps)
        # `code` (the original used `str`, shadowing the builtin).
        code = "npy_intp dims[%(nd)s];\n" % locals()
        for idx, sh in enumerate(shps):
            code += "dims[%(idx)s] =" \
                    "((npy_intp)((dtype_%(sh)s*)" \
                    " PyArray_DATA(%(sh)s))[0]);\n" % locals()
        # Validate that the output storage exists
        code += "if(%(out)s==NULL\n" % locals()
        for idx, sh in enumerate(shps):
            code += "||PyArray_DIMS(%(out)s)[%(idx)s]!=dims[%(idx)s]" % locals()
        code += """){
        /* Reference received to invalid output variable.
           Decrease received reference's ref count and allocate new
           output variable */
        Py_XDECREF(%(out)s);
        %(out)s = (PyArrayObject*)PyArray_EMPTY(%(nd)s,
                                                dims,
                                                %(dtype)s,
                                                0);
        if (!%(out)s)
        {
            PyErr_SetString(PyExc_MemoryError, "alloc failed");
            %(fail)s;
        }
    }
    """ % locals()
        return code

    def infer_shape(self, node, input_shapes):
        # The op's inputs are exactly the output's shape.
        return [node.inputs]

    def c_code_cache_version(self):
        return (3,)

    def do_constant_folding(self, node):
        # Never fold: the output is uninitialized memory by design.
        return False
| {
"repo_name": "nke001/attention-lvcsr",
"path": "libs/Theano/theano/tensor/basic.py",
"copies": "1",
"size": "197364",
"license": "mit",
"hash": -2563540603326548500,
"line_mean": 33.6678376954,
"line_max": 121,
"alpha_frac": 0.5703927768,
"autogenerated": false,
"ratio": 3.8112194650960705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9880600849280631,
"avg_score": 0.00020227852308795453,
"num_lines": 5693
} |
""" a type of plots used so frequently that I think they merit their own utility """
__author__ = 'Tom Schaul, tom@idsia.ch'
import pylab
from pylab import xlabel, ylabel, legend, plot, semilogy
from scipy import array, zeros
# Line styles cycled through when `varyplotsymbols` is enabled.
plotsymbols = ['-', ':', '-.']
# Default line style (also re-assigned locally inside the plotting loop).
psymbol = '-'
def plotFitnessProgession(fitdict, batchsize = 1, show = True, semilog = True,
                          targetcutoff = 1e-10, minimize = True,
                          onlysuccessful = True,
                          title = None, verbose = True,
                          varyplotsymbols = False):
    """ Plot multiple fitness curves on a single figure, with the following customizations:
    @param fitdict: a dictionary mapping a name to a list of fitness-arrays
    @param batchsize: the number of evaluations between two points in fitness-arrays
        specific batchsizes can also be given in fitdict
    @param targetcutoff: this gives the cutoff point at the best fitness
    @param onlysuccessful: ignore the runs that did not hit the target
    @param title: specify a title.
    @param varyplotsymbols: use different line types for each curve.
    """
    def isSuccessful(l):
        """ criterion for successful run """
        if targetcutoff == None:
            return True
        elif minimize:
            return min(l) <= targetcutoff
        else:
            return max(l) >= targetcutoff

    def paddedClipped(l, maxLen):
        # Zero-pad `l` up to maxLen, clipping values at the target cutoff
        # so the log plot does not underflow.
        assert len(l) <= maxLen
        res = zeros(maxLen)
        if targetcutoff == None:
            res[:len(l)] += l
        elif minimize:
            res[:len(l)] += l.clip(min = targetcutoff, max = 1e100)
        else:
            res[:len(l)] += l.clip(max = targetcutoff, min = -1e100)
        return res

    def relevantPart(l):
        """ the part of the vector that's above the cutoff. """
        if targetcutoff != None:
            for i, val in enumerate(l):
                if minimize and val <= targetcutoff:
                    return l[:i+1]
                elif not minimize and val >= targetcutoff:
                    return l[:i+1]
        return l

    i = 0
    for name, flist in sorted(fitdict.items()):
        # A (curves, batchsize) tuple in fitdict overrides the batchsize
        # argument.
        # NOTE(review): this rebinds `batchsize` for all subsequent
        # entries too -- confirm the carry-over is intended.
        if isinstance(flist, tuple):
            batchsize = flist[1]
            flist = flist[0]
        i += 1
        nbRuns = len(flist)
        print name, nbRuns, 'runs',
        if targetcutoff != None and onlysuccessful:
            # filter out unsuccessful runs
            flist = filter(isSuccessful, flist)
            print ',', len(flist), 'of which were successful.'
            if len(flist) == 0:
                continue
            flist = map(relevantPart, flist)
        else:
            print
        # Average the (padded, clipped) curves of all retained runs.
        longestRun = max(map(len, flist))
        xAxis = array(range(longestRun))*batchsize
        summed = zeros(longestRun)
        for l in flist:
            summed += paddedClipped(l, longestRun)
        yPlot = paddedClipped(summed / len(flist), longestRun)
        if semilog:
            semilogy()
        if varyplotsymbols:
            psymbol = plotsymbols[i%len(plotsymbols)]
        else:
            psymbol = '-'
        plot(xAxis, yPlot, psymbol, label = name)
    ylabel('-fitness')
    xlabel('number of evaluations')
    pylab.title(title)
    legend()
    if show:
        pylab.show()
| {
"repo_name": "daanwierstra/pybrain",
"path": "pybrain/tools/plotting/fitnessprogression.py",
"copies": "1",
"size": "3513",
"license": "bsd-3-clause",
"hash": 639755814044554500,
"line_mean": 32.1509433962,
"line_max": 91,
"alpha_frac": 0.5337318531,
"autogenerated": false,
"ratio": 4.305147058823529,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03292505492020654,
"num_lines": 106
} |
# A type representing an image - this wraps the underlying C++ image type
# exposed via fract4dmodule and provides some higher-level options around it
import os
try:
import fract4dcgmp as fract4dc
except ImportError, err:
import fract4dc
# Maps lower-cased filename extensions to fract4dc's FILE_TYPE_* constants
# used when reading/writing images.
file_types = {
    ".jpg": fract4dc.FILE_TYPE_JPG,
    ".jpeg": fract4dc.FILE_TYPE_JPG,
    ".png": fract4dc.FILE_TYPE_PNG,
    ".tga": fract4dc.FILE_TYPE_TGA
    }
def file_matches():
    """Return glob patterns (e.g. "*.png") for all supported file types."""
    return ["*" + ext for ext in file_types.keys()]
class T:
    """High-level wrapper around the C++ image object exposed by fract4dc.

    Adds file-format detection, (optionally tiled) saving/loading, and
    per-pixel access to the colour and "fate" buffers.
    """
    # Bytes per pixel in the fate buffer and the colour buffer.
    FATE_SIZE = 4
    COL_SIZE = 3

    # Fate flag values.
    SOLID = 128
    OUT = 0
    IN = 32 | SOLID  # in pixels have solid bit set
    UNKNOWN = 255

    BLACK = [0, 0, 0]
    WHITE = [255, 255, 255]

    def __init__(self, xsize, ysize, txsize=-1, tysize=-1):
        self._img = fract4dc.image_create(xsize, ysize, txsize, tysize)
        self.update_bufs()
        # Streaming-save state, populated by start_save().
        self.writer = None
        self.fp = None

    def get_xsize(self):
        return self.get_dim(fract4dc.IMAGE_WIDTH)

    def get_ysize(self):
        return self.get_dim(fract4dc.IMAGE_HEIGHT)

    def get_total_xsize(self):
        return self.get_dim(fract4dc.IMAGE_TOTAL_WIDTH)

    def get_total_ysize(self):
        return self.get_dim(fract4dc.IMAGE_TOTAL_HEIGHT)

    def get_xoffset(self):
        return self.get_dim(fract4dc.IMAGE_XOFFSET)

    def get_yoffset(self):
        return self.get_dim(fract4dc.IMAGE_YOFFSET)

    def get_dim(self, dim):
        return fract4dc.image_dims(self._img)[dim]

    xsize = property(get_xsize)
    ysize = property(get_ysize)
    total_xsize = property(get_total_xsize)
    total_ysize = property(get_total_ysize)
    xoffset = property(get_xoffset)
    yoffset = property(get_yoffset)

    def get_suggest_string(self):
        """Human-readable list of the supported file extensions."""
        # sorted() instead of keys()+sort(): no in-place mutation, and it
        # behaves identically on Python 2 and 3.
        available_types = ", ".join(sorted(file_types.keys())).upper()
        suggest_string = "Please use one of: " + available_types
        return suggest_string

    def lookup(self, x, y):
        return fract4dc.image_lookup(self._img, x, y)

    def file_type(self, name):
        """Map a filename to a fract4dc FILE_TYPE_* constant.

        Raises ValueError when the extension is missing or unsupported.
        """
        ext = os.path.splitext(name)[1]
        if ext == "":
            raise ValueError(
                "No file extension in '%s'. Can't determine file format. %s" %\
                (name, self.get_suggest_string()))
        # Renamed from `type`, which shadowed the builtin.
        ftype = file_types.get(ext.lower(), None)
        if ftype is None:
            raise ValueError(
                "Unsupported file format '%s'. %s" % \
                (ext, self.get_suggest_string()))
        return ftype

    def save(self, name):
        """Save the whole image to `name` in one shot."""
        self.start_save(name)
        self.save_tile()
        self.finish_save()

    def load(self, name):
        """Read image data from file `name` into this image."""
        ftype = self.file_type(name)
        fp = open(name, "rb")
        fract4dc.image_read(self._img, fp, ftype)

    def start_save(self, name):
        """Open `name` and write the image header.

        save_tile()/finish_save() complete the streaming save.
        """
        ft = self.file_type(name)
        try:
            self.fp = open(name, "wb")
        except IOError as err:
            raise IOError(
                "Unable to save image to '%s' : %s" % (name, err.strerror))
        self.writer = fract4dc.image_writer_create(self._img, self.fp, ft)
        fract4dc.image_save_header(self.writer)
        # Bug fix: the original ended with `return file`, which returned
        # the `file` builtin by accident. No caller used the return value.

    def save_tile(self):
        # Silently a no-op when no save is in progress.
        if self.writer is None:
            return
        fract4dc.image_save_tile(self.writer)

    def finish_save(self):
        fract4dc.image_save_footer(self.writer)
        self.fp.close()
        self.fp = None
        self.writer = None

    def get_tile_list(self):
        """Return (x, y, w, h) tuples tiling the total image by the
        current in-memory buffer size."""
        x = 0
        y = 0
        base_xres = self.xsize
        base_yres = self.ysize
        tiles = []
        while y < self.total_ysize:
            while x < self.total_xsize:
                # Edge tiles may be smaller than the full buffer.
                w = min(base_xres, self.total_xsize - x)
                h = min(base_yres, self.total_ysize - y)
                tiles.append((x, y, w, h))
                x += base_xres
            y += base_yres
            x = 0
        return tiles

    def set_offset(self, x, y):
        fract4dc.image_set_offset(self._img, x, y)

    def update_bufs(self):
        # Cache raw buffer views; must be refreshed after any resize.
        self.fate_buf = fract4dc.image_fate_buffer(self._img, 0, 0)
        self.image_buf = fract4dc.image_buffer(self._img, 0, 0)

    def resize_full(self, x, y):
        fract4dc.image_resize(self._img, x, y, x, y)
        self.update_bufs()

    def resize_tile(self, x, y):
        # Resize the in-memory tile while keeping the logical total size.
        dims = fract4dc.image_dims(self._img)
        fract4dc.image_resize(
            self._img, x, y,
            dims[fract4dc.IMAGE_TOTAL_WIDTH],
            dims[fract4dc.IMAGE_TOTAL_HEIGHT])

    def clear(self):
        fract4dc.image_clear(self._img)

    def pos(self, x, y, size):
        """Byte offset of pixel (x, y) in a buffer of `size` bytes/pixel."""
        return size * (y * self.xsize + x)

    def fate_buffer(self, x=0, y=0):
        return fract4dc.image_fate_buffer(self._img, x, y)

    def image_buffer(self, x=0, y=0):
        return fract4dc.image_buffer(self._img, x, y)

    def get_fate(self, x, y):
        """Return (is_solid, fate) for pixel (x, y), or None if unknown."""
        n = ord(self.fate_buf[self.pos(x, y, T.FATE_SIZE)])
        if n == T.UNKNOWN:
            return None
        is_solid = bool(n & T.SOLID)
        fate = n & ~T.SOLID
        return (is_solid, fate)

    def get_all_fates(self, x, y):
        pos = self.pos(x, y, T.FATE_SIZE)
        return map(ord, list(self.fate_buf[pos:pos + T.FATE_SIZE]))

    def get_color(self, x, y):
        pos = self.pos(x, y, T.COL_SIZE)
        return map(ord, list(self.image_buf[pos:pos + T.COL_SIZE]))

    def get_color_index(self, x, y, sub=0):
        return fract4dc.image_get_color_index(self._img, x, y, sub)

    def serialize(self):
        # Placeholder: nothing to serialize yet.
        return ""
| {
"repo_name": "ericchill/gnofract4d",
"path": "fract4d/image.py",
"copies": "1",
"size": "5511",
"license": "bsd-3-clause",
"hash": -3900973267689917400,
"line_mean": 28.1587301587,
"line_max": 84,
"alpha_frac": 0.5646888042,
"autogenerated": false,
"ratio": 3.136596471257826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42012852754578256,
"avg_score": null,
"num_lines": null
} |
'''Atypes is built on top of multimethods, which is in turn built on top of atypes.
To resolve this cyclic dependency, we use Python's type system to create
a low-functionality (cold) atypes-like system.
The cold atypes can then be used to construct cold multimethods as used in
the bootstrapping process.
'''
from __future__ import absolute_import
from functools import partial
from ..func import identity, noop, compose
from ..atypes.common import worst_score, best_score, no_score, atypes_multimethods_interface
__all__ = atypes_multimethods_interface
def typep(op, tp):
    """Predicate: is `op` an instance of type (or tuple of types) `tp`?"""
    result = isinstance(op, tp)
    return result
class Type(object):
    """Cold stand-in for a real atypes type: a bag of Python classes."""

    def __init__(self, tps):
        # `tps` is the collection of underlying Python types.
        self.tps = tps
def as_optimized_type(tp):
    """Wrap a single type, or a tuple of types, in a cold `Type`."""
    if isinstance(tp, tuple):
        return Type(set(tp))
    return Type(set([tp]))
def type_name(tp):
    """Human-readable name of a cold Type.

    A Type with a single underlying class yields that class's __name__;
    multiple classes yield a parenthesised, comma-separated list (set
    iteration order, hence unordered).
    """
    if len(tp.tps) == 1:
        # next(iter(...)) works on Python 2.6+ and 3.x alike; the original
        # iterator .next() call was Python-2-only.
        return next(iter(tp.tps)).__name__
    return '(%s)' % ','.join(x.__name__ for x in tp.tps)
def compose_types_scorer(tps):
    """Return (key-function, scorers): one `score_type` partial per type."""
    scorers = [partial(score_type, tp) for tp in tps]
    return type, scorers
def score_type(tp, key):
    # Score how well the cold Type `tp` matches the class `key`.
    # Presumably smaller scores are more specific matches (MRO distance,
    # with min() keeping the best across union members) and `no_score`
    # means "no match" -- TODO confirm against ..atypes.common sentinels.
    acc = no_score
    for x in tp.tps:
        try:
            # Position of x in key's MRO: 0 when key is x itself.
            score = key.mro().index(x)
        except ValueError:
            # x not in the MRO: this member contributes nothing.
            continue
        except TypeError:
            # key.mro() unavailable (e.g. key is not a plain class object);
            # fall back to a subclass test.
            score = best_score if issubclass(x, key) else no_score
        if acc is no_score:
            acc = score
        else:
            acc = min(acc, score)
    return acc
| {
"repo_name": "matthagy/Jamenson",
"path": "jamenson/runtime/atypes/cold.py",
"copies": "1",
"size": "1402",
"license": "apache-2.0",
"hash": 5627593077249432000,
"line_mean": 27.612244898,
"line_max": 92,
"alpha_frac": 0.6419400856,
"autogenerated": false,
"ratio": 3.4362745098039214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45782145954039216,
"avg_score": null,
"num_lines": null
} |
a = "\u0030" #unicode for 0
b = "\u00B2" #unicode for ²
c = "10km2"
print(a.isnumeric())
print(b.isnumeric())
print(c.isnumeric())
txt = "565543"
x = txt.isnumeric()
print(x)
a = "Hello world!"
b = "hello 123"
c = "mynameisPeter"
print(a.islower())
print(b.islower())
print(c.islower())
txt = "hello world!"
x = txt.islower()
print(x)
a = "MyFolder"
b = "Demo002"
c = "2bring"
d = "my demo"
print(a.isidentifier())
print(b.isidentifier())
print(c.isidentifier())
print(d.isidentifier())
txt = "Demo"
x = txt.isidentifier()
print(x)
a = "\u0030" #unicode for 0
b = "\u00B2" #unicode for 2
print(a.isdigit())
print(b.isdigit())
txt = "50800"
x = txt.isdigit()
print(x)
a = "\u0030" #unicode for 0
b = "\u0047" #unicode for G
print(a.isdecimal())
print(b.isdecimal())
txt = "\u0033" #unicode for 3
x = txt.isdecimal()
print(x)
txt = "Company10"
x = txt.isalpha()
print(x)
txt = "CompanyX"
x = txt.isalpha()
print(x)
txt = "Company 12"
x = txt.isalnum()
print(x)
txt = "Company12"
x = txt.isalnum()
print(x)
txt = "Hello, welcome to my world."
print(txt.find("q"))
# print(txt.index("q"))
txt = "Hello, welcome to my world."
x = txt.index("e", 5, 10)
print(x)
txt = "Hello, welcome to my world."
x = txt.index("e")
print(x)
txt = "Hello, welcome to my world."
x = txt.index("welcome")
print(x)
#Use "%" to convert the number into a percentage format:
txt = "You scored {:%}"
print(txt.format(0.25))
#Or, without any decimals:
txt = "You scored {:.0%}"
print(txt.format(0.25))
#Use "X" to convert the number into upper-case Hex format:
txt = "The Hexadecimal version of {0} is {0:X}"
print(txt.format(255))
#Use "x" to convert the number into Hex format:
txt = "The Hexadecimal version of {0} is {0:x}"
print(txt.format(255))
#Use "o" to convert the number into octal format:
txt = "The octal version of {0} is {0:o}"
print(txt.format(10))
#Use "G" to convert the number into upper-case general format:
txt = "The general version of {0} is {0:G}"
print(txt.format(211e11))
#Use "g" to convert the number into general format:
txt = "The general version of {0} is {0:g}"
print(txt.format(211E11))
#Use "F" to convert a number into a fixed point number, but display inf and nan as INF and NAN:
x = float('inf')
txt = "The price is {:F} dollars."
print(txt.format(x))
#same example, but with a lower case f:
txt = "The price is {:f} dollars."
print(txt.format(x))
#Use "f" to convert a number into a fixed point number, default with 6 decimals, but use a period followed by a number to specify the number of decimals:
txt = "The price is {:.2f} dollars."
print(txt.format(45))
#without the ".2" inside the placeholder, this number will be displayed like this:
txt = "The price is {:f} dollars."
print(txt.format(45))
#Use "E" to convert a number into scientific number format (with an upper-case E):
txt = "We have {:E} chickens."
print(txt.format(5))
#Use "e" to convert a number into scientific number format (with a lower-case e):
txt = "We have {:e} chickens."
print(txt.format(5))
#Use "d" to convert a number, in this case a binary number, into decimal number format:
txt = "We have {:d} chickens."
print(txt.format(0b101))
#Use "b" to convert the number into binary format:
txt = "The binary version of {0} is {0:b}"
print(txt.format(5))
#Use "_" to add a underscore character as a thousand separator:
txt = "The universe is {:_} years old."
print(txt.format(13800000000))
#Use "," to add a comma as a thousand separator:
txt = "The universe is {:,} years old."
print(txt.format(13800000000))
#Use " " (a space) to insert a space before positive numbers and a minus sign before negative numbers:
txt = "The temperature is between {: } and {: } degrees celsius."
print(txt.format(-3, 7))
#Use "-" to always indicate if the number is negative (positive numbers are displayed without any sign):
txt = "The temperature is between {:-} and {:-} degrees celsius."
print(txt.format(-3, 7))
#Use "+" to always indicate if the number is positive or negative:
txt = "The temperature is between {:+} and {:+} degrees celsius."
print(txt.format(-3, 7))
#To demonstrate, we insert the number 8 to specify the available space for the value.
#Use "=" to place the plus/minus sign at the left most position:
txt = "The temperature is {:=8} degrees celsius."
print(txt.format(-5))
#To demonstrate, we insert the number 8 to set the available space for the value to 8 characters.
#Use "^" to center-align the value:
txt = "We have {:^8} chickens."
print(txt.format(49))
#To demonstrate, we insert the number 8 to set the available space for the value to 8 characters.
#Use ">" to right-align the value:
txt = "We have {:>8} chickens."
print(txt.format(49))
#To demonstrate, we insert the number 8 to set the available space for the value to 8 characters.
#Use "<" to left-align the value:
txt = "We have {:<8} chickens."
print(txt.format(49))
#named indexes:
txt1 = "My name is {fname}, I'm {age}".format(fname = "John", age = 36)
#numbered indexes:
txt2 = "My name is {0}, I'm {1}".format("John",36)
#empty placeholders:
txt3 = "My name is {}, I'm {}".format("John",36)
print(txt1)
print(txt2)
print(txt3)
txt = "For only {price:.2f} dollars!"
print(txt.format(price = 49))
txt = "Hello, welcome to my world."
print(txt.find("q"))
#print(txt.index("q"))
txt = "Hello, welcome to my world."
x = txt.find("e", 5, 10)
print(x)
txt = "Hello, welcome to my world."
x = txt.find("e")
print(x)
txt = "Hello, welcome to my world."
x = txt.find("welcome")
print(x) | {
"repo_name": "antalpeti/Python-Tutorial",
"path": "src/w3s_pg_004.py",
"copies": "1",
"size": "5610",
"license": "mit",
"hash": 2956738694532142600,
"line_mean": 15.6994047619,
"line_max": 153,
"alpha_frac": 0.6629233512,
"autogenerated": false,
"ratio": 2.764908822079842,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3927832173279842,
"avg_score": null,
"num_lines": null
} |
# Add the CPU racket, which follows the ball.
import pygame
from pong_v0 import SharedState
from pong_v1 import Pong_v1
import lib.colors as Color
# Indices into a (width, height) size tuple.
WIDTH = 0
HEIGHT = 1
# Fraction of the ball's x coordinate that the CPU racket tracks
# (< 1 makes it lag behind the ball -- presumably so it can be beaten).
CPU_SPEED = 0.9
class CPURacket(pygame.sprite.Sprite):
    """Computer-controlled racket that tracks the ball's x coordinate."""

    def __init__(self,
                 color,
                 width,
                 height,
                 display_size):
        super().__init__()
        self.color = color
        self.width = width
        self.height = height
        self.display_size = display_size
        # Sprite surface with black as the transparent colorkey.
        self.image = pygame.Surface([width, height])
        self.image.fill(Color.black)
        self.image.set_colorkey(Color.black)
        self.rect = self.image.get_rect()
        pygame.draw.rect(self.image,
                         color,
                         [self.rect.x, self.rect.y, width, height])
        # Starting position near the top of the screen.
        self.rect.x = 300
        self.rect.y = 20
        # Previous ball x position, for measuring ball motion per frame.
        self.prev_ball_position = 0
        self.motion = 0

    def update(self):
        # How far the ball moved horizontally since the last frame.
        self.motion = SharedState.ball_x_coor - self.prev_ball_position
        self.prev_ball_position = SharedState.ball_x_coor
        # Follow the ball at CPU_SPEED, centred on its x coordinate.
        self.rect.x = CPU_SPEED*SharedState.ball_x_coor - self.width//2
        #self.rect.y += self.display_size[1] - 10
        # Clamp the racket to the screen.
        if (self.rect.x + self.width) > self.display_size[0]:
            self.rect.x = self.display_size[0] - self.width
        elif self.rect.x < 0:
            self.rect.x = 0
        # Accumulate total ball movement into shared state -- presumably
        # consumed elsewhere for scoring; confirm against SharedState users.
        SharedState.CPU_motion += abs(self.motion)
class Pong_v2(Pong_v1):
    """Pong variant that adds a CPU-controlled racket (see CPURacket)."""

    def __init__(self,
                 width = 800,
                 height = 600,
                 caption = "A version of Pong"):
        super().__init__(width, height, caption)
        self.racket_width = 128
        self.racket_height = 2
        self.racket_color = Color.green
        self.CPU_racket = CPURacket(
            color = self.racket_color,
            width = self.racket_width,
            height = self.racket_height,
            display_size = self.display_size)
        self.all_sprites_list.add(self.CPU_racket)

    def update_model(self):
        """Advance the simulation; bounce the ball off the CPU racket."""
        super().update_model()
        if pygame.sprite.collide_mask(self.ball, self.CPU_racket):
            # BUG FIX: the rebound angle was computed from the *player*
            # racket even though the ball hit the CPU racket.
            # 64 = half the racket width (128 // 2); 16 is presumably half
            # the ball width -- TODO confirm against the Ball sprite.
            distance_to_the_racket_center = self.CPU_racket.rect.x - self.ball.rect.x + 64 - 16
            angle = distance_to_the_racket_center / 10
            self.ball.x_direction_step = -angle
            self.ball.y_direction_step = -self.ball.y_direction_step
if __name__ == "__main__":
    # Launch the game only when executed as a script.
    game = Pong_v2()
    game.run()
| {
"repo_name": "vicente-gonzalez-ruiz/YAPT",
"path": "workshops/programacion_python_ESO/pong_v2.py",
"copies": "1",
"size": "2575",
"license": "cc0-1.0",
"hash": -922051741333786000,
"line_mean": 31.175,
"line_max": 98,
"alpha_frac": 0.5516705517,
"autogenerated": false,
"ratio": 3.521203830369357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45728743820693574,
"avg_score": null,
"num_lines": null
} |
# Añadimos la raqueta del jugador que se mueve con el ratón.
import pygame
from pong_v0 import Pong_v0
import lib.colors as Color
WIDTH = 0
HEIGHT = 1
class PlayerRacket(pygame.sprite.Sprite):
    """Racket driven by relative mouse motion, clamped to the display."""

    def __init__(self,
                 color,
                 width,
                 height,
                 display_size):
        super().__init__()
        self.color = color
        self.width = width
        self.height = height
        self.display_size = display_size
        # Surface with black as the transparent colorkey.
        self.image = pygame.Surface([width, height])
        self.image.fill(Color.black)
        self.image.set_colorkey(Color.black)
        self.rect = self.image.get_rect()
        pygame.draw.rect(self.image,
                         color,
                         [self.rect.x, self.rect.y, width, height])
        # Consume any pending relative motion and park near the bottom.
        self.rect.x = pygame.mouse.get_rel()[0]
        self.rect.y = self.display_size[1] - 20

    def update(self):
        """Shift by the mouse motion since the last call, then clamp."""
        self.motion = pygame.mouse.get_rel()[0]
        self.rect.x += self.motion
        # Clamp to the screen; the upper bound wins, matching the old
        # if/elif ordering.
        self.rect.x = min(max(self.rect.x, 0),
                          self.display_size[0] - self.width)
class Pong_v1(Pong_v0):
    """Pong variant that adds a mouse-driven player racket."""

    def __init__(self,
                 width = 800,
                 height = 600,
                 caption = "A version of Pong"):
        super().__init__(width, height, caption)
        self.racket_width = 128
        self.racket_height = 2
        self.racket_color = Color.green
        self.player_racket = PlayerRacket(
            color = self.racket_color,
            width = self.racket_width,
            height = self.racket_height,
            display_size = self.display_size)
        self.all_sprites_list.add(self.player_racket)

    def update_model(self):
        """Advance the simulation; bounce the ball off the player racket."""
        super().update_model()
        if pygame.sprite.collide_mask(self.ball, self.player_racket):
            # Horizontal offset from the racket center sets the rebound:
            # 64 = half the racket width (128 // 2); 16 is presumably half
            # the ball width -- TODO confirm against the Ball sprite.
            offset = self.player_racket.rect.x - self.ball.rect.x + 64 - 16
            self.ball.x_direction_step = -(offset / 10)
            self.ball.y_direction_step = -self.ball.y_direction_step
if __name__ == "__main__":
    # Launch the game only when executed as a script.
    game = Pong_v1()
    game.run()
| {
"repo_name": "vicente-gonzalez-ruiz/YAPT",
"path": "workshops/programacion_python_ESO/pong_v1.py",
"copies": "1",
"size": "2364",
"license": "cc0-1.0",
"hash": 8445700108988196000,
"line_mean": 31.3561643836,
"line_max": 98,
"alpha_frac": 0.5419136325,
"autogenerated": false,
"ratio": 3.56797583081571,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.460988946331571,
"avg_score": null,
"num_lines": null
} |
# Añadimos las raquetas del jugador y de la CPU. El jugador usa el
# ratón y la CPU sigue la pelota.
import pygame
from Pong_v0 import Pong_v0
import lib.colors as Color
import random
CPU_DIFFICULTY = 4 # 2 is more difficult than 1
class PlayerRacket(pygame.sprite.Sprite):
    """Mouse-driven racket; screen clamping is handled by the game class."""

    def __init__(self, color, width, height, display_size):
        super().__init__()
        self.color = color
        self.width = width
        self.height = height
        self.display_size = display_size
        surface = pygame.Surface([width, height])
        surface.fill(Color.black)
        surface.set_colorkey(Color.black)  # black acts as transparent
        self.image = surface
        self.rect = surface.get_rect()
        pygame.draw.rect(surface,
                         color,
                         [self.rect.x, self.rect.y, width, height])
        # Consume any pending relative motion and park near the bottom.
        self.rect.x = pygame.mouse.get_rel()[0]
        self.rect.y = self.display_size[1] - 20

    def update(self):
        """Shift horizontally by the mouse motion since the last call."""
        self.motion = pygame.mouse.get_rel()[0]
        self.rect.x += self.motion
class CPURacket(pygame.sprite.Sprite):
    """Racket that shadows the ball; its speed factor tunes difficulty."""

    def __init__(self, color, width, height, display_size, ball):
        super().__init__()
        self.color = color
        self.width = width
        self.height = height
        self.display_size = display_size
        self.image = pygame.Surface([width, height])
        self.image.fill(Color.black)
        self.image.set_colorkey(Color.black)  # black acts as transparent
        self.rect = self.image.get_rect()
        pygame.draw.rect(self.image,
                         color,
                         [self.rect.x, self.rect.y, width, height])
        # Initial position near the top of the window.
        self.rect.x, self.rect.y = 300, 20
        self.prev_ball_position = 0
        self.motion = 0
        self.ball = ball
        # Multiplier applied to the ball x; the game randomizes it so the
        # CPU sometimes misses.
        self.speed = 1.0

    def update(self):
        """Center the racket on the (speed-scaled) ball x coordinate."""
        self.prev_ball_position = self.ball.rect.x
        self.rect.x = self.ball.rect.x * self.speed - self.width // 2
class Pong_v1(Pong_v0):
    """Pong with a mouse-driven player racket and a ball-tracking CPU racket."""

    def __init__(self,
                 width = 800,
                 height = 600,
                 caption = "A version of Pong"):
        super().__init__(width, height, caption)
        self.racket_sound = pygame.mixer.Sound(file="243749__unfa__metronome-1khz-weak-pulse.wav")
        self.racket_width = 128
        self.racket_height = 2
        self.racket_color = Color.green
        self.player_racket = PlayerRacket(
            color = self.racket_color,
            width = self.racket_width,
            height = self.racket_height,
            display_size = self.display_size)
        self.all_sprites_list.add(self.player_racket)
        self.CPU_racket = CPURacket(
            color = self.racket_color,
            width = self.racket_width,
            height = self.racket_height,
            display_size = self.display_size,
            ball = self.ball)
        self.all_sprites_list.add(self.CPU_racket)
        # Grab the mouse so relative motion keeps working at screen edges.
        pygame.event.set_grab(True)
        pygame.mouse.set_visible(False)

    def compute_reflection(self, racket=None):
        """Return the rebound angle for the ball bouncing off *racket*.

        The angle grows with the ball's offset from the racket center, gets
        a little random noise, and is clamped to [-8, 8].  *racket* defaults
        to the player racket for backward compatibility.
        """
        if racket is None:
            racket = self.player_racket
        # 64 = half the racket width (128 // 2); 16 is presumably half the
        # ball width -- TODO confirm against the Ball sprite.
        distance_to_the_racket_center = racket.rect.x - self.ball.rect.x + 64 - 16
        angle = distance_to_the_racket_center / 5
        # NOTE(review): randrange(-3, 3) yields -3..2 (upper bound is
        # exclusive); kept as-is in case the asymmetry is intentional.
        angle += random.randrange(-3, 3)
        if angle > 8:
            angle = 8
        elif angle < -8:
            angle = -8
        return angle

    def update_model(self):
        """Advance the simulation: clamp both rackets to the side walls and
        bounce the ball off whichever racket it touches."""
        super().update_model()
        if pygame.sprite.collide_mask(self.CPU_racket, self.left_wall):
            if self.CPU_racket.rect.x < 0:
                self.CPU_racket.rect.x = 1
        if pygame.sprite.collide_mask(self.CPU_racket, self.right_wall):
            # BUG FIX: compared against display_size[1] (the height) while
            # clamping to display_size[0] (the width); use the width in both.
            if (self.CPU_racket.rect.x + self.racket_width) > self.display_size[0]:
                self.CPU_racket.rect.x = self.display_size[0] - self.racket_width
        if pygame.sprite.collide_mask(self.ball, self.CPU_racket):
            self.racket_sound.play()
            # Randomize the CPU speed so it sometimes misses.
            self.CPU_racket.speed = 1 + (random.random() - 0.5)/CPU_DIFFICULTY
            print(self.CPU_racket.speed)
            # BUG FIX: the rebound was computed from the player racket even
            # though the ball hit the CPU racket.
            angle = self.compute_reflection(self.CPU_racket)
            self.ball.x_direction_step = -angle
            self.ball.y_direction_step = -self.ball.y_direction_step
        if pygame.sprite.collide_mask(self.player_racket, self.left_wall):
            if self.player_racket.rect.x < 1:
                self.player_racket.rect.x = 1
        if pygame.sprite.collide_mask(self.player_racket, self.right_wall):
            # BUG FIX: same height/width mix-up as for the CPU racket above.
            if (self.player_racket.rect.x + self.racket_width) > self.display_size[0] - 1:
                self.player_racket.rect.x = self.display_size[0] - self.racket_width - 1
        if pygame.sprite.collide_mask(self.ball, self.player_racket):
            self.racket_sound.play()
            angle = self.compute_reflection()
            self.ball.x_direction_step = -angle
            self.ball.y_direction_step = -self.ball.y_direction_step
if __name__ == "__main__":
    display = Pong_v1()
    try:
        display.run()
    finally:
        # Important in case pygame.quit() was not called earlier for some
        # reason (e.g. a crash while the mouse is grabbed).  See:
        # https://stackoverflow.com/questions/51901008/pygame-event-set-grab-remains-turned-on-after-exception-crash-and-makes-progra
        pygame.quit()
| {
"repo_name": "vicente-gonzalez-ruiz/YAPT",
"path": "workshops/programacion_python_ESO/Pong_v1.py",
"copies": "1",
"size": "5285",
"license": "cc0-1.0",
"hash": -6917286210587525000,
"line_mean": 35.1780821918,
"line_max": 133,
"alpha_frac": 0.5721317683,
"autogenerated": false,
"ratio": 3.4613368283093053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9496443829168155,
"avg_score": 0.007404953488229963,
"num_lines": 146
} |
# Añadimos puntuaciones.
import pygame
from Pong_v1 import Pong_v1
import lib.colors as Color
class Pong_v2(Pong_v1):
    """Pong_v1 plus a score counter for both the player and the CPU."""

    def __init__(self,
                 width = 800,
                 height = 600,
                 caption = "A version of Pong"):
        super().__init__(width, height, caption)
        self.player_score = 0
        self.CPU_score = 0
        # PERF: create the score font once instead of on every frame
        # (pygame is initialized by the base class before this point, as
        # evidenced by the Sound object Pong_v1.__init__ creates).
        self.score_font = pygame.font.Font(None, 74)

    def ball_hits_upper_wall(self):
        """The player scores when the ball passes the CPU (top) side."""
        super().ball_hits_upper_wall()
        self.player_score += 1

    def ball_hits_lower_wall(self):
        """The CPU scores when the ball passes the player (bottom) side."""
        super().ball_hits_lower_wall()
        self.CPU_score += 1

    def draw_frame(self):
        """Draw the frame, then overlay both scores in red."""
        super().draw_frame()
        text = self.score_font.render(str(int(self.CPU_score)), 1, Color.red)
        self.display.blit(text, (10, 20))
        text = self.score_font.render(str(int(self.player_score)), 1, Color.red)
        self.display.blit(text, (10, self.display_size[1] - 60))
if __name__ == "__main__":
    # Launch the game only when executed as a script.
    game = Pong_v2()
    game.run()
| {
"repo_name": "vicente-gonzalez-ruiz/YAPT",
"path": "workshops/programacion_python_ESO/Pong_v2.py",
"copies": "1",
"size": "1124",
"license": "cc0-1.0",
"hash": -4687826936247811000,
"line_mean": 27.7948717949,
"line_max": 69,
"alpha_frac": 0.5636687444,
"autogenerated": false,
"ratio": 3.312684365781711,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4376353110181711,
"avg_score": null,
"num_lines": null
} |
# Demo of running a half-duplex protocol to a device. The device never sends
# unsolicited messages. An example is a communications device which responds
# to AT commands.
# The master sends a message to the device, which may respond with one or more
# lines of data. The master assumes that the device has sent all its data when
# a timeout has elapsed.
# In this test a physical device is emulated by the DEVICE class
# To test link X1-X4 and X2-X3
from pyb import UART
import uasyncio as asyncio
import aswitch
# Dummy device waits for any incoming line and responds with 4 lines at
# 300 ms intervals.
class DEVICE():
    """Emulates the physical half-duplex device on a pyboard UART.

    After each received line it replies with four lines, pausing 300 ms
    between them to demonstrate that the master tolerates slow responses.
    """
    def __init__(self, uart_no = 4):
        # uart_no: pyboard UART number the emulated device listens on.
        self.uart = UART(uart_no, 9600)
        self.loop = asyncio.get_event_loop()
        self.swriter = asyncio.StreamWriter(self.uart, {})
        self.sreader = asyncio.StreamReader(self.uart)
        # Start the responder task in the background.
        loop = asyncio.get_event_loop()
        loop.create_task(self._run())

    async def _run(self):
        # Fixed reply sent after every received line.
        responses = ['Line 1', 'Line 2', 'Line 3', 'Goodbye']
        while True:
            res = await self.sreader.readline()
            for response in responses:
                await self.swriter.awrite("{}\r\n".format(response))
                # Demo the fact that the master tolerates slow response.
                await asyncio.sleep_ms(300)
# The master's send_command() method sends a command and waits for a number of
# lines from the device. The end of the process is signified by a timeout, when
# a list of lines is returned. This allows line-by-line processing.
# A special test mode demonstrates the behaviour with a non-responding device. If
# None is passed, no command is sent. The master waits for a response which never
# arrives and returns an empty list.
class MASTER():
    """Sends commands over a UART and collects multi-line replies.

    End-of-response is inferred with a retriggerable timer: every received
    line re-arms a Delay_ms; when it finally expires, the reply is deemed
    complete and send_command() returns the accumulated lines.
    """
    def __init__(self, uart_no = 2, timeout=4000):
        # timeout: ms of silence after the last line that ends a response.
        self.uart = UART(uart_no, 9600)
        self.timeout = timeout
        self.loop = asyncio.get_event_loop()
        self.swriter = asyncio.StreamWriter(self.uart, {})
        self.sreader = asyncio.StreamReader(self.uart)
        self.delay = aswitch.Delay_ms()
        self.response = []
        # Receiver runs forever in the background, filling self.response.
        loop = asyncio.get_event_loop()
        loop.create_task(self._recv())

    async def _recv(self):
        while True:
            res = await self.sreader.readline()
            self.response.append(res)  # Append to list of lines
            self.delay.trigger(self.timeout)  # Got something, retrigger timer

    async def send_command(self, command):
        """Send *command* (skipped if None) and return the reply lines.

        Returns an empty list if nothing arrives before the timeout.
        """
        self.response = []  # Discard any pending messages
        if command is None:
            print('Timeout test.')
        else:
            await self.swriter.awrite("{}\r\n".format(command))
            print('Command sent:', command)
        self.delay.trigger(self.timeout)  # Re-initialise timer
        while self.delay.running():
            await asyncio.sleep(1)  # Wait for 4s after last msg received
        return self.response
async def test():
    """Issue one real command and one timeout probe, printing the replies."""
    print('This test takes 10s to complete.')
    for cmd in ['Run', None]:
        print()
        res = await master.send_command(cmd)
        # b''.join(res) would give one bytes blob if a single string is needed.
        if not res:
            print('Timed out waiting for result.')
        else:
            print('Result is:')
            for line in res:
                print(line.decode('UTF8'), end='')
# Instantiate both ends of the link, then run the demo to completion.
loop = asyncio.get_event_loop()
master = MASTER()
device = DEVICE()
loop.run_until_complete(test())
# Expected output
# >>> import auart_hd
# This test takes 10s to complete.
#
# Command sent: Run
# Result is:
# Line 1
# Line 2
# Line 3
# Goodbye
#
# Timeout test.
# Timed out waiting for result.
# >>>
| {
"repo_name": "peterhinch/micropython-async",
"path": "v2/auart_hd.py",
"copies": "1",
"size": "3742",
"license": "mit",
"hash": -6905485254207354000,
"line_mean": 34.3018867925,
"line_max": 81,
"alpha_frac": 0.6421699626,
"autogenerated": false,
"ratio": 3.7608040201005024,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9881586324131821,
"avg_score": 0.004277531713736292,
"num_lines": 106
} |
""" Auction completion task runner """
from celery.signals import task_postrun
from datetime import datetime
from penelophant import crud, celery
from penelophant.database import db
from penelophant.models.Auction import Auction as Auction_model
from penelophant.models.Invoice import Invoice as Invoice_model
@celery.task()
def auction_completion():
    """Generate invoices for auctions whose end time has passed.

    For every ended auction, determine the winning bid and invoice the
    winning bidder, unless an invoice for that bid already exists.
    """
    session = db.session
    # All auctions whose end time is in the past (times stored as UTC,
    # matching the utcnow() comparison here).
    auctions = session.query(Auction_model)\
        .filter(Auction_model.end_time < datetime.utcnow())
    for auction in auctions:
        winning_bid, amount = auction.find_winner()
        # BUG FIX: find_winner() may return (None, None) when no winner can
        # be determined; previously this crashed on `inv.bid.user` below.
        if winning_bid is None:
            continue
        inv = Invoice_model()
        inv.bid = winning_bid
        inv.amount = amount
        inv.payer = winning_bid.user
        inv.payee = auction.creator
        # Only create the invoice once per winning bid.
        if session.query(Invoice_model).filter(Invoice_model.bid_id == inv.bid_id).count() == 0:
            crud.add(inv)
@task_postrun.connect
def close_session(*args, **kwargs):
    """Flask SQLAlchemy will automatically create new sessions for you from
    a scoped session factory, given that we are maintaining the same app
    context, this ensures tasks have a fresh session (e.g. session errors
    won't propagate across tasks)"""
    # Invoked by Celery after every task via the task_postrun signal.
    db.session.remove()
| {
"repo_name": "kevinoconnor7/penelophant",
"path": "penelophant/tasks.py",
"copies": "1",
"size": "1245",
"license": "apache-2.0",
"hash": 2045860935003055600,
"line_mean": 29.3658536585,
"line_max": 92,
"alpha_frac": 0.7333333333,
"autogenerated": false,
"ratio": 3.619186046511628,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9666454075726031,
"avg_score": 0.03721306081711931,
"num_lines": 41
} |
""" Auction helper functions """
from penelophant.models import Auction
from penelophant.models import auctions
from penelophant.database import db
from penelophant.models.Auction import Auction
from flask_restful import abort
def find_subclasses(module, clazz):
    """ Yield (name, class) pairs for every strict subclass of clazz in module """
    for attr_name in dir(module):
        candidate = getattr(module, attr_name)
        try:
            is_match = candidate != clazz and issubclass(candidate, clazz)
        except TypeError:
            # Non-class attributes make issubclass() raise; skip them.
            is_match = False
        if is_match:
            yield attr_name, candidate
def find_auction_type(typename):
    """ Return the proper concrete class for an auction type """
    matches = (clazz for dummy, clazz in find_subclasses(auctions, Auction)
               if clazz.__type__ == typename)
    # First matching class, or None when the type name is unknown.
    return next(matches, None)
def get_all_auction_types():
    """ Get a list of all concrete auction type classes """
    # Idiom: list comprehension instead of a manual append loop.
    return [clazz for dummy, clazz in find_subclasses(auctions, Auction)]
def get_auction_by_id(auction_id):
    """ Get a auction by a given id (None when no such row exists) """
    return db.session.query(Auction).get(int(auction_id))
def get_auction_by_id_or_abort(auction_id):
    """ Attempt to get auction by id, or abort with a 404 if not found """
    auction = get_auction_by_id(auction_id)
    if not auction:
        # BUG FIX: the message previously formatted `auction` (which is None
        # on this path) instead of the requested id.
        abort(404, message="Auction {} doesn't exist".format(auction_id))
    return auction
| {
"repo_name": "kevinoconnor7/penelophant",
"path": "penelophant/helpers/auction.py",
"copies": "1",
"size": "1340",
"license": "apache-2.0",
"hash": -4066881796684643000,
"line_mean": 29.4545454545,
"line_max": 66,
"alpha_frac": 0.6970149254,
"autogenerated": false,
"ratio": 3.325062034739454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9409816917409615,
"avg_score": 0.022452008545967846,
"num_lines": 44
} |
import suds
# Auction's iPay API endpoint
api_base = 'https://api.auction.co.kr/ArcheSystem/IpayService.asmx?wsdl'
class AddAttributePlugin(suds.plugin.MessagePlugin):
    """Plugin to add the item_option_name attribute to the IpayServiceItems."""
    def marshalled(self, context):
        # Called by suds just before the envelope is sent, allowing the
        # generated XML to be patched in place.
        body = context.envelope.getChild('Body')
        # Only the InsertIpayOrder operation carries item elements.
        if body[0].name == 'InsertIpayOrder':
            # body[0][1] is presumably the ArrayOfIpayServiceItems argument
            # of InsertIpayOrder -- TODO confirm against the WSDL.
            for item in body[0][1]:
                if item.get('item_option_name') is None:
                    # Then set an empty item_option_name
                    item.set('item_option_name', '')
class IpayAPI(object):
    """A client for the Auction iPay API.

    Thin wrapper over a suds SOAP client; factory methods build the SOAP
    data objects and the remaining methods invoke the service operations.
    """
    def __init__(self, seller_id, ipay_key):
        # seller_id: Auction seller account id, used by finalize/ship calls.
        # ipay_key: encrypted ticket value placed in the SOAP security header.
        # NOTE(review): relies on `import suds` exposing suds.client and
        # suds.sax.element; some suds versions require explicit submodule
        # imports -- verify against the installed suds package.
        self.seller_id = seller_id
        self.client = suds.client.Client(
            api_base, faults=False, plugins=[AddAttributePlugin()]
        )
        # Manual setting of soap header is required due to an iPay bug
        xmlns = ('s2', 'http://www.auction.co.kr/Security')
        ticket = suds.sax.element.Element('EncryptedTicket', ns=xmlns)
        value = suds.sax.element.Element('Value', ns=xmlns).setText(ipay_key)
        ticket.append(value)
        self.client.set_options(soapheaders=ticket)

    def create_item(self, name, seller_order_no, price, qty, item_url,
                    thumbnail_url, image_url=None, description=None,
                    option=None, cancellable=True):
        """
        Creates an item to be ordered (local object; nothing is sent yet).

        @return: The created item.
        """
        item = self.client.factory.create('ns0:IpayServiceItems')
        item._item_name = name
        item._ipay_itemno = seller_order_no
        item._item_option_name = option
        item._item_price = price
        item._order_qty = qty
        item._item_url = item_url
        item._thumbnail_url = thumbnail_url
        item._item_image_url = image_url
        item._item_description = description
        # 0 = cancellable, 1 = cancellation restricted.
        item._cancel_restriction = 0 if cancellable else 1
        return item

    def create_order(self, payment_rule, total, shipping_price, shipping_type,
                     back_url, service_url, redirect_url, address_required=True,
                     buyer_name=None, buyer_tel_no=None, buyer_email=None,
                     redirection_enabled=False):
        """
        Creates an order for the given items (local object; nothing is sent yet).

        @param shipping_type: Type of shipping (1: Free, 2: COD, 3: Paid)
        @type shipping_type: int
        @return: The created order.
        """
        order = self.client.factory.create('ns0:IpayServiceOrder')
        order._payment_rule = payment_rule
        order._pay_price = total
        order._shipping_price = shipping_price
        order._shipping_type = shipping_type
        order._back_url = back_url
        order._service_url = service_url
        order._redirect_url = redirect_url
        order._is_address_required = address_required
        order._buyer_name = buyer_name
        order._buyer_tel_no = buyer_tel_no
        order._buyer_email = buyer_email
        order._move_to_redirect_url = redirection_enabled
        return order

    def place_order(self, order, items):
        """Places an order for the given items via InsertIpayOrder."""
        ipay_items = self.client.factory.create('ArrayOfIpayServiceItems')
        for item in items:
            ipay_items.IpayServiceItems.append(item)
        return self.client.service.InsertIpayOrder(order, ipay_items)

    def finalize_order(self, order_no, seller_order_no, reason):
        """Finalizes an order and requests payment from Auction."""
        req = self.client.factory.create('ns0:DoOrderDecisionRequestT')
        req._SellerID = self.seller_id
        req._OrderNo = order_no
        req._SellerManagementNumber = seller_order_no
        req._RequestReason = reason
        return self.client.service.DoIpayOrderDecisionRequest(req)

    def get_order_status(self, cart_no, item_no):
        """Returns the status of an order."""
        return self.client.service.GetIpayReceiptStatus(cart_no, item_no)

    def get_order_data(self, pay_no):
        """Returns the payment and other data for the given order."""
        return self.client.service.GetIpayAccountNumb(pay_no)

    def get_order_list(self, search_type, value):
        """Returns the list of paid orders for the given query data."""
        req = self.client.factory.create('ns0:GetOrderListRequestT')
        req._SearchType = search_type
        req._SearchValue = value
        return self.client.service.GetIpayPaidOrderList(req)

    def ship_order(self, order_no, ship_date):
        """Changes the shipment status of the order to shipped."""
        req = self.client.factory.create('ns0:DoShippingGeneralRequestT')
        req._SellerID = self.seller_id
        req._OrderNo = order_no
        # TODO: implement this method -- ship_date is currently unused and
        # no shipping fields are populated on the request.
        return self.client.service.DoIpayShippingGeneral(req)

    def test(self):
        """Returns the IP address of the client (connectivity check)."""
        return self.client.service.test()
| {
"repo_name": "narrowcast/pyipay",
"path": "pyipay/__init__.py",
"copies": "1",
"size": "5132",
"license": "mit",
"hash": 6250943742706417000,
"line_mean": 41.7666666667,
"line_max": 80,
"alpha_frac": 0.6237334373,
"autogenerated": false,
"ratio": 3.784660766961652,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9824108727291991,
"avg_score": 0.016857095393932202,
"num_lines": 120
} |
# Auction item categories.
AUCTION_ITEM_CATEGORY_GENERAL = 1
AUCTION_ITEM_CATEGORY_ELECTRONICS = 2
AUCTION_ITEM_CATEGORY_MEDIA = 3
AUCTION_ITEM_CATEGORY_CLOTHING = 4
# (value, label) pairs for choice fields; display order is intentional
# (General listed last).
AUCTION_ITEM_CATEGORY_CHOICES = (
    (AUCTION_ITEM_CATEGORY_ELECTRONICS, 'Electronics'),
    (AUCTION_ITEM_CATEGORY_MEDIA, 'Media'),
    (AUCTION_ITEM_CATEGORY_CLOTHING, 'Clothing'),
    (AUCTION_ITEM_CATEGORY_GENERAL, 'General'),
)
# Lifecycle states of an auction item.
AUCTION_ITEM_STATUS_IDLE = 1
AUCTION_ITEM_STATUS_RUNNING = 2
AUCTION_ITEM_STATUS_ON_HOLD = 3
AUCTION_ITEM_STATUS_SOLD = 4
AUCTION_ITEM_STATUS_EXPIRED = 5
AUCTION_ITEM_STATUS_DISPUTED = 6
AUCTION_ITEM_STATUS_CHOICES = (
    (AUCTION_ITEM_STATUS_IDLE, 'Idle'),
    (AUCTION_ITEM_STATUS_RUNNING, 'Running'),
    (AUCTION_ITEM_STATUS_ON_HOLD, 'On Hold'),
    (AUCTION_ITEM_STATUS_SOLD, 'Sold'),
    (AUCTION_ITEM_STATUS_EXPIRED, 'Expired'),
    (AUCTION_ITEM_STATUS_DISPUTED, 'Disputed'),
)
# Supported shipping carriers.
AUCTION_EVENT_SHIPPING_USPS = 1
AUCTION_EVENT_SHIPPING_FEDEX = 2
AUCTION_EVENT_SHIPPING_UPS = 3
AUCTION_EVENT_SHIPPING_DHL = 4
AUCTION_EVENT_SHIPPING_CHOICES = (
    (AUCTION_EVENT_SHIPPING_USPS, 'USPS'),
    (AUCTION_EVENT_SHIPPING_FEDEX, 'FedEx'),
    (AUCTION_EVENT_SHIPPING_UPS, 'UPS'),
    (AUCTION_EVENT_SHIPPING_DHL, 'DHL'),
)
# Item condition grades.
AUCTION_ITEM_CONDITION_USED = 1
AUCTION_ITEM_CONDITION_USED_LIKE_NEW = 2
AUCTION_ITEM_CONDITION_NEW = 3
AUCTION_ITEM_CONDITION_CHOICES = (
    (AUCTION_ITEM_CONDITION_USED, 'Used'),
    (AUCTION_ITEM_CONDITION_USED_LIKE_NEW, 'Used Like New'),
    (AUCTION_ITEM_CONDITION_NEW, 'New'),
)
# Payment states for a completed sale.
SALES_PAYMENT_STATUS_PROCESSING = 1
SALES_PAYMENT_STATUS_CLEARED = 2
SALES_PAYMENT_STATUS_DISPUTED = 3
SALES_PAYMENT_STATUS_REFUNDED = 4
SALES_PAYMENT_STATUS_CHOICES = (
    (SALES_PAYMENT_STATUS_PROCESSING, 'Processing'),
    (SALES_PAYMENT_STATUS_CLEARED, 'Cleared'),
    (SALES_PAYMENT_STATUS_DISPUTED, 'Disputed'),
    (SALES_PAYMENT_STATUS_REFUNDED, 'Refunded'),
)
# Sort keys: maps a public sort name to the ORM ordering expression
# ("-" prefix means descending).
AUCTION_EVENT_SORTING_TITLE = 'title'
AUCTION_EVENT_SORTING_CONDITION = 'condition'
AUCTION_EVENT_SORTING_PRICE_ASC = 'price_asc'
AUCTION_EVENT_SORTING_PRICE_DESC = 'price_desc'
AUCTION_EVENT_SORTING_END_TIME_ASC = 'end_time_asc'
AUCTION_EVENT_SORTING_END_TIME_DESC = 'end_time_desc'
AUCTION_EVENT_SORTING_CHOICES = (
    (AUCTION_EVENT_SORTING_TITLE, 'item__title'),
    (AUCTION_EVENT_SORTING_CONDITION, 'item__condition'),
    (AUCTION_EVENT_SORTING_PRICE_ASC, 'starting_price'),
    (AUCTION_EVENT_SORTING_PRICE_DESC, '-starting_price'),
    (AUCTION_EVENT_SORTING_END_TIME_ASC, 'end_time'),
    (AUCTION_EVENT_SORTING_END_TIME_DESC, '-end_time'),
) | {
"repo_name": "tarequeh/little-ebay",
"path": "lebay/apps/lebay/constants.py",
"copies": "1",
"size": "2515",
"license": "mit",
"hash": 921785972464057500,
"line_mean": 32.1052631579,
"line_max": 60,
"alpha_frac": 0.7161033797,
"autogenerated": false,
"ratio": 2.628004179728318,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.8776324465329434,
"avg_score": 0.013556618819776715,
"num_lines": 76
} |
""" Auction Model """
from flask import g
from datetime import datetime
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm import object_session
from penelophant.database import db
from .Model import Model
from .User import User
from .Bid import Bid
class Auction(Model):
    """ Auction data representation.

    Polymorphic base model: concrete auction types subclass this (keyed on
    the `type` discriminator column) and override create_bid()/find_winner().
    """
    id = db.Column(db.Integer, primary_key=True)
    creator_user_id = db.Column(db.Integer,
        db.ForeignKey(User.id, ondelete='RESTRICT', onupdate='CASCADE'),
        nullable=False
    )
    # Times compared against datetime.utcnow() below, so stored as UTC.
    start_time = db.Column(db.TIMESTAMP, default=db.func.now())
    end_time = db.Column(db.TIMESTAMP)
    title = db.Column(db.String(100), nullable=False)
    description = db.Column(db.Text)
    start_price = db.Column(db.Numeric('13,2'), default=0)
    reserve = db.Column(db.Numeric('13,2'), default=0)
    # Polymorphic discriminator (see __mapper_args__ below).
    type = db.Column('type', db.String(50), nullable=False)
    creator = db.relationship(User, backref="auctions")
    # Subclasses may set True to hide other users' bids until the end.
    sealed_bids = False
    # Populated by the backref
    bids_rel = db.relationship(Bid, backref="auction")
    __mapper_args__ = {
        'polymorphic_on': type
    }

    @property
    def bids(self):
        """ Returns the bids rel if not currently sealed """
        # While a sealed auction is running, expose only the caller's bids.
        if self.sealed_bids and not self.has_ended:
            return self.my_bids
        return self.bids_rel

    @property
    def reserve_met(self):
        """ Return whether or not the reserve has been met """
        highest_bid = self.get_highest_bid()
        if highest_bid is None:
            return False
        return highest_bid.price >= self.reserve

    @property
    def has_ended(self):
        """ Whether or not the auction has ended """
        # NOTE(review): end_time is nullable; comparing None with a datetime
        # raises TypeError -- confirm callers guard against unset end_time.
        return self.end_time < datetime.utcnow()

    @property
    def has_started(self):
        """ Whether or not the auction has started """
        return self.start_time <= datetime.utcnow()

    @property
    def current_price(self):
        """ What the current price is (start price until a bid exists) """
        highest_bid = self.get_highest_bid()
        if highest_bid is None:
            return self.start_price
        return highest_bid.price

    @property
    def posted_bids(self):
        """ Get posted bids for the auction (None when bids are sealed) """
        if self.sealed_bids:
            return None
        # Highest price first; ties broken by most recent bid.
        return object_session(self)\
            .query(Bid)\
            .with_parent(self)\
            .order_by(Bid.price.desc())\
            .order_by(Bid.bid_time.desc())\
            .all()

    @property
    def my_bids(self):
        """ Return the logged in user's bid for the auction """
        # Requires a logged-in user in the Flask request context.
        if not g.user:
            return None
        return object_session(self)\
            .query(Bid)\
            .with_parent(self)\
            .filter(Bid.user == g.user)\
            .order_by(Bid.price.desc())\
            .order_by(Bid.bid_time.desc())\
            .all()

    @property
    def highest_bid(self):
        """ Return the highest bid (hidden while a sealed auction runs) """
        if self.sealed_bids and not self.has_ended:
            return None
        return self.get_highest_bid()

    def create_bid(self, bid):
        """ Create bid handler, returns (bid, message). Return None for bid if it shouldn't be made """
        return bid, None

    def find_winner(self):
        """ Determine winner logic and return (winning_bid, invoice_amount) """
        # Base implementation has no winner; subclasses override this.
        return None, None

    def get_highest_bid(self):
        """ Return the highest bid, or None when there are no bids """
        # Ties on price go to the *earliest* bid (bid_time ascending).
        try:
            return object_session(self)\
                .query(Bid)\
                .with_parent(self)\
                .order_by(Bid.price.desc())\
                .order_by(Bid.bid_time.asc())\
                .first()
        except NoResultFound:
            # Defensive: Query.first() returns None rather than raising.
            return None
| {
"repo_name": "kevinoconnor7/penelophant",
"path": "penelophant/models/Auction.py",
"copies": "1",
"size": "3339",
"license": "apache-2.0",
"hash": 3380692652957668000,
"line_mean": 24.4885496183,
"line_max": 99,
"alpha_frac": 0.6445043426,
"autogenerated": false,
"ratio": 3.4745057232049947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4619010065804995,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.