text stringlengths 0 1.05M | meta dict |
|---|---|
"""A module to provide decorators which change methods"""
import os
import sys

from six import StringIO
def _represent_arguments(*arguments, **keyword_arguments):
"""Represent the aruments in a form suitable as a key (hashable)
And which will be recognisable to user in error messages
>>> print(_represent_arguments([1, 2], **{'fred':'here'}))
[1, 2], fred='here'
"""
argument_strings = [repr(a) for a in arguments]
keyword_strings = [
'='.join((k, repr(v))) for k, v in keyword_arguments.items()]
return ', '.join(argument_strings + keyword_strings)
def memoize(method):
    """A new method which acts like the given method but memoizes arguments

    See https://en.wikipedia.org/wiki/Memoization for the general idea

    >>> @memoize
    ... def test(arg):
    ...     print('called')
    ...     return arg + 1
    >>> test(1)
    called
    2
    >>> test(2)
    called
    3
    >>> test(1)
    2

    The returned method also has an attached method "invalidate"
        which removes given values from the cache
        Or empties the cache if no values are given

    >>> test.invalidate(2)
    >>> test(1)
    2
    >>> test(2)
    called
    3
    """
    # The cache lives on the wrapped method so all closures below share it
    method.cache = {}

    def invalidate(*arguments, **keyword_arguments):
        """Drop the given arguments from the cache

        With no arguments at all, empties the whole cache.

        Raises:
            KeyError: if the given arguments were never cached.
        """
        key = _represent_arguments(*arguments, **keyword_arguments)
        if not key:
            method.cache = {}
        elif key in method.cache:
            del method.cache[key]
        else:
            # BUG FIX: corrected typo "prevously" -> "previously"
            raise KeyError(f'Not previously cached: {method.__name__}({key})')

    def new_method(*arguments, **keyword_arguments):
        """Cache the arguments and return values of the call

        The key cached is the repr() of arguments
            This allows more types of values to be used as keys to the cache
            Such as lists and tuples
        """
        key = _represent_arguments(*arguments, **keyword_arguments)
        if key not in method.cache:
            method.cache[key] = method(*arguments, **keyword_arguments)
        return method.cache[key]

    new_method.invalidate = invalidate
    new_method.__doc__ = method.__doc__
    new_method.__name__ = f'memoize({method.__name__})'
    return new_method
def debug(method):
    """Decorator to run the given method under a debugger

    Prefers pudb when installed, falling back to the standard pdb.
    """
    def new_method(*args, **kwargs):
        """Run the wrapped method under the debugger and return its result"""
        import pdb
        import sys  # BUG FIX: sys was referenced below but never imported
        try:
            import pudb
        except ImportError:
            pudb = pdb
        try:
            # BUG FIX: the original discarded runcall's return value, so
            # debugged calls always returned None
            return pudb.runcall(method, *args, **kwargs)
        except pdb.bdb.BdbQuit:
            # pdb imports bdb, so pdb.bdb.BdbQuit resolves; raised when the
            # user quits the debugger
            sys.exit('Normal quit from debugger')
    new_method.__doc__ = method.__doc__
    new_method.__name__ = f'debug({method.__name__})'
    return new_method
def argparser(main_method, options, **kwargs):
    """Wrap main_method in an argparse-driven entry point

    Args:
        main_method: callable taking the parsed argparse Namespace.
        options: iterable of (method, flag, name, help) tuples. When
            `method` is truthy it is called with the parsed value for
            `name` (if that value is truthy) before main_method runs.
        **kwargs: extra keyword arguments forwarded to every
            parser.add_argument() call.

    Returns:
        A main(argv) callable that parses argv and dispatches.
    """
    from argparse import ArgumentParser

    def _destination(name):
        """Map an option name (possibly '--dashed') to its argparse attribute"""
        return name.lstrip('-').replace('-', '_')

    def main(argv):
        args = parser.parse_args(argv)
        for method, _flag, name, _help in options:
            if not method:
                continue
            # BUG FIX: argparse stores '--some-name' as attribute
            # 'some_name'; the original getattr used the raw dashed name
            value = getattr(args, _destination(name))
            if not value:
                continue
            method(value)
        return main_method(args)

    parser = ArgumentParser(description='Process some integers.')
    for _method, flag, name, help_text in options:
        # BUG FIX: add_argument takes name-or-flags as separate positional
        # arguments, not as a single list argument
        name_or_flags = [flag, name] if flag else [name]
        parser.add_argument(*name_or_flags, help=help_text, **kwargs)
    return main
def streamer(main_method):
    """Open streams for the arguments, falling back to stdin

    The returned main(arguments) builds a list of streams:
        - a StringIO of clipboard data when '-c' is among the arguments
        - otherwise an open file stream per argument that names a file
        - otherwise stdin, when there are no arguments at all
    and calls main_method(arguments, streams).

    NOTE(review): get_clipboard_data is not defined or imported in this
    module; the '-c' path presumably relies on it existing elsewhere -
    confirm before use.
    """
    def main(arguments):
        if arguments and '-c' in arguments:
            streams = [StringIO(get_clipboard_data())]
        else:
            # BUG FIX: the original used the Python 2 builtin file() and an
            # undefined name `argument` inside the comprehension
            streams = ([open(path) for path in arguments
                        if os.path.isfile(path)] if arguments else [])
        if not streams and not arguments:
            streams = [sys.stdin]
        return main_method(arguments, streams)
    return main
def old_streamer(main_method):
    """Open a stream for the first file in arguments, or stdin

    NOTE(review): this function appears to be dead/broken code kept for
    reference: it references `arguments`, which is not a parameter and is
    not defined in this scope, and it returns `method`, which is also
    undefined, so any call raises NameError. It also uses the Python 2
    builtin file(). The working replacement is streamer(); presumably this
    should be deleted - confirm with the module owner.
    """
    if not arguments:  # NameError: `arguments` is never defined in this scope
        return [sys.stdin]
    elif arguments[0] == '-c':
        return [StringIO(get_clipboard_data())]
    for argument in arguments:
        if os.path.isfile(argument):
            return file(argument, 'r')  # Python 2 builtin; open() in Python 3
    return method  # NameError: `method` is never defined in this scope
def globber(main_method, globs):
    """Recognise globs in args

    Wraps main_method so that main(arguments) receives the original
    arguments plus the subset of arguments that, treated as glob patterns,
    match at least one filesystem path.

    NOTE(review): the `globs` parameter is accepted but never used -
    presumably intended for default patterns; confirm with callers.
    """
    from glob import glob

    def main(arguments):
        # BUG FIX: the original filtered on an undefined name `pathname`;
        # each argument itself is the pattern to test
        matching = [arg for arg in arguments if glob(arg, recursive=True)]
        return main_method(arguments, matching)
    return main
| {
"repo_name": "jalanb/dotsite",
"path": "pysyte/decorators.py",
"copies": "1",
"size": "4433",
"license": "mit",
"hash": 8335084232060323000,
"line_mean": 30.2183098592,
"line_max": 113,
"alpha_frac": 0.6050078953,
"autogenerated": false,
"ratio": 4.08195211786372,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00030795350320276946,
"num_lines": 142
} |
"""A module to read input files.
"""
from dmrg_helpers.core.dmrg_exceptions import DMRGException
from dmrg_helpers.core.dmrg_logging import logger
from collections import Iterable
class InputFileReader(object):
    """A class to read input file data and extract parameter info.

    Parameters
    ----------
    watched_keywords: a list of strings (default to None).
        The list of keywords (i.e. parameter names) you want to extract from
        the file.
    data: a dictionary of strings to strings or string lists.
        The name of the parameters as key and its value as value.
    open_keywords: a stack with the keywords whose value has not been read yet.

    Examples
    --------
    #>>> from dmrg_helpers.extract.input_file_reader import InputFileReader
    #>>> # create a temporary input file
    #>>> with open('tmp.xml', 'w') as f:
    #...     f.writelines(['<param>\n', '1.0\n', '</param>'])
    #>>> reader = InputFileReader(['param'])
    #>>> reader.read(f.name)
    #>>> reader.append_data_to_file('estimators_with_data.dat')
    """
    def __init__(self, watched_keywords):
        """Store the keywords to watch, normalizing a lone string to a list.

        BUG FIX: the original tested against the Python 2-only name
        `basestring`, which raises NameError under Python 3. (On Python 2
        a unicode keyword would previously have been left as-is; it is now
        wrapped in a list like a str - confirm no caller depends on that.)
        """
        if (isinstance(watched_keywords, Iterable) and
                not isinstance(watched_keywords, str)):
            self.watched_keywords = watched_keywords
        else:
            # A single keyword is wrapped into a one-element list
            self.watched_keywords = []
            self.watched_keywords.append(watched_keywords)
        self.data = {}
        self.open_keywords = []

    @classmethod
    def get_keyword(cls, line):
        """Strips the XML stuff for the line and gets the parameter name.
        """
        word = line.strip()[1:-1]  # drop the surrounding '<' and '>'
        if word.startswith("/"):
            word = word[1:]  # a closing tag: drop the leading '/'
        return word

    def close_keyword(self, keyword):
        """Closes a keyword.

        Some keywords have an extra argument after the parameter name when they
        open, such as '<run n=0>', but the closing statement does not use the
        extra argument. To allow this, you have to call split.

        Raises:
            DMRGException: if the closing keyword does not match the last
                opened one.
        """
        tmp = self.open_keywords.pop().split()[0]
        if keyword != tmp:
            raise DMRGException("Bad input file")

    def open_keyword(self, keyword):
        """Opens a keyword.
        """
        self.open_keywords.append(keyword)
        return self.open_keywords[-1]

    def set_value(self, keyword, value):
        """Sets a value for an open keyword.

        Multi-token values are stored as a list of strings; single-token
        values are unwrapped to a plain string.
        """
        value = value.split()
        if len(value) == 1:
            value = value[0]
        self.data[keyword] = value

    def read(self, filename):
        """Reads an input file and extracts the parameters you're watching.

        Raises:
            DMRGException: if any watched keyword is missing from the file.

        Examples
        --------
        #>>> from dmrg_helpers.extract.input_file_reader import InputFileReader
        #>>> # create a temporary input file
        #>>> with open('tmp.xml', 'w') as f:
        #...     f.writelines(['<param>\n', '1.0\n', '</param>'])
        #>>> reader = InputFileReader(['param'])
        #>>> reader.read(f.name)
        #>>> reader.data['param']
        #'1.0'
        #>>> import os
        #>>> os.remove('tmp.xml')
        """
        opened_keyword = ''
        with open(filename, 'r') as f:
            lines = f.readlines()
        for line in lines:
            line = line.strip()  # get blanks off
            if line.startswith("<"):
                keyword = InputFileReader.get_keyword(line)
                if line.startswith("/", 1):
                    self.close_keyword(keyword)
                else:
                    opened_keyword = self.open_keyword(keyword)
            else:
                # Plain content line: record it if its tag is being watched
                if opened_keyword in self.watched_keywords:
                    self.set_value(opened_keyword, line)
        for k in self.watched_keywords:
            # Idiom fix: membership tests go directly against the dict
            if k not in self.data:
                raise DMRGException("Missing keyword")
        logger.info('Reading metadata from input file {0}'.format(filename))

    def get_data_as_metadata(self):
        """Get the dictionary with parameters and values as a formatted string.

        The metadata has to follow some format to be read by the FileReader
        class. Specifically, metadata lines start with '# META ', followed by
        the parameter name and value, separated by a whitespace.
        """
        metadata = []
        # BUG FIX: the original called dict.iteritems(), which only exists
        # on Python 2; items() behaves the same on both versions.
        for k, v in self.data.items():
            metadata.append('# META ' + str(k) + ' ' + str(v))
        return metadata

    def append_data_to_file(self, filename):
        """Appends the data to the file using the proper format for metadata.
        """
        with open(filename, 'a') as f:
            f.write('\n'.join(self.get_data_as_metadata()))
        logger.info('Metadata appended to {0}'.format(filename))

    def prepend_data_to_file(self, filename):
        """Prepends the data to the file using the proper format for metadata.

        This is slower than appending, as you have to read the whole file,
        keep it in memory, rewrite it starting with the metadata and then
        append the old stuff you read.
        """
        with open(filename, 'r') as f:
            lines = f.readlines()
        with open(filename, 'w') as f:
            f.write('\n'.join(self.get_data_as_metadata()))
            f.write('\n')
            f.writelines(lines)
        logger.info('Metadata prepended to {0}'.format(filename))
| {
"repo_name": "iglpdc/dmrg_helpers",
"path": "dmrg_helpers/extract/input_file_reader.py",
"copies": "1",
"size": "5321",
"license": "mit",
"hash": -2311839348766401000,
"line_mean": 36.4718309859,
"line_max": 79,
"alpha_frac": 0.5782747604,
"autogenerated": false,
"ratio": 4.163536776212832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5241811536612833,
"avg_score": null,
"num_lines": null
} |
"""A module to receive data from UR CB2 robots."""
# The MIT License (MIT)
#
# Copyright (c) 2016 GTRC.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import struct
import array
import threading
class URReceiver(object):
    """A class to receive and process data from a UR Robot

    The receiving and processing can be run in a separate thread by calling
    start(). The stop() command must be called before exiting to halt the
    additional thread. Alternatively, receive(), decode(), and
    print_parsed_data() can be called in sequence in order to receive,
    decode, and print data. One should not call receive(), decode(), or any
    of the print methods, if a separate thread is being used. You should
    never write to any of the data fields externally, however you can read
    from them. Python's atomic read/write architecture should prevent you
    from getting any half baked results from basic types, for all lists and
    tuples, you must lock using lock (recommend that you use `with lock:`
    paradigm.

    Attributes:
        clean_data: Double array of length 101 for all of the data returned by
            the robot
        raw_data: String of complete raw data packet
        __socket: The socket for communications
        clean_packets: The Integer number of packets which have been received
            cleanly
        stub_packets: The Integer number of packets which have been received
            as stubs
        received: The total Integer number of complete data sets which have
            been received
        waiting_data: String to hold incomplete data sets
        new_data: Boolean whether new data is available for processing
        time: Double of time elapsed since the controller was started
        target_joint_positions: 6 member Double list of target joint positions
        target_joint_velocities: 6 member Double list of target joint velocities
        target_joint_accelerations: 6 member Double list of target joint
            accelerations
        target_joint_currents: 6 member Double list of target joint currents
        target_joint_moments: 6 member Double list of target joint moments as
            torques
        actual_joint_positions: 6 member Double list of actual joint positions
        actual_joint_velocities: 6 member Double list of actual joint velocities
        actual_joint_currents: 6 member Double list of actual joint currents
        tool_accelerometer: 3 member Double list of ool x,y and z accelerometer
            values (software version 1.7)
        force_tcp: 6 member Double list of generalised forces in the TCP
        position: 6 member Double list of cartesian coordinates of the tool:
            (x,y,z,rx,ry,rz), where rx, ry and rz is a rotation vector
            representation of the tool orientation
        tool_speed: 6 member Double list of speed of the tool given in cartesian
            coordinates
        digital_inputs: Current state of the digital inputs. NOTE: these are
            bits encoded as int64_t, e.g. a value of 5 corresponds to bit 0 and
            bit 2 set high
        joint_temperature: 6 member Double list of temperature of each joint in
            degrees celsius
        controller_period: Double of controller real time thread execution time
        robot_control_mode: Double of robot control mode (see
            PolyScopeProgramServer on the "How to" page
        joint_control_modes: 6 member Double list of joint control modes (see
            PolyScopeProgramServer on the "How to" page) (only from software
            version 1.8 and on)
        run: Boolean on whether to run or not
        __receiving_thread: Thread object for running the receiving and parsing
            loops
        verbose: Boolean defining whether or not to print data
        lock: A threading lock which is used to protect data from race
            conditions
        _is_stopped: A boolean specifying whether the robot is stopped
    """

    # Format specifier:
    # ! : network (big endian)
    # I : unsigned int, message size
    # 85d : 85 doubles
    # q : int64_t for digital inputs
    # 15d : 15 doubles
    #: Format spec for complete data packet
    format = struct.Struct('! I 85d q 15d')
    #: The format spec for the packet length field
    formatLength = struct.Struct('! I')
    #: The width to be given to name items when printing out
    name_width = 30
    #: The precision for printing data
    precision = 7
    # Fixed-width signed float format, e.g. "+12.3456700" for precision 7
    double_format_string = "{:+0"+str(precision+4)+"."+str(precision)+"f}"

    def __init__(self, open_socket, verbose=False):
        """Construct a UR Robot connection given connection parameters

        Args:
            open_socket (socket.socket): The socket to use for communications.
            verbose (bool): Whether to print received data in main loop
        """
        self.clean_data = array.array('d', [0] * 101)
        self.raw_data = ''
        self.__socket = open_socket
        self.clean_packets = 0
        self.stub_packets = 0
        self.received = 0
        self.waiting_data = ''
        self.new_data = False
        self.time = 0.0
        self.target_joint_positions = [0.0]*6
        self.target_joint_velocities = [0.0]*6
        self.target_joint_accelerations = [0.0]*6
        self.target_joint_currents = [0.0]*6
        self.target_joint_moments = [0.0]*6
        self.actual_joint_positions = [0.0]*6
        self.actual_joint_velocities = [0.0]*6
        self.actual_joint_currents = [0.0]*6
        self.tool_accelerometer = [0.0]*3
        self.force_tcp = [0.0]*6
        self.position = [0.0]*6
        self.tool_speed = [0.0]*6
        self.digital_inputs = 0
        self.joint_temperature = [0.0]*6
        self.controller_period = 0.0
        self.robot_control_mode = 0.0
        self.joint_control_modes = [0.0]*6
        self.run = False
        self.__receiving_thread = None
        self.verbose = verbose
        self.lock = threading.Lock()
        self._is_stopped = False
        if verbose:
            print "\033[2J"  # Clear screen

    def __del__(self):
        """Shutdown side thread and print aggregated connection stats"""
        self.stop()
        print "Received: "+str(self.received) + " data sets"
        print "Received: "+str(self.clean_packets) + " clean packets"
        print "Received: "+str(self.stub_packets) + " stub packets"

    def decode(self):
        """Decode the data stored in the class's rawData field.

        Only process the data if there is new data available. Unset the
        self.newData flag upon completion. Note, this will lock the data set
        and block execution in a number of other functions
        """
        with self.lock:
            if self.new_data:
                # Unpack into 102 fields: [0] message size, [1..85] doubles,
                # [86] int64 digital inputs, [87..101] doubles
                self.clean_data = self.format.unpack(self.raw_data)
                self.time = self.clean_data[1]
                self.target_joint_positions = self.clean_data[2:8]
                self.target_joint_velocities = self.clean_data[8:14]
                self.target_joint_accelerations = self.clean_data[14:20]
                self.target_joint_currents = self.clean_data[20:26]
                self.target_joint_moments = self.clean_data[26:32]
                self.actual_joint_positions = self.clean_data[32:38]
                self.actual_joint_velocities = self.clean_data[38:44]
                self.actual_joint_currents = self.clean_data[44:50]
                self.tool_accelerometer = self.clean_data[50:53]
                # unused = self.clean_data[53:68]
                self.force_tcp = self.clean_data[68:74]
                self.position = self.clean_data[74:80]
                self.tool_speed = self.clean_data[80:86]
                self.digital_inputs = self.clean_data[86]
                self.joint_temperature = self.clean_data[87:93]
                self.controller_period = self.clean_data[93]
                # test value = self.clean_data[94]
                self.robot_control_mode = self.clean_data[95]
                self.joint_control_modes = self.clean_data[96:102]
                self.new_data = False
        # NOTE(review): placed outside the `with` block above because
        # is_stopped() acquires self.lock, which is a non-reentrant
        # threading.Lock - calling it while holding the lock would deadlock.
        self._is_stopped = self.is_stopped()

    def receive(self):
        """Receive data from the UR Robot.

        If an entire data set is not received, then store the data in a
        temporary location (self.waitingData). Once a complete packet is
        received, place the complete packet into self.rawData and set the
        newData flag. Note, this will lock the data set and block execution in a
        number of other functions once a full data set is built.
        """
        incoming_data = self.__socket.recv(812)  # expect to get 812 bytes
        if len(incoming_data) == 812:
            self.clean_packets += 1
        else:
            self.stub_packets += 1
        # The first 4 bytes carry the declared packet length; a value of 812
        # marks the start of a fresh packet, otherwise this is a continuation
        if self.formatLength.unpack(incoming_data[0:4])[0] == 812:
            self.waiting_data = incoming_data
        else:
            self.waiting_data += incoming_data
        if len(self.waiting_data) == 812:
            with self.lock:
                self.raw_data = self.waiting_data
                self.received += 1
                self.new_data = True

    def print_raw_data(self):
        """Print the raw data which is stored in self.raw_data.

        Note, this will lock the data set and block execution in a number of
        other functions
        """
        with self.lock:
            print "Received (raw): "+self.raw_data + "\n"

    def print_data(self):
        """Print the processed data stored in self.clean_data

        Note, this will lock the data set and block execution in a number of
        other functions
        """
        with self.lock:
            print "Received (unpacked):\n "
            print self.clean_data
            print "\n"

    def output_data_item(self, name, values):
        """Output item with name and values.

        Formatting is specified by self.name_width and self.precision.

        Args:
            name (str): The name of the value
            values (float, int, tuple of float, list of float): The list of
                values
        """
        to_print = ("%-"+str(self.name_width)+"s") % name
        if isinstance(values, (list, tuple)):
            to_print += ": [%s]" % ', '.join(self.double_format_string.format(x)
                                             for x in values)
        elif isinstance(values, (int, bool)):
            to_print += ": [%s]" % str(values)
        elif isinstance(values, float):
            to_print += ": [%s]" % self.double_format_string.format(values)
        else:
            print "I don't know that data type: " + str(type(values))
        print to_print

    def print_parsed_data(self):
        """Print the parsed data

        Note, this will lock the data set and block execution in a number of
        other functions
        """
        with self.lock:
            # ANSI escape: move the cursor home so repeated prints overwrite
            print "\033[H"
            self.output_data_item("Time since controller turn on",
                                  self.time)
            self.output_data_item("Target joint positions",
                                  self.target_joint_positions)
            self.output_data_item("Target joint velocities",
                                  self.target_joint_velocities)
            self.output_data_item("Target joint accelerations",
                                  self.target_joint_accelerations)
            self.output_data_item("Target joint currents",
                                  self.target_joint_currents)
            self.output_data_item("Target joint moments (torque)",
                                  self.target_joint_moments)
            self.output_data_item("Actual joint positions",
                                  self.actual_joint_positions)
            self.output_data_item("Actual joint velocities",
                                  self.actual_joint_velocities)
            self.output_data_item("Actual joint currents",
                                  self.actual_joint_currents)
            self.output_data_item("Tool accelerometer values",
                                  self.tool_accelerometer)
            self.output_data_item("Generalised forces in the TCP",
                                  self.force_tcp)
            self.output_data_item("Cartesian tool position",
                                  self.position)
            self.output_data_item("Cartesian tool speed",
                                  self.tool_speed)
            self.output_data_item("Joint temperatures (deg C)",
                                  self.joint_temperature)
            self.output_data_item("Controller period",
                                  self.controller_period)
            self.output_data_item("Robot control mode",
                                  self.robot_control_mode)
            self.output_data_item("Joint control modes",
                                  self.joint_control_modes)
            # Digital inputs are bit-encoded; print bit index then bit value
            print ((("%-"+str(self.name_width)+"s") % "Digital Input Number") +
                   ": " + '|'.join('{:^2d}'.format(x) for x in range(0, 18)))
            print ((("%-"+str(self.name_width)+"s") % "Digital Input Value: ") +
                   ": " + '|'.join('{:^2s}'.format(x) for x in '{:018b}'.format(
                       self.digital_inputs)[::-1]))
            self.output_data_item("Is Stopped:",
                                  self._is_stopped)

    def start(self):
        """Spawn a new thread for receiving and run it"""
        if (self.__receiving_thread is None or
                not self.__receiving_thread.is_alive()):
            self.run = True
            self.__receiving_thread = threading.Thread(group=None,
                                                       target=self.loop,
                                                       name='receiving_thread',
                                                       args=(),
                                                       kwargs={})
            self.__receiving_thread.start()

    def loop(self):
        """The main loop which receives, decodes, and optionally prints data"""
        while self.run:
            self.receive()
            self.decode()
            if self.verbose:
                self.print_parsed_data()

    def stop(self):
        """Stops execution of the auxiliary receiving thread"""
        if self.__receiving_thread is not None:
            if self.__receiving_thread.is_alive():
                self.verbose_print('attempting to shutdown auxiliary thread',
                                   '*')
                self.run = False  # Python writes like this are atomic
                self.__receiving_thread.join()
                # ANSI escapes: move cursor far left/right before the rule
                self.verbose_print('\033[500D')
                self.verbose_print('\033[500C')
                self.verbose_print('-', '-', 40)
                if self.__receiving_thread.is_alive():
                    self.verbose_print('failed to shutdown auxiliary thread',
                                       '*')
                else:
                    self.verbose_print('shutdown auxiliary thread', '*')
            else:
                self.verbose_print('auxiliary thread already shutdown', '*')
        else:
            self.verbose_print('no auxiliary threads exist', '*')

    def verbose_print(self, string_input, emphasis='', count=5):
        """Print input if verbose is set

        Args:
            string_input (str): The input string to be printed.
            emphasis (str): Emphasis character to be placed around input.
            count (int): Number of emphasis characters to use.
        """
        if self.verbose:
            if emphasis == '':
                print string_input
            else:
                print (emphasis*count + " " + string_input + " " +
                       emphasis * count)

    def is_stopped(self, error=0.005):
        """Check whether the robot is stopped.

        Check whether the joint velocities are all below some error. Note, this
        will lock the data set and block execution in a number of other
        functions

        Args:
            error (float): The error range to define "stopped"

        Returns: Boolean, whether the robot is stopped.
        """
        with self.lock:
            # Targets must be exactly zero; actuals merely within tolerance
            to_return = (
                all(v == 0 for v in self.target_joint_velocities) and
                all(v < error for v in self.actual_joint_velocities))
        return to_return

    def at_goal(self, goal, cartesian, error=0.005):
        """Check whether the robot is at a goal point.

        Check whether the differences between the joint or cartesian
        coordinates are all below some error. This can be used to
        determine if a move has been completed. It can also be used to
        create blends by beginning the next move prior to the current one
        reaching its goal. Note, this will lock the data set and block execution
        in a number of other functions.

        Args:
            goal (6 member tuple or list of floats): The goal to check against
            cartesian (bool): Whether the goal is in cartesian coordinates or
                not (in which case joint coordinates)
            error (float): The error range in which to consider an object at
                its goal, in meters for cartesian space and radians for axis
                space.

        Returns: Boolean, whether the current position is within the error
            range of the goal.
        """
        with self.lock:
            to_return = (
                all(abs(g-a) < error for g, a in zip(self.position, goal))
                if cartesian else
                all(abs(g-a) < error for g, a in
                    zip(self.actual_joint_positions, goal)))
        return to_return

    def __enter__(self):
        """Enters the URRobot receiver from a with statement"""
        return self

    def __exit__(self, *_):
        """Exits at the end of a context manager statement by destructing."""
        self.stop()
| {
"repo_name": "IRIM-Technology-Transition-Lab/ur_cb2",
"path": "ur_cb2/receive/cb2_receive.py",
"copies": "1",
"size": "19023",
"license": "mit",
"hash": -9197845572874843000,
"line_mean": 43.76,
"line_max": 80,
"alpha_frac": 0.5854491931,
"autogenerated": false,
"ratio": 4.433232346772314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003550168547242003,
"num_lines": 425
} |
"""A module to retrieve astro data from Hong Kong Observatory"""
import json
import requests
import xmltodict
BASE_URL = 'http://pda.weather.gov.hk/'
URL_UC = 'locspc/android_data/earthquake/eq_app_uc.xml'
URL_EN = 'locspc/android_data/earthquake/eq_app_e.xml'
def earthquake(lang='UC'):
    """A function to retrieve earthquake data from Hong Kong Observatory

    Args:
        lang: 'UC' for the Chinese feed or 'EN' for the English feed.

    Returns:
        dict with keys 'result' (parsed feed as nested dicts, or '' on
        failure) and 'status': 1 success, 2 parse failure, 5 network
        failure, 0 unsupported language.
    """
    response = {}
    if lang in ['UC', 'EN']:
        try:
            url = URL_UC if lang == 'UC' else URL_EN
            # BUG FIX: a timeout stops a stalled connection from hanging
            # forever; it surfaces as RequestException -> status 5 below
            data = requests.get(BASE_URL + url, timeout=30)
            data.encoding = 'utf8'
            # Round-trip through json to turn the xmltodict OrderedDicts
            # into plain dicts
            data = json.loads(json.dumps(xmltodict.parse(data.text)))
            response['result'] = data
            response['status'] = 1
        except IndexError:
            response['result'] = ''
            response['status'] = 2
        except requests.exceptions.RequestException:
            response['result'] = ''
            response['status'] = 5
    else:
        response['result'] = ''
        response['status'] = 0
    return response
| {
"repo_name": "nhho/hko",
"path": "hko/earthquake.py",
"copies": "2",
"size": "1088",
"license": "mit",
"hash": -4383841921384452000,
"line_mean": 27.6315789474,
"line_max": 70,
"alpha_frac": 0.5643382353,
"autogenerated": false,
"ratio": 3.651006711409396,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5215344946709396,
"avg_score": null,
"num_lines": null
} |
"""A module to retrieve local weather data from Hong Kong Observatory"""
import json
import pkg_resources
from operator import itemgetter
import requests
from hko.distance_calculation import distance_calculation
with open(pkg_resources.resource_filename(__name__, 'assets/grid_location.json')) as f:
GRID = json.load(f)
BASE_URL = 'http://pda.weather.gov.hk/'
def local_weather(lat, lng):
    """A function to retrieve local weather data from Hong Kong Observatory

    Args:
        lat: latitude as a float in [-90, 90].
        lng: longitude as a float in [-180, 180].

    Returns:
        dict with 'result' ('' on failure) and 'status': 1 success (plus
        'place'), 2 parse failure, 5 network failure, 3 no station within
        10 km, 0 invalid coordinates.
    """
    response = {}
    if isinstance(lat, float) and isinstance(lng, float) and\
            -90 <= lat <= 90 and -180 <= lng <= 180:
        # BUG FIX: work on per-call copies of the station records. The
        # original wrote the computed distance straight into the shared
        # module-level GRID dicts, permanently mutating them on every call.
        stations = [dict(station) for station in GRID]
        for station in stations:
            station['dis'] = distance_calculation(
                lat, lng, float(station['lat']), float(station['lng']))
        nearest = sorted(stations, key=itemgetter('dis'))
        if nearest[0]['dis'] < 10:
            try:
                grid = nearest[0]['grid']
                url = 'locspc/android_data/gridData/{}_tc.xml'.format(grid)
                # timeout prevents an unbounded hang; surfaces as status 5
                grid_data = json.loads(
                    requests.get(BASE_URL + url, timeout=30).text)
                response['status'] = 1
                response['result'] = grid_data
                response['place'] = nearest[0]['name']
            except IndexError:
                response['result'] = ''
                response['status'] = 2
            except requests.exceptions.RequestException:
                response['result'] = ''
                response['status'] = 5
        else:
            response['result'] = ''
            response['status'] = 3
    else:
        response['result'] = ''
        response['status'] = 0
    return response
| {
"repo_name": "slgphantom/hko",
"path": "hko/local_weather.py",
"copies": "2",
"size": "1650",
"license": "mit",
"hash": -6523315131180840000,
"line_mean": 32.6734693878,
"line_max": 87,
"alpha_frac": 0.5648484848,
"autogenerated": false,
"ratio": 4.034229828850855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5599078313650856,
"avg_score": null,
"num_lines": null
} |
"""A module to retrieve major cities weather forecast data from Hong Kong Observatory"""
import requests
BASE_URL = 'http://pda.weather.gov.hk/'
URL_ASIA_UC = 'locspc/android_data/asis_wwic.xml'
URL_ASIA_EN = 'locspc/android_data/asis_wwi.xml'
URL_AFRICA_UC = 'locspc/android_data/africa_wwic.xml'
URL_AFRICA_EN = 'locspc/android_data/africa_wwi.xml'
URL_AUSTRALIASOUTHPACIFIC_UC = 'locspc/android_data/australia_south_pacific_wwic.xml'
URL_AUSTRALIASOUTHPACIFIC_EN = 'locspc/android_data/australia_south_pacific_wwi.xml'
URL_EUROPE_UC = 'locspc/android_data/europe_wwic.xml'
URL_EUROPE_EN = 'locspc/android_data/europe_wwi.xml'
URL_NORTHCENTRALAMERICA_UC = 'locspc/android_data/north_central_america_wwic.xml'
URL_NORTHCENTRALAMERICA_EN = 'locspc/android_data/north_central_america_wwi.xml'
URL_SOUTHAMERICA_UC = 'locspc/android_data/south_america_wwic.xml'
URL_SOUTHAMERICA_EN = 'locspc/android_data/south_america_wwi.xml'
def asia(lang='UC'):
    """Retrieve weather forecasts for major Asian cities from the Hong Kong
    Observatory.

    Args:
        lang: 'UC' for the Chinese feed or 'EN' for the English feed.

    Returns:
        A list of dicts, one per city, with keys 'place', 'mintemp',
        'maxtemp', 'status' and 'photo'.
    """
    if lang == 'UC':
        data = requests.get(BASE_URL + URL_ASIA_UC)
    if lang == 'EN':
        data = requests.get(BASE_URL + URL_ASIA_EN)
    data.encoding = 'utf8'
    field_names = ('place', 'mintemp', 'maxtemp', 'status', 'photo')
    # Records are '@'-separated; fields within a record are '#'-separated
    return [
        {name: fields[index] for index, name in enumerate(field_names)}
        for fields in (record.split('#') for record in data.text.split('@'))
    ]
def africa(lang='UC'):
    """Retrieve weather forecasts for major African cities from the Hong
    Kong Observatory.

    Args:
        lang: 'UC' for the Chinese feed or 'EN' for the English feed.

    Returns:
        A list of dicts, one per city, with keys 'place', 'mintemp',
        'maxtemp', 'status' and 'photo'.
    """
    if lang == 'UC':
        data = requests.get(BASE_URL + URL_AFRICA_UC)
    if lang == 'EN':
        data = requests.get(BASE_URL + URL_AFRICA_EN)
    data.encoding = 'utf8'
    field_names = ('place', 'mintemp', 'maxtemp', 'status', 'photo')
    # Records are '@'-separated; fields within a record are '#'-separated
    return [
        {name: fields[index] for index, name in enumerate(field_names)}
        for fields in (record.split('#') for record in data.text.split('@'))
    ]
def australia_south_pacific(lang='UC'):
    """Retrieve weather forecasts for major Australian and South Pacific
    cities from the Hong Kong Observatory.

    Args:
        lang: 'UC' for the Chinese feed or 'EN' for the English feed.

    Returns:
        A list of dicts, one per city, with keys 'place', 'mintemp',
        'maxtemp', 'status' and 'photo'.
    """
    if lang == 'UC':
        data = requests.get(BASE_URL + URL_AUSTRALIASOUTHPACIFIC_UC)
    if lang == 'EN':
        data = requests.get(BASE_URL + URL_AUSTRALIASOUTHPACIFIC_EN)
    data.encoding = 'utf8'
    field_names = ('place', 'mintemp', 'maxtemp', 'status', 'photo')
    # Records are '@'-separated; fields within a record are '#'-separated
    return [
        {name: fields[index] for index, name in enumerate(field_names)}
        for fields in (record.split('#') for record in data.text.split('@'))
    ]
def europe(lang='UC'):
    """Retrieve weather forecasts for major European cities from the Hong
    Kong Observatory.

    Args:
        lang: 'UC' for the Chinese feed or 'EN' for the English feed.

    Returns:
        A list of dicts, one per city, with keys 'place', 'mintemp',
        'maxtemp', 'status' and 'photo'.
    """
    if lang == 'UC':
        data = requests.get(BASE_URL + URL_EUROPE_UC)
    if lang == 'EN':
        data = requests.get(BASE_URL + URL_EUROPE_EN)
    data.encoding = 'utf8'
    field_names = ('place', 'mintemp', 'maxtemp', 'status', 'photo')
    # Records are '@'-separated; fields within a record are '#'-separated
    return [
        {name: fields[index] for index, name in enumerate(field_names)}
        for fields in (record.split('#') for record in data.text.split('@'))
    ]
def north_central_america(lang='UC'):
    """Retrieve weather forecasts for major North and Central American
    cities from the Hong Kong Observatory.

    Args:
        lang: 'UC' for the Chinese feed or 'EN' for the English feed.

    Returns:
        A list of dicts, one per city, with keys 'place', 'mintemp',
        'maxtemp', 'status' and 'photo'.
    """
    if lang == 'UC':
        data = requests.get(BASE_URL + URL_NORTHCENTRALAMERICA_UC)
    if lang == 'EN':
        data = requests.get(BASE_URL + URL_NORTHCENTRALAMERICA_EN)
    data.encoding = 'utf8'
    field_names = ('place', 'mintemp', 'maxtemp', 'status', 'photo')
    # Records are '@'-separated; fields within a record are '#'-separated
    return [
        {name: fields[index] for index, name in enumerate(field_names)}
        for fields in (record.split('#') for record in data.text.split('@'))
    ]
def south_america(lang='UC'):
    """Retrieve weather forecasts for major South American cities from the
    Hong Kong Observatory.

    Args:
        lang: 'UC' for the Chinese feed or 'EN' for the English feed.

    Returns:
        A list of dicts, one per city, with keys 'place', 'mintemp',
        'maxtemp', 'status' and 'photo'.
    """
    if lang == 'UC':
        data = requests.get(BASE_URL + URL_SOUTHAMERICA_UC)
    if lang == 'EN':
        data = requests.get(BASE_URL + URL_SOUTHAMERICA_EN)
    data.encoding = 'utf8'
    field_names = ('place', 'mintemp', 'maxtemp', 'status', 'photo')
    # Records are '@'-separated; fields within a record are '#'-separated
    return [
        {name: fields[index] for index, name in enumerate(field_names)}
        for fields in (record.split('#') for record in data.text.split('@'))
    ]
def major_city_forecast(lang='UC'):
    """A function to retrieve major cities weather forecast data from Hong
    Kong Observatory.

    Args:
        lang (str): 'UC' or 'EN'.

    Returns:
        dict: {'result': per-region forecasts or '', 'status': 1 on success,
        2 on a malformed feed (IndexError), 5 on a network failure,
        0 for an unrecognised language code}.
    """
    response = {}
    if lang not in ['UC', 'EN']:
        response['result'] = ''
        response['status'] = 0
        return response
    # Region key -> fetcher; iterated in the same order the original
    # performed its sequential calls.
    regions = (
        ('Asia', asia),
        ('Africa', africa),
        ('AustraliaSouthPacific', australia_south_pacific),
        ('Europe', europe),
        ('NorthCentralAmerica', north_central_america),
        ('SouthAmerica', south_america),
    )
    try:
        response['result'] = {}
        for region_key, fetch in regions:
            response['result'][region_key] = fetch(lang)
        response['status'] = 1
    except IndexError:
        response['result'] = ''
        response['status'] = 2
    except requests.exceptions.RequestException:
        response['result'] = ''
        response['status'] = 5
    return response
| {
"repo_name": "slgphantom/hko",
"path": "hko/major_city_forecast.py",
"copies": "2",
"size": "6125",
"license": "mit",
"hash": 8040165586452866000,
"line_mean": 31.579787234,
"line_max": 100,
"alpha_frac": 0.6011428571,
"autogenerated": false,
"ratio": 3.2001044932079417,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4801247350307941,
"avg_score": null,
"num_lines": null
} |
"""A module to retrieve rainfall nowcast data from Hong Kong Observatory"""
import json
import pkg_resources
import re
from operator import itemgetter
import requests
from hko.distance_calculation import distance_calculation
# Station lookup table, loaded once at import time from the packaged JSON
# asset. rainfall_nowcast() reads each entry's 'lat'/'lng' keys, so this is
# presumably a list of station dicts -- verify against the asset file.
with open(pkg_resources.resource_filename(__name__, 'assets/rainfall_nowcast_mapping.json')) as f:
    MAPPING = json.load(f)
# Root of the Hong Kong Observatory PDA endpoints used by this module.
BASE_URL = 'http://pda.weather.gov.hk/'
def rainfall_nowcast(lat, lng):
    """A function to retrieve rainfall nowcast data from Hong Kong Observatory.

    Args:
        lat (float): latitude, must be within [-90, 90]
        lng (float): longitude, must be within [-180, 180]

    Returns:
        dict: {'result': nowcast data or '', 'status': 1 on success,
        2 on a malformed feed, 3 when no mapped station is within 10
        units (presumably km -- depends on distance_calculation),
        5 on a network failure, 0 for invalid arguments}.
    """
    response = {}
    if isinstance(lat, float) and isinstance(lng, float) and\
            -90 <= lat <= 90 and -180 <= lng <= 180:
        # Bug fix: rank stations with a local (distance, entry) list instead
        # of writing a 'dis' key into the shared module-level MAPPING entries
        # (the original mutated MAPPING on every call).  key=itemgetter(0)
        # also avoids comparing the dict entries themselves on ties.
        ranked = sorted(
            ((distance_calculation(lat, lng, float(entry['lat']),
                                   float(entry['lng'])), entry)
             for entry in MAPPING),
            key=itemgetter(0))
        nearest_distance, nearest = ranked[0]
        if nearest_distance > 10:
            response['result'] = ''
            response['status'] = 3
            return response
        lat_2 = nearest['lat']
        lng_2 = nearest['lng']
        try:
            url = 'locspc/android_data/rainfallnowcast/{}_{}.xml'.format(float(lat_2), float(lng_2))
            data = requests.get(BASE_URL + url).content
            # Fields are delimited by '@' or '#'.
            data2 = re.split('[@#]', data.decode('utf-8'))
            temp = {}
            # Each window shares its boundary timestamp with the next one.
            temp['0-30'] = {'from_time': data2[0], 'to_time': data2[2], 'value': data2[1]}
            temp['30-60'] = {'from_time': data2[2], 'to_time': data2[4], 'value': data2[3]}
            temp['60-90'] = {'from_time': data2[4], 'to_time': data2[6], 'value': data2[5]}
            temp['90-120'] = {'from_time': data2[6], 'to_time': data2[8], 'value': data2[7]}
            temp['description_en'] = data2[9]
            temp['description_tc'] = data2[10]
            temp['description_sc'] = data2[11]
            response['result'] = temp
            response['status'] = 1
        except IndexError:
            response['result'] = ''
            response['status'] = 2
        except requests.exceptions.RequestException:
            response['result'] = ''
            response['status'] = 5
    else:
        response['result'] = ''
        response['status'] = 0
    return response
| {
"repo_name": "slgphantom/hko",
"path": "hko/rainfall_nowcast.py",
"copies": "2",
"size": "2226",
"license": "mit",
"hash": 6319064540505614000,
"line_mean": 36.7288135593,
"line_max": 100,
"alpha_frac": 0.5615453729,
"autogenerated": false,
"ratio": 3.483568075117371,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5045113448017371,
"avg_score": null,
"num_lines": null
} |
"""A module to retrieve serval days weather forecast data from Hong Kong Observatory"""
import json
import requests
# Root of the Hong Kong Observatory PDA endpoints.
BASE_URL = 'http://pda.weather.gov.hk/'
# Forecast feeds; 'UC' is presumably the Chinese feed and 'EN' English --
# matching the lang codes accepted by serval_days_weather_forecast().
URL_UC = 'locspc/android_data/fnd_uc.xml'
URL_EN = 'locspc/android_data/fnd_e.xml'
def serval_days_weather_forecast(lang='UC'):
    """Retrieve the several-day weather forecast from the Hong Kong
    Observatory.

    Args:
        lang (str): 'UC' or 'EN'.

    Returns:
        dict: {'result': parsed JSON or '', 'status': 1 on success, 2 on a
        malformed feed, 5 on a network failure, 0 for an unrecognised
        language code}.
    """
    response = {}
    if lang not in ['UC', 'EN']:
        response['result'] = ''
        response['status'] = 0
        return response
    try:
        feed = URL_UC if lang == 'UC' else URL_EN
        response['result'] = json.loads(requests.get(BASE_URL + feed).content)
        response['status'] = 1
    except IndexError:
        response['result'] = ''
        response['status'] = 2
    except requests.exceptions.RequestException:
        response['result'] = ''
        response['status'] = 5
    return response
| {
"repo_name": "slgphantom/hko",
"path": "hko/serval_days_weather_forecast.py",
"copies": "2",
"size": "1042",
"license": "mit",
"hash": 1325616572586007800,
"line_mean": 28.7714285714,
"line_max": 93,
"alpha_frac": 0.5777351248,
"autogenerated": false,
"ratio": 3.789090909090909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5366826033890909,
"avg_score": null,
"num_lines": null
} |
"""A module to retrieve serval days weather forecast data from Hong Kong Observatory"""
import json
import requests
# Root of the Hong Kong Observatory PDA endpoints.
BASE_URL = 'http://pda.weather.gov.hk/'
# South China coastal waters feeds ('sccw'); 'UC' presumably Chinese,
# plain file English -- matching the lang codes accepted below.
URL_UC = 'locspc/android_data/sccw_json_uc.xml'
URL_EN = 'locspc/android_data/sccw_json.xml'
def south_china_coastal_waters(lang='UC'):
    """Retrieve the South China coastal waters forecast from the Hong Kong
    Observatory.

    (The original docstring was copy-pasted from the several-day forecast
    module; this function fetches the 'sccw' feeds.)

    Args:
        lang (str): 'UC' or 'EN'.

    Returns:
        dict: {'result': parsed JSON or '', 'status': 1 on success, 2 on a
        malformed feed, 5 on a network failure, 0 for an unrecognised
        language code}.
    """
    response = {}
    if lang not in ['UC', 'EN']:
        response['result'] = ''
        response['status'] = 0
        return response
    try:
        feed = URL_UC if lang == 'UC' else URL_EN
        response['result'] = json.loads(requests.get(BASE_URL + feed).content)
        response['status'] = 1
    except IndexError:
        response['result'] = ''
        response['status'] = 2
    except requests.exceptions.RequestException:
        response['result'] = ''
        response['status'] = 5
    return response
| {
"repo_name": "nhho/hko",
"path": "hko/south_china_coastal_waters.py",
"copies": "2",
"size": "1050",
"license": "mit",
"hash": 8802484427759201000,
"line_mean": 29,
"line_max": 93,
"alpha_frac": 0.58,
"autogenerated": false,
"ratio": 3.736654804270463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006286266924564796,
"num_lines": 35
} |
"""A module to retrieve tide data from Hong Kong Observatory"""
import re
import requests
# Root of the Hong Kong Observatory PDA endpoints.
BASE_URL = 'http://pda.weather.gov.hk/'
# Astronomical tide feed parsed by tide() below.
URL = 'locspc/android_data/astro_tide.xml'
def tide():
    """Retrieve tide data from the Hong Kong Observatory.

    Returns:
        dict: {'result': {'low_tide_1'/'low_tide_2'/'high_tide_1'/
        'high_tide_2': {'value', 'time'}, 'date': ...} or '', 'status':
        1 on success, 2 on a malformed feed, 5 on a network failure}.
    """
    response = {}
    try:
        raw = requests.get(BASE_URL + URL).content
        # Fields are delimited by '@' or '#'.
        fields = re.split('[@#]', raw.decode('utf-8'))
        # Seed the keys first so the result preserves the historical
        # low/low/high/high insertion order.
        result = {name: {} for name in
                  ('low_tide_1', 'low_tide_2', 'high_tide_1', 'high_tide_2')}
        # Each tide occupies two consecutive fields: value then time.
        for name, offset in (('low_tide_1', 4), ('high_tide_1', 6),
                             ('low_tide_2', 8), ('high_tide_2', 10)):
            result[name] = {'value': fields[offset],
                            'time': fields[offset + 1]}
        result['date'] = fields[12]
        response['result'] = result
        response['status'] = 1
    except IndexError:
        response['result'] = ''
        response['status'] = 2
    except requests.exceptions.RequestException:
        response['result'] = ''
        response['status'] = 5
    return response
| {
"repo_name": "nhho/hko",
"path": "hko/tide.py",
"copies": "2",
"size": "1239",
"license": "mit",
"hash": -2916213204858627000,
"line_mean": 28.5,
"line_max": 69,
"alpha_frac": 0.5407586764,
"autogenerated": false,
"ratio": 3.1607142857142856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47014729621142853,
"avg_score": null,
"num_lines": null
} |
"""A module to retrieve weather warning data from Hong Kong Observatory"""
import json
import requests
# Weather-warning summary feeds live on the main HKO site, not the PDA host.
BASE_URL = 'http://www.weather.gov.hk/'
URL_UC = 'wxinfo/json/warnsumc.xml'  # presumably the Chinese summary
URL_EN = 'wxinfo/json/warnsum.xml'  # English summary
def weather_warning(lang='UC'):
    """Retrieve weather warning data from the Hong Kong Observatory.

    Args:
        lang (str): 'UC' or 'EN'.

    Returns:
        dict: {'result': parsed warning summary or '', 'status': 1 on
        success, 2 on a malformed feed, 5 on a network failure, 0 for an
        unrecognised language code}.
    """
    response = {}
    if lang not in ['UC', 'EN']:
        response['result'] = ''
        response['status'] = 0
        return response
    try:
        source = URL_UC if lang == 'UC' else URL_EN
        raw = requests.get(BASE_URL + source)
        # The feed is a JS assignment ("var weather_warning_summary = ...");
        # drop the prefix and the final two characters (presumably a closing
        # brace plus terminator) and re-append '}' to get plain JSON.
        payload = raw.text.replace('var weather_warning_summary = ', '')[:-2] + '}'
        response['result'] = json.loads(payload)
        response['status'] = 1
    except IndexError:
        response['result'] = ''
        response['status'] = 2
    except requests.exceptions.RequestException:
        response['result'] = ''
        response['status'] = 5
    return response
| {
"repo_name": "nhho/hko",
"path": "hko/weather_warning.py",
"copies": "2",
"size": "1053",
"license": "mit",
"hash": -331712446974401000,
"line_mean": 28.25,
"line_max": 99,
"alpha_frac": 0.5546058879,
"autogenerated": false,
"ratio": 3.8713235294117645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006207133058984912,
"num_lines": 36
} |
"""A module to send commands to a UR robot"""
# The MIT License (MIT)
#
# Copyright (c) 2016 GTRC.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
def deg_2_rad(x):
    """Converts from degrees to radians

    Args:
        x (float): The input in degrees

    Returns: A float of the value input converted to radians
    """
    # Bug fix: use the full-precision constant instead of the original
    # hard-coded 3.14, which introduced ~0.05% error in every conversion.
    from math import pi
    return pi * x / 180
def rad_2_deg(x):
    """Converts from radians to degrees

    Args:
        x (float): the input value in radians

    Returns: A float of the value input converted to degrees.
    """
    # Bug fix: use the full-precision constant instead of the original
    # hard-coded 3.14, which introduced ~0.05% error in every conversion.
    from math import pi
    return (x / pi) * 180
def double_range(start, stop, step):
    """Yield floats from ``start`` up to (but excluding) ``stop``.

    Args:
        start (float): The initial value
        stop (float): The ending value
        step (float): The step size

    Yields:
        float: successive values spaced by ``step``.
    """
    # Accumulate rather than compute start + i*step so the float behavior
    # matches the original implementation exactly.
    current = start
    while current < stop:
        yield current
        current += step
def scale_path(origin, goal, multiplier=2):
    """Creates a new goal pose along a path.

    Takes the linear path from the origin to the goal and finds a pose on
    the path which is the length of the original path times multiplier from
    the origin.

    Args:
        origin (tuple or list of 6 floats): The origin pose
        goal (tuple or list of 6 floats): The goal pose
        multiplier (float): The multiplier which defines the new path's length

    Returns:
        tuple of 6 floats: the new pose along the path.
    """
    return tuple(start + multiplier * (end - start)
                 for start, end in zip(origin, goal))
def check_pose(pose):
    """Checks to make sure that a pose is valid.

    Checks that the pose is a 6 member tuple or list of floats. Does not
    return anything; raises if the pose is not valid.

    Args:
        pose: The pose to check

    Raises:
        TypeError: The pose was not valid.
    """
    if not isinstance(pose, (tuple, list)):
        raise TypeError("Expected tuple or list for pose")
    for member in pose:
        if not isinstance(member, float):
            raise TypeError("Expected floats in pose")
    if len(pose) != 6:
        raise TypeError("Expected 6 members in pose")
def check_xyz(pose):
    """Checks to make sure that a 3 tuple or list x,y,z is valid.

    Checks that the pose is a 3 member tuple or list of floats. Does not
    return anything; raises if the pose is not valid.

    Args:
        pose: The pose to check

    Raises:
        TypeError: The pose was not valid.
    """
    if not isinstance(pose, (tuple, list)):
        raise TypeError("Expected tuple or list for pose")
    for member in pose:
        if not isinstance(member, float):
            raise TypeError("Expected floats in pose")
    if len(pose) != 3:
        raise TypeError("Expected 3 members in pose")
def clean_list_tuple(input_data):
    """Return a string of the input without brackets or parentheses.

    Args:
        input_data (tuple or list): The tuple or list to convert to a string
            and strip of brackets or parentheses

    Returns:
        str: the comma-separated interior of ``str(input_data)``.

    Raises:
        TypeError: input_data was not a tuple or list
    """
    if not isinstance(input_data, (tuple, list)):
        raise TypeError("Expected tuple for pose")
    rendered = str(input_data)
    return rendered[1:-1]
class URSender(object):
    """A class to send commands to a UR CB2 Robot.

    Acceleration, velocity, and the blend radius are all stored and reused
    for each command. There are separate values of acceleration and velocity
    for joint motions and cartesian motions. The user should adjust the
    values for acceleration and velocity as needed.

    Attributes:
        __socket: The Socket used to connect to the robot
        a_tool: Float, tool acceleration [m/s^2]
        v_tool: Float, tool speed [m/s]
        radius: Float, blend radius [m]. This allows the robot to miss
            points, so long as they are within the radius and continue
            moving. If you would like the robot to stop at every point,
            set this to zero. Because of the real time nature of this system,
            the value of the blend radius is low.
        a_joint: Float, joint acceleration of leading axis [rad/s^2]
        v_joint: Float, joint speed of leading axis [rad/s]
        tool_voltage_set: Boolean, whether the tool voltage has been set
        force_settings: Tuple of values to set force following settings on
            the robot, or None until set_force_mode() is called
        verbose: Boolean of whether to print info to the console
        sent: Integer of the number of commands sent
    """

    def __init__(self, open_socket, verbose=False):
        """Construct a UR Robot connection to send commands

        Args:
            open_socket (socket.socket): An already open and connected socket
                to a UR robot
            verbose (bool): Whether to print information to the terminal
        """
        self.__socket = open_socket
        self.a_tool = 1.2
        self.v_tool = 0.3
        self.radius = 0.0
        self.a_joint = 1.2
        self.v_joint = 0.3
        self.tool_voltage_set = False
        self.force_settings = None
        self.verbose = verbose
        self.sent = 0

    def __del__(self):
        """Destructor which prints the number of commands which were sent"""
        # print() with a single argument behaves identically on Python 2 and
        # 3, unlike the original Python-2-only ``print`` statement.
        print('Sent: {} commands'.format(self.sent))

    def send(self, message):
        """Sends the message over the IP pipe, newline terminated.

        Args:
            message (str): The message to be sent.
        """
        message += '\n'
        if self.verbose:
            print(message)
        self.__socket.send(message)
        self.sent += 1

    def set_force_mode(self, task_frame, selection_vector, wrench, frame_type,
                       limits):
        """Set robot to be controlled in force mode

        Args:
            task_frame (tuple or list of 6 floats): A pose vector that defines
                the force frame relative to the base frame.
            selection_vector (tuple or list of 6 booleans): 1/True means the
                robot will be compliant in the corresponding axis of the task
                frame, 0/False means it is not compliant along/about that
                axis.
            wrench (tuple or list of 6 floats): The forces/torques the robot
                is to apply to its environment. For a compliant axis the
                robot adjusts its position to achieve the specified
                force/torque; for a non-compliant axis it follows the
                trajectory while accounting for an external force/torque of
                the specified value.
            frame_type (int): How the robot interprets the force frame.
                1: y-axis aligned from TCP towards the force-frame origin.
                2: the force frame is not transformed.
                3: x-axis is the projection of the TCP velocity onto the
                force frame's x-y plane. All other values are invalid.
            limits (tuple or list of 6 floats): For compliant axes, the
                maximum allowed TCP speed along/about the axis; for
                non-compliant axes, the maximum allowed deviation between
                the actual and programmed TCP position.

        Raises:
            TypeError: The selection_vector was not a tuple/list of 6
                booleans, or frame_type was not an integer
            IndexError: frame_type was not in the set (1,2,3)
        """
        check_pose(task_frame)
        check_pose(wrench)
        check_pose(limits)
        if not isinstance(selection_vector, (tuple, list)):
            raise TypeError("Expected tuple or list for selection_vector")
        if not all([isinstance(x, bool) for x in selection_vector]):
            raise TypeError("Expected booleans in selection_vector")
        if not len(selection_vector) == 6:
            raise TypeError("Expected 6 members in selection_vector")
        if not isinstance(frame_type, int):
            raise TypeError("frame_type must be an integer")
        if frame_type not in (1, 2, 3):
            raise IndexError("frame_type must be in the set (1,2,3)")
        # Settings are only stored here; force_mode_on() sends them.
        self.force_settings = (task_frame, selection_vector, wrench,
                               frame_type, limits)

    def force_mode_on(self):
        """Activates force mode.

        Requires that force mode settings have been passed in by
        set_force_mode()

        Raises:
            RuntimeError: Force settings have not been set.
        """
        if self.force_settings is None:
            # RuntimeError replaces the Python-2-only StandardError; on
            # Python 2, RuntimeError subclasses StandardError so existing
            # handlers still catch it.
            raise RuntimeError('Force Settings have not been set with '
                               'set_force_mode')
        # NOTE(review): this interpolates the raw Python tuples/lists
        # (including parentheses/brackets) into the URScript call -- confirm
        # the controller accepts that formatting.
        self.send('forcemode({},{},{},{},{})'.format(*self.force_settings))

    def force_mode_off(self):
        """Deactivates force mode"""
        self.send('end_force_mode()')

    def move_circular(self, pose_via, pose_to, cartesian=True):
        """Move to position, circular in tool-space.

        TCP moves on the circular arc segment from current pose, through
        pose_via to pose_to. Accelerates to and moves with constant tool
        speed self.v_tool.

        Args:
            pose_via (tuple or list of 6 floats): Path point through which to
                draw arc, only x,y,z are used
            pose_to (tuple or list of 6 floats): Destination point
            cartesian (bool): Whether supplied poses are cartesian or joint
                coordinates

        Raises:
            TypeError: cartesian was not a boolean
        """
        check_pose(pose_to)
        check_pose(pose_via)
        if not isinstance(cartesian, bool):
            raise TypeError('Cartesian must be a boolean')
        point = 'p' if cartesian else ''
        # Bug fix: the original format string was missing the closing ')',
        # producing a malformed URScript line.
        self.send('movec({}[{}],{}[{}],a={},v={},r={})'.format(
            point, clean_list_tuple(pose_via), point,
            clean_list_tuple(pose_to), self.a_tool, self.v_tool, self.radius))

    def move_joint(self, goal, time=None, cartesian=False):
        """Move to position (linear in joint-space).

        When using this command, the robot must be at standstill or come from
        a movej or movel with a blend. The speed and acceleration parameters
        control the trapezoid speed profile of the move. The time parameter
        can be used instead to set the time for this move; when given it has
        priority over speed and acceleration settings. The blend radius
        avoids the robot stopping at the point; if the blend region overlaps
        with previous or following regions, this move is skipped and an
        'Overlapping Blends' warning is generated.

        Args:
            goal (tuple or list of 6 floats): Destination pose
            time (float): Optional positive time in which to complete the
                move; overrides speed and acceleration when supplied.
            cartesian (bool): Whether the goal point is in cartesian
                coordinates.

        Raises:
            TypeError: cartesian was not a boolean or time was not a float
            ValueError: time was not a positive value
        """
        check_pose(goal)
        if not isinstance(cartesian, bool):
            raise TypeError('Cartesian must be a boolean')
        if time is not None:
            if not isinstance(time, float):
                raise TypeError('time must be a float')
            if time <= 0:
                raise ValueError('time must be greater than zero')
            self.send('movej({}[{}],a={},v={},t={},r={})'.format(
                'p' if cartesian else '', clean_list_tuple(goal),
                self.a_joint, self.v_joint, time, self.radius))
        else:
            self.send('movej({}[{}],a={},v={},r={})'.format(
                'p' if cartesian else '', clean_list_tuple(goal),
                self.a_joint, self.v_joint, self.radius))

    def move_line(self, goal, time=None, cartesian=True):
        """Move to position (linear in tool-space).

        When using this command, the robot must be at standstill or come from
        a movej or movel with a blend. The speed and acceleration parameters
        control the trapezoid speed profile of the move. The time parameter
        can be used instead to set the time for this move; when given it has
        priority over speed and acceleration settings. The blend radius
        avoids the robot stopping at the point; if the blend region overlaps
        with previous or following regions, this move is skipped and an
        'Overlapping Blends' warning is generated.

        Args:
            goal (tuple or list of 6 floats): Destination pose
            time (float): Optional positive time in which to complete the
                move; overrides speed and acceleration when supplied.
            cartesian (bool): Whether the goal point is in cartesian
                coordinates.

        Raises:
            TypeError: cartesian was not a boolean or time was not a float
            ValueError: time was not a positive value
        """
        check_pose(goal)
        if not isinstance(cartesian, bool):
            raise TypeError('Cartesian must be a boolean')
        if time is not None:
            if not isinstance(time, float):
                raise TypeError('time must be a float')
            if time <= 0:
                raise ValueError('time must be greater than zero')
            self.send('movel({}[{}],a={},v={},t={},r={})'.format(
                'p' if cartesian else '', clean_list_tuple(goal),
                self.a_tool, self.v_tool, time, self.radius))
        else:
            self.send('movel({}[{}],a={},v={},r={})'.format(
                'p' if cartesian else '', clean_list_tuple(goal),
                self.a_tool, self.v_tool, self.radius))

    def move_process(self, goal, cartesian=True):
        """Move Process, guarantees that speed will be maintained.

        Blend circular (in tool-space) and move linear (in tool-space) to
        position. Accelerates to and moves with constant tool speed v.
        Failure to maintain tool speed leads to an error. Ideal for
        applications such as gluing.

        Args:
            goal (tuple or list of 6 floats): Destination pose
            cartesian (bool): Whether the goal point is in cartesian
                coordinates.

        Raises:
            TypeError: cartesian was not a boolean
        """
        check_pose(goal)
        if not isinstance(cartesian, bool):
            raise TypeError('Cartesian must be a boolean')
        self.send('movep({}[{}],a={},v={},r={})'.format(
            'p' if cartesian else '', clean_list_tuple(goal),
            self.a_tool, self.v_tool, self.radius))

    def servo_circular(self, goal, cartesian=True):
        """Servo to position (circular in tool-space).

        Accelerates to and moves with constant tool speed v.

        Args:
            goal (tuple or list of 6 floats): Destination pose
            cartesian (bool): Whether the goal point is in cartesian
                coordinates.

        Raises:
            TypeError: cartesian was not a boolean
        """
        check_pose(goal)
        if not isinstance(cartesian, bool):
            raise TypeError('Cartesian must be a boolean')
        self.send('servoc({}[{}],a={},v={},r={})'.format(
            'p' if cartesian else '', clean_list_tuple(goal),
            self.a_tool, self.v_tool, self.radius))

    def servo_joint(self, goal, time):
        """Servo to position (linear in joint-space).

        Args:
            goal (tuple or list of 6 floats): Destination pose
            time (float): The time in which to complete the move in seconds

        Raises:
            TypeError: time was not a float
            ValueError: time was non positive
        """
        check_pose(goal)
        if not isinstance(time, float):
            raise TypeError('Time must be a float')
        if time <= 0:
            raise ValueError('Time must be a positive value')
        self.send('servoj([{}],t={})'.format(clean_list_tuple(goal), time))

    def stop_joint(self):
        """Stop (linear in joint space)"""
        self.send('stopj({})'.format(self.a_joint))

    def stop_linear(self):
        """Stop (linear in tool space)"""
        self.send('stopl({})'.format(self.a_tool))

    def set_normal_gravity(self):
        """Sets a normal gravity for an upright mounted robot"""
        self.send('set_gravity([0,0,9.82])')

    def set_payload(self, mass, cog=None):
        """Set payload mass and center of gravity

        This function must be called when the payload weight or weight
        distribution changes significantly - i.e. when the robot picks up or
        puts down a heavy workpiece. The CoG argument is optional - if not
        provided, the Tool Center Point (TCP) will be used as the Center of
        Gravity (CoG), and later calls to set_tcp(pose) will change CoG to
        the new TCP. The CoG is specified as a vector [CoGx, CoGy, CoGz],
        displacement from the tool mount.

        Args:
            mass (float): mass in kilograms
            cog (tuple or list of 3 floats): Center of Gravity: [CoGx, CoGy,
                CoGz] in meters.

        Raises:
            TypeError: mass was not a float
            ValueError: mass was negative
        """
        if not isinstance(mass, float):
            raise TypeError("Expected float for mass")
        if mass < 0:
            raise ValueError("Cannot have negative mass")
        if cog is not None:
            check_xyz(cog)
            # NOTE(review): 'm={}' followed by a positional list is unusual
            # URScript; preserved as-is -- confirm against the controller.
            self.send('set_payload(m={},[{}])'.format(mass,
                                                      clean_list_tuple(cog)))
        else:
            self.send('set_payload(m={})'.format(mass))

    def set_tcp(self, pose):
        """Set the TCP transformation.

        Set the transformation from the output flange coordinate system to
        the TCP as a pose.

        Args:
            pose (tuple or list of 6 floats): A pose describing the
                transformation.
        """
        check_pose(pose)
        self.send('set_tcp([{}])'.format(clean_list_tuple(pose)))

    def set_analog_input_range(self, port, input_range):
        """Set input_range of analog inputs

        Port 0 and 1 are in the control box, 2 and 3 are on the tool flange.

        Args:
            port (int): Port ID (0,1,2,3)
            input_range (int): On the controller: [0: 0-5V, 1: -5-5V,
                2: 0-10V, 3: -10-10V]. On the tool flange: [0: 0-5V,
                1: 0-10V, 2: 0-20mA]

        Raises:
            TypeError: Either port or input_range was not an integer
            IndexError: input_range was not a valid value for the selected
                port, or port was out of range
        """
        if not isinstance(port, int):
            raise TypeError("port must be an integer")
        if not isinstance(input_range, int):
            raise TypeError("input_range must be an integer")
        if port in (0, 1):
            if input_range not in (0, 1, 2, 3):
                raise IndexError("input_range must be in the set (0,1,2,3) for"
                                 "the controller outputs.")
        elif port in (2, 3):
            # Bug fix: the original raised unconditionally for tool-flange
            # ports, making ports 2 and 3 unusable; only reject values
            # outside the documented (0,1,2) range.
            if input_range not in (0, 1, 2):
                raise IndexError("input_range must be in the set (0,1,2) for "
                                 "the tool outputs.")
        else:
            raise IndexError("port must be in the set (0,1,2,3)")
        self.send('set_analog_inputrange({},{})'.format(port, input_range))

    def set_analog_out(self, ao_id, level):
        """Set analog output level

        Args:
            ao_id (int): The output ID#. AO 0 and 1 are in the control box.
                There is no analog output on the tool.
            level (float): The output signal level 0-1, corresponding to
                4-20mA or 0-10V based on set_analog_output_domain.

        Raises:
            TypeError: Either ao_id was not an integer or level was not a
                float
            IndexError: ao_id was not a valid value (0,1)
            ValueError: level was outside the range 0-1
        """
        if not isinstance(ao_id, int):
            raise TypeError("Expected int for ao_id")
        if not isinstance(level, float):
            raise TypeError("Expected int for domain")
        if ao_id not in (0, 1):
            raise IndexError('The Analog output ID must be either 0 or 1')
        if level > 1 or level < 0:
            raise ValueError("Level must be 0-1")
        self.send('set_analog_out({},{})'.format(ao_id, level))

    def set_analog_output_domain(self, ao_id, domain):
        """Sets the signal domain of the analog outputs.

        The analog outputs can be flexibly set to operate on a 4-20mA or
        0-10V scale. There are two analog outputs on the controller and none
        on the tool.

        Args:
            ao_id (int): The port number (0 or 1).
            domain (int): 0 for 4-20mA and 1 for 0-10V

        Raises:
            TypeError: Either ao_id or domain was not an integer
            IndexError: ao_id or domain was not a valid value (0,1)
        """
        if not isinstance(ao_id, int):
            raise TypeError("Expected int for ao_id")
        if not isinstance(domain, int):
            raise TypeError("Expected int for domain")
        if ao_id not in (0, 1):
            raise IndexError('The Analog output ID must be either 0 or 1')
        if domain not in (0, 1):
            raise IndexError('The Analog domain must be either 0 or 1')
        self.send('set_analog_outputdomain({},{})'.format(ao_id, domain))

    def set_digital_out(self, do_id, level):
        """Set the value for DO[do_id]

        Args:
            do_id (int): The digital output #. Values 0-7 are on the control
                box. Values 8 and 9 are on the tool flange. You must set the
                tool voltage prior to attempting to modify the tool flange
                outputs.
            level (bool): High or low setting for output

        Raises:
            TypeError: do_id was not an integer or level was not a boolean
            RuntimeError: The tool voltage was not set prior to attempting
                this call
            IndexError: do_id was out of range (0-9)
        """
        if not isinstance(do_id, int):
            raise TypeError("Expected int for do_id")
        if do_id in (8, 9) and not self.tool_voltage_set:
            # RuntimeError replaces the Python-2-only StandardError (see
            # force_mode_on for rationale).
            raise RuntimeError("The tool voltage must be set prior to "
                               "attempting to alter tool outputs")
        if do_id > 9 or do_id < 0:
            raise IndexError("The valid range for digital outputs is 0-9")
        if not isinstance(level, bool):
            raise TypeError("Expected boolean for level")
        self.send('set_digital_out({},{})'.format(do_id, 1 if level else 0))

    def set_tool_voltage(self, voltage):
        """Sets the voltage level for the power supply that delivers power
        to the connector plug in the tool flange of the robot. The voltage
        can be 0, 12 or 24 volts.

        Args:
            voltage (int): The voltage to set at the tool connector

        Raises:
            TypeError: voltage was not an integer
            ValueError: voltage was not valued 0, 12, or 24
        """
        if not isinstance(voltage, int):
            raise TypeError("Expected int for voltage")
        if voltage not in (0, 12, 24):
            raise ValueError("Voltage must be 0, 12, or 24")
        self.send('set_tool_voltage({})'.format(voltage))
        self.tool_voltage_set = True
| {
"repo_name": "IRIM-Technology-Transition-Lab/ur_cb2",
"path": "ur_cb2/send/cb2_send.py",
"copies": "1",
"size": "26744",
"license": "mit",
"hash": -5875094746888046000,
"line_mean": 39.5212121212,
"line_max": 80,
"alpha_frac": 0.5746335627,
"autogenerated": false,
"ratio": 4.491770238495129,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00014964459408903854,
"num_lines": 660
} |
"""A module to test the HKO package"""
import unittest
from hko import astro, blog, earthquake, local_weather, lunar_date,\
major_city_forecast, marine_forecast, rainfall_nowcast,\
regional_weather, serval_days_weather_forecast,\
south_china_coastal_waters, tide, uv_index, weather_warning
# Reference coordinates: Hong Kong should be covered by the nowcast data;
# Macau lies outside the 10-unit station radius (rainfall_nowcast returns
# status 3 for it, as asserted below).
HONG_KONG_LAT_LNG = 22.352493, 113.8474984
MACAU_LAT_LNG = 22.161817, 113.5001117
# Entry points that take a language code ('UC'/'EN').
FUNCTIONS_WITH_LANG = [earthquake, major_city_forecast, marine_forecast,
                       serval_days_weather_forecast, south_china_coastal_waters,
                       uv_index, weather_warning]
# Entry points that take latitude/longitude floats.
FUNCTIONS_WITH_LAT_LNG = [local_weather, rainfall_nowcast]
# Language-taking entry points that may also legitimately return status 4.
FUNCTIONS_WITH_LANG_ST4 = set([uv_index])
# Entry points that take no arguments.
FUNCTIONS_WITHOUT_ARGS = [astro, blog, lunar_date, regional_weather, tide]
class TestHKO(unittest.TestCase):
    """A class to test the HKO package"""

    def test_functions_with_lang(self):
        """A function to test functions with language"""
        for fetcher in FUNCTIONS_WITH_LANG:
            # Invalid language codes must be rejected with status 0.
            self.assertEqual(fetcher('XXX')['status'], 0)
            self.assertEqual(fetcher(123.4)['status'], 0)
            if fetcher in FUNCTIONS_WITH_LANG_ST4:
                acceptable = [1, 2, 4, 5]
            else:
                acceptable = [1, 2, 5]
            self.assertIn(fetcher()['status'], acceptable)
            self.assertIn(fetcher('UC')['status'], acceptable)
            self.assertIn(fetcher('EN')['status'], acceptable)

    def test_functions_with_lat_lng(self):
        """A function to test functions with latitude and longitude"""
        # Non-float and out-of-range coordinates must yield status 0.
        invalid_coords = (('XXX', 'XXX'), (45.6, 'XXX'), ('XXX', 123.4),
                          (90.1, 123.4), (-90.1, 123.4), (45.6, 180.1),
                          (45.6, -180.1))
        for fetcher in FUNCTIONS_WITH_LAT_LNG:
            for bad_lat, bad_lng in invalid_coords:
                self.assertEqual(fetcher(bad_lat, bad_lng)['status'], 0)
            self.assertIn(fetcher(*HONG_KONG_LAT_LNG)['status'], [1, 2, 5])
            self.assertEqual(fetcher(*MACAU_LAT_LNG)['status'], 3)

    def test_functions_without_args(self):
        """A function to test functions without arguments"""
        for fetcher in FUNCTIONS_WITHOUT_ARGS:
            self.assertIn(fetcher()['status'], [1, 2, 5])
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "slgphantom/hko",
"path": "tests/test_hko.py",
"copies": "2",
"size": "2342",
"license": "mit",
"hash": 2719038991160667600,
"line_mean": 37.393442623,
"line_max": 86,
"alpha_frac": 0.6187019641,
"autogenerated": false,
"ratio": 3.3457142857142856,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9962462157036753,
"avg_score": 0.0003908185555067032,
"num_lines": 61
} |
"""A module to test whether doctest recognizes some 2.2 features,
like static and class methods.
>>> print('yup') # 1
yup
We include some (random) encoded (utf-8) text in the text surrounding
the example. It should be ignored:
ЉЊЈЁЂ
"""
import sys
import unittest
from test import support
# Docstrings are stripped under -OO, so the doctests in this module cannot run there.
if sys.flags.optimize >= 2:
    raise unittest.SkipTest("Cannot test docstrings with -O2")
class C(object):
    """Class C.
    >>> print(C()) # 2
    42
    We include some (random) encoded (utf-8) text in the text surrounding
    the example. It should be ignored:
    ЉЊЈЁЂ
    """
    # NOTE: every docstring in this class is a doctest; test_main() expects
    # exactly 19 examples to be collected, so the example text must not change.
    def __init__(self):
        """C.__init__.
        >>> print(C()) # 3
        42
        """
    def __str__(self):
        """
        >>> print(C()) # 4
        42
        """
        return "42"
    class D(object):
        """A nested D class.
        >>> print("In D!") # 5
        In D!
        """
        def nested(self):
            """
            >>> print(3) # 6
            3
            """
    def getx(self):
        """
        >>> c = C() # 7
        >>> c.x = 12 # 8
        >>> print(c.x) # 9
        -12
        """
        # The property negates the stored value (set via setx) — hence -12 above.
        return -self._x
    def setx(self, value):
        """
        >>> c = C() # 10
        >>> c.x = 12 # 11
        >>> print(c.x) # 12
        -12
        """
        self._x = value
    # Property docstrings are also scanned for doctests (examples 13-15).
    x = property(getx, setx, doc="""\
        >>> c = C() # 13
        >>> c.x = 12 # 14
        >>> print(c.x) # 15
        -12
        """)
    @staticmethod
    def statm():
        """
        A static method.
        >>> print(C.statm()) # 16
        666
        >>> print(C().statm()) # 17
        666
        """
        return 666
    @classmethod
    def clsm(cls, val):
        """
        A class method.
        >>> print(C.clsm(22)) # 18
        22
        >>> print(C().clsm(23)) # 19
        23
        """
        return val
def test_main():
    # Import ourselves so run_doctest can collect every docstring example
    # defined in this module.
    from test import test_doctest2
    EXPECTED = 19
    failures, tried = support.run_doctest(test_doctest2)
    if tried != EXPECTED:
        raise support.TestFailed("expected %d tests to run, not %d" %
                                 (EXPECTED, tried))
# Pollute the namespace with a bunch of imported functions and classes,
# to make sure they don't get tested.
from doctest import *
# Allow direct execution of this test module.
if __name__ == '__main__':
    test_main()
| {
"repo_name": "pleaseproject/python-for-android",
"path": "python3-alpha/python3-src/Lib/test/test_doctest2.py",
"copies": "194",
"size": "2359",
"license": "apache-2.0",
"hash": 1834643503480457700,
"line_mean": 18.0975609756,
"line_max": 73,
"alpha_frac": 0.4465730098,
"autogenerated": false,
"ratio": 3.580792682926829,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A module to to gather metrics about the bootstrap process."""
from collections import OrderedDict
from sqlalchemy import and_
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from common.config import DB_ENGINE
# Declarative base class shared by all ORM models in this module.
Base = declarative_base()
class Snapshot(Base):
    # One row per pipeline per bootstrap run, capturing its state at that time.
    __tablename__ = 'snapshot'
    id = Column(Integer, primary_key=True, autoincrement=True)
    pipeline_id = Column(String)  # identifier of the data pipeline
    timestamp = Column(DateTime)  # when this snapshot was taken
    pipeline_status = Column(String)  # e.g. 'up' (see get_latest_stats)
    validation_status = Column(String)  # 'broken' / 'loaded' / 'valid'
    nb_validation_errors = Column(Integer)
    resource_type = Column(String)  # 'url' (remote) or 'path' (local)
    extension = Column(String)  # source file extension, e.g. '.pdf'
    scraper_required = Column(Boolean)
    has_scraper = Column(Boolean)
    country_code = Column(String)
    nuts_code = Column(String)  # presumably an EU NUTS region code — confirm
# Create the snapshot table at import time if it does not already exist.
Base.metadata.create_all(DB_ENGINE)
def get_latest_stats():
    """Get simple stats about the latest update.

    Returns a 3-tuple:
        * timestamp of the most recent snapshot run
        * a query yielding (pipeline_id, pipeline_status, resource_type,
          extension, validation_status) rows for that run
        * an OrderedDict mapping a label to a row count for that run

    Raises:
        LookupError: if no snapshots have been recorded yet (the previous
        implementation raised a bare IndexError from ``[].pop()`` here).
    """
    session = sessionmaker(bind=DB_ENGINE)()
    # .first() fetches a single row instead of materializing the whole
    # result set with .all() and popping the head, and lets us report an
    # empty table explicitly.
    latest = (
        session.query(Snapshot)
        .order_by(Snapshot.timestamp.desc())
        .first()
    )
    if latest is None:
        raise LookupError('no snapshots recorded yet')
    timestamp = latest.timestamp
    stats = session.query(
        Snapshot.pipeline_id,
        Snapshot.pipeline_status,
        Snapshot.resource_type,
        Snapshot.extension,
        Snapshot.validation_status,
    ).filter(Snapshot.timestamp == timestamp)
    # noinspection PyComparisonWithNone
    sum_queries = OrderedDict((
        ('Up', Snapshot.pipeline_status == 'up'),
        ('Remote', Snapshot.resource_type == 'url'),
        ('Local', Snapshot.resource_type == 'path'),
        ('PDF', Snapshot.extension == '.pdf'),
        ('Excel', Snapshot.extension.in_(['.xls', '.xlsx'])),
        ('Broken', Snapshot.validation_status == 'broken'),
        ('Loaded', Snapshot.validation_status == 'loaded'),
        ('Valid', Snapshot.validation_status == 'valid'),
        # SQL "IS NULL" requires "== None"; "is None" would not translate.
        ('Unknown origin', Snapshot.resource_type == None),  # noqa
        ('Unknown extension', Snapshot.extension == None),  # noqa
    ))
    sums = OrderedDict()
    for key, select in sum_queries.items():
        sums[key] = (
            session.query(Snapshot)
            .filter(and_(Snapshot.timestamp == timestamp, select))
            .count()
        )
    return timestamp, stats, sums
| {
"repo_name": "Victordeleon/os-data-importers",
"path": "eu-structural-funds/common/metrics.py",
"copies": "1",
"size": "2447",
"license": "mit",
"hash": 5612009688358135000,
"line_mean": 29.2098765432,
"line_max": 67,
"alpha_frac": 0.6518185533,
"autogenerated": false,
"ratio": 4.300527240773286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5452345794073287,
"avg_score": null,
"num_lines": null
} |
'''A module to visualize the underwater automata learning the best depth to
operate at.'''
import turtle
from lrp import Linear_Reward_Penalty as LRP
from mse import MSE
from environment import Environment
from pinger import Pinger
import numpy as np
# import tune_lrp as tune
import time
# setup window
window = turtle.Screen()
window.setup(800, 1000, 0, 0)
window.title("UUAV Learning the best depth")
window.register_shape("sub.gif")
turtle.colormode(255)
# One turtle per action will draw a transmission arrow (filled in below).
transmission = []
source = turtle.Turtle()
receiver = turtle.Turtle()
# For 5 actions we need 5 depths
depths = [400, 200, 0, -200, -400]
# Source setup
source.penup()
source.setpos(-200, 400)
source.shape("circle")
source.color("green")
# Receiver setup
receiver.penup()
receiver.setpos(200, 400)
receiver.shape("sub.gif")
# Simulation setup
num_actions = 5
# n: size of the ensemble of experiments run per environment (see main loop).
n = 10000
interval = 1
# NOTE(review): time_between appears to be computed but never used below — confirm.
time_between = (n / interval) - 1
# Define the environment with the number of discrete depths for the detectable
# object.
env = Environment(num_actions)
# Define the LRI automata with the same number of actions. This number does
# not correspond to the number of receivers on the array. It is merely the
# representation of the array's ability to detect the object at that depth.
lrp = LRP(num_actions) # The learning automata.
# The most probable depth that the object exists at, as calculated by the
# learner.
bestdepth = np.zeros(num_actions)
# Define the Markovian Switching Environment that will feed probabilities to
# the Pinger object.
# Each row of Es is one environment's probability vector over the 5 depths.
Es = [[0.48796, 0.024438, 0.067891, 0.41971, 0.00],
      [0.021431, 0.071479, 0.40562, 0.50147, 0.00],
      [0.018288, 0.083153, 0.50582, 0.39274, 0.00],
      [0.48455, 0.015527, 0.18197, 0.31795, 0.00],
      [0.01675, 0.58845, 0.11313, 0.28167, 0.00]]
mse = MSE(Es)
det_obj = Pinger(mse.env_now()) # Create the detectable object.
# set up transmission vectors
for i in range(num_actions):
    transmission.append(turtle.Turtle())
    # transmission[i].color("green")
    # transmission[i].shape("arrow")
    # transmission[i].penup()
    # transmission[i].setpos(-200, 400)
    # transmission[i].pendown()
    # transmission[i].goto(150, depths[i])
# Run one experiment per environment in the Markovian switching set.
for k in range(len(mse.envs)):
    # Generate an ensemble of n experiments
    source.goto(-200, depths[k])
    receiver.clear()
    # Redraw the transmission arrows and annotate each with its probability.
    for i in range(num_actions):
        transmission[i].clear()
        transmission[i].color("green")
        transmission[i].shape("arrow")
        transmission[i].shapesize(.5, .5)
        transmission[i].penup()
        transmission[i].setpos(-200, depths[k])
        transmission[i].pendown()
        transmission[i].goto(150, depths[i])
        transmission[i].write(mse.env_now()[i])
    det_obj.set_env(mse.env_now())
    print("The desired vector is now: " + str(mse.env_now()))
    # lrp.a = tune.find_optimal_a(lrp, env, det_obj)
    # print("Optimized value for a is: " + str(lrp.a))
    lrp.a = 0.99999999999999
    lrp.b = 0.5
    bestdepth = np.zeros(num_actions)
    current_best = 0
    for j in range(n):
        # reset the action probabilities.
        # lrp.reset_actions()
        count = 0
        # lrp.b = tune.find_optimal_b(lrp, env, det_obj)
        # Run a single experiment. Terminate if it reaches 10000 iterations.
        while(True and count < 10000):
            # Define m as the next action predicting the depth of the object.
            m = lrp.next_action()
            # Define req as the next detectable object depth.
            req = det_obj.request()
            # reward if m = req.
            resp = env.response(m, req)
            if(not resp):
                lrp.do_reward(m)
            else:
                lrp.do_penalty(m)
            if(max(lrp.p) > 0.999):
                # The best depth counting from 0.
                # Break at 99.9% convergence to a single depth.
                bestdepth[np.argmax(lrp.p)] += 1
                break
            count += 1
        # Move the submarine whenever the leading depth estimate changes.
        if (current_best != np.argmax(bestdepth)):
            receiver.goto(200, depths[np.argmax(bestdepth)])
            current_best = np.argmax(bestdepth)
    receiver.goto(200, depths[np.argmax(bestdepth)])
    receiver.write(bestdepth[np.argmax(bestdepth)] / sum(bestdepth))
    print("The probability vector is: " + str(bestdepth / sum(bestdepth)))
    print("Best depth is: " + str(np.argmax(bestdepth) * 14 + 14) + "m. " +
          "The desired depth is: " + str(np.argmax(mse.env_now()) * 14 + 14) +
          "m.")
    print("*************************************************************")
    mse.next_env()
    time.sleep(5)
print("Ready to exit.")
# Exit
turtle.exitonclick()
| {
"repo_name": "0xSteve/detection_learning",
"path": "P_model/Visualizations/UUAV_depth_finding/visualize.py",
"copies": "1",
"size": "4678",
"license": "apache-2.0",
"hash": 7208277316041361000,
"line_mean": 34.1729323308,
"line_max": 78,
"alpha_frac": 0.6250534416,
"autogenerated": false,
"ratio": 3.2690426275331936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4394096069133193,
"avg_score": null,
"num_lines": null
} |
"""a module to work with higher level transcript objects
"""
import pickle
import sys, random, string, uuid
from collections import namedtuple
from math import sqrt
import seqtools.graph
from seqtools.range import GenomicRange, Bed
from seqtools.range.multi import ranges_to_coverage, merge_ranges
from seqtools.sequence import rc
from seqtools.structure.transcript.converters import transcript_to_gpd_line, transcript_to_fake_psl_line
class Exon(GenomicRange):
  """An exon: a GenomicRange plus boundary flags set by the owning transcript.

  The flags are tri-state: None (unknown) until explicitly set to a boolean.
  """
  def __init__(self, rng, dir=None):
    super(Exon, self).__init__(rng.chr, rng.start, rng.end, rng.payload, dir)
    # Positional flags within the transcript's exon list.
    self.leftmost = None
    self.rightmost = None
    # Orientation flags relative to the transcript's strand.
    self.fiveprime = None
    self.threeprime = None
  def set_leftmost(self, b=True):
    self.leftmost = b
  def set_rightmost(self, b=True):
    self.rightmost = b
  def set_fiveprime(self, b=True):
    self.fiveprime = b
  def set_threeprime(self, b=True):
    self.threeprime = b
# Immutable record of optional transcript attributes; Transcript.Options()
# fills every field with None unless overridden by the caller.
TranscriptOptions = namedtuple('TranscriptOptions',
   ['direction',
    'ref',
    'sequence',
    'name',
    'gene_name',
    'payload'
   ])
class Transcript(seqtools.structure.MappingGeneric):
  """Class to describe a transcript
  This is a basic transcript where all the exons are in order and on the same chromosome
  Mapping is sliceable by genomic coordinate. Example:
  * mymapping - a mapping that ranges between 1 and 100
  * mymapping[1:99] - a new mapping that ranges between 2 and 99
  :param rngs: exon ranges, ordered, all on one chromosome
  :param options:
  :param options.direction:
  :param options.ref:
  :param options.sequence:
  :param options.name:
  :param options.gene_name:
  :type rngs: GenomicRange []
  :type options: namedtuple
  :type options.direction: Char
  :type options.ref: dict()
  :type options.sequence: String
  :type options.name: String
  :type options.gene_name: String
  """
  def __init__(self,rngs,options=None):
    if not options: options = Transcript.Options()
    if len(rngs) > 0:
      """convert rngs to Exons"""
      # Wrap each range in an Exon and flag the outermost exons.
      rngs = [Exon(x) for x in rngs]
      rngs[0].set_leftmost(True)
      rngs[-1].set_rightmost(True)
    super(Transcript,self).__init__(rngs,options)
  @staticmethod
  def Options(**kwargs):
    """ A method for declaring options for the class"""
    construct = TranscriptOptions #IMPORTANT! Set this
    names = construct._fields
    d = {}
    for name in names: d[name] = None #default values
    """set defaults here"""
    # NOTE: kwargs.iteritems() is Python 2 only; this module targets py2.
    for k,v in kwargs.iteritems():
      if k in names: d[k] = v
      else: raise ValueError('Error '+k+' is not an options property')
    """Create a set of options based on the inputs"""
    return construct(**d)
  def copy(self):
    # Shallow copy: the exon list is re-wrapped by __init__, but payload/ref
    # objects are shared with the original.
    return Transcript(self._rngs,Transcript.Options(
      direction=self.options.direction,
      ref=self.options.ref,
      sequence=self.options.sequence,
      name=self.options.name,
      gene_name=self.options.gene_name,
      payload=self.options.payload))
  def rc(self):
    """Flip the direction"""
    ntx = self.copy()
    newstrand = '+'
    if ntx.strand == '+': newstrand = '-'
    ntx._options = ntx._options._replace(direction=newstrand)
    return ntx
  def slice_target(self,chr,start,end):
    """Slice the mapping by the target coordinate
    First coordinate is 0-indexed start
    Second coordinate is 1-indexed finish
    :return: new Transcript restricted to the window, or None if no overlap
    """
    # create a range that we are going to intersect with
    trng = Bed(chr,start,end)
    nrngs = []
    for r in self._rngs:
      i = r.intersect(trng)
      if not i: continue
      nrngs.append(i)
    if len(nrngs) == 0: return None
    return Transcript(nrngs,self._options)
  def slice_sequence(self,start,end,directionless=False):
    """Slice the mapping by the position in the sequence
    First coordinate is 0-indexed start
    Second coordinate is 1-indexed finish
    :return: new Transcript covering the sub-sequence, or None if empty
    """
    if end > self.length: end = self.length
    if start < 0: start = 0
    if not directionless and self.direction == '-':
      # On the minus strand the sequence runs opposite to the genome,
      # so mirror the slice window before walking the exons left-to-right.
      newend = self.length-start
      newstart = self.length-end
      end = newend
      start = newstart
    #find the sequence length
    l = self.length
    indexstart = start
    indexend = end
    ns = []
    tot = 0
    # Walk exons, trimming the first and last to the slice boundaries.
    for r in self._rngs:
      tot += r.length
      n = r.copy()
      if indexstart > r.length:
        indexstart-=r.length
        continue
      n.start = n.start+indexstart
      if tot > end:
        diff = tot-end
        n.end -= diff
        tot = end
      indexstart = 0
      ns.append(n)
      if tot == end: break
    if len(ns)==0: return None
    return Transcript(ns,self._options)
  @property
  def range(self):
    """Get the range from the leftmost exon to the rightmost
    :return: total range
    :rtype: GenomicRange
    """
    return GenomicRange(self._rngs[0].chr,self._rngs[0].start,self._rngs[-1].end)
  def set_strand(self,dir):
    """Set the strand (direction)
    :param dir: direction + or -
    :type dir: char
    """
    self._options = self._options._replace(direction = dir)
  @property
  def strand(self):
    """Get the strand
    :return: direction + or -
    :rtype: char
    """
    return self._options.direction
  @property
  def direction(self):
    """alias for strand"""
    return self._options.direction
  @property
  def chr(self):
    """the reference chromosome. greedy return the first chromosome in exon array
    :return: chromosome
    :rtype: string
    """
    if len(self.exons)==0:
      sys.stderr.write("WARNING can't return chromsome with nothing here\n")
      return None
    return self._rngs[0].chr
  @property
  def junctions(self):
    """Junctions between each pair of consecutive exons (empty for single-exon)."""
    if len(self.exons) < 2: return []
    junctions = []
    for i in range(1,len(self.exons)):
      junctions.append(Junction(self.exons[i-1],self.exons[i]))
    return junctions
  def get_gpd_line(self,transcript_name=None,gene_name=None,direction=None):
    """Get the genpred format string representation of the mapping"""
    return transcript_to_gpd_line(self,transcript_name=transcript_name,gene_name=gene_name,direction=direction)
  def set_gene_name(self,name):
    """assign a gene name
    :param name: name
    :type name: string
    """
    self._options = self._options._replace(gene_name = name)
  @property
  def gene_name(self):
    """retrieve the gene name
    :return: gene name
    :rtype: string
    """
    return self._options.gene_name
  def set_transcript_name(self,name):
    """assign a transcript name
    :param name: name
    :type name: string
    """
    self._options = self._options._replace(name = name)
  @property
  def transcript_name(self):
    """retrieve the transcript name
    :return: transcript name
    :rtype: string
    """
    return self._options.name
  def get_fake_psl_line(self,ref):
    """Convert a mapping to a fake PSL line"""
    return transcript_to_fake_psl_line(self,ref)
  def get_junctions_string(self):
    """Get a string representation of the junctions. This is almost identical to a previous function.
    :return: string representation of junction
    :rtype: string
    """
    return ';'.join([x.get_range_string() for x in self.junctions])
  def junction_overlap(self,tx,tolerance=0):
    """Calculate the junction overlap between two transcripts
    :param tx: Other transcript
    :type tx: Transcript
    :param tolerance: how close to consider two junctions as overlapped (default=0)
    :type tolerance: int
    :return: Junction Overlap Report
    :rtype: Transcript.JunctionOverlap
    """
    return JunctionOverlap(self,tx,tolerance)
  def exon_overlap(self,tx,multi_minover=10,multi_endfrac=0,multi_midfrac=0.8,single_minover=50,single_frac=0.5,multi_consec=True):
    """Get a report on how much the exons overlap
    :param tx: other transcript
    :param multi_minover: multi-exons need to overlap by at least this much to be considered overlapped (default 10)
    :param multi_endfrac: multi-exons need an end fraction coverage of at least this by default (default 0)
    :param multi_midfrac: multi-exons need (default 0.8) mutual coverage for internal exons
    :param single_minover: single-exons need at least this much shared overlap (default 50)
    :param single_frac: at least this fraction of single exons must overlap (default 0.5)
    :param multi_consec: exons need to have multiexon consecutive mapping to consider it a match (default True)
    :type tx: Transcript
    :type multi_minover: int
    :type multi_endfrac: float
    :type multi_midfrac: float
    :type single_minover: int
    :type single_frac: float
    :type multi_consec: bool
    :return: ExonOverlap report
    :rtype: Transcript.ExonOverlap
    """
    return ExonOverlap(self,tx,multi_minover,multi_endfrac,multi_midfrac,single_minover,single_frac,multi_consec=multi_consec)
class ExonOverlap:
  """Describes how the exons of two transcripts overlap.

  :param tx_obj1: first transcript
  :param tx_obj2: second transcript
  :param multi_minover: multi-exon transcripts need each matched exon pair to
                        share at least this many bases (default 10)
  :param multi_endfrac: minimum mutual fractional overlap for first/last exons
                        of multi-exon transcripts (default 0)
  :param multi_midfrac: minimum mutual fractional overlap for internal exons
                        (default 0.8)
  :param single_minover: single-exon transcripts need at least this much
                         shared overlap (default 50)
  :param single_frac: minimum mutual overlap fraction for single exons
                      (default 0.5)
  :param multi_consec: require consecutively matching exons for two multi-exon
                       transcripts (default True)
  :type tx_obj1: Transcript
  :type tx_obj2: Transcript
  """
  def __init__(self1,tx_obj1,tx_obj2,multi_minover=10,multi_endfrac=0,multi_midfrac=0.8,single_minover=50,single_frac=0.5,multi_consec=True):
    self1.tx_obj1 = tx_obj1
    self1.tx_obj2 = tx_obj2
    self1.multi_minover = multi_minover # multi-exon minimum overlap of each exon
    self1.multi_endfrac = multi_endfrac # multi-exon minimum fractional overlap of first or last exon
    self1.multi_midfrac = multi_midfrac # multi-exon minimum fractional overlap of internal exons
    self1.multi_consec = multi_consec # require consecutive exons for exon overlap of multi_exon
    self1.single_minover = single_minover # single-exon minimum overlap
    self1.single_frac = single_frac # single-exon minimum overlap fraction
    self1.overs = [] # set by calculate_overlap()
    self1.dif1 = []
    self1.dif2 = []
    self1.calculate_overlap()
    # NOTE: returning early from __init__ does not prevent object creation;
    # callers rely on truthiness (__nonzero__) to test for a usable overlap.
    if len(self1.overs) == 0: return None # nothing to analyze
    # Bug fix: the second operand originally re-tested tx_obj1; the
    # multi_consec requirement applies when BOTH transcripts are multi-exon.
    if self1.tx_obj1.get_exon_count() > 1 and self1.tx_obj2.get_exon_count() > 1 \
    and self1.multi_consec and len(self1.overs) < 2:
      return None #not enough to consider multi exon transcript overlap
    self1.analyze_overs()
    if self1.tx_obj1.get_exon_count() > 1 and self1.tx_obj2.get_exon_count() > 1 \
    and self1.multi_consec and (min(self1.dif1) != 1 or min(self1.dif2) !=1):
      return None #not enough to consider multi exon transcript overlap
  def __nonzero__(self1):
    # Python 2 truthiness hook: truthy when any exon pair matched.
    if len(self1.overs) > 0: return True
    return False
  def overlap_size(self1):
    """Total number of overlapping bases between the two transcripts."""
    return self1.tx_obj1.overlap_size(self1.tx_obj2)
  def min_overlap_fraction(self1):
    """Overlap size as a fraction of the LONGER transcript's length."""
    return float(self1.overlap_size())/float(max(self1.tx_obj1.length,self1.tx_obj2.length))
  def match_exon_count(self1):
    """Total number of exons that overlap
    :return: matched exon count
    :rtype: int
    """
    return len(self1.overs)
  def consecutive_exon_count(self1):
    """Best number of consecutive exons that overlap
    :return: matched consecutive exon count
    :rtype: int
    """
    best = 1
    consec = 1
    for i in range(0,len(self1.dif1)):
      # A run continues only while both index sequences advance by one.
      if self1.dif1[i] == 1 and self1.dif2[i] == 1:
        consec += 1
      else:
        consec = 1
      if consec > best:
        best = consec
    return best
  def is_subset(self1):
    """ Return value if tx_obj2 is a complete subset of tx_obj1 or
    tx_obj1 is a complete subset of tx_obj2
    Values are:
    * Return 1: Full overlap (mutual subsets)
    * Return 2: two is a subset of one
    * Return 3: one is a subset of two
    * Return False if neither is a subset of the other
    :return: subset value
    :rtype: int
    """
    if len(self1.overs) == 0: return False
    if len(self1.dif1) > 0: # make sure they are consecutive if more than one
      if max(self1.dif1) != 1 or max(self1.dif2) != 1: return False
    onecov = self1.start1 and self1.end1
    twocov = self1.start2 and self1.end2
    if onecov and twocov:
      return 1
    elif twocov: return 2
    elif onecov: return 3
    return False
  def is_full_overlap(self1):
    """true if they are a full overlap
    :return: is full overlap
    :rtype: bool
    """
    if len(self1.overs) == 0: return False
    if len(self1.dif1) > 0:
      if max(self1.dif1) != 1 or max(self1.dif2) != 1: return False
    if self1.start1 and self1.end1 and self1.start2 and self1.end2:
      return True
    return False
  def is_compatible(self1):
    """ Return True if the transcripts can be combined together
    :return: can be combined together
    :rtype: bool
    """
    if len(self1.overs) == 0: return False
    if len(self1.dif1) > 0:
      if max(self1.dif1) != 1 or max(self1.dif2) != 1: return False
    # If we are still here it is a single consecutive run; it is combinable
    # when the run reaches an end of at least one transcript on each side.
    if (self1.start1 or self1.start2) and (self1.end1 or self1.end2):
      return True
    return False
  def analyze_overs(self1):
    """A helper function that prepares overlap and consecutive matches data"""
    # Deltas between consecutive matched exon indexes (1 == consecutive).
    self1.dif1 = [self1.overs[i][0]-self1.overs[i-1][0] for i in range(1,len(self1.overs))]
    self1.dif2 = [self1.overs[i][1]-self1.overs[i-1][1] for i in range(1,len(self1.overs))]
    # See if the matches start and end on the first or last exon of each.
    self1.start1 = self1.overs[0][0] == 0
    self1.start2 = self1.overs[0][1] == 0
    self1.end1 = self1.overs[-1][0] == len(self1.tx_obj1.exons)-1
    self1.end2 = self1.overs[-1][1] == len(self1.tx_obj2.exons)-1
    return
  def calculate_overlap(self1):
    """Populate self1.overs with [i,j] pairs of matching exon indexes."""
    overs = []
    if not self1.tx_obj1.range.overlaps(self1.tx_obj2.range): return # if they dont overlap wont find anything
    for i in range(0,len(self1.tx_obj1.exons)):
      for j in range(0,len(self1.tx_obj2.exons)):
        osize = self1.tx_obj1.exons[i].range.overlap_size(self1.tx_obj2.exons[j].range)
        ofrac = 0
        if osize > 0:
          # Mutual fraction: the smaller of the two coverage fractions.
          ofrac = min(float(osize)/float(self1.tx_obj1.exons[i].range.length)\
                     ,float(osize)/float(self1.tx_obj2.exons[j].range.length))
        if self1.tx_obj1.get_exon_count() == 1 or self1.tx_obj2.get_exon_count() == 1:
          # use single exon rules
          if osize >= self1.single_minover and ofrac >= self1.single_frac:
            overs.append([i,j])
        else: # for multi exons
          if i == 0 or j == 0 or i == len(self1.tx_obj1.exons)-1 or j == len(self1.tx_obj2.exons)-1:
            # its on an end
            if osize >= self1.multi_minover and ofrac >= self1.multi_endfrac:
              overs.append([i,j])
          # else its a middle
          elif osize >= self1.multi_minover and ofrac >= self1.multi_midfrac:
            overs.append([i,j])
    self1.overs = overs
class JunctionOverlap:
  """Class for describing the overlap of junctions between transcripts
  This should probably be not a child.
  :param tx_obj1: transcript1
  :param tx_obj2: transcript2
  :param tolerance: how far before its no longer a matched junction
  :type tx_obj1: Transcript
  :type tx_obj2: Transcript
  :type tolerance: int
  """
  def __init__(self,tx_obj1,tx_obj2,tolerance=0):
    self.tx_obj1 = tx_obj1
    self.tx_obj2 = tx_obj2
    self.j1 = self.tx_obj1.junctions
    self.j2 = self.tx_obj2.junctions
    self.tolerance = tolerance
    self.dif1 = None
    self.dif2 = None
    # overs is a list of [i,j] index pairs of matching junctions.
    self.overs = self.calculate_overlap()
    if len(self.overs) > 0:
      self.analyze_overs()
  def __nonzero__(self):
    # Python 2 truthiness hook: truthy when any junction pair matched.
    if len(self.overs) > 0: return True
    return False
  def match_junction_count(self):
    # Number of matched junction pairs.
    return len(self.overs)
  def is_subset(self):
    """Return value if tx_obj2 is a complete subset of tx_obj1 or tx_obj1 is a complete subset of tx_obj2
    values:
    * Return 1: Full overlap (mutual subests)
    * Return 2: two is a subset of one
    * Return 3: one is a subset of two
    * Return False if neither is a subset of the other
    """
    if len(self.overs) == 0: return False
    if len(self.dif1) > 0: # make sure they are consecutive if more than one
      if len([x for x in self.dif1 if x != 1]) != 0: return False
      if len([x for x in self.dif2 if x != 1]) != 0: return False
    #look closely at what we are doing here
    onecov = self.start1 and self.end1
    twocov = self.start2 and self.end2
    if onecov and twocov:
      # Sanity check: identical junction chains imply identical exon counts.
      if self.tx_obj1.get_exon_count() != self.tx_obj2.get_exon_count():
        raise ValueError('how can be same with different exons'+"\n"+str(self.overs)+"\n"+str(self.dif1)+"\n"+str(self.dif2)+"\n"+str(len(self.j1))+"\n"+str(len(self.j2))+"\n"+str(self.tx_obj1.get_exon_count())+"\n"+str(self.tx_obj2.get_exon_count()))
      return 1
    elif twocov: return 2
    elif onecov: return 3
    return False
  def analyze_overs(self):
    """A helper function to prepare values describing overlaps"""
    # Deltas between consecutive matched junction indexes (1 == consecutive).
    self.dif1 = [self.overs[i][0]-self.overs[i-1][0] for i in range(1,len(self.overs))]
    self.dif2 = [self.overs[i][1]-self.overs[i-1][1] for i in range(1,len(self.overs))]
    #see if it starts and ends on first or last junction
    self.start1 = self.overs[0][0] == 0
    self.start2 = self.overs[0][1] == 0
    self.end1 = self.overs[-1][0] == len(self.j1)-1
    self.end2 = self.overs[-1][1] == len(self.j2)-1
    return
  def calculate_overlap(self):
    """Create the array that describes how junctions overlap"""
    overs = []
    if not self.tx_obj1.range.overlaps(self.tx_obj2.range): return [] # if they dont overlap wont find anything
    # All-pairs comparison of junctions with positional tolerance.
    for i in range(0,len(self.j1)):
      for j in range(0,len(self.j2)):
        if self.j1[i].overlaps(self.j2[j],tolerance=self.tolerance):
          overs.append([i,j])
    return overs
class Junction:
  """ class to describe a junction
  :param rng_left: left side of junction
  :param rng_right: right side of junction
  :type rng_left: GenomicRange
  :type rng_right: GenomicRange
  """
  def __init__(self,rng_left=None,rng_right=None):
    self.left = rng_left
    self.right = rng_right
    self.left_exon = None
    self.right_exon = None
  def dump_serialized(self):
    """Serialize this junction to a pickle string
    :return: serialized object
    :rtype: string
    """
    return pickle.dumps(self)
  def load_serialized(self,instr):
    """Restore this junction's state from a pickle string.
    Bug fix: the original rebound the local name ``self`` (a no-op);
    we now copy the unpickled state onto this instance.
    :param instr: output of dump_serialized()
    :type instr: string
    """
    self.__dict__.update(pickle.loads(instr).__dict__)
  def get_string(self):
    """A string representation of the junction
    :return: string represnetation
    :rtype: string
    """
    return self.left.chr+':'+str(self.left.end)+'-'+self.right.chr+':'+str(self.right.start)
  def get_left_exon(self):
    """ get the exon to the left of the junction
    :return: left exon
    :rtype: Exon
    """
    return self.left_exon
  def get_right_exon(self):
    """ get the exon to the right of the junction
    :return: right exon
    :rtype: Exon or GenomicRange
    """
    return self.right_exon
  def get_range_string(self):
    """Another string representation of the junction. these may be redundant."""
    return self.left.chr+":"+str(self.left.end)+'/'+self.right.chr+":"+str(self.right.start)
  def set_left(self,rng):
    """ Assign the leftmost range"""
    self.left = rng
  def set_right(self,rng):
    """ Assign the right most range"""
    self.right = rng
  def equals(self,junc):
    """test equality with another junction (both sides must match)
    Bug fix: the original returned False when the sides WERE equal,
    inverting the intended comparison.
    """
    if not self.left.equals(junc.left): return False
    if not self.right.equals(junc.right): return False
    return True
  def overlaps(self,junc,tolerance=0):
    """see if junction overlaps with tolerance"""
    if not self.left.overlaps(junc.left,padding=tolerance): return False
    if not self.right.overlaps(junc.right,padding=tolerance): return False
    return True
  def cmp(self,junc,tolerance=0):
    """ output comparison and allow for tolerance if desired
    * -1 if junc comes before self
    * 1 if junc comes after self
    * 0 if overlaps
    * 2 if else
    :param junc:
    :param tolerance: optional search space (default=0, no tolerance)
    :type junc: Junction
    :type tolerance: int
    :return: value of comparison
    :rtype: int
    """
    if self.overlaps(junc,tolerance):
      return 0 #equal
    if self.left.chr == junc.right.chr:
      if self.left.start > junc.right.start:
        return -1 #comes before
    if self.right.chr == junc.left.chr:
      # NOTE(review): comparing against junc.right.start while the chr check
      # uses junc.left looks inconsistent — confirm intended behavior.
      if self.right.start < junc.right.start:
        return 1 #comes after
    return 2
  def set_exon_left(self,ex):
    """assign the left exon and link the exon back to this junction"""
    self.left_exon = ex
    ex.right_junc = self
  def set_exon_right(self,ex):
    """assign the right exon and link the exon back to this junction"""
    self.right_exon = ex
    ex.left_junc = self
class Exon99:
  """class to describe an exon (legacy implementation; see Exon above)
  :param rng:
  :type rng: GenomicRange
  """
  def __init__(self,rng=None):
    self.rng = rng
    self.left_junc = None
    self.right_junc = None
    self._is_leftmost = False #bool is it a start or end
    self._is_rightmost = False
  def dump_serialized(self):
    """Serialize this exon to a pickle string."""
    return pickle.dumps(self)
  def load_serialized(self,instr):
    """Restore state from a pickle string.
    Bug fix: the original rebound the local ``self`` which had no effect.
    """
    self.__dict__.update(pickle.loads(instr).__dict__)
  def range(self):
    """Return the underlying GenomicRange."""
    return self.rng
  def get_length(self):
    """Length of the underlying range."""
    return self.rng.length()
  def set_left_junc(self,junc):
    """Attach the junction on this exon's left side and link it back.
    Bug fix: the original assigned ``junc.set_right_exon = self``,
    creating a stray attribute instead of calling the junction's
    set_exon_right() method.
    """
    self.left_junc = junc
    junc.set_exon_right(self)
  def set_right_junc(self,junc):
    """Attach the junction on this exon's right side and link it back.
    Bug fix: same stray-attribute assignment as set_left_junc.
    """
    self.right_junc = junc
    junc.set_exon_left(self)
  def set_is_leftmost(self,boo=True):
    self._is_leftmost = boo # is leftmost
  def set_is_rightmost(self,boo=True):
    self._is_rightmost = boo #is rightmost
def _mode(mylist):
  """Return the most frequent value in mylist; ties between equally
  frequent values are broken by picking the candidate closest to the
  arithmetic mean of the whole list.

  :param mylist: non-empty list of numeric values
  :return: modal value
  """
  counts = [mylist.count(x) for x in mylist]
  maxcount = max(counts)
  avg = sum([float(x) for x in mylist])/len(mylist)
  dist = [abs(float(x)-avg) for x in mylist]
  best_list = []
  best_dist = []
  for i in range(0,len(mylist)):
    # Bug fix: the original evaluated `counts[i] == maxcount` as a bare
    # expression (no `if`), so EVERY element was treated as a mode
    # candidate and the mode was ignored entirely.
    if counts[i] == maxcount:
      best_list.append(mylist[i])
      best_dist.append(dist[i])
  abs_best_dist = min(best_dist)
  for i in range(0,len(best_dist)):
    if best_dist[i] == abs_best_dist:
      return best_list[i]
  # Unreachable in practice (min() came from best_dist), kept as a safeguard.
  sys.stderr.write("Warning: trouble finding best\n")
  return best_list[0]
| {
"repo_name": "jason-weirather/py-seq-tools",
"path": "seqtools/structure/transcript/__init__.py",
"copies": "1",
"size": "23478",
"license": "apache-2.0",
"hash": 2354088479183599600,
"line_mean": 32.976845152,
"line_max": 254,
"alpha_frac": 0.6406423034,
"autogenerated": false,
"ratio": 3.389346037245561,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4529988340645561,
"avg_score": null,
"num_lines": null
} |
"""A module to work with slug generation."""
from mongoengine import *
import bson.objectid
import unicodedata
import re
from hashlib import sha1
from random import randint
from flask import redirect, abort
from functools import wraps
class Slug( Document):
    """Represents a slug in the database. Fields are:
    * namespace - A namespace under which this slug falls (e.g. match, team, user etc)
    * slug - Actual slug.
    * slug_lower_hash - A hash of the lower-cased slug. This is useful for
      discovering the actual slug in case the user changes the case in the URL.
    * new_slug - Useful when the name of a resource is changed. The old
      slug object will have new_slug populated. With this, the application
      can do a redirect to the new slug.
    """
    # NOTE(review): slug_to_obj_converter issues a 301 redirect for renamed
    # slugs; an earlier docstring here said 302 — confirm which is intended.
    namespace = StringField()
    slug = StringField()
    slug_lower_hash = StringField()
    new_slug = StringField() # In case slug was changed after a name change.
    meta = {
        "indexes": [ ("namespace", "slug"), ("namespace", "slug_lower_hash")]
    }
class SlugMixin(object):
    """Mixin to add slugs related functionality to your mongoengine documents.
    To use:
    a) Use this mixin in your mongoengine document classes.
    b) Override slug_base_text method if required. It should return the
    text from which slug should be generated. By default, it uses the name
    property of the class.
    c) Create an index on the slug field for your document.
    """
    slug = StringField()
    def slug_base_text( self):
        """This function returns the value based on which the slug will be
        generated. It defaults to the value of name property. Override it in
        your class.
        """
        return self.name
    def set_slug( self):
        # generate_slug persists a new Slug record and, when the value
        # changed, a redirect entry from the old slug to the new one.
        self.slug = generate_slug( self.__class__.__name__, self.slug_base_text(), old_slug_value=self.slug)
def generate_slug_value( base_text, max_length=80):
    """Build a URL-safe slug candidate from base_text.

    Steps: ASCII-fold the unicode text, truncate to max_length, strip
    characters that are not word/space/hyphen characters, then collapse
    runs of whitespace and hyphens into single hyphens.
    """
    text = unicode( base_text)
    ascii_text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore')[:max_length]
    cleaned = unicode(re.sub('[^\w\s-]', '', ascii_text).strip())
    return re.sub('[-\s]+', '-', cleaned)
def generate_slug( namespace, base_text, max_length=80, old_slug_value=""):
    """Generate, persist and return a slug unique within a namespace.

    Args:
        namespace: Namespace under which this slug must be unique.
        base_text: Text from which to derive the slug.
        max_length (int): Maximum slug length (80 is enough).
        old_slug_value (str): When re-slugging a renamed resource, the
            resource's existing slug; used to leave a redirect pointer
            from the old slug to the new one.

    Returns:
        The generated slug string.
    """
    value = generate_slug_value( base_text, max_length)
    if value == old_slug_value:
        # The rename produced the same slug; nothing to persist.
        return value
    orig_value = value
    # Retry with random numeric suffixes until an unused slug is found.
    while Slug.objects( namespace=namespace, slug=value).first():
        counter = randint(1, 1000000)
        value = orig_value + '-' + str(counter)
    # Encode before hashing: hashlib requires bytes on Python 3, and the
    # slug is ASCII-only so utf-8 matches Python 2's implicit encoding.
    s = Slug( namespace=namespace, slug=value,
              slug_lower_hash=sha1( value.lower().encode('utf-8')).hexdigest())
    s.save()
    if old_slug_value:
        old = Slug.objects( namespace=namespace, slug=old_slug_value).first()
        if old:
            # Leave a forwarding pointer so old URLs can redirect.
            old.new_slug = s.slug
            old.save()
    return value
def lookup_slug( namespace, value):
    """Resolve a slug case-insensitively to its canonical form.

    Args:
        namespace: Namespace to search within.
        value: The (possibly differently-cased or outdated) slug.

    Returns:
        The canonical slug string - following the ``new_slug`` forwarding
        pointer if the resource was renamed - or None if no match exists.
    """
    # Encode before hashing: hashlib requires bytes on Python 3; matches
    # the encoding used when the hash was stored by generate_slug().
    digest = sha1( value.lower().encode('utf-8')).hexdigest()
    s = Slug.objects( namespace=namespace, slug_lower_hash=digest).first()
    if not s:
        return None
    # Prefer the forwarding pointer left behind by a rename, if any.
    return s.new_slug or s.slug
def slug_to_obj_converter(objclass, url_template):
    """Decorator factory resolving a slug URL argument into an object.

    The wrapped view receives the ``objclass`` instance matching the slug
    instead of the raw slug string. When no exact match exists, a
    case-insensitive lookup is attempted; if that yields a different
    canonical slug and ``url_template`` is given, the client is
    redirected (301) to the canonical URL, otherwise the object is
    fetched under the canonical slug. If nothing resolves, a 404 is
    raised.
    """
    def view_wrapper(view_func):
        @wraps(view_func)
        def wrapper(slug, *args, **kwargs):
            obj = objclass.objects(slug=slug).first()
            if not obj:
                canonical = lookup_slug(objclass.__name__, slug)
                if canonical and canonical != slug:
                    if url_template:
                        return redirect(url_template % (canonical), 301)
                    obj = objclass.objects(slug=canonical).first()
            if not obj:
                abort(404)
            # NOTE: object-id based URLs could additionally be redirected
            # to slug based ones here; not required as of now.
            return view_func(obj, *args, **kwargs)
        return wrapper
    return view_wrapper
| {
"repo_name": "manasgarg/slugifier",
"path": "slugifier/slug.py",
"copies": "1",
"size": "5327",
"license": "bsd-3-clause",
"hash": 1705176678568670700,
"line_mean": 35.2380952381,
"line_max": 108,
"alpha_frac": 0.5860709593,
"autogenerated": false,
"ratio": 4.201104100946372,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5287175060246372,
"avg_score": null,
"num_lines": null
} |
"""A module used for downloading files."""
import hashlib
import os
import shutil
import subprocess as sp
import tempfile
from ftplib import FTP
import requests
from tqdm import tqdm
from .logger import geoparse_logger as logger
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
class Downloader(object):
    """Download a single URL (HTTP(S), FTP, or via Aspera) to a local file.

    The payload is first written to a temporary file and only copied to
    ``destination`` on success, so a failed transfer never leaves a
    partial file at the final location.
    """

    def __init__(self, url, outdir=None, filename=None):
        """Args:
            url (str): URL to download from.
            outdir (str, optional): Output directory; defaults to the
                current working directory.
            filename (str, optional): Output file name; defaults to the
                basename of the URL path.
        """
        self.url = url
        if outdir is None:
            self.outdir = os.getcwd()
        else:
            self.outdir = outdir
        if filename is None:
            self.filename = self._get_filename()
        else:
            self.filename = filename
        # Reserve a unique temporary path; the file itself is deleted
        # immediately and only the name is reused during the download.
        with tempfile.NamedTemporaryFile(delete=True) as tmpf:
            self._temp_file_name = tmpf.name

    @property
    def destination(self):
        """Get the destination path.

        This is the property should be calculated every time it is used because
        a user could change the outdir and filename dynamically.
        """
        return os.path.join(os.path.abspath(self.outdir), self.filename)

    def download(self, force=False, silent=False):
        """Download from URL.

        Args:
            force (bool): Overwrite the destination file if it exists.
            silent (bool): Suppress the progress bar.
        """
        def _download():
            # Dispatch on URL scheme, then publish the temp file.
            if self.url.startswith("http"):
                self._download_http(silent=silent)
            elif self.url.startswith("ftp"):
                self._download_ftp(silent=silent)
            else:
                raise ValueError("Invalid URL %s" % self.url)
            logger.debug("Moving %s to %s" % (self._temp_file_name, self.destination))
            shutil.copyfile(self._temp_file_name, self.destination)
            logger.debug("Successfully downloaded %s" % self.url)
        try:
            is_already_downloaded = os.path.isfile(self.destination)
            if is_already_downloaded:
                if force:
                    try:
                        os.remove(self.destination)
                    except Exception:
                        # Best-effort delete; the copy below may still
                        # succeed by overwriting the file.
                        logger.error("Cannot delete %s" % self.destination)
                    logger.info("Downloading %s to %s" % (self.url, self.destination))
                    logger.debug(
                        "Downloading %s to %s" % (self.url, self._temp_file_name)
                    )
                    _download()
                else:
                    logger.info(
                        (
                            "File %s already exist. Use force=True if you"
                            " would like to overwrite it."
                        )
                        % self.destination
                    )
            else:
                _download()
        finally:
            # Always clean up the temporary file, even on failure.
            try:
                os.remove(self._temp_file_name)
            except OSError:
                pass

    def download_aspera(self, user, host, silent=False):
        """Download file with Aspera Connect.

        For details see the documentation of Aspera Connect.

        Args:
            user (:obj:`str`): Aspera/FTP user.
            host (:obj:`str`): Aspera/FTP host, e.g.
                "ftp-trace.ncbi.nlm.nih.gov" (required - no default).
            silent (bool): Suppress logging of the transfer output.

        Raises:
            ValueError: If $ASPERA_HOME is not set or the ascp binary or
                openssh key cannot be found under it.
        """
        aspera_home = os.environ.get("ASPERA_HOME", None)
        if not aspera_home:
            raise ValueError("environment variable $ASPERA_HOME not set")
        if not os.path.exists(aspera_home):
            raise ValueError(
                "$ASPERA_HOME directory {} does not exist".format(aspera_home)
            )
        ascp = os.path.join(aspera_home, "connect/bin/ascp")
        key = os.path.join(aspera_home, "connect/etc/asperaweb_id_dsa.openssh")
        if not os.path.exists(ascp):
            raise ValueError("could not find ascp binary")
        if not os.path.exists(key):
            raise ValueError("could not find openssh key")
        parsed_url = urlparse(self.url)
        cmd = "{} -i {} -k1 -T -l400m {}@{}:{} {}".format(
            ascp, key, user, host, parsed_url.path, self._temp_file_name
        )
        logger.debug(cmd)
        try:
            pr = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
            stdout, stderr = pr.communicate()
            if not silent:
                logger.debug("Aspera stdout: " + str(stdout))
                logger.debug("Aspera stderr: " + str(stderr))
            if pr.returncode == 0:
                logger.debug(
                    "Moving %s to %s" % (self._temp_file_name, self.destination)
                )
                shutil.move(self._temp_file_name, self.destination)
                logger.debug("Successfully downloaded %s" % self.url)
            else:
                logger.error("Failed to download %s using Aspera Connect" % self.url)
        finally:
            try:
                os.remove(self._temp_file_name)
            except OSError:
                pass

    def _get_filename(self):
        """Derive the output file name from the URL path."""
        filename = os.path.basename(urlparse(self.url).path).strip(" \n\t.")
        if len(filename) == 0:
            raise Exception("Cannot parse filename from %s" % self.url)
        return filename

    def _download_ftp(self, silent=False):
        """Download an ftp:// URL into the temporary file, with optional
        progress bar, then validate the downloaded size if known."""
        parsed_url = urlparse(self.url)
        # Initialise before the try block: the except path below falls
        # through to the size validation, which previously raised a
        # NameError when the connection failed before these were set.
        total_size = 0
        wrote = list()  # cannot add in the callback, has to be a list
        try:
            ftp = FTP(parsed_url.netloc)
            ftp.login()
            size = ftp.size(parsed_url.path)
            if size is not None:
                total_size = size
            with open(self._temp_file_name, "wb") as f:
                if silent:
                    def _write(data):
                        f.write(data)
                        wrote.append(len(data))
                    ftp.retrbinary("RETR %s" % parsed_url.path, _write)
                else:
                    with tqdm(
                        total=total_size,
                        unit="B",
                        unit_scale=True,
                        unit_divisor=1024,
                        leave=True,
                    ) as pbar:
                        def _write(data):
                            data_length = len(data)
                            pbar.update(data_length)
                            f.write(data)
                            wrote.append(data_length)
                        ftp.retrbinary("RETR %s" % parsed_url.path, _write)
            ftp.quit()
        except Exception:
            # Best-effort cleanup and logging; the size validation below
            # will raise if the transfer was left incomplete.
            try:
                ftp.quit()
                logger.error(
                    "Error when trying to retreive %s." % self.url, exc_info=True
                )
            except Exception:
                logger.error("Error when quiting FTP server.", exc_info=True)
        if total_size != 0:
            if sum(wrote) != total_size:
                raise ValueError(
                    "Downloaded size do not match the expected size for %s" % (self.url)
                )
            else:
                logger.debug("Size validation passed")

    def _download_http(self, silent=False):
        """Download an http(s):// URL into the temporary file, validating
        the Content-Length and Content-MD5 headers when present."""
        r = requests.get(self.url, stream=True)
        r.raise_for_status()
        # Total size in bytes.
        total_size = int(r.headers.get("content-length", 0))
        logger.debug("Total size: %s" % total_size)
        md5_header = r.headers.get("Content-MD5")
        logger.debug("md5: %s" % str(md5_header))
        chunk_size = 1024
        wrote = 0
        with open(self._temp_file_name, "wb") as f:
            if silent:
                for chunk in r.iter_content(chunk_size):
                    if chunk:
                        f.write(chunk)
                        wrote += len(chunk)
            else:
                with tqdm(
                    total=total_size,
                    unit="B",
                    unit_scale=True,
                    unit_divisor=1024,
                    leave=True,
                ) as pbar:
                    for chunk in r.iter_content(chunk_size):
                        if chunk:
                            f.write(chunk)
                            pbar.update(len(chunk))
                            wrote += len(chunk)
        if total_size != 0:
            if wrote != total_size:
                raise ValueError(
                    "Downloaded size do not match the expected size for %s" % (self.url)
                )
            else:
                logger.debug("Size validation passed")
        if md5_header:
            logger.debug("Validating MD5 checksum...")
            if md5_header == Downloader.md5sum(self._temp_file_name):
                logger.debug("MD5 checksum passed")
            else:
                raise ValueError("MD5 checksum do NOT passed")

    @staticmethod
    def md5sum(filename, blocksize=8192):
        """Get the MD5 checksum of a file."""
        with open(filename, "rb") as fh:
            m = hashlib.md5()
            while True:
                data = fh.read(blocksize)
                if not data:
                    break
                m.update(data)
            return m.hexdigest()
| {
"repo_name": "guma44/GEOparse",
"path": "src/GEOparse/downloader.py",
"copies": "1",
"size": "8957",
"license": "bsd-3-clause",
"hash": 2915908222042761700,
"line_mean": 35.1169354839,
"line_max": 88,
"alpha_frac": 0.4924639946,
"autogenerated": false,
"ratio": 4.360759493670886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5353223488270886,
"avg_score": null,
"num_lines": null
} |
""" A module which helps prevent eventual consistency issues on the Datastore. """
# SYSTEM
import logging
# 3RD PARTY
from django.conf import settings
from django.db import models
from django.dispatch import receiver
from google.appengine.datastore.datastore_rpc import BaseConnection
# CONSISTENCY
from .caches import get_caches
# Per-model defaults; overridable via settings.CONSISTENCY_CONFIG
# (see get_config below).
DEFAULT_CONFIG = {
    "cache_on_creation": True,  # cache PKs of newly-created objects
    "cache_on_modification": False,  # cache PKs of re-saved objects
    "cache_time": 60,  # seconds
    "caches": ["django"],  # cache backends, resolved via get_caches()
    "only_cache_matching": [],  # optional dict/callable filters
}
########################## API ##########################
def improve_queryset_consistency(queryset):
    """ Makes a queryset eventual-consistency-resistant (but not immune to it) by:
        1. Explicitly including PKs of recently-created/-modified objects (if they match the query).
        2. Re-fetching each object by PK to ensure that we get the latest version and exclude
           objects which no longer match the query.

        Args:
            queryset: A Django queryset (backed by the App Engine Datastore).

        Returns:
            A queryset which retains the original filtering/ordering but is
            restricted to the combined (fetched + recently-cached) PKs.
    """
    original = queryset.all()
    recent_pks = get_recent_object_pks_for_model(queryset.model)
    # A Datastore Get can fetch at most MAX_GET_KEYS keys in one call, so
    # reserve room for the recently-seen PKs.
    max_existing_pks = BaseConnection.MAX_GET_KEYS - len(recent_pks)
    high_mark = queryset.query.high_mark
    low_mark = queryset.query.low_mark or 0
    if high_mark is None or (high_mark - low_mark > max_existing_pks):
        # Having no limit set or a limit which is too high can cause 2 issues:
        # 1. Potential slowness because we fetch PKs for the whole result.
        # 2. Potential death because we might end up with more than 1000 PKs.
        # We avoid 2 and keep 1 to a minimum by imposing a limit to ensure a total result of <= 1000.
        # Note that this is imperfect because not all of the objects from recent_pks will
        # necessarily match the query, so we might limit more than necessary.
        imposed_limit = max_existing_pks + (queryset.query.low_mark or 0)
        logging.info("Limiting queryset for %s to %d", queryset.model, imposed_limit)
        queryset = queryset.all()[:imposed_limit]
    pks = list(queryset.all().values_list('pk', flat=True))  # this may include recently-created objects
    combined_pks = list(set(pks + recent_pks))
    # By using pk__in we cause the objects to be re-fetched with datastore.Get so we get the
    # up-to-date version of every object.
    # We keep the original filtering as well so that objects which don't match the query are excluded.
    # Keeping the original queryset also retains the ordering.
    return original.filter(pk__in=combined_pks)
def get_recent_objects(queryset):
    """ Return a queryset of recently-created/-modified objects matching the query.

        The result can be appended to / merged with the original queryset
        as required. It may overlap with the original queryset's results.
    """
    recent_pks = get_recent_object_pks_for_model(queryset.model)
    return queryset.filter(pk__in=recent_pks)
######################## SIGNALS ########################
@receiver(models.signals.post_save, dispatch_uid="consistency_post_save")
def handle_post_save(sender, instance, created, **kwargs):
    """ post_save signal handler: record the PK of a saved object. """
    config = get_config(sender)
    if not should_cache(instance, created, config):
        return
    add_object_pk_to_caches(instance, config)
@receiver(models.signals.post_delete, dispatch_uid="consistency_post_delete")
def handle_post_delete(sender, instance, **kwargs):
    """ post_delete signal handler: drop the deleted object's cached PK. """
    config = get_config(sender)
    # NOTE(review): the model class (sender), not the instance, is passed
    # to might_be_cached here - confirm that is intended.
    if might_be_cached(sender, config):
        remove_object_pk_from_caches(instance, config)
#########################################################
def get_config(model_class):
    """ Get the config for the given model class.

        Starts from DEFAULT_CONFIG and layers on the "defaults" and then
        the per-model overrides from settings.CONSISTENCY_CONFIG.
    """
    identifier = u"%s.%s" % (model_class._meta.app_label, model_class._meta.model_name)
    overrides = getattr(settings, "CONSISTENCY_CONFIG", {})
    config = dict(DEFAULT_CONFIG)
    config.update(overrides.get("defaults", {}))
    config.update(overrides.get("models", {}).get(identifier, {}))
    return config
def should_cache(obj, created, config):
    """ Decide whether the just-saved object's PK should be cached. """
    enabled = config["cache_on_creation"] if created else config["cache_on_modification"]
    if not enabled:
        return False
    checks = config["only_cache_matching"]
    if not checks:
        return True
    return object_matches_a_check(obj, checks)
def might_be_cached(obj, config):
    """ Might the given object be cached? """
    caching_enabled = config["cache_on_creation"] or config["cache_on_modification"]
    if not caching_enabled:
        return False
    checks = config["only_cache_matching"]
    return True if not checks else object_matches_a_check(obj, checks)
def object_matches_a_check(obj, checks):
    """ Does the object match *any* of the given checks from the "only_cache_matching" list?

        Each check is either a callable taking the object and returning a
        truthy value, or a dict of {field_name: expected_value} pairs
        which must all match the object's attributes.

        Raises:
            AttributeError: If a check is neither callable nor dict-like,
                or references a missing attribute (logged, then re-raised).
    """
    for check in checks:
        if callable(check):
            if check(obj):
                return True
        else:
            try:
                # .items() (not the Python-2-only .iteritems()) keeps this
                # working on both Python 2 and Python 3; non-dict checks
                # still raise AttributeError as before.
                for field, value in check.items():
                    if not getattr(obj, field) == value:
                        break
                else:
                    return True
            except AttributeError:
                logging.error("Invalid filter for model %s, %s", obj.__class__, check)
                raise
    return False
def get_recent_object_pks_for_model(model_class):
    """ Return the cached PKs of recently-created/-modified model objects. """
    config = get_config(model_class)
    cache_key = get_model_cache_key(model_class)
    recent = set()
    for backend in get_caches(config["caches"]):
        recent |= set(backend.get_pks(model_class, config, cache_key))
    return list(recent)
def add_object_pk_to_caches(obj, config):
    """ Record the PK of a just-saved object in each configured cache. """
    key = get_model_cache_key(obj.__class__)
    for backend in get_caches(config["caches"]):
        backend.add(obj, config, key)
def remove_object_pk_from_caches(obj, config):
    """ Drop a deleted object's PK from each configured cache. """
    key = get_model_cache_key(obj.__class__)
    for backend in get_caches(config["caches"]):
        backend.remove(obj, config, key)
def get_model_cache_key(model_class):
    """ Build the cache key under which recent PKs for this model are kept. """
    meta = model_class._meta
    return "recently-created-%s-%s" % (meta.app_label, meta.db_table)
| {
"repo_name": "adamalton/djangae-consistency",
"path": "consistency/consistency.py",
"copies": "1",
"size": "6066",
"license": "mit",
"hash": 37994077708953544,
"line_mean": 37.1509433962,
"line_max": 103,
"alpha_frac": 0.6516650181,
"autogenerated": false,
"ratio": 3.90096463022508,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019335164991796873,
"num_lines": 159
} |
""" A module which helps prevent eventual consistency issues on the Datastore. """
# SYSTEM
import logging
# 3RD PARTY
from django.conf import settings
from google.appengine.datastore.datastore_rpc import BaseConnection
# CONSISTENCY
from djangae.contrib.consistency.caches import get_caches
# Per-model defaults; overridable via settings.CONSISTENCY_CONFIG
# (see get_config below).
DEFAULT_CONFIG = {
    "cache_on_creation": True,  # cache PKs of newly-created objects
    "cache_on_modification": False,  # cache PKs of re-saved objects
    "cache_time": 60,  # seconds
    "caches": ["django"],  # cache backends, resolved via get_caches()
    "only_cache_matching": [],  # optional dict/callable filters
}
########################## API ##########################
def improve_queryset_consistency(queryset):
    """ Makes a queryset eventual-consistency-resistant (but not immune to it) by:
        1. Explicitly including PKs of recently-created/-modified objects (if they match the query).
        2. Re-fetching each object by PK to ensure that we get the latest version and exclude
           objects which no longer match the query.

        Args:
            queryset: A Django queryset (backed by the App Engine Datastore).

        Returns:
            A queryset which retains the original filtering/ordering but is
            restricted to the combined (fetched + recently-cached) PKs.
    """
    original = queryset.all()
    recent_pks = get_recent_object_pks_for_model(queryset.model)
    # A Datastore Get can fetch at most MAX_GET_KEYS keys in one call, so
    # reserve room for the recently-seen PKs.
    max_existing_pks = BaseConnection.MAX_GET_KEYS - len(recent_pks)
    high_mark = queryset.query.high_mark
    low_mark = queryset.query.low_mark or 0
    if high_mark is None or (high_mark - low_mark > max_existing_pks):
        # Having no limit set or a limit which is too high can cause 2 issues:
        # 1. Potential slowness because we fetch PKs for the whole result.
        # 2. Potential death because we might end up with more than 1000 PKs.
        # We avoid 2 and keep 1 to a minimum by imposing a limit to ensure a total result of <= 1000.
        # Note that this is imperfect because not all of the objects from recent_pks will
        # necessarily match the query, so we might limit more than necessary.
        imposed_limit = max_existing_pks + (queryset.query.low_mark or 0)
        logging.info("Limiting queryset for %s to %d", queryset.model, imposed_limit)
        queryset = queryset.all()[:imposed_limit]
    pks = list(queryset.all().values_list('pk', flat=True))  # this may include recently-created objects
    combined_pks = list(set(pks + recent_pks))
    # By using pk__in we cause the objects to be re-fetched with datastore.Get so we get the
    # up-to-date version of every object.
    # We keep the original filtering as well so that objects which don't match the query are excluded.
    # Keeping the original queryset also retains the ordering.
    return original.filter(pk__in=combined_pks)
def get_recent_objects(queryset):
    """ Return a queryset of recently-created/-modified objects matching the query.

        Append/merge this with the original queryset's results as needed;
        note the two may overlap.
    """
    pks = get_recent_object_pks_for_model(queryset.model)
    return queryset.filter(pk__in=pks)
######################## SIGNALS ########################
# See signals.py for registration
def handle_post_save(sender, instance, created, **kwargs):
    """ post_save handler (wired up in signals.py): cache the saved PK. """
    config = get_config(sender)
    if not should_cache(instance, created, config):
        return
    add_object_pk_to_caches(instance, config)
def handle_post_delete(sender, instance, **kwargs):
    """ post_delete handler (wired up in signals.py): drop the cached PK. """
    config = get_config(sender)
    # NOTE(review): the model class (sender), not the instance, is passed
    # to might_be_cached here - confirm that is intended.
    if might_be_cached(sender, config):
        remove_object_pk_from_caches(instance, config)
#########################################################
def get_config(model_class):
    """ Get the config for the given model class.

        DEFAULT_CONFIG is layered with the "defaults" section and then the
        per-model section of settings.CONSISTENCY_CONFIG.
    """
    label = u"%s.%s" % (model_class._meta.app_label, model_class._meta.model_name)
    user_config = getattr(settings, "CONSISTENCY_CONFIG", {})
    merged = dict(DEFAULT_CONFIG)
    merged.update(user_config.get("defaults", {}))
    merged.update(user_config.get("models", {}).get(label, {}))
    return merged
def should_cache(obj, created, config):
    """ Decide whether the just-saved object's PK should be cached. """
    setting = "cache_on_creation" if created else "cache_on_modification"
    if not config[setting]:
        return False
    checks = config["only_cache_matching"]
    if not checks:
        return True
    return object_matches_a_check(obj, checks)
def might_be_cached(obj, config):
    """ Might the given object be cached? """
    if not (config["cache_on_creation"] or config["cache_on_modification"]):
        return False
    checks = config["only_cache_matching"]
    return True if not checks else object_matches_a_check(obj, checks)
def object_matches_a_check(obj, checks):
    """ Does the object match *any* of the given checks from the "only_cache_matching" list?

        Each check is either a callable taking the object and returning a
        truthy value, or a dict of {field_name: expected_value} pairs
        which must all match the object's attributes.

        Raises:
            AttributeError: If a check is neither callable nor dict-like,
                or references a missing attribute (logged, then re-raised).
    """
    for check in checks:
        if callable(check):
            if check(obj):
                return True
        else:
            try:
                # .items() (not the Python-2-only .iteritems()) keeps this
                # working on both Python 2 and Python 3; non-dict checks
                # still raise AttributeError as before.
                for field, value in check.items():
                    if not getattr(obj, field) == value:
                        break
                else:
                    return True
            except AttributeError:
                logging.error("Invalid filter for model %s, %s", obj.__class__, check)
                raise
    return False
def get_recent_object_pks_for_model(model_class):
    """ Return the cached PKs of recently-created/-modified model objects. """
    config = get_config(model_class)
    cache_key = get_model_cache_key(model_class)
    collected = set()
    for backend in get_caches(config["caches"]):
        collected.update(backend.get_pks(model_class, config, cache_key))
    return list(collected)
def add_object_pk_to_caches(obj, config):
    """ Record the PK of a just-saved object in each configured cache. """
    key = get_model_cache_key(obj.__class__)
    for backend in get_caches(config["caches"]):
        backend.add(obj, config, key)
def remove_object_pk_from_caches(obj, config):
    """ Drop a deleted object's PK from each configured cache. """
    key = get_model_cache_key(obj.__class__)
    for backend in get_caches(config["caches"]):
        backend.remove(obj, config, key)
def get_model_cache_key(model_class):
    """ Build the cache key under which recent PKs for this model are kept. """
    opts = model_class._meta
    return "recently-created-%s-%s" % (opts.app_label, opts.db_table)
| {
"repo_name": "kirberich/djangae",
"path": "djangae/contrib/consistency/consistency.py",
"copies": "11",
"size": "5910",
"license": "bsd-3-clause",
"hash": 4636768681652226000,
"line_mean": 36.4050632911,
"line_max": 103,
"alpha_frac": 0.6473773266,
"autogenerated": false,
"ratio": 3.900990099009901,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A module which implements Classical Least Squares Regression."""
from . import *
class CLS(RegressionBase):
    """Classical Least Squares Regression

    The classical least squares regression approach is to initially swap the
    roles of the X and Y variables, perform linear regression and then to
    invert the result. It is useful when the number of X variables is larger
    than the number of calibration samples available, when conventional
    multiple linear regression would be unable to proceed.

    Note :
        The regression matrix A_pinv is found using the pseudo-inverse. In
        order for this to be calculable, the number of calibration samples
        ``N`` has be be larger than the number of Y variables ``m``, the
        number of X variables ``n`` must at least equal the number of Y
        variables, there must not be any collinearities in the calibration Y
        data and Yt X must be non-singular.

    Args:
        X (ndarray N x n): X calibration data, one row per data sample
        Y (ndarray N x m): Y calibration data, one row per data sample

    Attributes:
        A (ndarray m x n): Resulting regression matrix of X on Y
        A_pinv (ndarray m x n): Pseudo-inverse of A
    """

    def __init__(self, X, Y):
        Xc, Yc = super()._prepare_data(X, Y)
        if Yc.shape[0] <= Yc.shape[1]:
            raise ParameterError('CLS requires more rows (data samples) than '
                                 'output variables (columns of Y data)')
        if Xc.shape[1] < Yc.shape[1]:
            raise ParameterError('CLS requires at least as input variables '
                                 '(columns of X data) as output variables '
                                 '(columns of Y data)')
        # Regress X on Y (roles swapped) and then invert via the
        # pseudo-inverse to obtain the prediction matrix.
        self.A = linalg.inv(Yc.T @ Yc) @ Yc.T @ Xc
        self.A_pinv = self.A.T @ linalg.inv(self.A @ self.A.T)

    def prediction(self, Z):
        """Predict the output resulting from a given input

        Args:
            Z (ndarray of floats): The input on which to make the
                prediction. Must either be a one dimensional array of the
                same length as the number of calibration X variables, or a
                two dimensional array with the same number of columns as
                the calibration X data and one row for each input row.

        Returns:
            Y (ndarray of floats) : The predicted output - either a one
            dimensional array of the same length as the number of
            calibration Y variables or a two dimensional array with the
            same number of columns as the calibration Y data and one row
            for each input row.

        Raises:
            ParameterError: If the number of input variables does not
                match the calibration X data.
        """
        # shape[-1] is the variable count for both 1-D (single sample)
        # and 2-D (one row per sample) inputs, so one check covers both.
        if Z.shape[-1] != self.X_variables:
            raise ParameterError('Data provided does not have the same '
                                 'number of variables as the original X '
                                 'data')
        # Broadcasting handles both the 1-D and 2-D cases in a single
        # matrix expression, replacing the previous per-row Python loop.
        return self.Y_offset + (Z - self.X_offset) @ self.A_pinv
| {
"repo_name": "jhumphry/regressions",
"path": "regressions/cls.py",
"copies": "1",
"size": "3530",
"license": "isc",
"hash": 1642314509265903000,
"line_mean": 41.0238095238,
"line_max": 78,
"alpha_frac": 0.5779036827,
"autogenerated": false,
"ratio": 4.479695431472082,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5557599114172082,
"avg_score": null,
"num_lines": null
} |
"""A module which implements goodness-of-fit statistics."""
try:
import scipy.stats
_stats_available = True
except ImportError:
_stats_available = False
from . import *
def SS(Y):
    """Implements the Sum of Squares

    Computes the sum of squares of the input after centering it by
    subtracting the column means.

    Args:
        Y (ndarray N x m): Y calibration data, one row per data sample

    Returns:
        SS (float): The sum of the squares of the centered input data.
    """
    # Promote a 1-D array to a single-column matrix
    if Y.ndim == 1:
        Y = Y.reshape((Y.shape[0], 1))
    centred = Y - Y.mean(0)
    return np.sum(centred ** 2.0)
def RESS(R, X, Y, others=None, relative=False):
    """Implements the Residual Error Sum of Squares

    Trains the given regression class on the calibration data, predicts Y
    back from the same X data and returns the summed squared prediction
    error. Beware that this statistic rewards over-fitting; consider
    combining it with :py:func:`PRESS`.

    Args:
        R (class): A regression class
        X (ndarray N x n): X calibration data, one row per data sample
        Y (ndarray N x m): Y calibration data, one row per data sample
        others (dict, optional): Extra keyword arguments for the
            regression class constructor.
        relative (boolean, optional): Divide each error by the true Y
            value before squaring, which helps when Y columns have very
            different scales.

    Returns:
        RESS (float): The RESS statistic.
    """
    extra = {} if others is None else others
    if X.shape[0] != Y.shape[0]:
        raise ParameterError('X and Y data must have the same number of '
                             'rows (data samples)')
    # Promote 1-D arrays to single-column matrices
    if len(X.shape) == 1:
        X = X.reshape((X.shape[0], 1))
    if len(Y.shape) == 1:
        Y = Y.reshape((Y.shape[0], 1))
    model = R(X=X, Y=Y, **extra)
    residuals = model.prediction(Z=X) - Y
    if relative:
        residuals = residuals / Y
    return (residuals ** 2).sum()
def R2(R, X, Y, others=None):
    """Implements the R**2 statistic

    Equal to ``1 - RESS/SS``: the fraction of the variance in the
    (centered) Y data explained by the regression model after training on
    that same Y data. An over-fitted model can score a very high R**2 yet
    generalise poorly; :py:func:`Q2` is more robust in that respect.

    Args:
        R (class): A regression class
        X (ndarray N x n): X calibration data, one row per data sample
        Y (ndarray N x m): Y calibration data, one row per data sample
        others (dict, optional): Extra keyword arguments for the
            regression class constructor.

    Returns:
        R2 (float): The R2 statistic.
    """
    return 1.0 - RESS(R, X, Y, others, relative=False) / SS(Y)
def PRESS(R, X, Y, groups=4, others=None, relative=False):
    """Implements the Predicted Residual Error Sum of Squares

    Each cross-validation group of samples is removed in turn, the
    regression model is trained on the remaining samples, and the removed
    samples' Y values are predicted. The sum of squared differences
    between all such predictions and the true Y data is the PRESS
    statistic.

    Args:
        R (class): A regression class
        X (ndarray N x n): X calibration data, one row per data sample
        Y (ndarray N x m): Y calibration data, one row per data sample
        groups (int, optional): Number of cross-validation groups to use
        others (dict, optional): Extra keyword arguments for the
            regression class constructor.
        relative (boolean, optional): Divide each error by the true Y
            value before squaring, which helps when Y columns have very
            different scales.

    Returns:
        PRESS (float): The PRESS statistic.
    """
    extra = {} if others is None else others
    if X.shape[0] != Y.shape[0]:
        raise ParameterError('X and Y data must have the same number of '
                             'rows (data samples)')
    data_samples = X.shape[0]
    if data_samples < 2:
        raise ParameterError('There must be at least two data samples to '
                             'produce the PRESS statistic.')
    if data_samples < groups:
        raise ParameterError('There must be at least as many data samples as '
                             'cross-validation groups')
    if groups < 2:
        raise ParameterError('There must be at least two cross-validation '
                             'groups')
    group_size = data_samples // groups
    # (start, end) slice bounds per group; the last group absorbs any
    # remainder so every sample is covered.
    bounds = [(g * group_size,
               data_samples if g == groups - 1 else (g + 1) * group_size)
              for g in range(groups)]
    # Promote 1-D arrays to single-column matrices
    if len(X.shape) == 1:
        X = X.reshape((X.shape[0], 1))
    if len(Y.shape) == 1:
        Y = Y.reshape((Y.shape[0], 1))
    Yhat = np.empty(Y.shape)
    for start, end in bounds:
        # Train on everything except the held-out group
        Xp = np.concatenate((X[:start, :], X[end:, :]))
        Yp = np.concatenate((Y[:start, :], Y[end:, :]))
        model = R(X=Xp, Y=Yp, **extra)
        Yhat[start:end, :] = model.prediction(Z=X[start:end, :])
    errors = (Yhat - Y) / Y if relative else (Yhat - Y)
    return (errors ** 2).sum()
def Q2(R, X, Y, groups=4, others=None):
    """Implements the Q**2 statistic

    Equal to ``1 - PRESS/SS``: the fraction of the variance in each part
    of the (centered) Y data explained by a model trained only on the
    other parts. This penalises regression models that tend to over-fit
    the training data.

    Args:
        R (class): A regression class
        X (ndarray N x n): X calibration data, one row per data sample
        Y (ndarray N x m): Y calibration data, one row per data sample
        groups (int, optional): Number of cross-validation groups to use
        others (dict, optional): Extra keyword arguments for the
            regression class constructor.

    Returns:
        Q2 (float): The Q2 statistic.
    """
    return 1.0 - PRESS(R, X, Y, groups, others, relative=False) / SS(Y)
def residuals_QQ(Y):
    """Function for creating normal Q-Q probability plots of residuals

    Takes a matrix of residuals (true Y values subtracted from the
    model's predictions), one column per Y variable. Each column is
    centered, scaled by its standard deviation and sorted, giving the
    sample quantiles; theoretical quantiles from the normal distribution
    are computed alongside. Plotting one against the other yields a Q-Q
    plot: normally-distributed residuals fall on a straight line through
    the origin.

    Requires 'SciPy' to be available.

    Args:
        Y (ndarray N x m): Matrix of residuals

    Returns:
        X, Y (tuple of ndarray N and ndarray N x m): The theoretical
        quantiles from the normal distribution and the sample quantiles

    Raises:
        NotImplementedError: SciPy is not available
    """
    if not _stats_available:
        raise NotImplementedError("This function requires SciPy")
    # Promote a 1-D array to a single-column matrix
    if len(Y.shape) == 1:
        Y = Y.reshape((Y.shape[0], 1))
    sample_quantiles = Y - Y.mean(0)
    sample_quantiles /= sample_quantiles.std(0)
    sample_quantiles.sort(0)
    samples = Y.shape[0]
    theoretical = np.empty((samples))
    for i in range(0, samples):
        theoretical[i] = scipy.stats.norm.ppf(1.0 / (samples+1) * (i+1))
    return theoretical, sample_quantiles
| {
"repo_name": "jhumphry/regressions",
"path": "regressions/fitstats.py",
"copies": "1",
"size": "9732",
"license": "isc",
"hash": -3045243730217523700,
"line_mean": 34.2608695652,
"line_max": 78,
"alpha_frac": 0.6427250308,
"autogenerated": false,
"ratio": 3.962540716612378,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 276
} |
"""A module which implements kernel PLS."""
import random
from . import *
class Kernel_PLS(RegressionBase):
    """Non-linear Kernel PLS regression using the PLS2 algorithm
    This class implements kernel PLS regression by transforming the input
    X data into feature space by applying a kernel function between each
    pair of inputs. The kernel function provided will be called with two
    vectors and should return a float. Kernels should be symmetrical with
    regard to the order in which the vectors are supplied. The PLS2
    algorithm is then applied to the transformed data. The application of
    the kernel function means that non-linear transformations are
    possible.
    Note:
        If ``ignore_failures`` is ``True`` then the resulting object
        may have fewer components than requested if convergence does
        not succeed.
    Args:
        X (ndarray N x n): X calibration data, one row per data sample
        Y (ndarray N x m): Y calibration data, one row per data sample
        g (int): Number of components to extract
        X_kernel (function): Kernel function
        max_iterations (int, optional) : Maximum number of iterations of
            NIPALS to attempt
        iteration_convergence (float, optional): Difference in norm
            between two iterations at which point the iteration will be
            considered to have converged.
        ignore_failures (boolean, optional): Do not raise an error if
            iteration has to be abandoned before the requested number
            of components have been recovered
    Attributes:
        components (int): number of components extracted (=g)
        X_training_set (ndarray N x n): X calibration data (centred)
        K (ndarray N x N): X calibration data transformed into feature space
        P (ndarray n x g): Loadings on K (Components extracted from data)
        Q (ndarray m x g): Loadings on Y (Components extracted from data)
        T (ndarray N x g): Scores on K
        U (ndarray N x g): Scores on Y
        B_RHS (ndarray n x m): Partial regression matrix
    """
    def __init__(self, X, Y, g, X_kernel,
                 max_iterations=DEFAULT_MAX_ITERATIONS,
                 iteration_convergence=DEFAULT_EPSILON,
                 ignore_failures=True):
        # Centre the calibration data (done by the base class helper)
        Xc, Yc = super()._prepare_data(X, Y)
        self.X_training_set = Xc
        self.X_kernel = X_kernel
        # Build the kernel (Gram) matrix K[i, j] = k(x_i, x_j). Only the
        # strict lower triangle is evaluated and mirrored, relying on the
        # documented requirement that the kernel is symmetric.
        K = np.empty((self.data_samples, self.data_samples))
        for i in range(0, self.data_samples):
            for j in range(0, i):
                K[i, j] = X_kernel(Xc[i, :], Xc[j, :])
                K[j, i] = K[i, j]
            K[i, i] = X_kernel(Xc[i, :], Xc[i, :])
        # Centre K in feature space: K <- (I - 1/N) K (I - 1/N)
        centralizer = (np.identity(self.data_samples)) - \
            (1.0 / self.data_samples) * \
            np.ones((self.data_samples, self.data_samples))
        K = centralizer @ K @ centralizer
        self.K = K
        # Working matrices for up to g components (trimmed at the end)
        T = np.empty((self.data_samples, g))
        Q = np.empty((self.Y_variables, g))
        U = np.empty((self.data_samples, g))
        P = np.empty((self.data_samples, g))
        self.components = 0
        K_j = K
        Y_j = Yc
        for j in range(0, g):
            # Seed NIPALS with a randomly chosen column of the deflated Y
            u_j = Y_j[:, random.randint(0, self.Y_variables-1)]
            iteration_count = 0
            iteration_change = iteration_convergence * 10.0
            # Iterate scores until successive u_j vectors stop moving
            while iteration_count < max_iterations and \
                    iteration_change > iteration_convergence:
                w_j = K_j @ u_j
                t_j = w_j / np.linalg.norm(w_j, 2)
                q_j = Y_j.T @ t_j
                old_u_j = u_j
                u_j = Y_j @ q_j
                u_j /= np.linalg.norm(u_j, 2)
                iteration_change = linalg.norm(u_j - old_u_j)
                iteration_count += 1
            if iteration_count >= max_iterations:
                # Convergence failed for this component: either keep the
                # components found so far or raise, as configured
                if ignore_failures:
                    break
                else:
                    raise ConvergenceError('PLS2 failed to converge for '
                                           'component: '
                                           '{}'.format(self.components+1))
            T[:, j] = t_j
            Q[:, j] = q_j
            U[:, j] = u_j
            P[:, j] = (K_j.T @ w_j) / (w_j @ w_j)
            # Deflate K (both sides) and Y by the extracted component
            deflator = (np.identity(self.data_samples) - np.outer(t_j.T, t_j))
            K_j = deflator @ K_j @ deflator
            Y_j = Y_j - np.outer(t_j, q_j.T)
            self.components += 1
        # If iteration stopped early because of failed convergence, only
        # the actual components will be copied
        self.T = T[:, 0:self.components]
        self.Q = Q[:, 0:self.components]
        self.U = U[:, 0:self.components]
        self.P = P[:, 0:self.components]
        self.B_RHS = self.U @ linalg.inv(self.T.T @ self.K @ self.U) @ self.Q.T
    def prediction(self, Z):
        """Predict the output resulting from a given input
        Args:
            Z (ndarray of floats): The input on which to make the
                prediction. A one-dimensional array will be interpreted as
                a single multi-dimensional input unless the number of X
                variables in the calibration data was 1, in which case it
                will be interpreted as a set of inputs. A two-dimensional
                array will be interpreted as one multi-dimensional input
                per row.
        Returns:
            Y (ndarray of floats) : The predicted output - either a one
            dimensional array of the same length as the number of
            calibration Y variables or a two dimensional array with the
            same number of columns as the calibration Y data and one row
            for each input row.
        """
        if len(Z.shape) == 1:
            if self.X_variables == 1:
                # 1-D input with univariate X: a set of samples
                Z = Z.reshape((Z.shape[0], 1))
                Kt = np.empty((Z.shape[0], self.data_samples))
            else:
                if Z.shape[0] != self.X_variables:
                    raise ParameterError('Data provided does not have the '
                                         'same number of variables as the '
                                         'original X data')
                # 1-D input otherwise: a single multi-variable sample
                Z = Z.reshape((1, Z.shape[0]))
                Kt = np.empty((1, self.data_samples))
        else:
            if Z.shape[1] != self.X_variables:
                raise ParameterError('Data provided does not have the same '
                                     'number of variables as the original X '
                                     'data')
            Kt = np.empty((Z.shape[0], self.data_samples))
        # Kernel between each (centred) input and each training sample
        for i in range(0, Z.shape[0]):
            for j in range(0, self.data_samples):
                Kt[i, j] = self.X_kernel(Z[i, :] - self.X_offset,
                                         self.X_training_set[j, :])
        # Centre the test kernel matrix consistently with the training K.
        # NOTE(review): np.ones(self.data_samples) below is 1-D and relies
        # on broadcasting against the identity matrix - confirm intended.
        centralizer = (1.0 / self.data_samples) * \
            np.ones((Z.shape[0], self.data_samples))
        Kt = (Kt - centralizer @ self.K) @ \
            (np.identity(self.data_samples) -
             (1.0 / self.data_samples) * np.ones(self.data_samples))
        # Fix centralisation - appears to be necessary but not usually
        # mentioned in papers
        Kt -= Kt.mean(0)
        return self.Y_offset + Kt @ self.B_RHS
| {
"repo_name": "jhumphry/regressions",
"path": "regressions/kernel_pls.py",
"copies": "1",
"size": "7288",
"license": "isc",
"hash": 4389283295795383000,
"line_mean": 37.9732620321,
"line_max": 79,
"alpha_frac": 0.541712404,
"autogenerated": false,
"ratio": 4.017640573318633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5059352977318633,
"avg_score": null,
"num_lines": null
} |
"""A module which implements Multiple Linear Regression."""
from . import *
class MLR(RegressionBase):
    """Multiple Linear Regression

    Classical multiple linear regression assumes that, once the means
    have been subtracted to centre both sets of variables, the
    relationship is Y = A X + E with E a matrix of zero-mean noise.

    Note :
        The regression matrix B is found via the normal equations. For
        this to be calculable the number of calibration samples ``N``
        has to exceed the number of X variables ``n``, and the
        calibration X data must be free of collinearities.

    Args:
        X (ndarray N x n): X calibration data, one row per data sample
        Y (ndarray N x m): Y calibration data, one row per data sample

    Attributes:
        B (ndarray m x n): Resulting regression matrix
    """
    def __init__(self, X, Y):
        Xc, Yc = super()._prepare_data(X, Y)
        samples, variables = Xc.shape
        if samples <= variables:
            raise ParameterError('MLR requires more rows (data samples) than '
                                 'input variables (columns of X data)')
        # Normal equations: B = (Xc' Xc)^-1 Xc' Yc
        self.B = linalg.inv(Xc.T @ Xc) @ Xc.T @ Yc
    def prediction(self, Z):
        """Predict the output resulting from a given input

        Args:
            Z (ndarray of floats): The input on which to make the
                prediction. Either a one dimensional array with one
                entry per calibration X variable, or a two dimensional
                array with one such input per row.

        Returns:
            Y (ndarray of floats) : The predicted output - a one
            dimensional array with one entry per calibration Y variable,
            or a two dimensional array with one such row per input row.
        """
        if len(Z.shape) == 1:
            if Z.shape[0] != self.X_variables:
                raise ParameterError('Data provided does not have the same '
                                     'number of variables as the original X '
                                     'data')
            return self.Y_offset + (Z - self.X_offset) @ self.B
        if Z.shape[1] != self.X_variables:
            raise ParameterError('Data provided does not have the same '
                                 'number of variables as the original X '
                                 'data')
        result = np.empty((Z.shape[0], self.Y_variables))
        for row in range(Z.shape[0]):
            result[row, :] = self.Y_offset + \
                (Z[row, :] - self.X_offset) @ self.B
        return result
| {
"repo_name": "jhumphry/regressions",
"path": "regressions/mlr.py",
"copies": "1",
"size": "2866",
"license": "isc",
"hash": -147947914718275170,
"line_mean": 38.2602739726,
"line_max": 78,
"alpha_frac": 0.5697836706,
"autogenerated": false,
"ratio": 4.607717041800643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 73
} |
"""A module which implements Principal Component Regression."""
import random
from . import *
class PCR_NIPALS(RegressionBase):
    """Principal Components Regression using the NIPALS algorithm
    PCR forms a set of new latent variables from the provided X data
    samples which describe as much of the variance in the X data as
    possible. The latent variables are then regressed against the provided
    Y data. PCR is connected with Principal Components Analysis, where the
    latent variables are referred to as Principal Components.
    This class uses the Non-linear Iterative PArtial Least Squares
    algorithm to extract the components. Either a fixed number of
    components should be specified using the ``g`` argument, or a target
    proportion of variation explained by the components should be
    specified via ``variation_explained``. The variables of the X and Y
    data can have their variances standardized. This is useful if they are
    of heterogeneous types as otherwise the components extracted can be
    dominated by the effects of different measurement scales rather than
    by the actual data.
    Note:
        If ``ignore_failures`` is ``True`` then the resulting object
        may have fewer components than requested if convergence does
        not succeed.
    Args:
        X (ndarray N x n): X calibration data, one row per data sample
        Y (ndarray N x m): Y calibration data, one row per data sample
        g (int): Number of components to extract
        variation_explained (float): Proportion of variance in X
            calibration data that the components extracted should explain
            (from 0.001 - 0.999)
        standardize_X (boolean, optional): Standardize the X data
        standardize_Y (boolean, optional): Standardize the Y data
        max_iterations (int, optional) : Maximum number of iterations of
            NIPALS to attempt
        iteration_convergence (float, optional): Difference in norm
            between two iterations at which point the iteration will be
            considered to have converged.
        ignore_failures (boolean, optional): Do not raise an error if
            iteration has to be abandoned before the requested number
            of or coverage by components has been achieved.
    Attributes:
        components (int): number of components extracted (=g)
        T (ndarray N x g): Scores
        P (ndarray n x g): Loadings (Components extracted from data)
        eigenvalues (ndarray g): Eigenvalues extracted
        total_variation (float): Total variation in calibration X data
        C (ndarray g x m): Regression coefficients
        PgC (ndarray n x m): Precalculated matrix product of P (limited to
            g components) and C
    """
    def __init__(self, X, Y, g=None, variation_explained=None,
                 standardize_X=False, standardize_Y=False,
                 max_iterations=DEFAULT_MAX_ITERATIONS,
                 iteration_convergence=DEFAULT_EPSILON,
                 ignore_failures=True):
        # Exactly one of g / variation_explained must be supplied
        if (g is None) == (variation_explained is None):
            raise ParameterError('Must specify either the number of principal '
                                 'components g to use or the proportion of '
                                 'data variance that must be explained.')
        if variation_explained is not None:
            if variation_explained < 0.001 or\
               variation_explained > 0.999:
                raise ParameterError('PCR will not reliably be able to use '
                                     'principal components that explain less '
                                     'than 0.1% or more than 99.9% of the '
                                     'variation in the data.')
        Xc, Yc = super()._prepare_data(X, Y, standardize_X, standardize_Y)
        if g is not None:
            if g < 1 or g > self.max_rank:
                raise ParameterError('Number of required components specified '
                                     'is impossible.')
        # Total variation: after standardization each variable contributes
        # (N - 1); otherwise it is the trace of the covariance-like matrix
        if standardize_X:
            self.total_variation = self.X_variables * (self.data_samples - 1.0)
        else:
            self.total_variation = (Xc @ Xc.T).trace()
        # Extract principal components (method chosen by subclass)
        self._perform_pca(Xc, g, variation_explained,
                          max_iterations, iteration_convergence,
                          ignore_failures)
        # Find regression parameters
        self.Y_offset = Y.mean(0)
        Yc = Y - self.Y_offset
        if standardize_Y:
            self.Y_scaling = Y.std(0, ddof=1)
            Yc /= self.Y_scaling
        else:
            self.Y_scaling = None
        # Regress the centred Y data onto the scores T
        self.C = np.diag(1.0 / self.eigenvalues) @ self.T.T @ Yc
        self.PgC = self.P @ self.C
    def _perform_pca(self, X, g=None, variation_explained=None,
                     max_iterations=DEFAULT_MAX_ITERATIONS,
                     iteration_convergence=DEFAULT_EPSILON,
                     ignore_failures=True):
        """A non-public routine that performs the PCA using an appropriate
        method and sets up self.T, self.P, self.eigenvalues and
        self.components."""
        T = np.empty((self.data_samples, self.max_rank))  # Scores
        P = np.empty((self.X_variables, self.max_rank))  # Loadings
        eig = np.empty((self.max_rank,))
        self.components = 0
        X_j = X
        while True:
            # Seed NIPALS with a randomly chosen column of the deflated X
            t_j = X_j[:, random.randint(0, self.X_variables-1)]
            iteration_count = 0
            iteration_change = iteration_convergence * 10.0
            while iteration_count < max_iterations and \
                    iteration_change > iteration_convergence:
                p_j = X_j.T @ t_j
                p_j /= np.linalg.norm(p_j, 2)  # Normalise p_j vectors
                old_t_j = t_j
                t_j = X_j @ p_j
                iteration_change = linalg.norm(t_j - old_t_j)
                iteration_count += 1
            if iteration_count >= max_iterations:
                # Convergence failed for this component: either keep what
                # has been found so far or raise, as configured
                if ignore_failures:
                    break
                else:
                    raise ConvergenceError('NIPALS PCA for PCR failed to '
                                           'converge for component: '
                                           '{}'.format(self.components+1))
            X_j = X_j - np.outer(t_j, p_j.T)  # Reduce in rank
            T[:, self.components] = t_j
            P[:, self.components] = p_j
            eig[self.components] = t_j @ t_j
            self.components += 1
            # Stop when the requested number of components is reached...
            if g is not None:
                if self.components == g:
                    break
            # ...or when enough total variation has been explained
            if variation_explained is not None:
                if eig[0:self.components].sum() >= \
                   variation_explained * self.total_variation:
                    break
        # Only copy the components actually used
        self.T = T[:, 0:self.components]
        self.P = P[:, 0:self.components]
        self.eigenvalues = eig[0:self.components]
    def variation_explained(self):
        """Return the proportion of variation explained
        Returns:
            variation_explained (float): Proportion of the total variation
            in the X data explained by the extracted principal components.
        """
        return self.eigenvalues.sum() / self.total_variation
    def prediction(self, Z):
        """Predict the output resulting from a given input
        Args:
            Z (ndarray of floats): The input on which to make the
                prediction. Must either be a one dimensional array of the
                same length as the number of calibration X variables, or a
                two dimensional array with the same number of columns as
                the calibration X data and one row for each input row.
        Returns:
            Y (ndarray of floats) : The predicted output - either a one
            dimensional array of the same length as the number of
            calibration Y variables or a two dimensional array with the
            same number of columns as the calibration Y data and one row
            for each input row.
        """
        if len(Z.shape) == 1:
            if Z.shape[0] != self.X_variables:
                raise ParameterError('Data provided does not have the same '
                                     'number of variables as the original X '
                                     'data')
        elif Z.shape[1] != self.X_variables:
            raise ParameterError('Data provided does not have the same '
                                 'number of variables as the original X data')
        # Centre the input, undo any standardization, project through PgC.
        # NOTE(review): standardized_X / X_rscaling / standardized_Y are
        # presumably set by the base class _prepare_data - confirm.
        tmp = (Z - self.X_offset)
        if self.standardized_X:
            tmp *= self.X_rscaling
        tmp = tmp @ self.PgC
        if self.standardized_Y:
            tmp *= self.Y_scaling
        return self.Y_offset + tmp
class PCR_SVD(PCR_NIPALS):
    """Principal Components Regression using SVD

    Mathematically equivalent to :py:class:`PCR_NIPALS`, but the
    principal components are recovered with a direct SVD-based solver
    rather than the iterative NIPALS procedure. This is entirely
    deterministic, at the cost of always extracting every component even
    when only a few are needed to reach the requested coverage.

    Note:
        The attributes of the resulting class are exactly the same as for
        :py:class:`PCR_NIPALS`.

    Args:
        X (ndarray N x n): X calibration data, one row per data sample
        Y (ndarray N x m): Y calibration data, one row per data sample
        g (int): Number of components to extract
        variation_explained (float): Proportion of variance in X
            calibration data that the components extracted should explain
            (from 0.001 - 0.999)
        standardize_X (boolean, optional): Standardize the X data
        standardize_Y (boolean, optional): Standardize the Y data
        max_iterations : Not relevant for SVD
        iteration_convergence : Not relevant for SVD
        ignore_failures: Not relevant for SVD
    """
    def _perform_pca(self, X, g=None, variation_explained=None,
                     max_iterations=DEFAULT_MAX_ITERATIONS,
                     iteration_convergence=DEFAULT_EPSILON,
                     ignore_failures=True):
        """Non-public helper: run the SVD and set self.T, self.P,
        self.eigenvalues and self.components."""
        u, s, v = linalg.svd(X, full_matrices=False)
        scores = u @ np.diag(s)
        loadings = v.T
        eig = (scores.T @ scores).diagonal()
        if g is None:
            # Smallest number of components reaching the requested coverage
            cuml = (eig.cumsum()/self.total_variation)
            g = cuml.searchsorted(variation_explained) + 1
        self.T = scores[:, 0:g]
        self.P = loadings[:, 0:g]
        self.eigenvalues = eig[0:g]
        self.components = g
| {
"repo_name": "jhumphry/regressions",
"path": "regressions/pcr.py",
"copies": "1",
"size": "11224",
"license": "isc",
"hash": -4381909593718865400,
"line_mean": 39.3741007194,
"line_max": 79,
"alpha_frac": 0.5892729865,
"autogenerated": false,
"ratio": 4.394675019577134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5483948006077134,
"avg_score": null,
"num_lines": null
} |
"""A module which implements the continuous wavelet transform
with complex Morlet wavelets.
Author : Alexandre Gramfort, alexandre.gramfort@telecom-paristech.fr (2011)
License : BSD 3-clause
inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
from math import sqrt
from copy import deepcopy
import numpy as np
from scipy import linalg
from scipy.fftpack import fftn, ifftn
from ..fixes import partial
from ..baseline import rescale
from ..parallel import parallel_func
from ..utils import logger, verbose
from ..channels import ContainsMixin, PickDropChannelsMixin
from ..io.pick import pick_info, pick_types
from ..utils import deprecated
def morlet(Fs, freqs, n_cycles=7, sigma=None, zero_mean=False):
    """Build complex Morlet wavelets for the given frequency range.

    Parameters
    ----------
    Fs : float
        Sampling Frequency.
    freqs : array
        Frequencies of interest (1 x Frequencies).
    n_cycles : float | array of float
        Number of cycles. Fixed number or one per frequency.
    sigma : float, (optional)
        Controls the temporal width of the wavelet. When None the width
        scales with frequency (standard wavelet behaviour, shorter
        wavelets at higher frequencies); when fixed the temporal
        resolution is constant as in a short-time Fourier transform and
        the number of oscillations grows with frequency.
    zero_mean : bool
        Make sure the wavelet is zero mean.

    Returns
    -------
    Ws : list of array
        Wavelets time series, one complex array per frequency.
    """
    cycles = np.atleast_1d(n_cycles)
    if (cycles.size != 1) and (cycles.size != len(freqs)):
        raise ValueError("n_cycles should be fixed or defined for "
                         "each frequency.")
    Ws = list()
    for k, f in enumerate(freqs):
        this_n_cycles = cycles[k] if len(cycles) != 1 else cycles[0]
        # Gaussian envelope width: frequency-dependent unless fixed sigma
        if sigma is None:
            sigma_t = this_n_cycles / (2.0 * np.pi * f)
        else:
            sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
        # Symmetric time axis out to +/- 5 standard deviations.
        # The scaling factor below is proportional to
        # (sigma_t*sqrt(pi))^(-1/2) (Tallon-Baudry 98).
        half_t = np.arange(0, 5 * sigma_t, 1.0 / Fs)
        t = np.r_[-half_t[::-1], half_t[1:]]
        oscillation = np.exp(2.0 * 1j * np.pi * f * t)
        envelope = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
        if zero_mean:
            # Subtract the analytic DC offset to make the wavelet zero mean
            oscillation -= np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
        W = oscillation * envelope
        W /= sqrt(0.5) * linalg.norm(W.ravel())
        Ws.append(W)
    return Ws
def _centered(arr, newsize):
"""Aux Function to center data"""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _cwt_fft(X, Ws, mode="same"):
    """Compute cwt with fft based convolutions

    Return a generator over signals: one complex (n_freqs, n_times)
    array per row of ``X`` (for mode "full": (n_freqs, fsize)).

    Raises ValueError if any wavelet is longer than the signals.
    """
    X = np.asarray(X)
    # Precompute wavelets for given frequency range to save time
    n_signals, n_times = X.shape
    n_freqs = len(Ws)
    Ws_max_size = max(W.size for W in Ws)
    size = n_times + Ws_max_size - 1
    # Always use 2**n-sized FFT
    fsize = 2 ** int(np.ceil(np.log2(size)))
    # precompute FFTs of Ws
    fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
    for i, W in enumerate(Ws):
        if len(W) > n_times:
            raise ValueError('Wavelet is too long for such a short signal. '
                             'Reduce the number of cycles.')
        fft_Ws[i] = fftn(W, [fsize])
    for k, x in enumerate(X):
        if mode == "full":
            tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
        elif mode == "same" or mode == "valid":
            tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
        fft_x = fftn(x, [fsize])
        for i, W in enumerate(Ws):
            ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
            if mode == "valid":
                sz = abs(W.size - n_times) + 1
                # // (not /) so the offset is a valid integer slice index
                # on Python 3
                offset = (n_times - sz) // 2
                tfr[i, offset:(offset + sz)] = _centered(ret, sz)
            else:
                tfr[i, :] = _centered(ret, n_times)
        yield tfr
def _cwt_convolve(X, Ws, mode='same'):
"""Compute time freq decomposition with temporal convolutions
Return a generator over signals.
"""
X = np.asarray(X)
n_signals, n_times = X.shape
n_freqs = len(Ws)
# Compute convolutions
for x in X:
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
for i, W in enumerate(Ws):
ret = np.convolve(x, W, mode=mode)
if len(W) > len(x):
raise ValueError('Wavelet is too long for such a short '
'signal. Reduce the number of cycles.')
if mode == "valid":
sz = abs(W.size - n_times) + 1
offset = (n_times - sz) / 2
tfr[i, offset:(offset + sz)] = ret
else:
tfr[i] = ret
yield tfr
def cwt_morlet(X, Fs, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
    """Compute time freq decomposition with Morlet wavelets

    Parameters
    ----------
    X : array of shape [n_signals, n_times]
        signals (one per line)
    Fs : float
        sampling Frequency
    freqs : array
        Array of frequencies of interest
    use_fft : bool
        Compute convolution with FFT or temporal convolution.
    n_cycles: float | array of float
        Number of cycles. Fixed number or one per frequency.
    zero_mean : bool
        Make sure the wavelets are zero mean.

    Returns
    -------
    tfr : 3D array
        Time Frequency Decompositions (n_signals x n_frequencies x n_times)
    """
    mode = 'same'
    # mode = "valid"
    n_signals, n_times = X.shape
    n_frequencies = len(freqs)
    # Precompute wavelets for given frequency range to save time
    Ws = morlet(Fs, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
    if use_fft:
        coefs = _cwt_fft(X, Ws, mode)
    else:
        coefs = _cwt_convolve(X, Ws, mode)
    # np.complex (alias of the builtin) was removed in NumPy 1.24;
    # use the explicit 128-bit complex dtype
    tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex128)
    for k, tfr in enumerate(coefs):
        tfrs[k] = tfr
    return tfrs
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
    """Compute time freq decomposition with continuous wavelet transform

    Parameters
    ----------
    X : array of shape [n_signals, n_times]
        signals (one per line)
    Ws : list of array
        Wavelets time series
    use_fft : bool
        Use FFT for convolutions
    mode : 'same' | 'valid' | 'full'
        Convention for convolution
    decim : int
        Temporal decimation factor

    Returns
    -------
    tfr : 3D array
        Time Frequency Decompositions (n_signals x n_frequencies x n_times)
    """
    n_signals, n_times = X[:, ::decim].shape
    n_frequencies = len(Ws)
    if use_fft:
        coefs = _cwt_fft(X, Ws, mode)
    else:
        coefs = _cwt_convolve(X, Ws, mode)
    # np.complex (alias of the builtin) was removed in NumPy 1.24;
    # use the explicit 128-bit complex dtype
    tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex128)
    for k, tfr in enumerate(coefs):
        # Decimate along time only, after the transform
        tfrs[k] = tfr[..., ::decim]
    return tfrs
def _time_frequency(X, Ws, use_fft):
    """Aux of time_frequency for parallel computing over channels

    Accumulates, over the epochs in ``X`` (one per row), the power
    spectral density and the complex phase-locking sum of the wavelet
    transforms. Both returned arrays have shape (n_frequencies, n_times).
    """
    n_epochs, n_times = X.shape
    n_frequencies = len(Ws)
    psd = np.zeros((n_frequencies, n_times))  # PSD
    # np.complex (alias of the builtin) was removed in NumPy 1.24;
    # use the explicit 128-bit complex dtype
    plf = np.zeros((n_frequencies, n_times), dtype=np.complex128)  # phase lock
    mode = 'same'
    if use_fft:
        tfrs = _cwt_fft(X, Ws, mode)
    else:
        tfrs = _cwt_convolve(X, Ws, mode)
    for tfr in tfrs:
        tfr_abs = np.abs(tfr)
        psd += tfr_abs ** 2
        # NOTE(review): a zero coefficient would produce inf/nan here -
        # presumably never occurs for real signals; confirm.
        plf += tfr / tfr_abs
    return psd, plf
@verbose
def single_trial_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
                       baseline=None, baseline_mode='ratio', times=None,
                       decim=1, n_jobs=1, zero_mean=False, verbose=None):
    """Compute time-frequency power on single epochs

    Parameters
    ----------
    data : array of shape [n_epochs, n_channels, n_times]
        The epochs
    Fs : float
        Sampling rate
    frequencies : array-like
        The frequencies
    use_fft : bool
        Use the FFT for convolutions or not.
    n_cycles : float | array of float
        Number of cycles in the Morlet wavelet. Fixed number
        or one per frequency.
    baseline : None (default) or tuple of length 2
        The time interval to apply baseline correction.
        If None do not apply it. If baseline is (a, b)
        the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used
        and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
        interval is used.
    baseline_mode : None | 'ratio' | 'zscore'
        Do baseline correction with ratio (power is divided by mean
        power during baseline) or zscore (power is divided by standard
        deviation of power during baseline after subtracting the mean,
        power = [power - mean(power_baseline)] / std(power_baseline))
    times : array
        Required to define baseline
    decim : int
        Temporal decimation factor
    n_jobs : int
        The number of epochs to process at the same time
    zero_mean : bool
        Make sure the wavelets are zero mean.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    power : 4D array
        Power estimate (Epochs x Channels x Frequencies x Timepoints).
    """
    mode = 'same'
    n_frequencies = len(frequencies)
    n_epochs, n_channels, n_times = data[:, :, ::decim].shape
    # Precompute wavelets for given frequency range to save time
    Ws = morlet(Fs, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
    parallel, my_cwt, _ = parallel_func(cwt, n_jobs)
    logger.info("Computing time-frequency power on single epochs...")
    # np.float (alias of the builtin) was removed in NumPy 1.24;
    # use the explicit 64-bit float dtype
    power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
                     dtype=np.float64)
    # Package arguments for `cwt` here to minimize omissions where only one of
    # the two calls below is updated with new function arguments.
    cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
    if n_jobs == 1:
        for k, e in enumerate(data):
            power[k] = np.abs(cwt(e, **cwt_kw)) ** 2
    else:
        # Precompute tf decompositions in parallel
        tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
        for k, tfr in enumerate(tfrs):
            power[k] = np.abs(tfr) ** 2
    # Run baseline correction. Be sure to decimate the times array as well if
    # needed.
    if times is not None:
        times = times[::decim]
    power = rescale(power, times, baseline, baseline_mode, copy=False)
    return power
def _induced_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
                   decim=1, n_jobs=1, zero_mean=False):
    """Compute time induced power and inter-trial phase-locking factor

    The time frequency decomposition is done with Morlet wavelets

    Parameters
    ----------
    data : array
        3D array of shape [n_epochs, n_channels, n_times]
    Fs : float
        sampling Frequency
    frequencies : array
        Array of frequencies of interest
    use_fft : bool
        Compute transform with fft based convolutions or temporal
        convolutions.
    n_cycles : float | array of float
        Number of cycles. Fixed number or one per frequency.
    decim: int
        Temporal decimation factor
    n_jobs : int
        The number of CPUs used in parallel. All CPUs are used in -1.
        Requires joblib package.
    zero_mean : bool
        Make sure the wavelets are zero mean.

    Returns
    -------
    power : 3D array
        Induced power (Channels x Frequencies x Timepoints).
        Squared amplitude of time-frequency coefficients.
    phase_lock : 3D array
        Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
    """
    n_frequencies = len(frequencies)
    n_epochs, n_channels, n_times = data[:, :, ::decim].shape
    # Precompute wavelets for given frequency range to save time
    Ws = morlet(Fs, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
    if n_jobs == 1:
        psd = np.empty((n_channels, n_frequencies, n_times))
        # np.complex (alias of the builtin) was removed in NumPy 1.24;
        # use the explicit 128-bit complex dtype
        plf = np.empty((n_channels, n_frequencies, n_times),
                       dtype=np.complex128)
        for c in range(n_channels):
            X = data[:, c, :]
            this_psd, this_plf = _time_frequency(X, Ws, use_fft)
            psd[c], plf[c] = this_psd[:, ::decim], this_plf[:, ::decim]
    else:
        parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
        psd_plf = parallel(my_time_frequency(np.squeeze(data[:, c, :]),
                                             Ws, use_fft)
                           for c in range(n_channels))
        psd = np.zeros((n_channels, n_frequencies, n_times))
        plf = np.zeros((n_channels, n_frequencies, n_times),
                       dtype=np.complex128)
        for c, (psd_c, plf_c) in enumerate(psd_plf):
            psd[c, :, :], plf[c, :, :] = psd_c[:, ::decim], plf_c[:, ::decim]
    # Average power over epochs; the phase-lock sum magnitude is
    # normalised into [0, 1]
    psd /= n_epochs
    plf = np.abs(plf) / n_epochs
    return psd, plf
@deprecated("induced_power will be removed in release 0.9. Use "
            "tfr_morlet instead.")
def induced_power(data, Fs, frequencies, use_fft=True, n_cycles=7,
                  decim=1, n_jobs=1, zero_mean=False):
    """Deprecated public wrapper around the induced-power computation.

    Computes time induced power and the inter-trial phase-locking
    factor from a Morlet-wavelet time frequency decomposition.

    Parameters
    ----------
    data : array
        3D array of shape [n_epochs, n_channels, n_times]
    Fs : float
        sampling Frequency
    frequencies : array
        Array of frequencies of interest
    use_fft : bool
        Compute transform with fft based convolutions or temporal
        convolutions.
    n_cycles : float | array of float
        Number of cycles. Fixed number or one per frequency.
    decim: int
        Temporal decimation factor
    n_jobs : int
        The number of CPUs used in parallel. All CPUs are used in -1.
        Requires joblib package.
    zero_mean : bool
        Make sure the wavelets are zero mean.

    Returns
    -------
    power : array
        Induced power (Channels x Frequencies x Timepoints).
        Squared amplitude of time-frequency coefficients.
    phase_lock : array
        Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
    """
    # Forward everything unchanged to the private implementation
    options = dict(use_fft=use_fft, n_cycles=n_cycles, decim=decim,
                   n_jobs=n_jobs, zero_mean=zero_mean)
    return _induced_power(data, Fs, frequencies, **options)
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
                 baseline, vmin, vmax, dB):
    """Aux Function to prepare tfr computation

    Applies optional baseline correction, crops the time and frequency
    axes to [tmin, tmax] / [fmin, fmax] (inclusive bounds), converts
    times to milliseconds, optionally converts to dB and resolves the
    colour limits. Returns (data, times, freqs, vmin, vmax).
    """
    from ..viz.utils import _setup_vmin_vmax
    if mode is not None and baseline is not None:
        logger.info("Applying baseline correction '%s' during %s" %
                    (mode, baseline))
        data = rescale(data.copy(), times, baseline, mode)
    # crop time: the comparison is inclusive (<=), so add 1 to make the
    # last in-range sample part of the half-open slice
    itmin, itmax = None, None
    if tmin is not None:
        itmin = np.where(times >= tmin)[0][0]
    if tmax is not None:
        itmax = np.where(times <= tmax)[0][-1] + 1
    times = times[itmin:itmax]
    # crop freqs (inclusive upper bound, as above)
    ifmin, ifmax = None, None
    if fmin is not None:
        ifmin = np.where(freqs >= fmin)[0][0]
    if fmax is not None:
        ifmax = np.where(freqs <= fmax)[0][-1] + 1
    freqs = freqs[ifmin:ifmax]
    # crop data
    data = data[:, ifmin:ifmax, itmin:itmax]
    # seconds -> milliseconds; build a new array rather than scaling in
    # place, since `times` may still be (a view of) the caller's array
    times = times * 1e3
    if dB:
        data = 20 * np.log10(data)
    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
    return data, times, freqs, vmin, vmax
# XXX : todo IO of TFRs
class AverageTFR(ContainsMixin, PickDropChannelsMixin):
    """Container for Time-Frequency data

    Can for example store induced power at sensor level or intertrial
    coherence.

    Parameters
    ----------
    info : Info
        The measurement info.
    data : ndarray, shape (n_channels, n_freqs, n_times)
        The data.
    times : ndarray, shape (n_times,)
        The time values in seconds.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    nave : int
        The number of averaged TFRs.

    Attributes
    ----------
    ch_names : list
        The names of the channels.
    """
    @verbose
    def __init__(self, info, data, times, freqs, nave, verbose=None):
        self.info = info
        if data.ndim != 3:
            raise ValueError('data should be 3d. Got %d.' % data.ndim)
        n_channels, n_freqs, n_times = data.shape
        # Validate that the axes of ``data`` are consistent with the
        # channel, frequency and time descriptions supplied.
        if n_channels != len(info['chs']):
            raise ValueError("Number of channels and data size don't match"
                             " (%d != %d)." % (n_channels, len(info['chs'])))
        if n_freqs != len(freqs):
            raise ValueError("Number of frequencies and data size don't match"
                             " (%d != %d)." % (n_freqs, len(freqs)))
        if n_times != len(times):
            raise ValueError("Number of times and data size don't match"
                             " (%d != %d)." % (n_times, len(times)))
        self.data = data
        self.times = times
        self.freqs = freqs
        self.nave = nave

    @property
    def ch_names(self):
        """The names of the channels."""
        return self.info['ch_names']

    @verbose
    def plot(self, picks, baseline=None, mode='mean', tmin=None, tmax=None,
             fmin=None, fmax=None, vmin=None, vmax=None, cmap='RdBu_r',
             dB=False, colorbar=True, show=True, verbose=None):
        """Plot TFRs as two-dimensional time-frequency images

        One figure is created per picked channel.

        Parameters
        ----------
        picks : array-like of int
            The indices of the channels to plot.
        baseline : None (default) or tuple of length 2
            The time interval to apply baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
            Do baseline correction with ratio (power is divided by mean
            power during baseline) or zscore (power is divided by standard
            deviation of power during baseline after subtracting the mean,
            power = [power - mean(power_baseline)] / std(power_baseline)).
            If None no baseline correction is applied.
        tmin : None | float
            The first time instant to display. If None the first time point
            available is used.
        tmax : None | float
            The last time instant to display. If None the last time point
            available is used.
        fmin : None | float
            The first frequency to display. If None the first frequency
            available is used.
        fmax : None | float
            The last frequency to display. If None the last frequency
            available is used.
        vmin : float | None
            The minimum value of the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value of the color scale. If vmax is None, the data
            maximum value is used.
        cmap : matplotlib colormap | str
            The colormap to use. Defaults to 'RdBu_r'.
        dB : bool
            If True, 20*log10 is applied to the data to get dB.
        colorbar : bool
            If true, colorbar will be added to the plot
        show : bool
            Call pyplot.show() at the end.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).
        """
        from ..viz.topo import _imshow_tfr
        import matplotlib.pyplot as plt
        # Work on copies so baseline correction / cropping never touches
        # the stored data.
        times, freqs = self.times.copy(), self.freqs.copy()
        data = self.data[picks]
        data, times, freqs, vmin, vmax = \
            _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
                         baseline, vmin, vmax, dB)
        tmin, tmax = times[0], times[-1]
        # One new figure per picked channel.
        for k in range(len(data)):
            plt.figure()
            _imshow_tfr(plt, 0, tmin, tmax, vmin, vmax, ylim=None,
                        tfr=data[k: k + 1], freq=freqs, x_label='Time (ms)',
                        y_label='Frequency (Hz)', colorbar=colorbar,
                        picker=False, cmap=cmap)
        if show:
            plt.show()

    def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
                  tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
                  layout=None, cmap='RdBu_r', title=None, dB=False,
                  colorbar=True, layout_scale=0.945, show=True):
        """Plot TFRs in a topography with images

        Parameters
        ----------
        picks : array-like of int | None
            The indices of the channels to plot. If None all available
            channels are displayed.
        baseline : None (default) or tuple of length 2
            The time interval to apply baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
            Do baseline correction with ratio (power is divided by mean
            power during baseline) or zscore (power is divided by standard
            deviation of power during baseline after subtracting the mean,
            power = [power - mean(power_baseline)] / std(power_baseline)).
            If None no baseline correction is applied.
        tmin : None | float
            The first time instant to display. If None the first time point
            available is used.
        tmax : None | float
            The last time instant to display. If None the last time point
            available is used.
        fmin : None | float
            The first frequency to display. If None the first frequency
            available is used.
        fmax : None | float
            The last frequency to display. If None the last frequency
            available is used.
        vmin : float | None
            The minimum value of the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value of the color scale. If vmax is None, the data
            maximum value is used.
        layout : Layout | None
            Layout instance specifying sensor positions. If possible, the
            correct layout is inferred from the data.
        cmap : matplotlib colormap | str
            The colormap to use. Defaults to 'RdBu_r'.
        title : str
            Title of the figure.
        dB : bool
            If True, 20*log10 is applied to the data to get dB.
        colorbar : bool
            If true, colorbar will be added to the plot
        layout_scale : float
            Scaling factor for adjusting the relative size of the layout
            on the canvas.
        show : bool
            Call pyplot.show() at the end.
        """
        from ..viz.topo import _imshow_tfr, _plot_topo
        times = self.times.copy()
        freqs = self.freqs
        data = self.data
        info = self.info
        if picks is not None:
            data = data[picks]
            info = pick_info(info, picks)
        data, times, freqs, vmin, vmax = \
            _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
                         mode, baseline, vmin, vmax, dB)
        if layout is None:
            from mne.layouts.layout import find_layout
            # Infer the layout from the (possibly picked) info so that it
            # matches the channels actually being plotted.
            layout = find_layout(info)
        imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap)
        fig = _plot_topo(info=info, times=times,
                         show_func=imshow, layout=layout,
                         colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
                         layout_scale=layout_scale, title=title, border='w',
                         x_label='Time (ms)', y_label='Frequency (Hz)')
        if show:
            import matplotlib.pyplot as plt
            plt.show()
        return fig

    def _check_compat(self, tfr):
        """checks that self and tfr have the same time-frequency ranges"""
        assert np.all(tfr.times == self.times)
        assert np.all(tfr.freqs == self.freqs)

    def __add__(self, tfr):
        self._check_compat(tfr)
        out = self.copy()
        out.data += tfr.data
        return out

    def __iadd__(self, tfr):
        self._check_compat(tfr)
        self.data += tfr.data
        return self

    def __sub__(self, tfr):
        self._check_compat(tfr)
        out = self.copy()
        out.data -= tfr.data
        return out

    def __isub__(self, tfr):
        self._check_compat(tfr)
        self.data -= tfr.data
        return self

    def copy(self):
        """Return a copy of the instance."""
        return deepcopy(self)

    def __repr__(self):
        s = "time : [%f, %f]" % (self.times[0], self.times[-1])
        s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
        s += ", nave : %d" % self.nave
        # data has shape (n_channels, n_freqs, n_times), so the channel
        # count is axis 0 (shape[1] would report the number of frequencies).
        s += ', channels : %d' % self.data.shape[0]
        return "<AverageTFR | %s>" % s

    def apply_baseline(self, baseline, mode='mean'):
        """Baseline correct the data

        Parameters
        ----------
        baseline : tuple or list of length 2
            The time interval to apply rescaling / baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
            Do baseline correction with ratio (power is divided by mean
            power during baseline) or z-score (power is divided by standard
            deviation of power during baseline after subtracting the mean,
            power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
        """
        # In-place rescaling of the stored data (copy=False).
        self.data = rescale(self.data, self.times, baseline, mode, copy=False)

    def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
                     ch_type='mag', baseline=None, mode='mean',
                     layout=None, vmin=None, vmax=None, cmap='RdBu_r',
                     sensors='k,', colorbar=True, unit=None, res=64, size=2,
                     format='%1.1e', show_names=False, title=None,
                     axes=None, show=True):
        """Plot topographic maps of time-frequency intervals of TFR data

        Parameters
        ----------
        tmin : None | float
            The first time instant to display. If None the first time point
            available is used.
        tmax : None | float
            The last time instant to display. If None the last time point
            available is used.
        fmin : None | float
            The first frequency to display. If None the first frequency
            available is used.
        fmax : None | float
            The last frequency to display. If None the last frequency
            available is used.
        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
            The channel type to plot. For 'grad', the gradiometers are
            collected in pairs and the RMS for each pair is plotted.
        baseline : tuple or list of length 2
            The time interval to apply rescaling / baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
            Do baseline correction with ratio (power is divided by mean
            power during baseline) or z-score (power is divided by standard
            deviation of power during baseline after subtracting the mean,
            power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
        layout : None | Layout
            Layout instance specifying sensor positions (does not need to
            be specified for Neuromag data). If possible, the correct layout
            file is inferred from the data; if no appropriate layout file was
            found, the layout is automatically generated from the sensor
            locations.
        vmin : float | callable
            The value specifying the lower bound of the color range.
            If None, and vmax is None, -vmax is used. Else np.min(data).
            If callable, the output equals vmin(data).
        vmax : float | callable
            The value specifying the upper bound of the color range.
            If None, the maximum absolute value is used. If vmin is None,
            but vmax is not, defaults to np.min(data).
            If callable, the output equals vmax(data).
        cmap : matplotlib colormap
            Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
            'Reds'.
        sensors : bool | str
            Add markers for sensor locations to the plot. Accepts matplotlib
            plot format string (e.g., 'r+' for red plusses).
        colorbar : bool
            Plot a colorbar.
        unit : str | None
            The unit of the channel type used for colorbar labels.
        res : int
            The resolution of the topomap image (n pixels along each side).
        size : float
            Side length per topomap in inches.
        format : str
            String format for colorbar values.
        show_names : bool | callable
            If True, show channel names on top of the map. If a callable is
            passed, channel names will be formatted using the callable; e.g.,
            to delete the prefix 'MEG ' from all channel names, pass the
            function lambda x: x.replace('MEG ', ''). If `mask` is not None,
            only significant sensors will be shown.
        title : str | None
            Title. If None (default), no title is displayed.
        axes : instance of Axes | None
            The axes to plot to. If None the axes is defined automatically.
        show : bool
            Call pyplot.show() at the end.

        Returns
        -------
        fig : matplotlib.figure.Figure
            The figure containing the topography.
        """
        from ..viz import plot_tfr_topomap
        # Delegate all of the plotting work to the viz module.
        return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
                                fmax=fmax, ch_type=ch_type, baseline=baseline,
                                mode=mode, layout=layout, vmin=vmin, vmax=vmax,
                                cmap=cmap, sensors=sensors, colorbar=colorbar,
                                unit=unit, res=res, size=size, format=format,
                                show_names=show_names, title=title, axes=axes,
                                show=show)
def tfr_morlet(epochs, freqs, n_cycles, use_fft=False,
               return_itc=True, decim=1, n_jobs=1):
    """Compute Time-Frequency Representation (TFR) using Morlet wavelets

    Parameters
    ----------
    epochs : Epochs
        The epochs.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        The number of cycles globally or for each frequency.
    use_fft : bool
        The fft based convolution or not.
    return_itc : bool
        Return intertrial coherence (ITC) as well as averaged power.
    decim : int
        The decimation factor on the time axis. To reduce memory usage.
    n_jobs : int
        The number of jobs to run in parallel.

    Returns
    -------
    power : AverageTFR
        The averaged power.
    itc : AverageTFR
        The intertrial coherence (ITC). Only returned if return_itc
        is True.
    """
    # Restrict the computation to MEG/EEG data channels.
    picks = pick_types(epochs.info, meg=True, eeg=True)
    info = pick_info(epochs.info, picks)
    data = epochs.get_data()[:, picks, :]

    power, itc = _induced_power(data, Fs=info['sfreq'], frequencies=freqs,
                                n_cycles=n_cycles, n_jobs=n_jobs,
                                use_fft=use_fft, decim=decim,
                                zero_mean=True)

    # The time axis must be decimated to match the decimated TFR output.
    times = epochs.times[::decim].copy()
    nave = len(data)
    power = AverageTFR(info, power, times, freqs, nave)
    if not return_itc:
        return power
    return power, AverageTFR(info, itc, times, freqs, nave)
| {
"repo_name": "jaeilepp/eggie",
"path": "mne/time_frequency/tfr.py",
"copies": "1",
"size": "34411",
"license": "bsd-2-clause",
"hash": 2611984325535475700,
"line_mean": 36.5665938865,
"line_max": 79,
"alpha_frac": 0.5806282875,
"autogenerated": false,
"ratio": 3.8482442406620443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49288725281620444,
"avg_score": null,
"num_lines": null
} |
"""A module which implements the Partial Least Squares 1 algorithm."""
from . import *
class PLS1(RegressionBase):
    """Regression using the PLS1 algorithm.

    The PLS1 algorithm forms a set of new latent variables from the
    provided X and Y data samples based on criteria that balance the need
    to explain the variance within X and Y and the covariance between X
    and Y. Regression is then performed on the latent variables. PLS1 only
    addresses the case of a single Y variable and if more than one output
    variable is required then PLS1 will be run multiple times. PLS1 is a
    deterministic algorithm that requires one iteration per component
    extracted.

    Note:
        If ``ignore_failures`` is ``True`` then the resulting object
        may have fewer components than requested if convergence does
        not succeed.

    Args:
        X (ndarray N x n): X calibration data, one row per data sample
        Y (ndarray N x m): Y calibration data, one row per data sample
        g (int): Number of components to extract
        epsilon (float, optional): Value at which the components
            extracted will be considered to be too small to be stable
            and iteration will cease
        ignore_failures (boolean, optional): Do not raise an error if
            iteration has to be abandoned before the requested number
            of components have been recovered

    Attributes:
        components (int): number of components extracted (=g)
        W (ndarray m x n x g): Weight vectors
        P (ndarray m x n x g): Loadings (Components extracted from data)
        T (ndarray m x N x g): Scores
        c (ndarray m x g): Regression coefficients
        b (ndarray m x n): Resulting regression matrix
    """

    def __init__(self, X, Y, g,
                 epsilon=DEFAULT_EPSILON, ignore_failures=False):
        # Centre the data and record the problem dimensions on self
        # (done by the RegressionBase helper).
        Xc, Yc = super()._prepare_data(X, Y)

        if g < 1 or g > self.max_rank:
            raise ParameterError('Number of required components '
                                 'specified is impossible.')

        self.components = g

        # One independent PLS1 run is performed per Y variable, hence the
        # leading Y_variables axis on each of the result arrays.
        W = np.empty((self.Y_variables, self.X_variables, g))
        P = np.empty((self.Y_variables, self.X_variables, g))
        T = np.empty((self.Y_variables, self.data_samples, g))
        c = np.empty((self.Y_variables, g))
        b = np.empty((self.Y_variables, self.X_variables))

        for z in range(0, self.Y_variables):
            X_j = Xc
            y_j = Yc[:, z]
            for j in range(0, g):
                # Weight vector for this component (normalised).
                w_j = X_j.T @ y_j
                w_j /= linalg.norm(w_j, 2)
                # Scores and regression coefficient for this component.
                t_j = X_j @ w_j
                tt_j = t_j.T @ t_j
                c_j = (t_j.T @ y_j) / tt_j
                # A vanishing coefficient means there is no stable
                # component left to extract.
                if c_j < epsilon:
                    if ignore_failures:
                        if self.components > j:
                            self.components = j  # See comment below
                        break
                    else:
                        raise ConvergenceError('PLS1 failed at iteration: '
                                               'g={}, j={}'.format(g, j))
                # Loadings, then deflate X and y by the rank-1 part just
                # extracted before moving on to the next component.
                p_j = (X_j.T @ t_j) / tt_j
                X_j = X_j - np.outer(t_j, p_j.T)  # Reduce in rank
                y_j = y_j - t_j * c_j
                W[z, :, j] = w_j
                P[z, :, j] = p_j
                T[z, :, j] = t_j
                c[z, j] = c_j
            else:
                # for/else: this branch runs only when the inner loop
                # completed without hitting ``break``.
                # N.B - don't try to find the regression matrix if the
                # iteration failed! Inversion won't work...
                b[z, :] = W[z, :, :] @ \
                    linalg.inv(P[z, :, :].T @ W[z, :, :]) @ \
                    c[z, :]

        # If one of the iterations fails due to c_j becoming too small, then
        # self.components will be reduced and the output will be cut down to
        # the lowest number of iterations achieved for any of the Y variables.
        # Of course, b may no longer be a particularly good regression vector
        # in this case.
        self.W = W[:, :, 0:self.components]
        self.P = P[:, :, 0:self.components]
        self.T = T[:, :, 0:self.components]
        self.c = c[:, 0:self.components]
        self.b = b

    def prediction(self, Z):
        """Predict the output resulting from a given input

        Args:
            Z (ndarray of floats): The input on which to make the
                prediction. Must either be a one dimensional array of the
                same length as the number of calibration X variables, or a
                two dimensional array with the same number of columns as
                the calibration X data and one row for each input row.

        Returns:
            Y (ndarray of floats) : The predicted output - either a one
                dimensional array of the same length as the number of
                calibration Y variables or a two dimensional array with the
                same number of columns as the calibration Y data and one row
                for each input row.
        """
        if len(Z.shape) == 1:
            # Single sample: centre it and apply the regression matrix.
            if Z.shape[0] != self.X_variables:
                raise ParameterError('Data provided does not have the same '
                                     'number of variables as the original X '
                                     'data')
            return self.Y_offset + (Z - self.X_offset).T @ self.b.T
        else:
            # Multiple samples: one prediction per input row.
            if Z.shape[1] != self.X_variables:
                raise ParameterError('Data provided does not have the same '
                                     'number of variables as the original X '
                                     'data')
            result = np.empty((Z.shape[0], self.Y_variables))
            for i in range(0, Z.shape[0]):
                result[i, :] = self.Y_offset + \
                    (Z[i, :] - self.X_offset).T @ self.b.T
            return result

    def prediction_iterative(self, Z):
        """Predict the output resulting from a given input, iteratively

        This produces the same output as the one-step version ``prediction``
        but works by applying each loading in turn to extract the latent
        variables corresponding to the input.

        Args:
            Z (ndarray of floats): The input on which to make the
                prediction. Must either be a one dimensional array of the
                same length as the number of calibration X variables, or a
                two dimensional array with the same number of columns as
                the calibration X data and one row for each input row.

        Returns:
            Y (ndarray of floats) : The predicted output - either a one
                dimensional array of the same length as the number of
                calibration Y variables or a two dimensional array with the
                same number of columns as the calibration Y data and one row
                for each input row.
        """
        if len(Z.shape) == 1:
            if Z.shape[0] != self.X_variables:
                raise ParameterError('Data provided does not have the same '
                                     'number of variables as the original X '
                                     'data')
            result = self.Y_offset.copy()
            # Each Y variable has its own set of weights/loadings, so the
            # latent variables are extracted per Y variable.
            for k in range(0, self.Y_variables):
                x_j = Z - self.X_offset
                t = np.empty((self.components))
                for j in range(0, self.components):
                    # Extract the score then deflate the input, mirroring
                    # the calibration deflation order.
                    t[j] = x_j @ self.W[k, :, j]
                    x_j = x_j - t[j] * self.P[k, :, j]
                result[k] += self.c[k, :] @ t
            return result
        else:
            if Z.shape[1] != self.X_variables:
                raise ParameterError('Data provided does not have the same '
                                     'number of variables as the original X '
                                     'data')
            result = np.empty((Z.shape[0], self.Y_variables))
            result[:, :] = self.Y_offset.copy()
            # Same per-Y-variable extraction as above, once per input row.
            for l in range(0, Z.shape[0]):
                for k in range(0, self.Y_variables):
                    x_j = Z[l, :] - self.X_offset
                    t = np.empty((self.components))
                    for j in range(0, self.components):
                        t[j] = x_j @ self.W[k, :, j]
                        x_j = x_j - t[j] * self.P[k, :, j]
                    result[l, k] += self.c[k, :] @ t
            return result
| {
"repo_name": "jhumphry/regressions",
"path": "regressions/pls1.py",
"copies": "1",
"size": "8404",
"license": "isc",
"hash": 4906481718369004000,
"line_mean": 40.603960396,
"line_max": 78,
"alpha_frac": 0.5211803903,
"autogenerated": false,
"ratio": 4.2877551020408164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5308935492340816,
"avg_score": null,
"num_lines": null
} |
"""A module which implements the Partial Least Squares 2 algorithm."""
import random
from . import *
class PLS2(RegressionBase):
    """Regression using the PLS2 algorithm.

    The PLS2 algorithm forms a set of new latent variables from the
    provided X and Y data samples based on criteria that balance the need
    to explain the variance within X and Y and the covariance between X
    and Y. Regression is then performed on the latent variables. In
    contrast to PLS1, the PLS2 algorithm handles multi-dimensional Y in
    one pass, taking into account all of the Y variables at once. Due to
    the added complexity relative to PLS1, PLS2 is a non-deterministic
    iterative algorithm comparable to the NIPALS algorithm for PCR.

    Note:
        If ``ignore_failures`` is ``True`` then the resulting object
        may have fewer components than requested if convergence does
        not succeed.

    Args:
        X (ndarray N x n): X calibration data, one row per data sample
        Y (ndarray N x m): Y calibration data, one row per data sample
        g (int): Number of components to extract
        max_iterations (int, optional) : Maximum number of iterations of
            NIPALS to attempt
        iteration_convergence (float, optional): Difference in norm
            between two iterations at which point the iteration will be
            considered to have converged.
        ignore_failures (boolean, optional): Do not raise an error if
            iteration has to be abandoned before the requested number
            of components have been recovered

    Attributes:
        components (int): number of components extracted (=g)
        P (ndarray n x g): Loadings on X (Components extracted from data)
        Q (ndarray m x g): Loadings on Y (Components extracted from data)
        T (ndarray N x g): Scores on X
        U (ndarray N x g): Scores on Y
        W (ndarray n x g): Weight vectors
        C (ndarray g x g): Diagonal matrix of regression coefficients
        B (ndarray n x m): Final regression matrix
    """

    def __init__(self, X, Y, g,
                 max_iterations=DEFAULT_MAX_ITERATIONS,
                 iteration_convergence=DEFAULT_EPSILON,
                 ignore_failures=True):
        # Centre the data and record the problem dimensions on self
        # (done by the RegressionBase helper).
        Xc, Yc = super()._prepare_data(X, Y)

        if g < 1 or g > self.max_rank:
            raise ParameterError('Number of required components '
                                 'specified is impossible.')

        W = np.empty((self.X_variables, g))
        T = np.empty((self.data_samples, g))
        Q = np.empty((self.Y_variables, g))
        U = np.empty((self.data_samples, g))
        P = np.empty((self.X_variables, g))
        c = np.empty((g,))

        self.components = 0
        X_j = Xc
        Y_j = Yc

        for j in range(0, g):
            # Start the NIPALS iteration from a randomly chosen Y column
            # (this is why PLS2 is non-deterministic).
            u_j = Y_j[:, random.randint(0, self.Y_variables-1)]
            iteration_count = 0
            # Seed the change so the while loop always runs at least once.
            iteration_change = iteration_convergence * 10.0

            while iteration_count < max_iterations and \
                    iteration_change > iteration_convergence:
                # Alternate between X-side (w, t) and Y-side (q, u)
                # estimates until the Y scores stop moving.
                w_j = X_j.T @ u_j
                w_j /= np.linalg.norm(w_j, 2)
                t_j = X_j @ w_j
                q_j = Y_j.T @ t_j
                q_j /= np.linalg.norm(q_j, 2)
                old_u_j = u_j
                u_j = Y_j @ q_j
                iteration_change = linalg.norm(u_j - old_u_j)
                iteration_count += 1

            if iteration_count >= max_iterations:
                # This component failed to converge: either stop here with
                # the components found so far, or raise.
                if ignore_failures:
                    break
                else:
                    raise ConvergenceError('PLS2 failed to converge for '
                                           'component: '
                                           '{}'.format(self.components+1))

            W[:, j] = w_j
            T[:, j] = t_j
            Q[:, j] = q_j
            U[:, j] = u_j

            t_dot_t = t_j.T @ t_j
            c[j] = (t_j.T @ u_j) / t_dot_t
            P[:, j] = (X_j.T @ t_j) / t_dot_t
            # Deflate X and Y by the rank-1 contribution just extracted.
            X_j = X_j - np.outer(t_j, P[:, j].T)
            Y_j = Y_j - c[j] * np.outer(t_j, q_j.T)
            self.components += 1

        # If iteration stopped early because of failed convergence, only
        # the actual components will be copied
        self.W = W[:, 0:self.components]
        self.T = T[:, 0:self.components]
        self.Q = Q[:, 0:self.components]
        self.U = U[:, 0:self.components]
        self.P = P[:, 0:self.components]
        self.C = np.diag(c[0:self.components])

        self.B = self.W @ linalg.inv(self.P.T @ self.W) @ self.C @ self.Q.T

    def prediction(self, Z):
        """Predict the output resulting from a given input

        Args:
            Z (ndarray of floats): The input on which to make the
                prediction. Must either be a one dimensional array of the
                same length as the number of calibration X variables, or a
                two dimensional array with the same number of columns as
                the calibration X data and one row for each input row.

        Returns:
            Y (ndarray of floats) : The predicted output - either a one
                dimensional array of the same length as the number of
                calibration Y variables or a two dimensional array with the
                same number of columns as the calibration Y data and one row
                for each input row.
        """
        if len(Z.shape) == 1:
            # Single sample: centre it and apply the regression matrix.
            if Z.shape[0] != self.X_variables:
                raise ParameterError('Data provided does not have the same '
                                     'number of variables as the original X '
                                     'data')
            return self.Y_offset + (Z - self.X_offset).T @ self.B
        else:
            # Multiple samples: one prediction per input row.
            if Z.shape[1] != self.X_variables:
                raise ParameterError('Data provided does not have the same '
                                     'number of variables as the original X '
                                     'data')
            result = np.empty((Z.shape[0], self.Y_variables))
            for i in range(0, Z.shape[0]):
                result[i, :] = self.Y_offset + \
                    (Z[i, :] - self.X_offset).T @ self.B
            return result

    def prediction_iterative(self, Z):
        """Predict the output resulting from a given input, iteratively

        This produces the same output as the one-step version ``prediction``
        but works by applying each loading in turn to extract the latent
        variables corresponding to the input.

        Args:
            Z (ndarray of floats): The input on which to make the
                prediction. Must either be a one dimensional array of the
                same length as the number of calibration X variables, or a
                two dimensional array with the same number of columns as
                the calibration X data and one row for each input row.

        Returns:
            Y (ndarray of floats) : The predicted output - either a one
                dimensional array of the same length as the number of
                calibration Y variables or a two dimensional array with the
                same number of columns as the calibration Y data and one row
                for each input row.
        """
        if len(Z.shape) == 1:
            if Z.shape[0] != self.X_variables:
                raise ParameterError('Data provided does not have the same '
                                     'number of variables as the original X '
                                     'data')
            # Extract each latent variable in turn, deflating the input as
            # during calibration, then regress on the scores.
            x_j = Z - self.X_offset
            t = np.empty((self.components))
            for j in range(0, self.components):
                t[j] = x_j @ self.W[:, j]
                x_j = x_j - t[j] * self.P[:, j]
            result = self.Y_offset + t @ self.C @ self.Q.T
            return result
        else:
            if Z.shape[1] != self.X_variables:
                raise ParameterError('Data provided does not have the same '
                                     'number of variables as the original X '
                                     'data')
            result = np.empty((Z.shape[0], self.Y_variables))
            t = np.empty((self.components))
            # Same score extraction as above, once per input row.
            for k in range(0, Z.shape[0]):
                x_j = Z[k, :] - self.X_offset
                for j in range(0, self.components):
                    t[j] = x_j @ self.W[:, j]
                    x_j = x_j - t[j] * self.P[:, j]
                result[k, :] = self.Y_offset + t @ self.C @ self.Q.T
            return result
| {
"repo_name": "jhumphry/regressions",
"path": "regressions/pls2.py",
"copies": "1",
"size": "8579",
"license": "isc",
"hash": -6395002917140847000,
"line_mean": 38.7175925926,
"line_max": 77,
"alpha_frac": 0.5340948829,
"autogenerated": false,
"ratio": 4.201273261508325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5235368144408324,
"avg_score": null,
"num_lines": null
} |
"""A module which implements the PLS-SB algorithm."""
from . import *
class PLS_SB(RegressionBase):
    """Regression using the PLS-SB algorithm.

    The PLS-SB sets up the same mathematical problem as the PLS2 module,
    but then formulates the convergence criteria as an eigenvalue problem
    and solves it directly. It is therefore a deterministic algorithm, but
    has the drawback that all components must be extracted at once, even
    if only a few are required. Note that the output of PLS-SB is not the
    same as PLS2. In the PLS2 each component found is removed from the
    working copies of the input matrices by a rank-1 operation so the next
    iterations will converge on a new component. In PLS-SB all components
    are found at once.

    Args:
        X (ndarray N x n): X calibration data, one row per data sample
        Y (ndarray N x m): Y calibration data, one row per data sample
        g (int): Number of components to extract

    Note:
        The attributes of the resulting class are exactly the same as for
        :py:class:`pls2.PLS2`.
    """

    def __init__(self, X, Y, g):
        # Centre the data and record the problem dimensions on self
        # (done by the RegressionBase helper).
        Xc, Yc = super()._prepare_data(X, Y)

        if g < 1 or g > self.max_rank:
            raise ParameterError('Number of required components '
                                 'specified is impossible.')

        self.components = g

        # The weight vectors are the dominant eigenvectors of
        # (X'Y)(X'Y)'. eigh returns eigenvalues in ascending order, so
        # the slice [:, :-g-1:-1] takes the last g columns in reverse,
        # i.e. the g largest eigenvalues first.
        XtY = Xc.T @ Yc
        _, W = linalg.eigh(XtY @ XtY.T)
        self.W = W[:, :-g-1:-1].real

        # Scores on X and (normalised) loadings/scores on Y.
        self.T = Xc @ self.W
        self.Q = Yc.T @ self.T
        self.Q /= np.linalg.norm(self.Q, axis=0)
        self.U = Yc @ self.Q

        # Per-component regression coefficients, X loadings and the final
        # regression matrix.
        t_dot_t = (self.T.T @ self.T).diagonal()
        self.C = np.diag((self.T.T @ self.U).diagonal() / t_dot_t)
        self.P = (Xc.T @ self.T) / t_dot_t
        self.B = self.W @ linalg.inv(self.P.T @ self.W) @ self.C @ self.Q.T

    def prediction(self, Z):
        """Predict the output resulting from a given input

        Args:
            Z (ndarray of floats): The input on which to make the
                prediction. Must either be a one dimensional array of the
                same length as the number of calibration X variables, or a
                two dimensional array with the same number of columns as
                the calibration X data and one row for each input row.

        Returns:
            Y (ndarray of floats) : The predicted output - either a one
                dimensional array of the same length as the number of
                calibration Y variables or a two dimensional array with the
                same number of columns as the calibration Y data and one row
                for each input row.
        """
        if len(Z.shape) == 1:
            # Single sample: centre it and apply the regression matrix.
            if Z.shape[0] != self.X_variables:
                raise ParameterError('Data provided does not have the same '
                                     'number of variables as the original X '
                                     'data')
            return self.Y_offset + (Z - self.X_offset).T @ self.B
        else:
            # Multiple samples: one prediction per input row.
            if Z.shape[1] != self.X_variables:
                raise ParameterError('Data provided does not have the same '
                                     'number of variables as the original X '
                                     'data')
            result = np.empty((Z.shape[0], self.Y_variables))
            for i in range(0, Z.shape[0]):
                result[i, :] = self.Y_offset + \
                    (Z[i, :] - self.X_offset).T @ self.B
            return result

    def prediction_iterative(self, Z):
        """Predict the output resulting from a given input, iteratively

        This produces the same output as the one-step version ``prediction``
        but works by applying each loading in turn to extract the latent
        variables corresponding to the input.

        Args:
            Z (ndarray of floats): The input on which to make the
                prediction. Must either be a one dimensional array of the
                same length as the number of calibration X variables, or a
                two dimensional array with the same number of columns as
                the calibration X data and one row for each input row.

        Returns:
            Y (ndarray of floats) : The predicted output - either a one
                dimensional array of the same length as the number of
                calibration Y variables or a two dimensional array with the
                same number of columns as the calibration Y data and one row
                for each input row.
        """
        if len(Z.shape) == 1:
            if Z.shape[0] != self.X_variables:
                raise ParameterError('Data provided does not have the same '
                                     'number of variables as the original X '
                                     'data')
            # Extract each latent variable in turn, deflating the input,
            # then regress on the scores.
            x_j = Z - self.X_offset
            t = np.empty((self.components))
            for j in range(0, self.components):
                t[j] = x_j @ self.W[:, j]
                x_j = x_j - t[j] * self.P[:, j]
            result = self.Y_offset + t @ self.C @ self.Q.T
            return result
        else:
            if Z.shape[1] != self.X_variables:
                raise ParameterError('Data provided does not have the same '
                                     'number of variables as the original X '
                                     'data')
            result = np.empty((Z.shape[0], self.Y_variables))
            t = np.empty((self.components))
            # Same score extraction as above, once per input row.
            for k in range(0, Z.shape[0]):
                x_j = Z[k, :] - self.X_offset
                for j in range(0, self.components):
                    t[j] = x_j @ self.W[:, j]
                    x_j = x_j - t[j] * self.P[:, j]
                result[k, :] = self.Y_offset + t @ self.C @ self.Q.T
            return result
| {
"repo_name": "jhumphry/regressions",
"path": "regressions/pls_sb.py",
"copies": "1",
"size": "5826",
"license": "isc",
"hash": -4710234365048718000,
"line_mean": 39.4583333333,
"line_max": 77,
"alpha_frac": 0.5449708205,
"autogenerated": false,
"ratio": 4.212581344902386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5257552165402386,
"avg_score": null,
"num_lines": null
} |
"""A module which implements the time frequency estimation.
Authors : Alexandre Gramfort, alexandre.gramfort@telecom-paristech.fr
Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
License : BSD 3-clause
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
import warnings
from math import sqrt
from copy import deepcopy
import numpy as np
from scipy import linalg
from scipy.fftpack import fftn, ifftn
from ..fixes import partial
from ..baseline import rescale
from ..parallel import parallel_func
from ..utils import logger, verbose, requires_h5py
from ..channels.channels import ContainsMixin, PickDropChannelsMixin
from ..io.pick import pick_info, pick_types
from ..utils import check_fname
from .multitaper import dpss_windows
from .._hdf5 import write_hdf5, read_hdf5, _check_simplify_h5_info
def morlet(sfreq, freqs, n_cycles=7, sigma=None, zero_mean=False, Fs=None):
    """Compute Morlet wavelets for the given frequency range.

    Parameters
    ----------
    sfreq : float
        Sampling Frequency.
    freqs : array
        Frequencies of interest (1 x Frequencies).
    n_cycles : float | array of float
        Number of cycles. Fixed number or one per frequency.
    sigma : float, (optional)
        Controls the width of the wavelet, i.e. its temporal resolution.
        If None, the temporal resolution adapts with the frequency, as in
        any wavelet transform (the higher the frequency, the shorter the
        wavelet). If fixed, the temporal resolution is constant, as in a
        short-time Fourier transform, and the number of oscillations
        increases with the frequency.
    zero_mean : bool
        Make sure the wavelet is zero mean.
    Fs : float | None
        Deprecated alias for ``sfreq``.

    Returns
    -------
    Ws : list of array
        Wavelets time series.
    """
    cycles = np.atleast_1d(n_cycles)
    # Honor the deprecated sampling-frequency keyword.
    if Fs is not None:
        sfreq = Fs
        warnings.warn("`Fs` is deprecated and will be removed in v0.10. "
                      "Use `sfreq` instead", DeprecationWarning)
    if cycles.size not in (1, len(freqs)):
        raise ValueError("n_cycles should be fixed or defined for "
                         "each frequency.")
    wavelets = list()
    for idx, freq in enumerate(freqs):
        n_cyc = cycles[idx] if cycles.size != 1 else cycles[0]
        # Fixed or scale-dependent temporal width.
        ref = freq if sigma is None else sigma
        sigma_t = n_cyc / (2.0 * np.pi * ref)
        # Symmetric time grid spanning +/- 5 standard deviations.
        half_t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
        grid = np.r_[-half_t[::-1], half_t[1:]]
        osc = np.exp(2.0 * 1j * np.pi * freq * grid)
        if zero_mean:  # subtract the DC component of the carrier
            osc = osc - np.exp(- 2 * (np.pi * freq * sigma_t) ** 2)
        envelope = np.exp(-grid ** 2 / (2.0 * sigma_t ** 2))
        wavelet = osc * envelope
        # Scaling proportional to (sigma_t*sqrt(pi))^(-1/2)
        # (Tallon-Baudry 98); leaves ||W|| == sqrt(2).
        wavelet /= sqrt(0.5) * linalg.norm(wavelet.ravel())
        wavelets.append(wavelet)
    return wavelets
def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0,
                  zero_mean=False):
    """Compute DPSS-tapered wavelets for the given frequency range

    Parameters
    ----------
    sfreq : float
        Sampling Frequency.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        The number of cycles globally or for each frequency.
        Defaults to 7.
    time_bandwidth : float, (optional)
        Time x Bandwidth product.
        The number of good tapers (low-bias) is chosen automatically based on
        this to equal floor(time_bandwidth - 1).
        Default is 4.0, giving 3 good tapers.
    zero_mean : bool
        Make sure each tapered wavelet is zero mean. Defaults to False.

    Returns
    -------
    Ws : list of list of array
        Wavelets time series, one inner list per taper, each holding one
        wavelet per frequency.
    """
    if time_bandwidth < 2.0:
        raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
    n_taps = int(np.floor(time_bandwidth - 1))
    n_cycles = np.atleast_1d(n_cycles)
    if n_cycles.size != 1 and n_cycles.size != len(freqs):
        raise ValueError("n_cycles should be fixed or defined for "
                         "each frequency.")
    # One inner list per taper order, filled frequency by frequency.
    Ws = [list() for _ in range(n_taps)]
    for k, f in enumerate(freqs):
        if len(n_cycles) != 1:
            this_n_cycles = n_cycles[k]
        else:
            this_n_cycles = n_cycles[0]
        t_win = this_n_cycles / float(f)
        t = np.arange(0., t_win, 1.0 / sfreq)
        # Making sure wavelets are centered before tapering
        oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
        # Compute the full DPSS taper set ONCE per frequency; the original
        # recomputed it inside the taper loop (n_taps times per frequency),
        # which was pure loop-invariant waste.
        tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
                                    n_taps)
        for m in range(n_taps):
            Wk = oscillation * tapers[m]
            if zero_mean:  # to make it zero mean
                real_offset = Wk.mean()
                Wk -= real_offset
            Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
            Ws[m].append(Wk)
    return Ws
def _centered(arr, newsize):
"""Aux Function to center data"""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _cwt_fft(X, Ws, mode="same"):
    """Compute cwt with fft based convolutions

    Return a generator over signals: one complex array of wavelet
    coefficients per row of ``X``.
    """
    X = np.asarray(X)
    # Precompute wavelets for given frequency range to save time
    n_signals, n_times = X.shape
    n_freqs = len(Ws)
    Ws_max_size = max(W.size for W in Ws)
    size = n_times + Ws_max_size - 1
    # Always use 2**n-sized FFT
    fsize = 2 ** int(np.ceil(np.log2(size)))
    # precompute FFTs of Ws
    fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
    for i, W in enumerate(Ws):
        if len(W) > n_times:
            raise ValueError('Wavelet is too long for such a short signal. '
                             'Reduce the number of cycles.')
        fft_Ws[i] = fftn(W, [fsize])
    for k, x in enumerate(X):
        if mode == "full":
            tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
        elif mode == "same" or mode == "valid":
            tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
        fft_x = fftn(x, [fsize])
        for i, W in enumerate(Ws):
            ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
            if mode == "valid":
                sz = abs(W.size - n_times) + 1
                # Bug fix: use floor division. Under Python 3, `/` yields a
                # float, which is not a valid slice index.
                offset = (n_times - sz) // 2
                tfr[i, offset:(offset + sz)] = _centered(ret, sz)
            else:
                tfr[i, :] = _centered(ret, n_times)
        yield tfr
def _cwt_convolve(X, Ws, mode='same'):
"""Compute time freq decomposition with temporal convolutions
Return a generator over signals.
"""
X = np.asarray(X)
n_signals, n_times = X.shape
n_freqs = len(Ws)
# Compute convolutions
for x in X:
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
for i, W in enumerate(Ws):
ret = np.convolve(x, W, mode=mode)
if len(W) > len(x):
raise ValueError('Wavelet is too long for such a short '
'signal. Reduce the number of cycles.')
if mode == "valid":
sz = abs(W.size - n_times) + 1
offset = (n_times - sz) / 2
tfr[i, offset:(offset + sz)] = ret
else:
tfr[i] = ret
yield tfr
def cwt_morlet(X, sfreq, freqs, use_fft=True, n_cycles=7.0, zero_mean=False,
               Fs=None):
    """Compute time freq decomposition with Morlet wavelets

    Parameters
    ----------
    X : array of shape [n_signals, n_times]
        signals (one per line)
    sfreq : float
        sampling Frequency
    freqs : array
        Array of frequencies of interest
    use_fft : bool
        Compute convolution with FFT or temporal convolution.
    n_cycles: float | array of float
        Number of cycles. Fixed number or one per frequency.
    zero_mean : bool
        Make sure the wavelets are zero mean.
    Fs : float | None
        Deprecated alias for ``sfreq``.

    Returns
    -------
    tfr : 3D array
        Time Frequency Decompositions (n_signals x n_frequencies x n_times)
    """
    # deprecate Fs
    if Fs is not None:
        sfreq = Fs
        warnings.warn("`Fs` is deprecated and will be removed in v0.10. "
                      "Use `sfreq` instead", DeprecationWarning)
    mode = 'same'
    # mode = "valid"
    n_signals, n_times = X.shape
    n_frequencies = len(freqs)
    # Precompute wavelets for given frequency range to save time
    Ws = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
    if use_fft:
        coefs = _cwt_fft(X, Ws, mode)
    else:
        coefs = _cwt_convolve(X, Ws, mode)
    # Fix: np.complex128 instead of the bare `np.complex` alias, which is
    # deprecated (removed in NumPy >= 1.24) and inconsistent with the
    # explicit complex128 dtype used by the _cwt_* helpers.
    tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex128)
    for k, tfr in enumerate(coefs):
        tfrs[k] = tfr
    return tfrs
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
    """Compute time freq decomposition with continuous wavelet transform

    Parameters
    ----------
    X : array of shape [n_signals, n_times]
        signals (one per line)
    Ws : list of array
        Wavelets time series
    use_fft : bool
        Use FFT for convolutions
    mode : 'same' | 'valid' | 'full'
        Convention for convolution
    decim : int
        Temporal decimation factor

    Returns
    -------
    tfr : 3D array
        Time Frequency Decompositions (n_signals x n_frequencies x n_times)
    """
    # Shape of the decimated output.
    n_signals, n_times = X[:, ::decim].shape
    n_frequencies = len(Ws)
    if use_fft:
        coefs = _cwt_fft(X, Ws, mode)
    else:
        coefs = _cwt_convolve(X, Ws, mode)
    # Fix: np.complex128 instead of the bare `np.complex` alias, which is
    # deprecated (removed in NumPy >= 1.24).
    tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex128)
    for k, tfr in enumerate(coefs):
        # Decimation happens on the full-resolution coefficients.
        tfrs[k] = tfr[..., ::decim]
    return tfrs
def _time_frequency(X, Ws, use_fft):
    """Aux of time_frequency for parallel computing over channels

    Parameters
    ----------
    X : array, shape (n_epochs, n_times)
        Single-channel data, one row per epoch.
    Ws : list of array
        The wavelets.
    use_fft : bool
        Use FFT-based convolutions if True, temporal convolutions otherwise.

    Returns
    -------
    psd : array, shape (n_frequencies, n_times)
        Sum over epochs of squared coefficient magnitudes (not yet averaged).
    plf : complex array, shape (n_frequencies, n_times)
        Sum over epochs of unit-magnitude coefficients (phase locking,
        not yet averaged nor taken in absolute value).
    """
    n_epochs, n_times = X.shape
    n_frequencies = len(Ws)
    psd = np.zeros((n_frequencies, n_times))  # PSD
    # Fix: np.complex128 instead of the bare `np.complex` alias, which is
    # deprecated (removed in NumPy >= 1.24).
    plf = np.zeros((n_frequencies, n_times), dtype=np.complex128)  # phase lock
    mode = 'same'
    if use_fft:
        tfrs = _cwt_fft(X, Ws, mode)
    else:
        tfrs = _cwt_convolve(X, Ws, mode)
    for tfr in tfrs:
        tfr_abs = np.abs(tfr)
        psd += tfr_abs ** 2
        # Normalizing each coefficient to unit magnitude keeps only phase.
        plf += tfr / tfr_abs
    return psd, plf
@verbose
def single_trial_power(data, sfreq, frequencies, use_fft=True, n_cycles=7,
                       baseline=None, baseline_mode='ratio', times=None,
                       decim=1, n_jobs=1, zero_mean=False, Fs=None,
                       verbose=None):
    """Compute time-frequency power on single epochs

    Parameters
    ----------
    data : array of shape [n_epochs, n_channels, n_times]
        The epochs
    sfreq : float
        Sampling rate
    frequencies : array-like
        The frequencies
    use_fft : bool
        Use the FFT for convolutions or not.
    n_cycles : float | array of float
        Number of cycles in the Morlet wavelet. Fixed number
        or one per frequency.
    baseline : None (default) or tuple of length 2
        The time interval to apply baseline correction.
        If None do not apply it. If baseline is (a, b)
        the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used
        and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
        interval is used.
    baseline_mode : None | 'ratio' | 'zscore'
        Do baseline correction with ratio (power is divided by mean
        power during baseline) or zscore (power is divided by standard
        deviation of power during baseline after subtracting the mean,
        power = [power - mean(power_baseline)] / std(power_baseline))
    times : array
        Required to define baseline
    decim : int
        Temporal decimation factor
    n_jobs : int
        The number of epochs to process at the same time
    zero_mean : bool
        Make sure the wavelets are zero mean.
    Fs : float | None
        Deprecated alias for ``sfreq``.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    power : 4D array
        Power estimate (Epochs x Channels x Frequencies x Timepoints).
    """
    # deprecate Fs
    if Fs is not None:
        sfreq = Fs
        warnings.warn("`Fs` is deprecated and will be removed in v0.10. "
                      "Use `sfreq` instead", DeprecationWarning)
    mode = 'same'
    n_frequencies = len(frequencies)
    n_epochs, n_channels, n_times = data[:, :, ::decim].shape
    # Precompute wavelets for given frequency range to save time
    Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
    parallel, my_cwt, _ = parallel_func(cwt, n_jobs)
    logger.info("Computing time-frequency power on single epochs...")
    # Fix: np.float64 instead of the bare `np.float` alias, which is
    # deprecated (removed in NumPy >= 1.24).
    power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
                     dtype=np.float64)
    # Package arguments for `cwt` here to minimize omissions where only one of
    # the two calls below is updated with new function arguments.
    cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
    if n_jobs == 1:
        for k, e in enumerate(data):
            power[k] = np.abs(cwt(e, **cwt_kw)) ** 2
    else:
        # Precompute tf decompositions in parallel
        tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
        for k, tfr in enumerate(tfrs):
            power[k] = np.abs(tfr) ** 2
    # Run baseline correction. Be sure to decimate the times array as well if
    # needed.
    if times is not None:
        times = times[::decim]
    power = rescale(power, times, baseline, baseline_mode, copy=False)
    return power
def _induced_power(data, sfreq, frequencies, use_fft=True, n_cycles=7,
                   decim=1, n_jobs=1, zero_mean=False, Fs=None):
    """Compute time induced power and inter-trial phase-locking factor

    The time frequency decomposition is done with Morlet wavelets

    Parameters
    ----------
    data : array
        3D array of shape [n_epochs, n_channels, n_times]
    sfreq : float
        sampling Frequency
    frequencies : array
        Array of frequencies of interest
    use_fft : bool
        Compute transform with fft based convolutions or temporal
        convolutions.
    n_cycles : float | array of float
        Number of cycles. Fixed number or one per frequency.
    decim: int
        Temporal decimation factor
    n_jobs : int
        The number of CPUs used in parallel. All CPUs are used in -1.
        Requires joblib package.
    zero_mean : bool
        Make sure the wavelets are zero mean.
    Fs : float | None
        Unused here; kept for signature compatibility.

    Returns
    -------
    power : 3D array
        Induced power (Channels x Frequencies x Timepoints).
        Squared amplitude of time-frequency coefficients.
    phase_lock : 3D array
        Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
    """
    n_frequencies = len(frequencies)
    n_epochs, n_channels, n_times = data[:, :, ::decim].shape
    # Precompute wavelets for given frequency range to save time
    Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
    if n_jobs == 1:
        psd = np.empty((n_channels, n_frequencies, n_times))
        # Fix (both branches): np.complex128 instead of the bare `np.complex`
        # alias, which is deprecated (removed in NumPy >= 1.24).
        plf = np.empty((n_channels, n_frequencies, n_times),
                       dtype=np.complex128)
        for c in range(n_channels):
            X = data[:, c, :]
            this_psd, this_plf = _time_frequency(X, Ws, use_fft)
            psd[c], plf[c] = this_psd[:, ::decim], this_plf[:, ::decim]
    else:
        parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
        psd_plf = parallel(my_time_frequency(np.squeeze(data[:, c, :]),
                                             Ws, use_fft)
                           for c in range(n_channels))
        psd = np.zeros((n_channels, n_frequencies, n_times))
        plf = np.zeros((n_channels, n_frequencies, n_times),
                       dtype=np.complex128)
        for c, (psd_c, plf_c) in enumerate(psd_plf):
            psd[c, :, :], plf[c, :, :] = psd_c[:, ::decim], plf_c[:, ::decim]
    # Average over epochs; ITC magnitude lands in [0, 1].
    psd /= n_epochs
    plf = np.abs(plf) / n_epochs
    return psd, plf
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
                 baseline, vmin, vmax, dB):
    """Aux Function to prepare tfr computation

    Applies (optional) baseline correction, crops the time and frequency
    axes, converts times to milliseconds, optionally converts to dB, and
    resolves the color limits. Returns
    ``(data, times, freqs, vmin, vmax)``.
    """
    from ..viz.utils import _setup_vmin_vmax
    if mode is not None and baseline is not None:
        logger.info("Applying baseline correction '%s' during %s" %
                    (mode, baseline))
        # Copy so the caller's data is not modified by rescale.
        data = rescale(data.copy(), times, baseline, mode)
    # crop time
    itmin, itmax = None, None
    if tmin is not None:
        itmin = np.where(times >= tmin)[0][0]
    if tmax is not None:
        itmax = np.where(times <= tmax)[0][-1]
        # NOTE(review): itmax is used as an exclusive slice bound below, so
        # the last sample <= tmax is dropped -- confirm this is intended.
    times = times[itmin:itmax]
    # crop freqs
    ifmin, ifmax = None, None
    if fmin is not None:
        ifmin = np.where(freqs >= fmin)[0][0]
    if fmax is not None:
        ifmax = np.where(freqs <= fmax)[0][-1]
        # NOTE(review): same exclusive-bound caveat as itmax above.
    freqs = freqs[ifmin:ifmax]
    # crop data
    data = data[:, ifmin:ifmax, itmin:itmax]
    # Seconds -> milliseconds. NOTE(review): this mutates the `times` array
    # in place; both call sites pass a copy, but new callers must too.
    times *= 1e3
    if dB:
        data = 20 * np.log10(data)
    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
    return data, times, freqs, vmin, vmax
# XXX : todo IO of TFRs
class AverageTFR(ContainsMixin, PickDropChannelsMixin):
    """Container for Time-Frequency data

    Can for example store induced power at sensor level or intertrial
    coherence.

    Parameters
    ----------
    info : Info
        The measurement info.
    data : ndarray, shape (n_channels, n_freqs, n_times)
        The data.
    times : ndarray, shape (n_times,)
        The time values in seconds.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    nave : int
        The number of averaged TFRs.
    comment : str | None
        Comment on the data, e.g., the experimental condition.
        Defaults to None.
    method : str | None
        Comment on the method used to compute the data, e.g., morlet wavelet.
        Defaults to None.

    Attributes
    ----------
    ch_names : list
        The names of the channels.
    """
    @verbose
    def __init__(self, info, data, times, freqs, nave, comment=None,
                 method=None, verbose=None):
        self.info = info
        # Validate that the data dimensions agree with the supplied axes
        # before storing anything.
        if data.ndim != 3:
            raise ValueError('data should be 3d. Got %d.' % data.ndim)
        n_channels, n_freqs, n_times = data.shape
        if n_channels != len(info['chs']):
            raise ValueError("Number of channels and data size don't match"
                             " (%d != %d)." % (n_channels, len(info['chs'])))
        if n_freqs != len(freqs):
            raise ValueError("Number of frequencies and data size don't match"
                             " (%d != %d)." % (n_freqs, len(freqs)))
        if n_times != len(times):
            raise ValueError("Number of times and data size don't match"
                             " (%d != %d)." % (n_times, len(times)))
        self.data = data
        self.times = times
        self.freqs = freqs
        self.nave = nave
        self.comment = comment
        self.method = method
    @property
    def ch_names(self):
        # Channel names live in the measurement info.
        return self.info['ch_names']
    @verbose
    def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
             tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
             cmap='RdBu_r', dB=False, colorbar=True, show=True,
             title=None, verbose=None):
        """Plot TFRs in a topography with images

        Parameters
        ----------
        picks : array-like of int | None
            The indices of the channels to plot.
        baseline : None (default) or tuple of length 2
            The time interval to apply baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal ot (None, None) all the time
            interval is used.
        mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
            Do baseline correction with ratio (power is divided by mean
            power during baseline) or zscore (power is divided by standard
            deviation of power during baseline after subtracting the mean,
            power = [power - mean(power_baseline)] / std(power_baseline)).
            If None no baseline correction is applied.
        tmin : None | float
            The first time instant to display. If None the first time point
            available is used.
        tmax : None | float
            The last time instant to display. If None the last time point
            available is used.
        fmin : None | float
            The first frequency to display. If None the first frequency
            available is used.
        fmax : None | float
            The last frequency to display. If None the last frequency
            available is used.
        vmin : float | None
            The mininum value an the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maxinum value an the color scale. If vmax is None, the data
            maximum value is used.
        cmap : matplotlib colormap | str
            The colormap to use. Defaults to 'RdBu_r'.
        dB : bool
            If True, 20*log10 is applied to the data to get dB.
        colorbar : bool
            If true, colorbar will be added to the plot
        show : bool
            Call pyplot.show() at the end.
        title : str | None
            String for title. Defaults to None (blank/no title).
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).
        """
        from ..viz.topo import _imshow_tfr
        import matplotlib.pyplot as plt
        times, freqs = self.times.copy(), self.freqs.copy()
        # NOTE(review): with picks=None, `self.data[None]` inserts a new
        # axis instead of selecting all channels, and the zip below would
        # then iterate over None -- callers appear to always pass explicit
        # picks; confirm.
        data = self.data[picks]
        data, times, freqs, vmin, vmax = \
            _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
                         baseline, vmin, vmax, dB)
        tmin, tmax = times[0], times[-1]
        # One figure per picked channel.
        for k, p in zip(range(len(data)), picks):
            plt.figure()
            _imshow_tfr(plt, 0, tmin, tmax, vmin, vmax, ylim=None,
                        tfr=data[k: k + 1], freq=freqs, x_label='Time (ms)',
                        y_label='Frequency (Hz)', colorbar=colorbar,
                        picker=False, cmap=cmap, title=title)
        if show:
            plt.show()
        return
    def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
                  tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
                  layout=None, cmap='RdBu_r', title=None, dB=False,
                  colorbar=True, layout_scale=0.945, show=True,
                  border='none', fig_facecolor='k', font_color='w'):
        """Plot TFRs in a topography with images

        Parameters
        ----------
        picks : array-like of int | None
            The indices of the channels to plot. If None all available
            channels are displayed.
        baseline : None (default) or tuple of length 2
            The time interval to apply baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal ot (None, None) all the time
            interval is used.
        mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
            Do baseline correction with ratio (power is divided by mean
            power during baseline) or zscore (power is divided by standard
            deviation of power during baseline after subtracting the mean,
            power = [power - mean(power_baseline)] / std(power_baseline)).
            If None no baseline correction is applied.
        tmin : None | float
            The first time instant to display. If None the first time point
            available is used.
        tmax : None | float
            The last time instant to display. If None the last time point
            available is used.
        fmin : None | float
            The first frequency to display. If None the first frequency
            available is used.
        fmax : None | float
            The last frequency to display. If None the last frequency
            available is used.
        vmin : float | None
            The mininum value an the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maxinum value an the color scale. If vmax is None, the data
            maximum value is used.
        layout : Layout | None
            Layout instance specifying sensor positions. If possible, the
            correct layout is inferred from the data.
        cmap : matplotlib colormap | str
            The colormap to use. Defaults to 'RdBu_r'.
        title : str
            Title of the figure.
        dB : bool
            If True, 20*log10 is applied to the data to get dB.
        colorbar : bool
            If true, colorbar will be added to the plot
        layout_scale : float
            Scaling factor for adjusting the relative size of the layout
            on the canvas.
        show : bool
            Call pyplot.show() at the end.
        border : str
            matplotlib borders style to be used for each sensor plot.
        fig_facecolor : str | obj
            The figure face color. Defaults to black.
        font_color: str | obj
            The color of tick labels in the colorbar. Defaults to white.
        """
        from ..viz.topo import _imshow_tfr, _plot_topo
        times = self.times.copy()
        freqs = self.freqs
        data = self.data
        info = self.info
        # Restrict data and info consistently when picks are given.
        if picks is not None:
            data = data[picks]
            info = pick_info(info, picks)
        data, times, freqs, vmin, vmax = \
            _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
                         mode, baseline, vmin, vmax, dB)
        if layout is None:
            from mne import find_layout
            layout = find_layout(self.info)
        imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap)
        fig = _plot_topo(info=info, times=times,
                         show_func=imshow, layout=layout,
                         colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
                         layout_scale=layout_scale, title=title, border=border,
                         x_label='Time (ms)', y_label='Frequency (Hz)',
                         fig_facecolor=fig_facecolor,
                         font_color=font_color)
        if show:
            import matplotlib.pyplot as plt
            plt.show()
        return fig
    def _check_compat(self, tfr):
        """checks that self and tfr have the same time-frequency ranges"""
        # NOTE(review): asserts are stripped under `python -O`; consider
        # raising ValueError instead if this must always be enforced.
        assert np.all(tfr.times == self.times)
        assert np.all(tfr.freqs == self.freqs)
    def __add__(self, tfr):
        # Element-wise sum of data; other attributes come from `self`.
        self._check_compat(tfr)
        out = self.copy()
        out.data += tfr.data
        return out
    def __iadd__(self, tfr):
        self._check_compat(tfr)
        self.data += tfr.data
        return self
    def __sub__(self, tfr):
        # Element-wise difference of data; other attributes come from `self`.
        self._check_compat(tfr)
        out = self.copy()
        out.data -= tfr.data
        return out
    def __isub__(self, tfr):
        self._check_compat(tfr)
        self.data -= tfr.data
        return self
    def copy(self):
        """Return a copy of the instance."""
        return deepcopy(self)
    def __repr__(self):
        s = "time : [%f, %f]" % (self.times[0], self.times[-1])
        s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
        s += ", nave : %d" % self.nave
        s += ', channels : %d' % self.data.shape[0]
        return "<AverageTFR | %s>" % s
    def apply_baseline(self, baseline, mode='mean'):
        """Baseline correct the data

        Parameters
        ----------
        baseline : tuple or list of length 2
            The time interval to apply rescaling / baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
            Do baseline correction with ratio (power is divided by mean
            power during baseline) or z-score (power is divided by standard
            deviation of power during baseline after subtracting the mean,
            power = [power - mean(power_baseline)] / std(power_baseline))
            If None, baseline no correction will be performed.
        """
        # In-place: copy=False rebinds self.data to the corrected array.
        self.data = rescale(self.data, self.times, baseline, mode, copy=False)
    def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
                     ch_type='mag', baseline=None, mode='mean',
                     layout=None, vmin=None, vmax=None, cmap='RdBu_r',
                     sensors=True, colorbar=True, unit=None, res=64, size=2,
                     format='%1.1e', show_names=False, title=None,
                     axes=None, show=True):
        """Plot topographic maps of time-frequency intervals of TFR data

        Parameters
        ----------
        tmin : None | float
            The first time instant to display. If None the first time point
            available is used.
        tmax : None | float
            The last time instant to display. If None the last time point
            available is used.
        fmin : None | float
            The first frequency to display. If None the first frequency
            available is used.
        fmax : None | float
            The last frequency to display. If None the last frequency
            available is used.
        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
            The channel type to plot. For 'grad', the gradiometers are
            collected in pairs and the RMS for each pair is plotted.
        baseline : tuple or list of length 2
            The time interval to apply rescaling / baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
            Do baseline correction with ratio (power is divided by mean
            power during baseline) or z-score (power is divided by standard
            deviation of power during baseline after subtracting the mean,
            power = [power - mean(power_baseline)] / std(power_baseline))
            If None, baseline no correction will be performed.
        layout : None | Layout
            Layout instance specifying sensor positions (does not need to
            be specified for Neuromag data). If possible, the correct layout
            file is inferred from the data; if no appropriate layout file was
            found, the layout is automatically generated from the sensor
            locations.
        vmin : float | callable
            The value specfying the lower bound of the color range.
            If None, and vmax is None, -vmax is used. Else np.min(data).
            If callable, the output equals vmin(data).
        vmax : float | callable
            The value specfying the upper bound of the color range.
            If None, the maximum absolute value is used. If vmin is None,
            but vmax is not, defaults to np.min(data).
            If callable, the output equals vmax(data).
        cmap : matplotlib colormap
            Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
            'Reds'.
        sensors : bool | str
            Add markers for sensor locations to the plot. Accepts matplotlib
            plot format string (e.g., 'r+' for red plusses). If True, a circle
            will be used (via .add_artist). Defaults to True.
        colorbar : bool
            Plot a colorbar.
        unit : str | None
            The unit of the channel type used for colorbar labels.
        res : int
            The resolution of the topomap image (n pixels along each side).
        size : float
            Side length per topomap in inches.
        format : str
            String format for colorbar values.
        show_names : bool | callable
            If True, show channel names on top of the map. If a callable is
            passed, channel names will be formatted using the callable; e.g.,
            to delete the prefix 'MEG ' from all channel names, pass the
            function lambda x: x.replace('MEG ', ''). If `mask` is not None,
            only significant sensors will be shown.
        title : str | None
            Title. If None (default), no title is displayed.
        axes : instance of Axes | None
            The axes to plot to. If None the axes is defined automatically.
        show : bool
            Call pyplot.show() at the end.

        Returns
        -------
        fig : matplotlib.figure.Figure
            The figure containing the topography.
        """
        # Thin delegation to the viz module; all kwargs are forwarded.
        from ..viz import plot_tfr_topomap
        return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
                                fmax=fmax, ch_type=ch_type, baseline=baseline,
                                mode=mode, layout=layout, vmin=vmin, vmax=vmax,
                                cmap=cmap, sensors=sensors, colorbar=colorbar,
                                unit=unit, res=res, size=size, format=format,
                                show_names=show_names, title=title, axes=axes,
                                show=show)
    @requires_h5py
    def save(self, fname, overwrite=False):
        """Save TFR object to hdf5 file

        Parameters
        ----------
        fname : str
            The file name, which should end with -tfr.h5 .
        overwrite : bool
            If True, overwrite file (if it exists). Defaults to false
        """
        write_tfrs(fname, self, overwrite=overwrite)
def _prepare_write_tfr(tfr, condition):
    """Aux function: build a (condition, params) pair for HDF5 writing."""
    simplified_info = _check_simplify_h5_info(tfr.info)
    payload = dict(times=tfr.times, freqs=tfr.freqs, data=tfr.data,
                   info=simplified_info, nave=tfr.nave,
                   comment=tfr.comment, method=tfr.method)
    return (condition, payload)
@requires_h5py
def write_tfrs(fname, tfr, overwrite=False):
    """Write a TFR dataset to hdf5.

    Parameters
    ----------
    fname : string
        The file name, which should end with -tfr.h5
    tfr : AverageTFR instance, or list of AverageTFR instances
        The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed
    overwrite : bool
        If True, overwrite file (if it exists). Defaults to False.
    """
    out = []
    if not isinstance(tfr, (list, tuple)):
        tfr = [tfr]
    for ii, tfr_ in enumerate(tfr):
        # Fall back to the positional index when no comment was set.
        comment = ii if tfr_.comment is None else tfr_.comment
        out.append(_prepare_write_tfr(tfr_, condition=comment))
    write_hdf5(fname, out, overwrite=overwrite)
@requires_h5py
def read_tfrs(fname, condition=None):
    """
    Read TFR datasets from hdf5 file.

    Parameters
    ----------
    fname : string
        The file name, which should end with -tfr.h5 .
    condition : int or str | list of int or str | None
        The condition to load. If None, all conditions will be returned.
        Defaults to None.

    Returns
    -------
    tfrs : list of instances of AverageTFR | instance of AverageTFR
        Depending on `condition` either the TFR object or a list of multiple
        TFR objects.
    """
    check_fname(fname, 'tfr', ('-tfr.h5',))
    logger.info('Reading %s ...' % fname)
    tfr_data = read_hdf5(fname)
    if condition is not None:
        tfr_dict = dict(tfr_data)
        if condition not in tfr_dict:
            keys = ['%s' % k for k in tfr_dict]
            # Fix: removed the stray trailing double-quote that was in the
            # original message ('... "{1}""').
            raise ValueError('Cannot find condition ("{0}") in this file. '
                             'I can give you "{1}"'
                             .format(condition, " or ".join(keys)))
        out = AverageTFR(**tfr_dict[condition])
    else:
        # tfr_data is a list of (condition, params) pairs; rebuild one
        # AverageTFR per stored condition.
        out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]]
    return out
def tfr_morlet(epochs, freqs, n_cycles, use_fft=False,
               return_itc=True, decim=1, n_jobs=1):
    """Compute Time-Frequency Representation (TFR) using Morlet wavelets

    Parameters
    ----------
    epochs : Epochs
        The epochs.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        The number of cycles globally or for each frequency.
    use_fft : bool
        The fft based convolution or not.
    return_itc : bool
        Return intertrial coherence (ITC) as well as averaged power.
    decim : int
        The decimation factor on the time axis. To reduce memory usage.
    n_jobs : int
        The number of jobs to run in parallel.

    Returns
    -------
    power : AverageTFR
        The averaged power.
    itc : AverageTFR
        The intertrial coherence (ITC). Only returned if return_itc
        is True.
    """
    # Restrict computation to MEG/EEG channels and keep info consistent.
    picks = pick_types(epochs.info, meg=True, eeg=True)
    info = pick_info(epochs.info, picks)
    data = epochs.get_data()[:, picks, :]
    power, itc = _induced_power(data, sfreq=info['sfreq'],
                                frequencies=freqs, n_cycles=n_cycles,
                                n_jobs=n_jobs, use_fft=use_fft, decim=decim,
                                zero_mean=True)
    times = epochs.times[::decim].copy()
    nave = len(data)
    power_out = AverageTFR(info, power, times, freqs, nave,
                           method='morlet-power')
    if not return_itc:
        return power_out
    itc_out = AverageTFR(info, itc, times, freqs, nave, method='morlet-itc')
    return (power_out, itc_out)
@verbose
def _induced_power_mtm(data, sfreq, frequencies, time_bandwidth=4.0,
                       use_fft=True, n_cycles=7, decim=1, n_jobs=1,
                       zero_mean=True, verbose=None):
    """Compute time induced power and inter-trial phase-locking factor.

    The time frequency decomposition is done with DPSS wavelets.

    Parameters
    ----------
    data : np.ndarray, shape (n_epochs, n_channels, n_times)
        The input data.
    sfreq : float
        Sampling frequency.
    frequencies : np.ndarray, shape (n_frequencies,)
        Array of frequencies of interest.
    time_bandwidth : float
        Time x (Full) Bandwidth product.
        The number of good tapers (low-bias) is chosen automatically based on
        this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers).
    use_fft : bool
        Compute transform with fft based convolutions or temporal
        convolutions. Defaults to True.
    n_cycles : float | np.ndarray shape (n_frequencies,)
        Number of cycles. Fixed number or one per frequency. Defaults to 7.
    decim : int
        Temporal decimation factor. Defaults to 1.
    n_jobs : int
        The number of CPUs used in parallel. All CPUs are used in -1.
        Requires joblib package. Defaults to 1.
    zero_mean : bool
        Make sure the wavelets are zero mean. Defaults to True.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    power : np.ndarray, shape (n_channels, n_frequencies, n_times)
        Induced power. Squared amplitude of time-frequency coefficients.
    itc : np.ndarray, shape (n_channels, n_frequencies, n_times)
        Phase locking value.
    """
    n_epochs, n_channels, n_times = data[:, :, ::decim].shape
    logger.info('Data is %d trials and %d channels', n_epochs, n_channels)
    n_frequencies = len(frequencies)
    logger.info('Multitaper time-frequency analysis for %d frequencies',
                n_frequencies)

    # Precompute wavelets for given frequency range to save time
    Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
                       time_bandwidth=time_bandwidth, zero_mean=zero_mean)
    n_taps = len(Ws)
    logger.info('Using %d tapers', n_taps)
    n_times_wavelets = Ws[0][0].shape[0]
    if n_times <= n_times_wavelets:
        warnings.warn("Time windows are as long or longer than the epoch. "
                      "Consider reducing n_cycles.")

    psd, itc = 0., 0.
    for m in range(n_taps):  # n_taps is typically small, better to save RAM
        if n_jobs == 1:
            psd_m = np.empty((n_channels, n_frequencies, n_times))
            # np.complex was removed in NumPy 1.24; use the explicit dtype
            itc_m = np.empty((n_channels, n_frequencies, n_times),
                             dtype=np.complex128)
            for c in range(n_channels):
                logger.debug('Analysing channel #%d', c)
                X = data[:, c, :]
                this_psd, this_itc = _time_frequency(X, Ws[m], use_fft)
                psd_m[c], itc_m[c] = this_psd[:, ::decim], this_itc[:, ::decim]
        else:
            parallel, my_time_frequency, _ = parallel_func(_time_frequency,
                                                           n_jobs)
            # data[:, c, :] is already 2D; np.squeeze would collapse the
            # epochs axis when n_epochs == 1, so it must not be applied here
            psd_itc = parallel(my_time_frequency(data[:, c, :],
                                                 Ws[m], use_fft)
                               for c in range(n_channels))
            psd_m = np.zeros((n_channels, n_frequencies, n_times))
            itc_m = np.zeros((n_channels, n_frequencies, n_times),
                             dtype=np.complex128)
            for c, (psd_c, itc_c) in enumerate(psd_itc):
                psd_m[c, :, :] = psd_c[:, ::decim]
                itc_m[c, :, :] = itc_c[:, ::decim]
        psd_m /= n_epochs
        itc_m = np.abs(itc_m) / n_epochs
        psd += psd_m
        itc += itc_m
    # average across tapers
    psd /= n_taps
    itc /= n_taps
    return psd, itc
def tfr_multitaper(epochs, freqs, n_cycles, time_bandwidth=4.0, use_fft=True,
                   return_itc=True, decim=1, n_jobs=1):
    """Compute Time-Frequency Representation (TFR) using DPSS wavelets.

    Parameters
    ----------
    epochs : Epochs
        The epochs.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        The number of cycles globally or for each frequency.
        The time-window length is thus T = n_cycles / freq.
    time_bandwidth : float, (optional)
        Time x (Full) Bandwidth product. Should be >= 2.0.
        Choose this along with n_cycles to get desired frequency resolution.
        The number of good tapers (least leakage from far away frequencies)
        is chosen automatically based on this to floor(time_bandwidth - 1).
        Default is 4.0 (3 good tapers).
        E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
        If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
    use_fft : bool
        The fft based convolution or not.
        Defaults to True.
    return_itc : bool
        Return intertrial coherence (ITC) as well as averaged power.
        Defaults to True.
    decim : int
        The decimation factor on the time axis. To reduce memory usage.
        Note than this is brute force decimation, no anti-aliasing is done.
        Defaults to 1.
    n_jobs : int
        The number of jobs to run in parallel. Defaults to 1.

    Returns
    -------
    power : AverageTFR
        The averaged power.
    itc : AverageTFR
        The intertrial coherence (ITC). Only returned if return_itc
        is True.
    """
    data = epochs.get_data()
    picks = pick_types(epochs.info, meg=True, eeg=True)
    info = pick_info(epochs.info, picks)
    data = data[:, picks, :]
    power, itc = _induced_power_mtm(data, sfreq=info['sfreq'],
                                    frequencies=freqs, n_cycles=n_cycles,
                                    time_bandwidth=time_bandwidth,
                                    use_fft=use_fft, decim=decim,
                                    n_jobs=n_jobs, zero_mean=True,
                                    verbose='INFO')
    times = epochs.times[::decim].copy()
    nave = len(data)
    # the method names previously read 'mutlitaper-*' (typo); fixed to
    # 'multitaper-*' so the recorded method matches the function name
    out = AverageTFR(info, power, times, freqs, nave,
                     method='multitaper-power')
    if return_itc:
        out = (out, AverageTFR(info, itc, times, freqs, nave,
                               method='multitaper-itc'))
    return out
| {
"repo_name": "effigies/mne-python",
"path": "mne/time_frequency/tfr.py",
"copies": "1",
"size": "45989",
"license": "bsd-3-clause",
"hash": 6825717880582897000,
"line_mean": 36.665028665,
"line_max": 80,
"alpha_frac": 0.5754202092,
"autogenerated": false,
"ratio": 3.807666832256996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9883036485850885,
"avg_score": 0.000010111121222232332,
"num_lines": 1221
} |
"""A module which implements the time frequency estimation.
Authors : Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
License : BSD 3-clause
Multitaper wavelet method
"""
import warnings
from math import sqrt
import numpy as np
from scipy import linalg
from scipy.fftpack import fftn, ifftn
from .utils import logger, verbose
from .dpss import dpss_windows
def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0,
zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
Defaults to 7.
time_bandwidth : float, (optional)
Time x Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1).
Default is 4.0, giving 3 good tapers.
Returns
-------
Ws : list of array
Wavelets time series
"""
Ws = list()
if time_bandwidth < 2.0:
raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
n_taps = int(np.floor(time_bandwidth - 1))
n_cycles = np.atleast_1d(n_cycles)
if n_cycles.size != 1 and n_cycles.size != len(freqs):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for m in range(n_taps):
Wm = list()
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
t_win = this_n_cycles / float(f)
t = np.arange(0., t_win, 1.0 / sfreq)
# Making sure wavelets are centered before tapering
oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
# Get dpss tapers
tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
n_taps)
Wk = oscillation * tapers[m]
if zero_mean: # to make it zero mean
real_offset = Wk.mean()
Wk -= real_offset
Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
Wm.append(Wk)
Ws.append(Wm)
return Ws
def _centered(arr, newsize):
"""Aux Function to center data"""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _cwt_fft(X, Ws, mode="same"):
"""Compute cwt with fft based convolutions
Return a generator over signals.
"""
X = np.asarray(X)
# Precompute wavelets for given frequency range to save time
n_signals, n_times = X.shape
n_freqs = len(Ws)
Ws_max_size = max(W.size for W in Ws)
size = n_times + Ws_max_size - 1
# Always use 2**n-sized FFT
fsize = 2 ** int(np.ceil(np.log2(size)))
# precompute FFTs of Ws
fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
for i, W in enumerate(Ws):
if len(W) > n_times:
raise ValueError('Wavelet is too long for such a short signal. '
'Reduce the number of cycles.')
fft_Ws[i] = fftn(W, [fsize])
for k, x in enumerate(X):
if mode == "full":
tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
elif mode == "same" or mode == "valid":
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
fft_x = fftn(x, [fsize])
for i, W in enumerate(Ws):
ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
if mode == "valid":
sz = abs(W.size - n_times) + 1
offset = (n_times - sz) / 2
tfr[i, offset:(offset + sz)] = _centered(ret, sz)
else:
tfr[i, :] = _centered(ret, n_times)
yield tfr
def _cwt_convolve(X, Ws, mode='same'):
"""Compute time freq decomposition with temporal convolutions
Return a generator over signals.
"""
X = np.asarray(X)
n_signals, n_times = X.shape
n_freqs = len(Ws)
# Compute convolutions
for x in X:
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
for i, W in enumerate(Ws):
ret = np.convolve(x, W, mode=mode)
if len(W) > len(x):
raise ValueError('Wavelet is too long for such a short '
'signal. Reduce the number of cycles.')
if mode == "valid":
sz = abs(W.size - n_times) + 1
offset = (n_times - sz) / 2
tfr[i, offset:(offset + sz)] = ret
else:
tfr[i] = ret
yield tfr
def _time_frequency(X, Ws, use_fft, decim):
    """Aux of time_frequency for parallel computing over channels.

    Parameters
    ----------
    X : array, shape (n_epochs, n_times)
        Single-channel epochs data.
    Ws : list of array
        The wavelets, one per frequency.
    use_fft : bool
        Use FFT-based convolution if True, temporal convolution otherwise.
    decim : int
        Temporal decimation factor.

    Returns
    -------
    psd : array, shape (n_frequencies, n_times_decim)
        Power averaged over epochs.
    plf : array, shape (n_frequencies, n_times_decim)
        Phase-locking factor (inter-trial coherence magnitude).
    """
    n_epochs, n_times = X.shape
    # ceil(n_times / decim): decimated length including a partial step
    n_times = n_times // decim + bool(n_times % decim)
    n_frequencies = len(Ws)
    psd = np.zeros((n_frequencies, n_times))  # PSD
    # np.complex was removed in NumPy 1.24; use the explicit dtype
    plf = np.zeros((n_frequencies, n_times), np.complex128)  # phase lock
    mode = 'same'
    if use_fft:
        tfrs = _cwt_fft(X, Ws, mode)
    else:
        tfrs = _cwt_convolve(X, Ws, mode)
    for tfr in tfrs:
        tfr = tfr[:, ::decim]
        tfr_abs = np.abs(tfr)
        psd += tfr_abs ** 2
        # accumulate unit phasors; magnitude of the mean gives the PLF
        plf += tfr / tfr_abs
    psd /= n_epochs
    plf = np.abs(plf) / n_epochs
    return psd, plf
@verbose
def tfr_multitaper(data, sfreq, frequencies, time_bandwidth=4.0,
                   use_fft=True, n_cycles=7, decim=1,
                   zero_mean=True, verbose=None):
    """Compute time induced power and inter-trial coherence.

    The time frequency decomposition is done with DPSS wavelets.

    Parameters
    ----------
    data : np.ndarray, shape (n_epochs, n_channels, n_times)
        The input data.
    sfreq : float
        Sampling frequency.
    frequencies : np.ndarray, shape (n_frequencies,)
        Array of frequencies of interest.
    time_bandwidth : float
        Time x (Full) Bandwidth product.
        The number of good tapers (low-bias) is chosen automatically based on
        this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers).
    use_fft : bool
        Compute transform with fft based convolutions or temporal
        convolutions. Defaults to True.
    n_cycles : float | np.ndarray shape (n_frequencies,)
        Number of cycles. Fixed number or one per frequency. Defaults to 7.
    decim : int
        Temporal decimation factor. Defaults to 1.
    zero_mean : bool
        Make sure the wavelets are zero mean. Defaults to True.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    power : np.ndarray, shape (n_channels, n_frequencies, n_times)
        Induced power. Squared amplitude of time-frequency coefficients.
    itc : np.ndarray, shape (n_channels, n_frequencies, n_times)
        Phase locking value.
    times : np.ndarray, shape (n_times, )
        Time vector for convenience based on n_times, sfreq and decim.
    """
    n_epochs, n_channels, n_times = data[:, :, ::decim].shape
    logger.info('Data is %d trials and %d channels', n_epochs, n_channels)
    n_frequencies = len(frequencies)
    logger.info('Multitaper time-frequency analysis for %d frequencies',
                n_frequencies)

    # Precompute wavelets for given frequency range to save time
    Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
                       time_bandwidth=time_bandwidth, zero_mean=zero_mean)
    n_taps = len(Ws)
    logger.info('Using %d tapers', n_taps)
    n_times_wavelets = Ws[0][0].shape[0]
    if n_times <= n_times_wavelets:
        warnings.warn("Time windows are as long or longer than the epoch. "
                      "Consider reducing n_cycles.")

    psd = np.zeros((n_channels, n_frequencies, n_times))
    itc = np.zeros((n_channels, n_frequencies, n_times))
    for m in range(n_taps):
        # _time_frequency already averages over epochs per taper
        psd_itc = (_time_frequency(data[:, c, :], Ws[m], use_fft, decim)
                   for c in range(n_channels))
        for c, (psd_c, itc_c) in enumerate(psd_itc):
            psd[c, :, :] += psd_c
            itc[c, :, :] += itc_c
    psd /= n_taps
    itc /= n_taps
    # np.float was removed in modern NumPy; the alias meant builtin float
    times = np.arange(n_times) / float(sfreq)
    return psd, itc, times
@verbose
def rescale(data, times, baseline, mode, verbose=None, copy=True):
    """Rescale i.e., baseline correcting data.

    Parameters
    ----------
    data : array
        It can be of any shape. The only constraint is that the last
        dimension should be time.
    times : 1D array
        Time instants in seconds.
    baseline : tuple or list of length 2, or None
        The time interval to apply rescaling / baseline correction.
        If None do not apply it. If baseline is (a, b)
        the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used
        and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
        interval is used. If None, no correction is applied.
    mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | 'zlogratio'
        Do baseline correction with ratio (power is divided by mean
        power during baseline) or zscore (power is divided by standard
        deviation of power during baseline after subtracting the mean,
        power = [power - mean(power_baseline)] / std(power_baseline)).
        logratio is the same as mean but in log-scale, zlogratio is the
        same as zscore but data is rendered in log-scale first.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    copy : bool
        Operate on a copy of the data, or in place.

    Returns
    -------
    data_scaled : array
        Array of same shape as data after rescaling.

    Raises
    ------
    ValueError
        If ``mode`` is not one of the valid modes.
    """
    if copy:
        data = data.copy()

    valid_modes = ('logratio', 'ratio', 'zscore', 'mean', 'percent',
                   'zlogratio')
    if mode not in valid_modes:
        # ValueError is more precise than the bare Exception raised before;
        # callers catching Exception still work
        raise ValueError('mode should be any of : %s' % (valid_modes, ))

    if baseline is not None:
        logger.info("Applying baseline correction ... (mode: %s)" % mode)
        bmin, bmax = baseline
        if bmin is None:
            imin = 0
        else:
            imin = int(np.where(times >= bmin)[0][0])
        if bmax is None:
            imax = len(times)
        else:
            imax = int(np.where(times <= bmax)[0][-1]) + 1

        # avoid potential "empty slice" warning
        if data.shape[-1] > 0:
            mean = np.mean(data[..., imin:imax], axis=-1)[..., None]
        else:
            mean = 0  # otherwise we get an ugly nan

        # single if/elif chain: the previous code mixed independent `if`
        # statements with an `elif` chain, which was error-prone to extend
        if mode == 'mean':
            data -= mean
        elif mode == 'logratio':
            data /= mean
            data = np.log10(data)  # a value of 1 means 10 times bigger
        elif mode == 'ratio':
            data /= mean
        elif mode == 'zscore':
            std = np.std(data[..., imin:imax], axis=-1)[..., None]
            data -= mean
            data /= std
        elif mode == 'percent':
            data -= mean
            data /= mean
        elif mode == 'zlogratio':
            data /= mean
            data = np.log10(data)
            std = np.std(data[..., imin:imax], axis=-1)[..., None]
            data /= std
    else:
        logger.info("No baseline correction applied...")
    return data
def plot_tfr(tfr, times, freqs, ch_idx=0, vmin=None, vmax=None,
             x_label='Time (s)', y_label='Frequency (Hz)',
             colorbar=True, cmap='RdBu_r', title=None):
    """Plot the time-frequency representation of a single channel.

    Parameters
    ----------
    tfr : np.ndarray, shape (n_channels, n_frequencies, n_times)
        Time-frequency data from tfr_multitaper (power or itc).
    times : np.ndarray, shape (n_times, )
        Time array corresponding to tfr, also from tfr_multitaper.
    freqs : np.ndarray, shape (n_times, )
        Frequency array over which tfr was calculated.
    ch_idx : int, optional, default: 0
        Index of the channel to plot.
    vmin : scalar, optional, default: tfr.min()
        Minimum of the colorbar.
    vmax : scalar, optional, default: tfr.max()
        Maximum of the colorbar.
    x_label : str, optional, default: 'Time (s)'
        Label for the x-axis (time axis); None to omit.
    y_label : str, optional, default: 'Frequency (Hz)'
        Label for the y-axis (frequency axis); None to omit.
    colorbar : bool, optional, default: True
        Whether to show a colorbar.
    cmap : str, optional, default: 'RdBu_r'
        Name of a matplotlib.colors.Colormap.
    title : str, optional, default: None
        Title for the plot; None/empty to omit.
    """
    import matplotlib.pyplot as plt

    # default color limits span the full data range
    if vmin is None:
        vmin = tfr.min()
    if vmax is None:
        vmax = tfr.max()

    extent = (times[0], times[-1], freqs[0], freqs[-1])
    plt.imshow(tfr[ch_idx], extent=extent, aspect="auto", origin="lower",
               vmin=vmin, vmax=vmax, cmap=cmap)
    if x_label is not None:
        plt.xlabel(x_label)
    if y_label is not None:
        plt.ylabel(y_label)
    if colorbar:
        plt.colorbar()
    if title:
        plt.title(title)
| {
"repo_name": "lennyvarghese/ANLffr",
"path": "anlffr/tfr.py",
"copies": "2",
"size": "13583",
"license": "bsd-3-clause",
"hash": -142506642312031710,
"line_mean": 33.1281407035,
"line_max": 77,
"alpha_frac": 0.5743944637,
"autogenerated": false,
"ratio": 3.548328108672936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 398
} |
"""A module which implements the time-frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
# Clement Moutard <clement.moutard@polytechnique.org>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License : BSD (3-clause)
from copy import deepcopy
from functools import partial
from math import sqrt
import numpy as np
from scipy import linalg
from .multitaper import dpss_windows
from ..baseline import rescale
from ..fixes import fft, ifft
from ..filter import next_fast_len
from ..parallel import parallel_func
from ..utils import (logger, verbose, _time_mask, _freq_mask, check_fname,
sizeof_fmt, GetEpochsMixin, _prepare_read_metadata,
fill_doc, _prepare_write_metadata, _check_event_id,
_gen_events, SizeMixin, _is_numeric, _check_option,
_validate_type)
from ..channels.channels import ContainsMixin, UpdateChannelsMixin
from ..channels.layout import _merge_ch_data, _pair_grad_sensors
from ..io.pick import (pick_info, _picks_to_idx, channel_type, _pick_inst,
_get_channel_types)
from ..io.meas_info import Info
from ..viz.utils import (figure_nobar, plt_show, _setup_cmap, warn,
_connection_line, _prepare_joint_axes,
_setup_vmin_vmax, _set_title_multiple_electrodes)
from ..externals.h5io import write_hdf5, read_hdf5
def morlet(sfreq, freqs, n_cycles=7.0, sigma=None, zero_mean=False):
    """Compute Morlet wavelets for the given frequency range.

    Parameters
    ----------
    sfreq : float
        The sampling Frequency.
    freqs : array
        Frequency range of interest (1 x Frequencies).
    n_cycles : float | array of float, default 7.0
        Number of cycles. Fixed number or one per frequency.
    sigma : float, default None
        It controls the width of the wavelet ie its temporal
        resolution. If sigma is None the temporal resolution
        is adapted with the frequency like for all wavelet transform.
        The higher the frequency the shorter is the wavelet.
        If sigma is fixed the temporal resolution is fixed
        like for the short time Fourier transform and the number
        of oscillations increases with the frequency.
    zero_mean : bool, default False
        Make sure the wavelet has a mean of zero.

    Returns
    -------
    Ws : list of array
        The wavelets time series.
    """
    n_cycles = np.atleast_1d(n_cycles)
    freqs = np.array(freqs)
    if np.any(freqs <= 0):
        raise ValueError("all frequencies in 'freqs' must be "
                         "greater than 0.")
    if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
        raise ValueError("n_cycles should be fixed or defined for "
                         "each frequency.")
    # broadcast a scalar cycle count to every frequency
    if n_cycles.size == 1:
        n_cycles = np.full(len(freqs), float(n_cycles[0]))

    Ws = list()
    for f, n_cyc in zip(freqs, n_cycles):
        # fixed or scale-dependent temporal width of the envelope
        if sigma is None:
            sigma_t = n_cyc / (2.0 * np.pi * f)
        else:
            sigma_t = n_cyc / (2.0 * np.pi * sigma)
        # this scaling factor is proportional to (Tallon-Baudry 98):
        # (sigma_t*sqrt(pi))^(-1/2);
        t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
        t = np.r_[-t[::-1], t[1:]]  # symmetric time axis about zero
        carrier = np.exp(2.0 * 1j * np.pi * f * t)
        if zero_mean:  # remove the DC offset of the complex carrier
            carrier = carrier - np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
        envelope = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
        W = carrier * envelope
        W /= sqrt(0.5) * linalg.norm(W.ravel())
        Ws.append(W)
    return Ws
def _make_dpss(sfreq, freqs, n_cycles=7., time_bandwidth=4.0, zero_mean=False):
"""Compute DPSS tapers for the given frequency range.
Parameters
----------
sfreq : float
The sampling frequency.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,), default 7.
The number of cycles globally or for each frequency.
time_bandwidth : float, default 4.0
Time x Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1).
Default is 4.0, giving 3 good tapers.
zero_mean : bool | None, , default False
Make sure the wavelet has a mean of zero.
Returns
-------
Ws : list of array
The wavelets time series.
"""
Ws = list()
freqs = np.array(freqs)
if np.any(freqs <= 0):
raise ValueError("all frequencies in 'freqs' must be "
"greater than 0.")
if time_bandwidth < 2.0:
raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
n_taps = int(np.floor(time_bandwidth - 1))
n_cycles = np.atleast_1d(n_cycles)
if n_cycles.size != 1 and n_cycles.size != len(freqs):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for m in range(n_taps):
Wm = list()
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
t_win = this_n_cycles / float(f)
t = np.arange(0., t_win, 1.0 / sfreq)
# Making sure wavelets are centered before tapering
oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
# Get dpss tapers
tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
n_taps)
Wk = oscillation * tapers[m]
if zero_mean: # to make it zero mean
real_offset = Wk.mean()
Wk -= real_offset
Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
Wm.append(Wk)
Ws.append(Wm)
return Ws
# Low level convolution
def _get_nfft(wavelets, X, use_fft=True, check=True):
n_times = X.shape[-1]
max_size = max(w.size for w in wavelets)
if max_size > n_times:
msg = (f'At least one of the wavelets ({max_size}) is longer than the '
f'signal ({n_times}). Consider using a longer signal or '
'shorter wavelets.')
if check:
if use_fft:
warn(msg, UserWarning)
else:
raise ValueError(msg)
nfft = n_times + max_size - 1
nfft = next_fast_len(nfft) # 2 ** int(np.ceil(np.log2(nfft)))
return nfft
def _cwt_gen(X, Ws, *, fsize=0, mode="same", decim=1, use_fft=True):
    """Compute cwt with fft based convolutions or temporal convolutions.

    Parameters
    ----------
    X : array of shape (n_signals, n_times)
        The data.
    Ws : list of array
        Wavelets time series.
    fsize : int
        FFT length.
    mode : {'full', 'valid', 'same'}
        See numpy.convolve.
    decim : int | slice, default 1
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note:: Decimation may create aliasing artifacts.

    use_fft : bool, default True
        Use the FFT for convolutions or not.

    Yields
    ------
    tfr : array, shape (n_freqs, n_times_decim)
        The time-frequency transform of one signal.

        .. note:: The SAME buffer object is reused and mutated for every
           yielded signal -- consumers must copy it if they need to keep a
           result past the next iteration.
    """
    _check_option('mode', mode, ['same', 'valid', 'full'])
    # normalize decim to a slice so `ret[decim]` works uniformly below
    decim = _check_decim(decim)
    X = np.asarray(X)
    # Precompute wavelets for given frequency range to save time
    _, n_times = X.shape
    n_times_out = X[:, decim].shape[1]
    n_freqs = len(Ws)
    # precompute FFTs of Ws
    if use_fft:
        fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
        for i, W in enumerate(Ws):
            fft_Ws[i] = fft(W, fsize)
    # Make generator looping across signals
    # NOTE: single shared output buffer (see docstring note above)
    tfr = np.zeros((n_freqs, n_times_out), dtype=np.complex128)
    for x in X:
        if use_fft:
            fft_x = fft(x, fsize)
        # Loop across wavelets
        for ii, W in enumerate(Ws):
            if use_fft:
                # keep only the linear part of the circular convolution:
                # the full linear convolution is n_times + W.size - 1 long
                ret = ifft(fft_x * fft_Ws[ii])[:n_times + W.size - 1]
            else:
                ret = np.convolve(x, W, mode=mode)
            # Center and decimate decomposition
            if mode == 'valid':
                sz = int(abs(W.size - n_times)) + 1
                offset = (n_times - sz) // 2
                # map the un-decimated [offset, offset+sz) window into
                # decimated coordinates (decim.step assumed non-None after
                # _check_decim -- TODO confirm)
                this_slice = slice(offset // decim.step,
                                   (offset + sz) // decim.step)
                if use_fft:
                    ret = _centered(ret, sz)
                tfr[ii, this_slice] = ret[decim]
            elif mode == 'full' and not use_fft:
                # trim np.convolve's 'full' output back to n_times samples
                start = (W.size - 1) // 2
                end = len(ret) - (W.size // 2)
                ret = ret[start:end]
                tfr[ii, :] = ret[decim]
            else:
                if use_fft:
                    ret = _centered(ret, n_times)
                tfr[ii, :] = ret[decim]
        yield tfr
# Loop of convolution: single trial
def _compute_tfr(epoch_data, freqs, sfreq=1.0, method='morlet',
                 n_cycles=7.0, zero_mean=None, time_bandwidth=None,
                 use_fft=True, decim=1, output='complex', n_jobs=1,
                 verbose=None):
    """Compute time-frequency transforms.

    Parameters
    ----------
    epoch_data : array of shape (n_epochs, n_channels, n_times)
        The epochs.
    freqs : array-like of floats, shape (n_freqs)
        The frequencies.
    sfreq : float | int, default 1.0
        Sampling frequency of the data.
    method : 'multitaper' | 'morlet', default 'morlet'
        The time-frequency method. 'morlet' convolves a Morlet wavelet.
        'multitaper' uses complex exponentials windowed with multiple DPSS
        tapers.
    n_cycles : float | array of float, default 7.0
        Number of cycles in the wavelet. Fixed number
        or one per frequency.
    zero_mean : bool | None, default None
        None means True for method='multitaper' and False for method='morlet'.
        If True, make sure the wavelets have a mean of zero.
    time_bandwidth : float, default None
        If None and method=multitaper, will be set to 4.0 (3 tapers).
        Time x (Full) Bandwidth product. Only applies if
        method == 'multitaper'. The number of good tapers (low-bias) is
        chosen automatically based on this to equal floor(time_bandwidth - 1).
    use_fft : bool, default True
        Use the FFT for convolutions or not.
    decim : int | slice, default 1
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note::
            Decimation may create aliasing artifacts, yet decimation
            is done after the convolutions.

    output : str, default 'complex'

        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    %(n_jobs)s
        The number of epochs to process at the same time. The parallelization
        is implemented across channels.
    %(verbose)s

    Returns
    -------
    out : array
        Time frequency transform of epoch_data. If output is in ['complex',
        'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
        n_times), else it is (n_chans, n_freqs, n_times). If output is
        'avg_power_itc', the real values code for 'avg_power' and the
        imaginary values code for the 'itc': out = avg_power + i * itc
    """
    # Check data
    epoch_data = np.asarray(epoch_data)
    if epoch_data.ndim != 3:
        raise ValueError('epoch_data must be of shape (n_epochs, n_chans, '
                         'n_times), got %s' % (epoch_data.shape,))
    # Check params
    freqs, sfreq, zero_mean, n_cycles, time_bandwidth, decim = \
        _check_tfr_param(freqs, sfreq, method, zero_mean, n_cycles,
                         time_bandwidth, use_fft, decim, output)
    # normalize decim (int or slice) to a slice
    decim = _check_decim(decim)
    if (freqs > sfreq / 2.).any():
        raise ValueError('Cannot compute freq above Nyquist freq of the data '
                         '(%0.1f Hz), got %0.1f Hz'
                         % (sfreq / 2., freqs.max()))
    # We decimate *after* decomposition, so we need to create our kernels
    # for the original sfreq
    if method == 'morlet':
        W = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
        Ws = [W]  # to have same dimensionality as the 'multitaper' case
    elif method == 'multitaper':
        Ws = _make_dpss(sfreq, freqs, n_cycles=n_cycles,
                        time_bandwidth=time_bandwidth, zero_mean=zero_mean)
    # Check wavelets
    if len(Ws[0][0]) > epoch_data.shape[2]:
        raise ValueError('At least one of the wavelets is longer than the '
                         'signal. Use a longer signal or shorter wavelets.')
    # Initialize output
    n_freqs = len(freqs)
    n_epochs, n_chans, n_times = epoch_data[:, :, decim].shape
    if output in ('power', 'phase', 'avg_power', 'itc'):
        dtype = np.float64
    elif output in ('complex', 'avg_power_itc'):
        # avg_power_itc is stored as power + 1i * itc to keep a
        # simple dimensionality
        dtype = np.complex128
    # averaged outputs collapse the epochs axis
    if ('avg_' in output) or ('itc' in output):
        out = np.empty((n_chans, n_freqs, n_times), dtype)
    else:
        out = np.empty((n_chans, n_epochs, n_freqs, n_times), dtype)
    # Parallel computation
    # flatten tapers x wavelets into a single list; _get_nfft is called
    # only for its length check / warning side effect (result unused here)
    all_Ws = sum([list(W) for W in Ws], list())
    _get_nfft(all_Ws, epoch_data, use_fft)
    parallel, my_cwt, _ = parallel_func(_time_frequency_loop, n_jobs)
    # Parallelization is applied across channels.
    tfrs = parallel(
        my_cwt(channel, Ws, output, use_fft, 'same', decim)
        for channel in epoch_data.transpose(1, 0, 2))
    # FIXME: to avoid overheads we should use np.array_split()
    for channel_idx, tfr in enumerate(tfrs):
        out[channel_idx] = tfr
    if ('avg_' not in output) and ('itc' not in output):
        # This is to enforce that the first dimension is for epochs
        out = out.transpose(1, 0, 2, 3)
    return out
def _check_tfr_param(freqs, sfreq, method, zero_mean, n_cycles,
time_bandwidth, use_fft, decim, output):
"""Aux. function to _compute_tfr to check the params validity."""
# Check freqs
if not isinstance(freqs, (list, np.ndarray)):
raise ValueError('freqs must be an array-like, got %s '
'instead.' % type(freqs))
freqs = np.asarray(freqs, dtype=float)
if freqs.ndim != 1:
raise ValueError('freqs must be of shape (n_freqs,), got %s '
'instead.' % np.array(freqs.shape))
# Check sfreq
if not isinstance(sfreq, (float, int)):
raise ValueError('sfreq must be a float or an int, got %s '
'instead.' % type(sfreq))
sfreq = float(sfreq)
# Default zero_mean = True if multitaper else False
zero_mean = method == 'multitaper' if zero_mean is None else zero_mean
if not isinstance(zero_mean, bool):
raise ValueError('zero_mean should be of type bool, got %s. instead'
% type(zero_mean))
freqs = np.asarray(freqs)
if (method == 'multitaper') and (output == 'phase'):
raise NotImplementedError(
'This function is not optimized to compute the phase using the '
'multitaper method. Use np.angle of the complex output instead.')
# Check n_cycles
if isinstance(n_cycles, (int, float)):
n_cycles = float(n_cycles)
elif isinstance(n_cycles, (list, np.ndarray)):
n_cycles = np.array(n_cycles)
if len(n_cycles) != len(freqs):
raise ValueError('n_cycles must be a float or an array of length '
'%i frequencies, got %i cycles instead.' %
(len(freqs), len(n_cycles)))
else:
raise ValueError('n_cycles must be a float or an array, got %s '
'instead.' % type(n_cycles))
# Check time_bandwidth
if (method == 'morlet') and (time_bandwidth is not None):
raise ValueError('time_bandwidth only applies to "multitaper" method.')
elif method == 'multitaper':
time_bandwidth = (4.0 if time_bandwidth is None
else float(time_bandwidth))
# Check use_fft
if not isinstance(use_fft, bool):
raise ValueError('use_fft must be a boolean, got %s '
'instead.' % type(use_fft))
# Check decim
if isinstance(decim, int):
decim = slice(None, None, decim)
if not isinstance(decim, slice):
raise ValueError('decim must be an integer or a slice, '
'got %s instead.' % type(decim))
# Check output
_check_option('output', output, ['complex', 'power', 'phase',
'avg_power_itc', 'avg_power', 'itc'])
_check_option('method', method, ['multitaper', 'morlet'])
return freqs, sfreq, zero_mean, n_cycles, time_bandwidth, decim
def _time_frequency_loop(X, Ws, output, use_fft, mode, decim):
    """Aux. function to _compute_tfr.

    Loops time-frequency transform across wavelets and epochs.

    Parameters
    ----------
    X : array, shape (n_epochs, n_times)
        The epochs data of a single channel.
    Ws : list, shape (n_tapers, n_wavelets, n_times)
        The wavelets.
    output : str
        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    use_fft : bool
        Use the FFT for convolutions or not.
    mode : {'full', 'valid', 'same'}
        See numpy.convolve.
    decim : slice
        The decimation slice: e.g. power[:, decim]

    Returns
    -------
    tfrs : array
        Shape (n_freqs, n_times) for the averaged outputs ('avg_power',
        'itc', 'avg_power_itc'), otherwise (n_epochs, n_freqs, n_times).
        Complex for 'complex' and 'avg_power_itc', float otherwise.
    """
    # Set output type. Complex dtype is needed both for raw coefficients
    # ('complex') and for 'avg_power_itc', which packs power into the real
    # part and ITC into the imaginary part of a single array (see below).
    dtype = np.float64
    if output in ['complex', 'avg_power_itc']:
        dtype = np.complex128
    # Init outputs. Averaged metrics accumulate across epochs into a single
    # 2D array; single-trial outputs keep a per-epoch axis.
    decim = _check_decim(decim)
    n_epochs, n_times = X[:, decim].shape
    n_freqs = len(Ws[0])
    if ('avg_' in output) or ('itc' in output):
        tfrs = np.zeros((n_freqs, n_times), dtype=dtype)
    else:
        tfrs = np.zeros((n_epochs, n_freqs, n_times), dtype=dtype)
    # Loops across tapers.
    for W in Ws:
        # No need to check here, it's done earlier (outside parallel part)
        nfft = _get_nfft(W, X, use_fft, check=False)
        coefs = _cwt_gen(
            X, W, fsize=nfft, mode=mode, decim=decim, use_fft=use_fft)
        # Inter-trial phase locking is apparently computed per taper...
        if 'itc' in output:
            # Phase-locking factor accumulator: sum of unit-magnitude
            # complex phases across epochs (for this taper).
            plf = np.zeros((n_freqs, n_times), dtype=np.complex128)
        # Loop across epochs
        for epoch_idx, tfr in enumerate(coefs):
            # Transform complex values
            if output in ['power', 'avg_power']:
                tfr = (tfr * tfr.conj()).real  # power
            elif output == 'phase':
                tfr = np.angle(tfr)
            elif output == 'avg_power_itc':
                tfr_abs = np.abs(tfr)
                plf += tfr / tfr_abs  # phase
                tfr = tfr_abs ** 2  # power
            elif output == 'itc':
                plf += tfr / np.abs(tfr)  # phase
                continue  # not need to stack anything else than plf
            # Stack or add
            if ('avg_' in output) or ('itc' in output):
                tfrs += tfr
            else:
                tfrs[epoch_idx] += tfr
        # Compute inter trial coherence
        if output == 'avg_power_itc':
            # ITC goes into the imaginary part; power stays in the real part.
            tfrs += 1j * np.abs(plf)
        elif output == 'itc':
            tfrs += np.abs(plf)
    # Normalization of average metrics
    if ('avg_' in output) or ('itc' in output):
        tfrs /= n_epochs
    # Normalization by number of taper
    tfrs /= len(Ws)
    return tfrs
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
    """Compute a time-frequency decomposition via continuous wavelet transform.

    Parameters
    ----------
    X : array, shape (n_signals, n_times)
        The signals to decompose.
    Ws : list of array
        The wavelet time series to convolve with.
    use_fft : bool
        Whether to perform the convolutions with the FFT. Defaults to True.
    mode : 'same' | 'valid' | 'full'
        Convolution convention. 'full' is currently not implemented with
        ``use_fft=False``. Defaults to ``'same'``.
    decim : int | slice
        Decimation factor applied after the time-frequency decomposition
        to reduce memory usage.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note:: Decimation may create aliasing artifacts.

        Defaults to 1.

    Returns
    -------
    tfr : array, shape (n_signals, n_freqs, n_times)
        The time-frequency decompositions.

    See Also
    --------
    mne.time_frequency.tfr_morlet : Compute time-frequency decomposition
                                    with Morlet wavelets.
    """
    # Determine the FFT size once, then delegate to the array materializer.
    return _cwt_array(X, Ws, _get_nfft(Ws, X, use_fft), mode, decim, use_fft)
def _cwt_array(X, Ws, nfft, mode, decim, use_fft):
    """Materialize the CWT generator into an (n_signals, n_freqs, n_times) array."""
    decim = _check_decim(decim)
    coef_gen = _cwt_gen(
        X, Ws, fsize=nfft, mode=mode, decim=decim, use_fft=use_fft)
    n_signals, n_times = X[:, decim].shape
    # One complex coefficient plane per signal, filled lazily from the
    # generator to avoid holding intermediate lists.
    out = np.empty((n_signals, len(Ws), n_times), dtype=np.complex128)
    for idx, coef in enumerate(coef_gen):
        out[idx] = coef
    return out
def _tfr_aux(method, inst, freqs, decim, return_itc, picks, average,
             output=None, **tfr_params):
    """Help reduce redundancy between tfr_morlet and tfr_multitaper.

    Runs ``_compute_tfr`` on the data of ``inst`` and wraps the result in
    an ``AverageTFR`` (``average=True``) or ``EpochsTFR`` (``average=False``).

    Note: the docstring used to be placed *after* the import statement
    below, which made it a dead string literal rather than the function's
    docstring; it now comes first, per PEP 257.

    Parameters
    ----------
    method : str
        The TFR method ('morlet' or 'multitaper'); forwarded to
        ``_compute_tfr`` and used to label the output containers.
    inst : Epochs | Evoked
        The object holding the data to decompose.
    freqs : array-like
        The frequencies of interest in Hz.
    decim : int | slice
        Decimation applied after the transform; the output ``sfreq`` is
        divided by ``decim.step``.
    return_itc : bool
        If True, also return inter-trial coherence. Only valid with
        ``average=True``.
    picks : array-like of int | None
        Channel selection, forwarded to ``_prepare_picks``.
    average : bool
        Whether to average power across epochs.
    output : str | None
        Output type for ``_compute_tfr``. If None, defaults to
        'avg_power' / 'avg_power_itc' when averaging, else 'power'.
    **tfr_params : dict
        Additional keyword arguments for ``_compute_tfr``.

    Returns
    -------
    out : AverageTFR | EpochsTFR | tuple of AverageTFR
        The power, or ``(power, itc)`` if ``return_itc=True``.
    """
    from ..epochs import BaseEpochs
    decim = _check_decim(decim)
    data = _get_data(inst, return_itc)
    info = inst.info.copy()  # make a copy as sfreq can be altered
    info, data = _prepare_picks(info, data, picks, axis=1)
    del picks
    if average:
        if output == 'complex':
            raise ValueError('output must be "power" if average=True')
        if return_itc:
            output = 'avg_power_itc'
        else:
            output = 'avg_power'
    else:
        output = 'power' if output is None else output
        if return_itc:
            raise ValueError('Inter-trial coherence is not supported'
                             ' with average=False')
    out = _compute_tfr(data, freqs, info['sfreq'], method=method,
                       output=output, decim=decim, **tfr_params)
    times = inst.times[decim].copy()
    info['sfreq'] /= decim.step
    if average:
        if return_itc:
            # 'avg_power_itc' packs power into the real part and ITC into
            # the imaginary part of a single complex array; unpack here.
            power, itc = out.real, out.imag
        else:
            power = out
        nave = len(data)
        out = AverageTFR(info, power, times, freqs, nave,
                         method='%s-power' % method)
        if return_itc:
            out = (out, AverageTFR(info, itc, times, freqs, nave,
                                   method='%s-itc' % method))
    else:
        power = out
        if isinstance(inst, BaseEpochs):
            # Carry over epoch bookkeeping so the EpochsTFR can be
            # selected/indexed like the original Epochs.
            meta = deepcopy(inst._metadata)
            evs = deepcopy(inst.events)
            ev_id = deepcopy(inst.event_id)
        else:
            # if the input is of class Evoked
            meta = evs = ev_id = None
        out = EpochsTFR(info, power, times, freqs, method='%s-power' % method,
                        events=evs, event_id=ev_id, metadata=meta)
    return out
@verbose
def tfr_morlet(inst, freqs, n_cycles, use_fft=False, return_itc=True, decim=1,
               n_jobs=1, picks=None, zero_mean=True, average=True,
               output='power', verbose=None):
    """Compute Time-Frequency Representation (TFR) using Morlet wavelets.

    Same computation as `~mne.time_frequency.tfr_array_morlet`, but
    operates on `~mne.Epochs` objects instead of
    :class:`NumPy arrays <numpy.ndarray>`.

    Parameters
    ----------
    inst : Epochs | Evoked
        The epochs or evoked object.
    freqs : ndarray, shape (n_freqs,)
        The frequencies of interest in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        Number of cycles in the wavelet, either a fixed number or one per
        frequency.
    use_fft : bool, default False
        Whether to use FFT-based convolution.
    return_itc : bool, default True
        Whether to return inter-trial coherence (ITC) in addition to the
        averaged power. Must be ``False`` for evoked data.
    decim : int | slice, default 1
        Decimation factor applied after the time-frequency decomposition
        to reduce memory usage.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note:: Decimation may create aliasing artifacts.
    %(n_jobs)s
    picks : array-like of int | None, default None
        Indices of the channels to decompose. If None, all available
        good data channels are decomposed.
    zero_mean : bool, default True
        If True, make sure the wavelet has a mean of zero.

        .. versionadded:: 0.13.0
    %(tfr_average)s
    output : str
        Either "power" (default) or "complex". ``average`` must be False
        when "complex" is requested.

        .. versionadded:: 0.15.0
    %(verbose)s

    Returns
    -------
    power : AverageTFR | EpochsTFR
        The averaged or single-trial power.
    itc : AverageTFR | EpochsTFR
        The inter-trial coherence (ITC). Only returned if return_itc
        is True.

    See Also
    --------
    mne.time_frequency.tfr_array_morlet
    mne.time_frequency.tfr_multitaper
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell
    """
    # Delegate to the shared helper; the Morlet-specific options travel as
    # keyword arguments.
    return _tfr_aux(
        'morlet', inst, freqs, decim, return_itc, picks, average,
        n_cycles=n_cycles, n_jobs=n_jobs, use_fft=use_fft,
        zero_mean=zero_mean, output=output)
@verbose
def tfr_array_morlet(epoch_data, sfreq, freqs, n_cycles=7.0,
                     zero_mean=False, use_fft=True, decim=1, output='complex',
                     n_jobs=1, verbose=None):
    """Compute Time-Frequency Representation (TFR) using Morlet wavelets.

    Same computation as `~mne.time_frequency.tfr_morlet`, but operates on
    :class:`NumPy arrays <numpy.ndarray>` instead of `~mne.Epochs` objects.

    Parameters
    ----------
    epoch_data : array of shape (n_epochs, n_channels, n_times)
        The epochs.
    sfreq : float | int
        Sampling frequency of the data.
    freqs : array-like of float, shape (n_freqs,)
        The frequencies of interest.
    n_cycles : float | array of float, default 7.0
        Number of cycles in the Morlet wavelet, either a fixed number or
        one per frequency.
    zero_mean : bool | False
        If True, make sure the wavelets have a mean of zero. default False.
    use_fft : bool
        Whether to use the FFT for convolutions. default True.
    decim : int | slice
        Decimation factor applied after the time-frequency decomposition
        to reduce memory usage. default 1
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note::
            Decimation may create aliasing artifacts, yet decimation
            is done after the convolutions.
    output : str, default 'complex'
        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    %(n_jobs)s
        The number of epochs to process at the same time. The
        parallelization is implemented across channels. Default 1.
    %(verbose)s

    Returns
    -------
    out : array
        Time frequency transform of epoch_data. If output is in ['complex',
        'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
        n_times), else it is (n_chans, n_freqs, n_times). If output is
        'avg_power_itc', the real values code for 'avg_power' and the
        imaginary values code for the 'itc': out = avg_power + i * itc.

    See Also
    --------
    mne.time_frequency.tfr_morlet
    mne.time_frequency.tfr_multitaper
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell

    Notes
    -----
    .. versionadded:: 0.14.0
    """
    # Collect everything for the generic driver; time_bandwidth is fixed to
    # None because it only applies to the multitaper method.
    tfr_params = dict(
        epoch_data=epoch_data, freqs=freqs, sfreq=sfreq, method='morlet',
        n_cycles=n_cycles, zero_mean=zero_mean, time_bandwidth=None,
        use_fft=use_fft, decim=decim, output=output, n_jobs=n_jobs,
        verbose=verbose)
    return _compute_tfr(**tfr_params)
@verbose
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0,
                   use_fft=True, return_itc=True, decim=1,
                   n_jobs=1, picks=None, average=True, verbose=None):
    """Compute Time-Frequency Representation (TFR) using DPSS tapers.

    Same computation as `~mne.time_frequency.tfr_array_multitaper`, but
    operates on `~mne.Epochs` objects instead of
    :class:`NumPy arrays <numpy.ndarray>`.

    Parameters
    ----------
    inst : Epochs | Evoked
        The epochs or evoked object.
    freqs : ndarray, shape (n_freqs,)
        The frequencies of interest in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        Number of cycles, either a fixed number or one per frequency.
        The time-window length is thus T = n_cycles / freq.
    time_bandwidth : float, (optional), default 4.0 (n_tapers=3)
        Time x (Full) Bandwidth product. Should be >= 2.0.
        Choose this along with n_cycles to get desired frequency resolution.
        The number of good tapers (least leakage from far away frequencies)
        is chosen automatically based on this to floor(time_bandwidth - 1).
        E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
        If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
    use_fft : bool, default True
        Whether to use FFT-based convolution.
    return_itc : bool, default True
        Whether to return inter-trial coherence (ITC) in addition to the
        averaged (or single-trial) power.
    decim : int | slice, default 1
        Decimation factor applied after the time-frequency decomposition
        to reduce memory usage.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].

        .. note:: Decimation may create aliasing artifacts.
    %(n_jobs)s
    %(picks_good_data)s
    %(tfr_average)s
    %(verbose)s

    Returns
    -------
    power : AverageTFR | EpochsTFR
        The averaged or single-trial power.
    itc : AverageTFR | EpochsTFR
        The inter-trial coherence (ITC). Only returned if return_itc
        is True.

    See Also
    --------
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell
    mne.time_frequency.tfr_morlet
    mne.time_frequency.tfr_array_morlet

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # Delegate to the shared helper; zero_mean is always True for the
    # multitaper method (matching the _compute_tfr default).
    return _tfr_aux(
        'multitaper', inst, freqs, decim, return_itc, picks, average,
        n_cycles=n_cycles, n_jobs=n_jobs, use_fft=use_fft, zero_mean=True,
        time_bandwidth=time_bandwidth)
# TFR(s) class
class _BaseTFR(ContainsMixin, UpdateChannelsMixin, SizeMixin):
    """Base TFR class.

    Provides functionality shared by the concrete TFR containers:
    data access, cropping, baseline correction, copying, and saving.
    """

    @property
    def data(self):
        # Time-frequency data; the last two axes are (freqs, times), as
        # shown by the indexing in crop() below.
        return self._data

    @data.setter
    def data(self, data):
        self._data = data

    @property
    def ch_names(self):
        """Channel names."""
        return self.info['ch_names']

    @fill_doc
    def crop(self, tmin=None, tmax=None, fmin=None, fmax=None,
             include_tmax=True):
        """Crop data to a given time interval in place.

        Parameters
        ----------
        tmin : float | None
            Start time of selection in seconds.
        tmax : float | None
            End time of selection in seconds.
        fmin : float | None
            Lowest frequency of selection in Hz.

            .. versionadded:: 0.18.0
        fmax : float | None
            Highest frequency of selection in Hz.

            .. versionadded:: 0.18.0
        %(include_tmax)s

        Returns
        -------
        inst : instance of AverageTFR
            The modified instance.
        """
        # Build a boolean mask (or a pass-through slice) per axis; a plain
        # slice(None) avoids touching the axis at all when no limit is given.
        if tmin is not None or tmax is not None:
            time_mask = _time_mask(
                self.times, tmin, tmax, sfreq=self.info['sfreq'],
                include_tmax=include_tmax)
        else:
            time_mask = slice(None)
        if fmin is not None or fmax is not None:
            freq_mask = _freq_mask(self.freqs, sfreq=self.info['sfreq'],
                                   fmin=fmin, fmax=fmax)
        else:
            freq_mask = slice(None)
        self.times = self.times[time_mask]
        self.freqs = self.freqs[freq_mask]
        # Deal with broadcasting (boolean arrays do not broadcast, but indices
        # do, so we need to convert freq_mask to make use of broadcasting)
        if isinstance(time_mask, np.ndarray) and \
                isinstance(freq_mask, np.ndarray):
            freq_mask = np.where(freq_mask)[0][:, np.newaxis]
        self.data = self.data[..., freq_mask, time_mask]
        return self

    def copy(self):
        """Return a copy of the instance.

        Returns
        -------
        copy : instance of EpochsTFR | instance of AverageTFR
            A copy of the instance.
        """
        return deepcopy(self)

    @verbose
    def apply_baseline(self, baseline, mode='mean', verbose=None):
        """Baseline correct the data.

        Parameters
        ----------
        baseline : array-like, shape (2,)
            The time interval to apply rescaling / baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
            Perform baseline correction by

            - subtracting the mean of baseline values ('mean')
            - dividing by the mean of baseline values ('ratio')
            - dividing by the mean of baseline values and taking the log
              ('logratio')
            - subtracting the mean of baseline values followed by dividing by
              the mean of baseline values ('percent')
            - subtracting the mean of baseline values and dividing by the
              standard deviation of baseline values ('zscore')
            - dividing by the mean of baseline values, taking the log, and
              dividing by the standard deviation of log baseline values
              ('zlogratio')
        %(verbose_meth)s

        Returns
        -------
        inst : instance of AverageTFR
            The modified instance.
        """  # noqa: E501
        # rescale operates in place (copy=False), so self.data is modified.
        rescale(self.data, self.times, baseline, mode, copy=False)
        return self

    def save(self, fname, overwrite=False):
        """Save TFR object to hdf5 file.

        Parameters
        ----------
        fname : str
            The file name, which should end with ``-tfr.h5``.
        overwrite : bool
            If True, overwrite file (if it exists). Defaults to False.

        See Also
        --------
        read_tfrs, write_tfrs
        """
        write_tfrs(fname, self, overwrite=overwrite)
@fill_doc
class AverageTFR(_BaseTFR):
"""Container for Time-Frequency data.
Can for example store induced power at sensor level or inter-trial
coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None, default None
Comment on the data, e.g., the experimental condition.
method : str | None, default None
Comment on the method used to compute the data, e.g., morlet wavelet.
%(verbose)s
Attributes
----------
info : instance of Info
Measurement info.
ch_names : list
The names of the channels.
nave : int
Number of averaged epochs.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data array.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
comment : str
Comment on dataset. Can be the condition.
method : str | None, default None
Comment on the method used to compute the data, e.g., morlet wavelet.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, comment=None,
method=None, verbose=None): # noqa: D102
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = np.array(times, dtype=float)
self.freqs = np.array(freqs, dtype=float)
self.nave = nave
self.comment = comment
self.method = method
self.preload = True
@verbose
def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
cmap='RdBu_r', dB=False, colorbar=True, show=True, title=None,
axes=None, layout=None, yscale='auto', mask=None,
mask_style=None, mask_cmap="Greys", mask_alpha=0.1, combine=None,
exclude=[], verbose=None):
"""Plot TFRs as a two-dimensional image(s).
Parameters
----------
%(picks_good_data)s
baseline : None (default) or tuple, shape (2,)
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
Perform baseline correction by
- subtracting the mean of baseline values ('mean')
- dividing by the mean of baseline values ('ratio')
- dividing by the mean of baseline values and taking the log
('logratio')
- subtracting the mean of baseline values followed by dividing by
the mean of baseline values ('percent')
- subtracting the mean of baseline values and dividing by the
standard deviation of baseline values ('zscore')
- dividing by the mean of baseline values, taking the log, and
dividing by the standard deviation of log baseline values
('zlogratio')
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The minimum value an the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
The maximum value an the color scale. If vmax is None, the data
maximum value is used.
cmap : matplotlib colormap | 'interactive' | (colormap, bool)
The colormap to use. If tuple, the first value indicates the
colormap to use and the second value is a boolean defining
interactivity. In interactive mode the colors are adjustable by
clicking and dragging the colorbar with left and right mouse
button. Left mouse button moves the scale up and down and right
mouse button adjusts the range. Hitting space bar resets the range.
Up and down arrows can be used to change the colormap. If
'interactive', translates to ('RdBu_r', True). Defaults to
'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of images.
dB : bool
If True, 10*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot. For user defined axes,
the colorbar cannot be drawn. Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | 'auto' | None
String for title. Defaults to None (blank/no title). If 'auto',
automatically create a title that lists up to 6 of the channels
used in the figure.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channels. If instance of Axes,
there must be only one channel plotted.
layout : Layout | None
Layout instance specifying sensor positions. Used for interactive
plotting of topographies on rectangle selection. If possible, the
correct layout is inferred from the data.
yscale : 'auto' (default) | 'linear' | 'log'
The scale of y (frequency) axis. 'linear' gives linear y axis,
'log' leads to log-spaced y axis and 'auto' detects if frequencies
are log-spaced and only then sets the y axis to 'log'.
.. versionadded:: 0.14.0
mask : ndarray | None
An array of booleans of the same shape as the data. Entries of the
data that correspond to False in the mask are plotted
transparently. Useful for, e.g., masking for statistical
significance.
.. versionadded:: 0.16.0
mask_style : None | 'both' | 'contour' | 'mask'
If ``mask`` is not None: if ``'contour'``, a contour line is drawn
around the masked areas (``True`` in ``mask``). If ``'mask'``,
entries not ``True`` in ``mask`` are shown transparently. If
``'both'``, both a contour and transparency are used.
If ``None``, defaults to ``'both'`` if ``mask`` is not None, and is
ignored otherwise.
.. versionadded:: 0.17
mask_cmap : matplotlib colormap | (colormap, bool) | 'interactive'
The colormap chosen for masked parts of the image (see below), if
``mask`` is not ``None``. If None, ``cmap`` is reused. Defaults to
``'Greys'``. Not interactive. Otherwise, as ``cmap``.
.. versionadded:: 0.17
mask_alpha : float
A float between 0 and 1. If ``mask`` is not None, this sets the
alpha level (degree of transparency) for the masked-out segments.
I.e., if 0, masked-out segments are not visible at all.
Defaults to 0.1.
.. versionadded:: 0.16.0
combine : 'mean' | 'rms' | None
Type of aggregation to perform across selected channels. If
None, plot one figure per selected channel.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded. Defaults to an empty list.
%(verbose_meth)s
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
""" # noqa: E501
return self._plot(picks=picks, baseline=baseline, mode=mode,
tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
vmin=vmin, vmax=vmax, cmap=cmap, dB=dB,
colorbar=colorbar, show=show, title=title,
axes=axes, layout=layout, yscale=yscale, mask=mask,
mask_style=mask_style, mask_cmap=mask_cmap,
mask_alpha=mask_alpha, combine=combine,
exclude=exclude, verbose=verbose)
    @verbose
    def _plot(self, picks=None, baseline=None, mode='mean', tmin=None,
              tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
              cmap='RdBu_r', dB=False, colorbar=True, show=True, title=None,
              axes=None, layout=None, yscale='auto', mask=None,
              mask_style=None, mask_cmap="Greys", mask_alpha=.25,
              combine=None, exclude=None, copy=True,
              source_plot_joint=False, topomap_args=dict(), ch_type=None,
              verbose=None):
        """Plot TFRs as a two-dimensional image(s).

        See self.plot() for parameters description.
        """
        # NOTE(review): ``topomap_args=dict()`` is a mutable default
        # argument; it appears to be read-only here (only .items() is
        # called), but confirm before relying on that.
        import matplotlib.pyplot as plt
        from ..viz.topo import _imshow_tfr
        # channel selection
        # simply create a new tfr object(s) with the desired channel selection
        tfr = _preproc_tfr_instance(
            self, picks, tmin, tmax, fmin, fmax, vmin, vmax, dB, mode,
            baseline, exclude, copy)
        del picks
        data = tfr.data
        # With combine, all selected channels collapse into one image.
        n_picks = len(tfr.ch_names) if combine is None else 1
        if combine == 'mean':
            data = data.mean(axis=0, keepdims=True)
        elif combine == 'rms':
            data = np.sqrt((data ** 2).mean(axis=0, keepdims=True))
        elif combine is not None:
            raise ValueError('combine must be None, mean or rms.')
        if isinstance(axes, list) or isinstance(axes, np.ndarray):
            if len(axes) != n_picks:
                raise RuntimeError('There must be an axes for each picked '
                                   'channel.')
        # Color limits default to symmetric around zero.
        tmin, tmax = tfr.times[[0, -1]]
        if vmax is None:
            vmax = np.abs(data).max()
        if vmin is None:
            vmin = -np.abs(data).max()
        if isinstance(axes, plt.Axes):
            axes = [axes]
        cmap = _setup_cmap(cmap)
        for idx in range(len(data)):
            if axes is None:
                fig = plt.figure()
                ax = fig.add_subplot(111)
            else:
                ax = axes[idx]
                fig = ax.get_figure()
            # Rectangle-selection callback; vmin/vmax/cmap/axes are stripped
            # from topomap_args because the callback controls those itself.
            onselect_callback = partial(
                tfr._onselect, cmap=cmap, source_plot_joint=source_plot_joint,
                topomap_args={k: v for k, v in topomap_args.items()
                              if k not in {"vmin", "vmax", "cmap", "axes"}})
            _imshow_tfr(
                ax, 0, tmin, tmax, vmin, vmax, onselect_callback, ylim=None,
                tfr=data[idx: idx + 1], freq=tfr.freqs, x_label='Time (s)',
                y_label='Frequency (Hz)', colorbar=colorbar, cmap=cmap,
                yscale=yscale, mask=mask, mask_style=mask_style,
                mask_cmap=mask_cmap, mask_alpha=mask_alpha)
            if title is None:
                if combine is None or len(tfr.info['ch_names']) == 1:
                    title = tfr.info['ch_names'][0]
                else:
                    title = _set_title_multiple_electrodes(
                        title, combine, tfr.info["ch_names"], all=True,
                        ch_type=ch_type)
            if title:
                fig.suptitle(title)
            plt_show(show)
            # XXX This is inside the loop, guaranteeing a single iter!
            # Also there is no None-contingent behavior here so the docstring
            # was wrong (saying it would be collapsed)
            return fig
@verbose
def plot_joint(self, timefreqs=None, picks=None, baseline=None,
mode='mean', tmin=None, tmax=None, fmin=None, fmax=None,
vmin=None, vmax=None, cmap='RdBu_r', dB=False,
colorbar=True, show=True, title=None,
yscale='auto', combine='mean', exclude=[],
topomap_args=None, image_args=None, verbose=None):
"""Plot TFRs as a two-dimensional image with topomaps.
Parameters
----------
timefreqs : None | list of tuple | dict of tuple
The time-frequency point(s) for which topomaps will be plotted.
See Notes.
%(picks_good_data)s
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None, the beginning of the data is used.
If b is None, then b is set to the end of the interval.
If baseline is equal to (None, None), the entire time
interval is used.
mode : None | str
If str, must be one of 'ratio', 'zscore', 'mean', 'percent',
'logratio' and 'zlogratio'.
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)),
mean simply subtracts the mean power, percent is the same as
applying ratio then mean, logratio is the same as mean but then
rendered in log-scale, zlogratio is the same as zscore but data
is rendered in log-scale first.
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The minimum value of the color scale for the image (for
topomaps, see ``topomap_args``). If vmin is None, the data
absolute minimum value is used.
vmax : float | None
The maximum value of the color scale for the image (for
topomaps, see ``topomap_args``). If vmax is None, the data
absolute maximum value is used.
cmap : matplotlib colormap
The colormap to use.
dB : bool
If True, 10*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot (relating to the
topomaps). For user defined axes, the colorbar cannot be drawn.
Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
yscale : 'auto' (default) | 'linear' | 'log'
The scale of y (frequency) axis. 'linear' gives linear y axis,
'log' leads to log-spaced y axis and 'auto' detects if frequencies
are log-spaced and only then sets the y axis to 'log'.
combine : 'mean' | 'rms'
Type of aggregation to perform across selected channels.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded. Defaults to an empty list, i.e., ``[]``.
topomap_args : None | dict
A dict of ``kwargs`` that are forwarded to
:func:`mne.viz.plot_topomap` to style the topomaps. ``axes`` and
``show`` are ignored. If ``times`` is not in this dict, automatic
peak detection is used. Beyond that, if ``None``, no customizable
arguments will be passed.
Defaults to ``None``.
image_args : None | dict
A dict of ``kwargs`` that are forwarded to :meth:`AverageTFR.plot`
to style the image. ``axes`` and ``show`` are ignored. Beyond that,
if ``None``, no customizable arguments will be passed.
Defaults to ``None``.
%(verbose_meth)s
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
Notes
-----
``timefreqs`` has three different modes: tuples, dicts, and auto.
For (list of) tuple(s) mode, each tuple defines a pair
(time, frequency) in s and Hz on the TFR plot. For example, to
look at 10 Hz activity 1 second into the epoch and 3 Hz activity
300 msec into the epoch, ::
timefreqs=((1, 10), (.3, 3))
If provided as a dictionary, (time, frequency) tuples are keys and
(time_window, frequency_window) tuples are the values - indicating the
width of the windows (centered on the time and frequency indicated by
the key) to be averaged over. For example, ::
timefreqs={(1, 10): (0.1, 2)}
would translate into a window that spans 0.95 to 1.05 seconds, as
well as 9 to 11 Hz. If None, a single topomap will be plotted at the
absolute peak across the time-frequency representation.
.. versionadded:: 0.16.0
""" # noqa: E501
from ..viz.topomap import (_set_contour_locator, plot_topomap,
_get_pos_outlines, _find_topomap_coords)
import matplotlib.pyplot as plt
#####################################
# Handle channels (picks and types) #
#####################################
# it would be nicer to let this happen in self._plot,
# but we need it here to do the loop over the remaining channel
# types in case a user supplies `picks` that pre-select only one
# channel type.
# Nonetheless, it should be refactored for code reuse.
copy = any(var is not None for var in (exclude, picks, baseline))
tfr = _pick_inst(self, picks, exclude, copy=copy)
del picks
ch_types = _get_channel_types(tfr.info, unique=True)
# if multiple sensor types: one plot per channel type, recursive call
if len(ch_types) > 1:
logger.info("Multiple channel types selected, returning one "
"figure per type.")
figs = list()
for this_type in ch_types: # pick corresponding channel type
type_picks = [idx for idx in range(tfr.info['nchan'])
if channel_type(tfr.info, idx) == this_type]
tf_ = _pick_inst(tfr, type_picks, None, copy=True)
if len(_get_channel_types(tf_.info, unique=True)) > 1:
raise RuntimeError(
'Possibly infinite loop due to channel selection '
'problem. This should never happen! Please check '
'your channel types.')
figs.append(
tf_.plot_joint(
timefreqs=timefreqs, picks=None, baseline=baseline,
mode=mode, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
vmin=vmin, vmax=vmax, cmap=cmap, dB=dB,
colorbar=colorbar, show=False, title=title,
yscale=yscale, combine=combine,
exclude=None, topomap_args=topomap_args,
verbose=verbose))
return figs
else:
ch_type = ch_types.pop()
# Handle timefreqs
timefreqs = _get_timefreqs(tfr, timefreqs)
n_timefreqs = len(timefreqs)
if topomap_args is None:
topomap_args = dict()
topomap_args_pass = {k: v for k, v in topomap_args.items() if
k not in ('axes', 'show', 'colorbar')}
topomap_args_pass['outlines'] = topomap_args.get('outlines', 'skirt')
topomap_args_pass["contours"] = topomap_args.get('contours', 6)
topomap_args_pass['ch_type'] = ch_type
##############
# Image plot #
##############
fig, tf_ax, map_ax, cbar_ax = _prepare_joint_axes(n_timefreqs)
cmap = _setup_cmap(cmap)
# image plot
# we also use this to baseline and truncate (times and freqs)
# (a copy of) the instance
if image_args is None:
image_args = dict()
fig = tfr._plot(
picks=None, baseline=baseline, mode=mode, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, vmin=vmin, vmax=vmax, cmap=cmap, dB=dB,
colorbar=False, show=False, title=title, axes=tf_ax,
yscale=yscale, combine=combine, exclude=None, copy=False,
source_plot_joint=True, topomap_args=topomap_args_pass,
ch_type=ch_type, **image_args)
# set and check time and freq limits ...
# can only do this after the tfr plot because it may change these
# parameters
tmax, tmin = tfr.times.max(), tfr.times.min()
fmax, fmin = tfr.freqs.max(), tfr.freqs.min()
for time, freq in timefreqs.keys():
if not (tmin <= time <= tmax):
error_value = "time point (" + str(time) + " s)"
elif not (fmin <= freq <= fmax):
error_value = "frequency (" + str(freq) + " Hz)"
else:
continue
raise ValueError("Requested " + error_value + " exceeds the range"
"of the data. Choose different `timefreqs`.")
############
# Topomaps #
############
titles, all_data, all_pos, vlims = [], [], [], []
# the structure here is a bit complicated to allow aggregating vlims
# over all topomaps. First, one loop over all timefreqs to collect
# vlims. Then, find the max vlims and in a second loop over timefreqs,
# do the actual plotting.
timefreqs_array = np.array([np.array(keys) for keys in timefreqs])
order = timefreqs_array[:, 0].argsort() # sort by time
for ii, (time, freq) in enumerate(timefreqs_array[order]):
avg = timefreqs[(time, freq)]
# set up symmetric windows
time_half_range, freq_half_range = avg / 2.
if time_half_range == 0:
time = tfr.times[np.argmin(np.abs(tfr.times - time))]
if freq_half_range == 0:
freq = tfr.freqs[np.argmin(np.abs(tfr.freqs - freq))]
if (time_half_range == 0) and (freq_half_range == 0):
sub_map_title = '(%.2f s,\n%.1f Hz)' % (time, freq)
else:
sub_map_title = \
'(%.1f \u00B1 %.1f s,\n%.1f \u00B1 %.1f Hz)' % \
(time, time_half_range, freq, freq_half_range)
tmin = time - time_half_range
tmax = time + time_half_range
fmin = freq - freq_half_range
fmax = freq + freq_half_range
data = tfr.data
# merging grads here before rescaling makes ERDs visible
sphere = topomap_args.get('sphere')
if ch_type == 'grad':
picks = _pair_grad_sensors(tfr.info, topomap_coords=False)
pos = _find_topomap_coords(
tfr.info, picks=picks[::2], sphere=sphere)
method = combine or 'rms'
data, _ = _merge_ch_data(data[picks], ch_type, [],
method=method)
del picks, method
else:
pos, _ = _get_pos_outlines(tfr.info, None, sphere)
del sphere
all_pos.append(pos)
data, times, freqs, _, _ = _preproc_tfr(
data, tfr.times, tfr.freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, None, tfr.info['sfreq'])
vlims.append(np.abs(data).max())
titles.append(sub_map_title)
all_data.append(data)
new_t = tfr.times[np.abs(tfr.times - np.median([times])).argmin()]
new_f = tfr.freqs[np.abs(tfr.freqs - np.median([freqs])).argmin()]
timefreqs_array[ii] = (new_t, new_f)
# passing args to the topomap calls
max_lim = max(vlims)
topomap_args_pass["vmin"] = vmin = topomap_args.get('vmin', -max_lim)
topomap_args_pass["vmax"] = vmax = topomap_args.get('vmax', max_lim)
locator, contours = _set_contour_locator(
vmin, vmax, topomap_args_pass["contours"])
topomap_args_pass['contours'] = contours
for ax, title, data, pos in zip(map_ax, titles, all_data, all_pos):
ax.set_title(title)
plot_topomap(data.mean(axis=(-1, -2)), pos,
cmap=cmap[0], axes=ax, show=False,
**topomap_args_pass)
#############
# Finish up #
#############
if colorbar:
from matplotlib import ticker
cbar = plt.colorbar(ax.images[0], cax=cbar_ax)
if locator is None:
locator = ticker.MaxNLocator(nbins=5)
cbar.locator = locator
cbar.update_ticks()
plt.subplots_adjust(left=.12, right=.925, bottom=.14,
top=1. if title is not None else 1.2)
# draw the connection lines between time series and topoplots
lines = [_connection_line(time_, fig, tf_ax, map_ax_, y=freq_,
y_source_transform="transData")
for (time_, freq_), map_ax_ in zip(timefreqs_array, map_ax)]
fig.lines.extend(lines)
plt_show(show)
return fig
    @verbose
    def _onselect(self, eclick, erelease, baseline=None, mode=None,
                  cmap=None, source_plot_joint=False, topomap_args=None,
                  verbose=None):
        """Handle rubber band selector in channel tfr.

        Callback for a matplotlib rectangle selector: ``eclick`` and
        ``erelease`` are the press and release mouse events. Pops up a new
        figure with topomap(s) of the selected time-frequency window.
        """
        from ..viz.topomap import plot_tfr_topomap, plot_topomap, _add_colorbar
        # Ignore tiny drags. NOTE(review): .x/.y are display coordinates
        # (presumably pixels) -- a sub-0.1 extent means no real selection.
        if abs(eclick.x - erelease.x) < .1 or abs(eclick.y - erelease.y) < .1:
            return
        # Data coordinates of the selection: x axis is time (s), y axis is
        # frequency (Hz). Round to damp floating-point jitter.
        tmin = round(min(eclick.xdata, erelease.xdata), 5)  # s
        tmax = round(max(eclick.xdata, erelease.xdata), 5)
        fmin = round(min(eclick.ydata, erelease.ydata), 5)  # Hz
        fmax = round(max(eclick.ydata, erelease.ydata), 5)
        # Snap each edge to the closest sampled time / frequency value.
        tmin = min(self.times, key=lambda x: abs(x - tmin))  # find closest
        tmax = min(self.times, key=lambda x: abs(x - tmax))
        fmin = min(self.freqs, key=lambda x: abs(x - fmin))
        fmax = min(self.freqs, key=lambda x: abs(x - fmax))
        # After snapping, equal edges mean the selection spanned less than
        # one time / frequency step -- nothing meaningful to plot.
        if tmin == tmax or fmin == fmax:
            logger.info('The selected area is too small. '
                        'Select a larger time-frequency window.')
            return
        types = list()
        if 'eeg' in self:
            types.append('eeg')
        if 'mag' in self:
            types.append('mag')
        if 'grad' in self:
            # Gradiometers are displayed as RMS over pairs, so at least one
            # complete pair (2 sensors) must be available.
            if len(_pair_grad_sensors(self.info, topomap_coords=False,
                                      raise_error=False)) >= 2:
                types.append('grad')
            elif len(types) == 0:
                return  # Don't draw a figure for nothing.
        fig = figure_nobar()
        fig.suptitle('{:.2f} s - {:.2f} s, {:.2f} Hz - {:.2f} Hz'.format(
            tmin, tmax, fmin, fmax), y=0.04)
        if source_plot_joint:
            # Selection originated from plot_joint: draw one topomap styled
            # like the joint figure (symmetric color limits, shared cmap).
            ax = fig.add_subplot(111)
            data = _preproc_tfr(
                self.data, self.times, self.freqs, tmin, tmax, fmin, fmax,
                None, None, None, None, None, self.info['sfreq'])[0]
            data = data.mean(-1).mean(-1)  # average over freqs, then times
            vmax = np.abs(data).max()  # symmetric limits around zero
            im, _ = plot_topomap(data, self.info, vmin=-vmax, vmax=vmax,
                                 cmap=cmap[0], axes=ax, show=False,
                                 **topomap_args)
            _add_colorbar(ax, im, cmap, title="AU", pad=.1)
            fig.show()
        else:
            # One topomap per available channel type.
            for idx, ch_type in enumerate(types):
                ax = fig.add_subplot(1, len(types), idx + 1)
                plot_tfr_topomap(self, ch_type=ch_type, tmin=tmin, tmax=tmax,
                                 fmin=fmin, fmax=fmax,
                                 baseline=baseline, mode=mode, cmap=None,
                                 title=ch_type, vmin=None, vmax=None, axes=ax)
@verbose
def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
layout=None, cmap='RdBu_r', title=None, dB=False,
colorbar=True, layout_scale=0.945, show=True,
border='none', fig_facecolor='k', fig_background=None,
font_color='w', yscale='auto', verbose=None):
"""Plot TFRs in a topography with images.
Parameters
----------
%(picks_good_data)s
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
Perform baseline correction by
- subtracting the mean of baseline values ('mean')
- dividing by the mean of baseline values ('ratio')
- dividing by the mean of baseline values and taking the log
('logratio')
- subtracting the mean of baseline values followed by dividing by
the mean of baseline values ('percent')
- subtracting the mean of baseline values and dividing by the
standard deviation of baseline values ('zscore')
- dividing by the mean of baseline values, taking the log, and
dividing by the standard deviation of log baseline values
('zlogratio')
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The minimum value of the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
The maximum value of the color scale. If vmax is None, the data
maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
title : str
Title of the figure.
dB : bool
If True, 10*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
show : bool
Call pyplot.show() at the end.
border : str
Matplotlib borders style to be used for each sensor plot.
fig_facecolor : color
The figure face color. Defaults to black.
fig_background : None | array
A background image for the figure. This must be a valid input to
`matplotlib.pyplot.imshow`. Defaults to None.
font_color : color
The color of tick labels in the colorbar. Defaults to white.
yscale : 'auto' (default) | 'linear' | 'log'
The scale of y (frequency) axis. 'linear' gives linear y axis,
'log' leads to log-spaced y axis and 'auto' detects if frequencies
are log-spaced and only then sets the y axis to 'log'.
%(verbose)s
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
""" # noqa: E501
from ..viz.topo import _imshow_tfr, _plot_topo, _imshow_tfr_unified
from ..viz import add_background_image
times = self.times.copy()
freqs = self.freqs
data = self.data
info = self.info
info, data = _prepare_picks(info, data, picks, axis=0)
del picks
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, dB, info['sfreq'])
if layout is None:
from mne import find_layout
layout = find_layout(self.info)
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode)
click_fun = partial(_imshow_tfr, tfr=data, freq=freqs, yscale=yscale,
cmap=(cmap, True), onselect=onselect_callback)
imshow = partial(_imshow_tfr_unified, tfr=data, freq=freqs, cmap=cmap,
onselect=onselect_callback)
fig = _plot_topo(info=info, times=times, show_func=imshow,
click_func=click_fun, layout=layout,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title, border=border,
x_label='Time (s)', y_label='Frequency (Hz)',
fig_facecolor=fig_facecolor, font_color=font_color,
unified=True, img=True)
add_background_image(fig, fig_background)
plt_show(show)
return fig
@fill_doc
def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean',
vmin=None, vmax=None, cmap=None, sensors=True,
colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head',
contours=6, sphere=None):
"""Plot topographic maps of time-frequency intervals of TFR data.
Parameters
----------
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
Perform baseline correction by
- subtracting the mean of baseline values ('mean')
- dividing by the mean of baseline values ('ratio')
- dividing by the mean of baseline values and taking the log
('logratio')
- subtracting the mean of baseline values followed by dividing by
the mean of baseline values ('percent')
- subtracting the mean of baseline values and dividing by the
standard deviation of baseline values ('zscore')
- dividing by the mean of baseline values, taking the log, and
dividing by the standard deviation of log baseline values
('zlogratio')
vmin : float | callable | None
The value specifying the lower bound of the color range. If None,
and vmax is None, -vmax is used. Else np.min(data) or in case
data contains only positive values 0. If callable, the output
equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None,
the maximum value is used. If callable, the output equals
vmax(data). Defaults to None.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None (default), 'Reds' is used
for all positive data, otherwise defaults to 'RdBu_r'. If
'interactive', translates to (None, True).
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If ``mask`` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Call pyplot.show() at the end.
%(topomap_outlines)s
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be
drawn. When an integer, matplotlib ticker locator is used to find
suitable values for the contour thresholds (may sometimes be
inaccurate, use array for accuracy). If an array, the values
represent the levels for the contours. If colorbar=True, the ticks
in colorbar correspond to the contour levels. Defaults to 6.
%(topomap_sphere_auto)s
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
""" # noqa: E501
from ..viz import plot_tfr_topomap
return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, ch_type=ch_type, baseline=baseline,
mode=mode, vmin=vmin, vmax=vmax,
cmap=cmap, sensors=sensors, colorbar=colorbar,
unit=unit, res=res, size=size,
cbar_fmt=cbar_fmt, show_names=show_names,
title=title, axes=axes, show=show,
outlines=outlines,
contours=contours, sphere=sphere)
def _check_compat(self, tfr):
"""Check that self and tfr have the same time-frequency ranges."""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr): # noqa: D105
"""Add instances."""
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
    def __iadd__(self, tfr):  # noqa: D105
        """Add ``tfr.data`` to this instance's data in place."""
        self._check_compat(tfr)
        # In-place += keeps the existing data buffer (views stay valid).
        self.data += tfr.data
        return self
def __sub__(self, tfr): # noqa: D105
"""Subtract instances."""
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
    def __isub__(self, tfr):  # noqa: D105
        """Subtract ``tfr.data`` from this instance's data in place."""
        self._check_compat(tfr)
        # In-place -= keeps the existing data buffer (views stay valid).
        self.data -= tfr.data
        return self
def __truediv__(self, a): # noqa: D105
"""Divide instances."""
out = self.copy()
out /= a
return out
    def __itruediv__(self, a):  # noqa: D105
        """Divide this instance's data by ``a`` in place."""
        self.data /= a
        return self
def __mul__(self, a):
"""Multiply source instances."""
out = self.copy()
out *= a
return out
    def __imul__(self, a):  # noqa: D105
        """Multiply this instance's data by ``a`` in place."""
        self.data *= a
        return self
def __repr__(self): # noqa: D105
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
s += ', channels : %d' % self.data.shape[0]
s += ', ~%s' % (sizeof_fmt(self._size),)
return "<AverageTFR | %s>" % s
@fill_doc
class EpochsTFR(_BaseTFR, GetEpochsMixin):
    """Container for Time-Frequency data on epochs.

    Can for example store induced power at sensor level.

    Parameters
    ----------
    info : Info
        The measurement info.
    data : ndarray, shape (n_epochs, n_channels, n_freqs, n_times)
        The data.
    times : ndarray, shape (n_times,)
        The time values in seconds.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    comment : str | None, default None
        Comment on the data, e.g., the experimental condition.
    method : str | None, default None
        Comment on the method used to compute the data, e.g., morlet wavelet.
    events : ndarray, shape (n_events, 3) | None
        The events as stored in the Epochs class. If None (default), all event
        values are set to 1 and event time-samples are set to range(n_epochs).
    event_id : dict | None
        Example: dict(auditory=1, visual=3). The keys can be used to access
        associated events. If None, all events will be used and a dict is
        created with string integer names corresponding to the event id
        integers.
    metadata : instance of pandas.DataFrame | None
        A :class:`pandas.DataFrame` containing pertinent information for each
        trial. See :class:`mne.Epochs` for further details.
    %(verbose)s

    Attributes
    ----------
    info : instance of Info
        Measurement info.
    ch_names : list
        The names of the channels.
    data : ndarray, shape (n_epochs, n_channels, n_freqs, n_times)
        The data array.
    times : ndarray, shape (n_times,)
        The time values in seconds.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    comment : string
        Comment on dataset. Can be the condition.
    method : str | None, default None
        Comment on the method used to compute the data, e.g., morlet wavelet.
    events : ndarray, shape (n_events, 3) | None
        Array containing sample information as event_id
    event_id : dict | None
        Names of conditions correspond to event_ids
    metadata : pandas.DataFrame, shape (n_events, n_cols) | None
        DataFrame containing pertinent information for each trial

    Notes
    -----
    .. versionadded:: 0.13.0
    """

    @verbose
    def __init__(self, info, data, times, freqs, comment=None, method=None,
                 events=None, event_id=None, metadata=None, verbose=None):
        # noqa: D102
        self.info = info
        if data.ndim != 4:
            raise ValueError('data should be 4d. Got %d.' % data.ndim)
        n_epochs, n_channels, n_freqs, n_times = data.shape
        # Each data axis (after epochs) must agree with its metadata length.
        for count, expected, noun in (
                (n_channels, len(info['chs']), 'channels'),
                (n_freqs, len(freqs), 'frequencies'),
                (n_times, len(times), 'times')):
            if count != expected:
                raise ValueError("Number of %s and data size don't match"
                                 " (%d != %d)." % (noun, count, expected))
        if events is None:
            # Synthesize trivial events: one per epoch, all with value 1.
            events = _gen_events(len(data))
        event_id = _check_event_id(event_id, events)
        self.data = data
        self.times = np.array(times, dtype=float)
        self.freqs = np.array(freqs, dtype=float)
        self.events = events
        self.event_id = event_id
        self.comment = comment
        self.method = method
        self.preload = True
        self.metadata = metadata

    def __repr__(self):  # noqa: D105
        """Summarize time/freq range, epoch and channel counts, and size."""
        fields = ['time : [%f, %f]' % (self.times[0], self.times[-1]),
                  'freq : [%f, %f]' % (self.freqs[0], self.freqs[-1]),
                  'epochs : %d' % self.data.shape[0],
                  'channels : %d' % self.data.shape[1],
                  '~%s' % (sizeof_fmt(self._size),)]
        return '<EpochsTFR | %s>' % ', '.join(fields)

    def __abs__(self):
        """Take the absolute value."""
        out = self.copy()
        out.data = np.abs(out.data)
        return out

    def average(self):
        """Average the data across epochs.

        Returns
        -------
        ave : instance of AverageTFR
            The averaged data.
        """
        return AverageTFR(info=self.info.copy(),
                          data=np.mean(self.data, axis=0),
                          times=self.times.copy(), freqs=self.freqs.copy(),
                          nave=self.data.shape[0], method=self.method,
                          comment=self.comment)
def combine_tfr(all_tfr, weights='nave'):
    """Merge AverageTFR data by weighted addition.

    Create a new AverageTFR instance, using a combination of the supplied
    instances as its data. By default, the mean (weighted by trials) is used.
    Subtraction can be performed by passing negative weights (e.g., [1, -1]).
    Data must have the same channels and the same time instants.

    Parameters
    ----------
    all_tfr : list of AverageTFR
        The tfr datasets.
    weights : list of float | str
        The weights to apply to the data of each AverageTFR instance.
        Can also be ``'nave'`` to weight according to tfr.nave,
        or ``'equal'`` to use equal weighting (each weighted as ``1/N``).

    Returns
    -------
    tfr : AverageTFR
        The new TFR data.

    Notes
    -----
    .. versionadded:: 0.11.0
    """
    tfr = all_tfr[0].copy()
    if isinstance(weights, str):
        if weights not in ('nave', 'equal'):
            raise ValueError('Weights must be a list of float, or "nave" or '
                             '"equal"')
        if weights == 'nave':
            # Weight proportionally to the number of averaged trials.
            weights = np.array([e.nave for e in all_tfr], float)
            weights /= weights.sum()
        else:  # == 'equal'
            weights = [1. / len(all_tfr)] * len(all_tfr)
    weights = np.array(weights, float)
    if weights.ndim != 1 or weights.size != len(all_tfr):
        raise ValueError('Weights must be the same size as all_tfr')
    ch_names = tfr.ch_names
    for t_ in all_tfr[1:]:
        # Validate compatibility with explicit raises. The original used
        # ``assert cond, ValueError(...)`` which (a) raised AssertionError
        # instead of the intended ValueError and (b) was silently skipped
        # under ``python -O``.
        if t_.ch_names != ch_names:
            raise ValueError("%s and %s do not contain "
                             "the same channels"
                             % (tfr, t_))
        if np.max(np.abs(t_.times - tfr.times)) >= 1e-7:
            raise ValueError("%s and %s do not contain the same time instants"
                             % (tfr, t_))
    # use union of bad channels
    bads = list(set(tfr.info['bads']).union(*(t_.info['bads']
                                              for t_ in all_tfr[1:])))
    tfr.info['bads'] = bads
    # XXX : should be refactored with combined_evoked function
    tfr.data = sum(w * t_.data for w, t_ in zip(weights, all_tfr))
    # Effective nave of the weighted combination; floor at 1.
    tfr.nave = max(int(1. / sum(w ** 2 / e.nave
                                for w, e in zip(weights, all_tfr))), 1)
    return tfr
# Utils
def _get_data(inst, return_itc):
    """Get data from Epochs or Evoked instance as epochs x ch x time."""
    from ..epochs import BaseEpochs
    from ..evoked import Evoked
    if not isinstance(inst, (BaseEpochs, Evoked)):
        raise TypeError('inst must be Epochs or Evoked')
    if isinstance(inst, BaseEpochs):
        return inst.get_data()
    # Evoked: ITC is undefined for a single average, and we add a leading
    # singleton "epochs" axis so the shape matches the Epochs case.
    if return_itc:
        raise ValueError('return_itc must be False for evoked data')
    return inst.data[np.newaxis].copy()
def _prepare_picks(info, data, picks, axis):
    """Restrict ``info`` and ``data`` (along ``axis``) to the given picks."""
    sel = _picks_to_idx(info, picks, exclude='bads')
    # Build an indexer that slices only the channel axis.
    indexer = [slice(None)] * data.ndim
    indexer[axis] = sel
    return pick_info(info, sel), data[tuple(indexer)]
def _centered(arr, newsize):
"""Aux Function to center data."""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
                 baseline, vmin, vmax, dB, sfreq, copy=None):
    """Aux Function to prepare tfr computation."""
    if copy is None:
        # Only copy when baselining would otherwise mutate the caller's data.
        copy = baseline is not None
    data = rescale(data, times, baseline, mode, copy=copy)

    def _crop_axis(values, lo, hi):
        # Return cropped values plus the slice bounds; a bound stays None
        # when the corresponding limit was not requested.
        keep = np.where(_time_mask(values, lo, hi, sfreq=sfreq))[0]
        start = keep[0] if lo is not None else None
        stop = keep[-1] + 1 if hi is not None else None
        return values[start:stop], start, stop

    times, itmin, itmax = _crop_axis(times, tmin, tmax)
    freqs, ifmin, ifmax = _crop_axis(freqs, fmin, fmax)
    data = data[:, ifmin:ifmax, itmin:itmax]
    if dB:
        # Power in dB; conj() keeps this correct for complex input.
        data = 10 * np.log10((data * data.conj()).real)
    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
    return data, times, freqs, vmin, vmax
def _check_decim(decim):
    """Aux function checking the decim parameter."""
    _validate_type(decim, ('int-like', slice), 'decim')
    if not isinstance(decim, slice):
        # An integer n means "every n-th sample".
        return slice(None, None, int(decim))
    # Normalize a step of None to 1 so `decim.step` is always usable.
    if decim.step is None:
        return slice(decim.start, decim.stop, 1)
    return decim
# i/o
def write_tfrs(fname, tfr, overwrite=False):
    """Write a TFR dataset to hdf5.

    Parameters
    ----------
    fname : str
        The file name, which should end with ``-tfr.h5``.
    tfr : AverageTFR instance, or list of AverageTFR instances
        The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is not None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed.
    overwrite : bool
        If True, overwrite file (if it exists). Defaults to False.

    See Also
    --------
    read_tfrs

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    tfrs = tfr if isinstance(tfr, (list, tuple)) else [tfr]
    # Unnamed datasets get their list position as condition name.
    payload = [_prepare_write_tfr(t_, condition=idx if t_.comment is None
                                  else t_.comment)
               for idx, t_ in enumerate(tfrs)]
    write_hdf5(fname, payload, overwrite=overwrite, title='mnepython',
               slash='replace')
def _prepare_write_tfr(tfr, condition):
"""Aux function."""
attributes = dict(times=tfr.times, freqs=tfr.freqs, data=tfr.data,
info=tfr.info, comment=tfr.comment, method=tfr.method)
if hasattr(tfr, 'nave'): # if AverageTFR
attributes['nave'] = tfr.nave
elif hasattr(tfr, 'events'): # if EpochsTFR
attributes['events'] = tfr.events
attributes['event_id'] = tfr.event_id
attributes['metadata'] = _prepare_write_metadata(tfr.metadata)
return condition, attributes
def read_tfrs(fname, condition=None):
    """Read TFR datasets from hdf5 file.

    Parameters
    ----------
    fname : str
        The file name, which should end with -tfr.h5 .
    condition : int or str | list of int or str | None
        The condition to load. If None, all conditions will be returned.
        Defaults to None.

    Returns
    -------
    tfrs : list of instances of AverageTFR | instance of AverageTFR
        Depending on ``condition`` either the TFR object or a list of multiple
        TFR objects.

    See Also
    --------
    write_tfrs

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    check_fname(fname, 'tfr', ('-tfr.h5', '_tfr.h5'))
    logger.info('Reading %s ...' % fname)
    tfr_data = read_hdf5(fname, title='mnepython', slash='replace')
    for k, tfr in tfr_data:
        # Rehydrate the measurement info and any epochs metadata.
        tfr['info'] = Info(tfr['info'])
        tfr['info']._check_consistency()
        if 'metadata' in tfr:
            tfr['metadata'] = _prepare_read_metadata(tfr['metadata'])
    # NOTE: inspects the last dataset from the loop above -- presumably all
    # datasets in one file share the same type (AverageTFR vs EpochsTFR).
    is_average = 'nave' in tfr
    if condition is not None:
        if not is_average:
            raise NotImplementedError('condition not supported when reading '
                                      'EpochsTFR.')
        tfr_dict = dict(tfr_data)
        if condition not in tfr_dict:
            keys = ['%s' % k for k in tfr_dict]
            # Fixed message: the original had a stray doubled quote
            # ('... contains "{}""').
            raise ValueError('Cannot find condition ("{}") in this file. '
                             'The file contains "{}"'
                             .format(condition, " or ".join(keys)))
        out = AverageTFR(**tfr_dict[condition])
    else:
        inst = AverageTFR if is_average else EpochsTFR
        out = [inst(**d) for d in list(zip(*tfr_data))[1]]
    return out
def _get_timefreqs(tfr, timefreqs):
"""Find and/or setup timefreqs for `tfr.plot_joint`."""
# Input check
timefreq_error_msg = (
"Supplied `timefreqs` are somehow malformed. Please supply None, "
"a list of tuple pairs, or a dict of such tuple pairs, not: ")
if isinstance(timefreqs, dict):
for k, v in timefreqs.items():
for item in (k, v):
if len(item) != 2 or any((not _is_numeric(n) for n in item)):
raise ValueError(timefreq_error_msg, item)
elif timefreqs is not None:
if not hasattr(timefreqs, "__len__"):
raise ValueError(timefreq_error_msg, timefreqs)
if len(timefreqs) == 2 and all((_is_numeric(v) for v in timefreqs)):
timefreqs = [tuple(timefreqs)] # stick a pair of numbers in a list
else:
for item in timefreqs:
if (hasattr(item, "__len__") and len(item) == 2 and
all((_is_numeric(n) for n in item))):
pass
else:
raise ValueError(timefreq_error_msg, item)
# If None, automatic identification of max peak
else:
from scipy.signal import argrelmax
order = max((1, tfr.data.shape[2] // 30))
peaks_idx = argrelmax(tfr.data, order=order, axis=2)
if peaks_idx[0].size == 0:
_, p_t, p_f = np.unravel_index(tfr.data.argmax(), tfr.data.shape)
timefreqs = [(tfr.times[p_t], tfr.freqs[p_f])]
else:
peaks = [tfr.data[0, f, t] for f, t in
zip(peaks_idx[1], peaks_idx[2])]
peakmax_idx = np.argmax(peaks)
peakmax_time = tfr.times[peaks_idx[2][peakmax_idx]]
peakmax_freq = tfr.freqs[peaks_idx[1][peakmax_idx]]
timefreqs = [(peakmax_time, peakmax_freq)]
timefreqs = {
tuple(k): np.asarray(timefreqs[k]) if isinstance(timefreqs, dict)
else np.array([0, 0]) for k in timefreqs}
return timefreqs
def _preproc_tfr_instance(tfr, picks, tmin, tmax, fmin, fmax, vmin, vmax, dB,
                          mode, baseline, exclude, copy=True):
    """Baseline and truncate (times and freqs) a TFR instance."""
    out = tfr.copy() if copy else tfr
    # With explicit picks, `exclude` is ignored (picks already select).
    exclude = None if picks is None else exclude
    sel = _picks_to_idx(out.info, picks, exclude='bads')
    out.pick_channels([out.info['ch_names'][idx] for idx in sel])
    if exclude == 'bads':
        exclude = [ch for ch in out.info['bads']
                   if ch in out.info['ch_names']]
    if exclude is not None:
        out.drop_channels(exclude)
    data, times, freqs, _, _ = _preproc_tfr(
        out.data, out.times, out.freqs, tmin, tmax, fmin, fmax, mode,
        baseline, vmin, vmax, dB, out.info['sfreq'], copy=False)
    out.times = times
    out.freqs = freqs
    out.data = data
    return out
| {
"repo_name": "Eric89GXL/mne-python",
"path": "mne/time_frequency/tfr.py",
"copies": "2",
"size": "96249",
"license": "bsd-3-clause",
"hash": 2373064867991649300,
"line_mean": 38.6412685338,
"line_max": 81,
"alpha_frac": 0.5660422446,
"autogenerated": false,
"ratio": 3.8558208476884865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 2428
} |
"""A module which implements the time-frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
# Clement Moutard <clement.moutard@polytechnique.org>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License : BSD (3-clause)
from copy import deepcopy
from functools import partial
from math import sqrt
from warnings import warn
import numpy as np
from scipy import linalg
from scipy.fftpack import fft, ifft
from ..baseline import rescale
from ..parallel import parallel_func
from ..utils import logger, verbose, _time_mask, check_fname, sizeof_fmt
from ..channels.channels import ContainsMixin, UpdateChannelsMixin
from ..channels.layout import _pair_grad_sensors
from ..io.pick import (pick_info, _pick_data_channels,
channel_type, _pick_inst, _get_channel_types)
from ..io.meas_info import Info
from ..utils import SizeMixin, _is_numeric
from .multitaper import dpss_windows
from ..viz.utils import (figure_nobar, plt_show, _setup_cmap,
_connection_line, _prepare_joint_axes,
_setup_vmin_vmax, _set_title_multiple_electrodes)
from ..externals.h5io import write_hdf5, read_hdf5
from ..externals.six import string_types
# Make wavelet
def morlet(sfreq, freqs, n_cycles=7.0, sigma=None, zero_mean=False):
    """Compute Morlet wavelets for the given frequency range.

    Parameters
    ----------
    sfreq : float
        The sampling Frequency.
    freqs : array
        frequency range of interest (1 x Frequencies)
    n_cycles : float | array of float, defaults to 7.0
        Number of cycles. Fixed number or one per frequency.
    sigma : float, defaults to None
        Fixed temporal width of the wavelet, giving a short-time-Fourier-like
        behavior where the number of oscillations grows with frequency.
        If None, the width adapts to each frequency as in a standard wavelet
        transform, so higher frequencies get shorter wavelets.
    zero_mean : bool, defaults to False
        Make sure the wavelet has a mean of zero.

    Returns
    -------
    Ws : list of array
        The wavelets time series.
    """
    n_cycles = np.atleast_1d(n_cycles)
    if n_cycles.size not in (1, len(freqs)):
        raise ValueError("n_cycles should be fixed or defined for "
                         "each frequency.")
    Ws = list()
    for k, f in enumerate(freqs):
        this_n_cycles = n_cycles[k] if n_cycles.size > 1 else n_cycles[0]
        # Temporal standard deviation: frequency-adaptive unless a fixed
        # sigma was requested.
        width_freq = f if sigma is None else sigma
        sigma_t = this_n_cycles / (2.0 * np.pi * width_freq)
        # Symmetric support out to 5 standard deviations on each side.
        t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
        t = np.r_[-t[::-1], t[1:]]
        carrier = np.exp(2.0 * 1j * np.pi * f * t)
        envelope = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
        if zero_mean:
            # Remove the DC offset so the wavelet integrates to ~zero.
            carrier = carrier - np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
        W = carrier * envelope
        # Scale so that ||W||_2 == sqrt(2); this scaling factor is
        # proportional to (sigma_t * sqrt(pi)) ** (-1/2) (Tallon-Baudry 98).
        W /= sqrt(0.5) * linalg.norm(W.ravel())
        Ws.append(W)
    return Ws
def _make_dpss(sfreq, freqs, n_cycles=7., time_bandwidth=4.0, zero_mean=False):
    """Compute DPSS tapers for the given frequency range.

    Parameters
    ----------
    sfreq : float
        The sampling frequency.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    n_cycles : float | ndarray, shape (n_freqs,), defaults to 7.
        The number of cycles globally or for each frequency.
    time_bandwidth : float, defaults to 4.0
        Time x Bandwidth product.
        The number of good tapers (low-bias) is chosen automatically based on
        this to equal floor(time_bandwidth - 1).
        Default is 4.0, giving 3 good tapers.
    zero_mean : bool | None, defaults to False
        Make sure the wavelet has a mean of zero.

    Returns
    -------
    Ws : list of list of array, shape (n_tapers, n_freqs)
        The wavelets time series, one list of per-frequency wavelets per
        taper.
    """
    if time_bandwidth < 2.0:
        raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
    n_taps = int(np.floor(time_bandwidth - 1))
    n_cycles = np.atleast_1d(n_cycles)
    if n_cycles.size != 1 and n_cycles.size != len(freqs):
        raise ValueError("n_cycles should be fixed or defined for "
                         "each frequency.")
    # The oscillation and the DPSS tapers only depend on the frequency, not
    # on the taper index, so compute them once per frequency instead of
    # recomputing them for every taper (the original code called
    # dpss_windows n_taps times per frequency with identical arguments).
    oscillations = list()
    tapers_per_freq = list()
    for k, f in enumerate(freqs):
        this_n_cycles = n_cycles[k] if n_cycles.size != 1 else n_cycles[0]
        t_win = this_n_cycles / float(f)
        t = np.arange(0., t_win, 1.0 / sfreq)
        # Center the oscillation within its window before tapering.
        oscillations.append(np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.)))
        tapers, _ = dpss_windows(t.shape[0], time_bandwidth / 2., n_taps)
        tapers_per_freq.append(tapers)
    Ws = list()
    for m in range(n_taps):
        Wm = list()
        for oscillation, tapers in zip(oscillations, tapers_per_freq):
            Wk = oscillation * tapers[m]
            if zero_mean:  # to make it zero mean
                Wk -= Wk.mean()
            # Same normalization as in `morlet`: ||Wk||_2 == sqrt(2).
            Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
            Wm.append(Wk)
        Ws.append(Wm)
    return Ws
# Low level convolution
def _cwt(X, Ws, mode="same", decim=1, use_fft=True):
    """Compute cwt with fft based convolutions or temporal convolutions.

    This is a generator: it yields one (n_freqs, n_times_out) array per
    signal in ``X``.

    .. warning:: The *same* buffer is yielded on every iteration and is
       overwritten for the next signal, so consumers must copy the yielded
       array (e.g. ``out[k] = tfr``) before advancing the generator.

    Parameters
    ----------
    X : array of shape (n_signals, n_times)
        The data.
    Ws : list of array
        Wavelets time series.
    mode : {'full', 'valid', 'same'}
        See numpy.convolve.
    decim : int | slice, defaults to 1
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].
        .. note:: Decimation may create aliasing artifacts.
    use_fft : bool, defaults to True
        Use the FFT for convolutions or not.

    Returns
    -------
    out : array, shape (n_signals, n_freqs, n_time_decim)
        The time-frequency transform of the signals.
    """
    if mode not in ['same', 'valid', 'full']:
        raise ValueError("`mode` must be 'same', 'valid' or 'full', "
                         "got %s instead." % mode)
    decim = _check_decim(decim)  # normalize int -> slice
    X = np.asarray(X)
    # Precompute wavelets for given frequency range to save time
    n_signals, n_times = X.shape
    n_times_out = X[:, decim].shape[1]  # samples kept after decimation
    n_freqs = len(Ws)
    # The FFT length must cover the longest full linear convolution.
    Ws_max_size = max(W.size for W in Ws)
    size = n_times + Ws_max_size - 1
    # Always use 2**n-sized FFT
    fsize = 2 ** int(np.ceil(np.log2(size)))
    # precompute FFTs of Ws
    if use_fft:
        fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
    warn_me = True
    for i, W in enumerate(Ws):
        if use_fft:
            fft_Ws[i] = fft(W, fsize)
        if len(W) > n_times and warn_me:
            # A wavelet longer than the signal is tolerated (with a single
            # warning) for FFT convolutions, but fatal for np.convolve.
            msg = ('At least one of the wavelets is longer than the signal. '
                   'Consider padding the signal or using shorter wavelets.')
            if use_fft:
                warn(msg)
                warn_me = False  # Suppress further warnings
            else:
                raise ValueError(msg)
    # Make generator looping across signals
    # NOTE: one shared output buffer, overwritten for each signal (see the
    # docstring warning above).
    tfr = np.zeros((n_freqs, n_times_out), dtype=np.complex128)
    for x in X:
        if use_fft:
            fft_x = fft(x, fsize)
        # Loop across wavelets
        for ii, W in enumerate(Ws):
            if use_fft:
                # Multiply spectra, then keep only the linear-convolution
                # part of the circular result.
                ret = ifft(fft_x * fft_Ws[ii])[:n_times + W.size - 1]
            else:
                ret = np.convolve(x, W, mode=mode)
            # Center and decimate decomposition
            if mode == 'valid':
                # Keep only fully-overlapping samples, centered in the
                # output buffer; indices are mapped through the decimation.
                sz = int(abs(W.size - n_times)) + 1
                offset = (n_times - sz) // 2
                this_slice = slice(offset // decim.step,
                                   (offset + sz) // decim.step)
                if use_fft:
                    ret = _centered(ret, sz)
                tfr[ii, this_slice] = ret[decim]
            elif mode == 'full' and not use_fft:
                # np.convolve returned the full convolution; trim it back to
                # the centered, signal-length ('same'-sized) portion.
                start = (W.size - 1) // 2
                end = len(ret) - (W.size // 2)
                ret = ret[start:end]
                tfr[ii, :] = ret[decim]
            else:
                if use_fft:
                    ret = _centered(ret, n_times)
                tfr[ii, :] = ret[decim]
        yield tfr
# Loop of convolution: single trial
def _compute_tfr(epoch_data, freqs, sfreq=1.0, method='morlet',
                 n_cycles=7.0, zero_mean=None, time_bandwidth=None,
                 use_fft=True, decim=1, output='complex', n_jobs=1,
                 verbose=None):
    """Compute time-frequency transforms.

    Parameters
    ----------
    epoch_data : array of shape (n_epochs, n_channels, n_times)
        The epochs.
    freqs : array-like of floats, shape (n_freqs)
        The frequencies.
    sfreq : float | int, defaults to 1.0
        Sampling frequency of the data.
    method : 'multitaper' | 'morlet', defaults to 'morlet'
        The time-frequency method. 'morlet' convolves a Morlet wavelet.
        'multitaper' uses Morlet wavelets windowed with multiple DPSS
        multitapers.
    n_cycles : float | array of float, defaults to 7.0
        Number of cycles in the Morlet wavelet. Fixed number
        or one per frequency.
    zero_mean : bool | None, defaults to None
        None means True for method='multitaper' and False for method='morlet'.
        If True, make sure the wavelets have a mean of zero.
    time_bandwidth : float, defaults to None
        If None and method=multitaper, will be set to 4.0 (3 tapers).
        Time x (Full) Bandwidth product. Only applies if
        method == 'multitaper'. The number of good tapers (low-bias) is
        chosen automatically based on this to equal floor(time_bandwidth - 1).
    use_fft : bool, defaults to True
        Use the FFT for convolutions or not.
    decim : int | slice, defaults to 1
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].
        .. note::
            Decimation may create aliasing artifacts, yet decimation
            is done after the convolutions.
    output : str, defaults to 'complex'
        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    n_jobs : int, defaults to 1
        The number of epochs to process at the same time. The parallelization
        is implemented across channels.
    verbose : bool, str, int, or None, defaults to None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    out : array
        Time frequency transform of epoch_data. If output is in ['complex',
        'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
        n_times), else it is (n_chans, n_freqs, n_times). If output is
        'avg_power_itc', the real values code for 'avg_power' and the
        imaginary values code for the 'itc': out = avg_power + i * itc
    """
    # Check data
    epoch_data = np.asarray(epoch_data)
    if epoch_data.ndim != 3:
        raise ValueError('epoch_data must be of shape '
                         '(n_epochs, n_chans, n_times)')
    # Check params
    freqs, sfreq, zero_mean, n_cycles, time_bandwidth, decim = \
        _check_tfr_param(freqs, sfreq, method, zero_mean, n_cycles,
                         time_bandwidth, use_fft, decim, output)
    # Setup wavelet
    if method == 'morlet':
        W = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
        Ws = [W]  # to have same dimensionality as the 'multitaper' case
    elif method == 'multitaper':
        Ws = _make_dpss(sfreq, freqs, n_cycles=n_cycles,
                        time_bandwidth=time_bandwidth, zero_mean=zero_mean)
    # Check wavelets
    if len(Ws[0][0]) > epoch_data.shape[2]:
        raise ValueError('At least one of the wavelets is longer than the '
                         'signal. Use a longer signal or shorter wavelets.')
    # Initialize output
    decim = _check_decim(decim)
    n_freqs = len(freqs)
    n_epochs, n_chans, n_times = epoch_data[:, :, decim].shape
    # Use explicit fixed-width dtypes: the `np.float` / `np.complex` builtin
    # aliases were deprecated in NumPy 1.20 and removed in NumPy 1.24.
    # `output` has already been validated by _check_tfr_param, so exactly one
    # of these two branches is taken.
    if output in ('power', 'phase', 'avg_power', 'itc'):
        dtype = np.float64
    elif output in ('complex', 'avg_power_itc'):
        # avg_power_itc is stored as power + 1i * itc to keep a
        # simple dimensionality
        dtype = np.complex128
    if ('avg_' in output) or ('itc' in output):
        out = np.empty((n_chans, n_freqs, n_times), dtype)
    else:
        out = np.empty((n_chans, n_epochs, n_freqs, n_times), dtype)
    # Parallel computation
    parallel, my_cwt, _ = parallel_func(_time_frequency_loop, n_jobs)
    # Parallelization is applied across channels.
    tfrs = parallel(
        my_cwt(channel, Ws, output, use_fft, 'same', decim)
        for channel in epoch_data.transpose(1, 0, 2))
    # FIXME: to avoid overheads we should use np.array_split()
    for channel_idx, tfr in enumerate(tfrs):
        out[channel_idx] = tfr
    if ('avg_' not in output) and ('itc' not in output):
        # This is to enforce that the first dimension is for epochs
        out = out.transpose(1, 0, 2, 3)
    return out
def _check_tfr_param(freqs, sfreq, method, zero_mean, n_cycles,
time_bandwidth, use_fft, decim, output):
"""Aux. function to _compute_tfr to check the params validity."""
# Check freqs
if not isinstance(freqs, (list, np.ndarray)):
raise ValueError('freqs must be an array-like, got %s '
'instead.' % type(freqs))
freqs = np.asarray(freqs, dtype=float)
if freqs.ndim != 1:
raise ValueError('freqs must be of shape (n_freqs,), got %s '
'instead.' % np.array(freqs.shape))
# Check sfreq
if not isinstance(sfreq, (float, int)):
raise ValueError('sfreq must be a float or an int, got %s '
'instead.' % type(sfreq))
sfreq = float(sfreq)
# Default zero_mean = True if multitaper else False
zero_mean = method == 'multitaper' if zero_mean is None else zero_mean
if not isinstance(zero_mean, bool):
raise ValueError('zero_mean should be of type bool, got %s. instead'
% type(zero_mean))
freqs = np.asarray(freqs)
if (method == 'multitaper') and (output == 'phase'):
raise NotImplementedError(
'This function is not optimized to compute the phase using the '
'multitaper method. Use np.angle of the complex output instead.')
# Check n_cycles
if isinstance(n_cycles, (int, float)):
n_cycles = float(n_cycles)
elif isinstance(n_cycles, (list, np.ndarray)):
n_cycles = np.array(n_cycles)
if len(n_cycles) != len(freqs):
raise ValueError('n_cycles must be a float or an array of length '
'%i frequencies, got %i cycles instead.' %
(len(freqs), len(n_cycles)))
else:
raise ValueError('n_cycles must be a float or an array, got %s '
'instead.' % type(n_cycles))
# Check time_bandwidth
if (method == 'morlet') and (time_bandwidth is not None):
raise ValueError('time_bandwidth only applies to "multitaper" method.')
elif method == 'multitaper':
time_bandwidth = (4.0 if time_bandwidth is None
else float(time_bandwidth))
# Check use_fft
if not isinstance(use_fft, bool):
raise ValueError('use_fft must be a boolean, got %s '
'instead.' % type(use_fft))
# Check decim
if isinstance(decim, int):
decim = slice(None, None, decim)
if not isinstance(decim, slice):
raise ValueError('decim must be an integer or a slice, '
'got %s instead.' % type(decim))
# Check output
allowed_ouput = ('complex', 'power', 'phase',
'avg_power_itc', 'avg_power', 'itc')
if output not in allowed_ouput:
raise ValueError("Unknown output type. Allowed are %s but "
"got %s." % (allowed_ouput, output))
if method not in ('multitaper', 'morlet'):
raise ValueError('method must be "morlet" or "multitaper", got %s '
'instead.' % type(method))
return freqs, sfreq, zero_mean, n_cycles, time_bandwidth, decim
def _time_frequency_loop(X, Ws, output, use_fft, mode, decim):
    """Aux. function to _compute_tfr.

    Loops time-frequency transform across wavelets and epochs.

    Parameters
    ----------
    X : array, shape (n_epochs, n_times)
        The epochs data of a single channel.
    Ws : list, shape (n_tapers, n_wavelets, n_times)
        The wavelets.
    output : str
        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    use_fft : bool
        Use the FFT for convolutions or not.
    mode : {'full', 'valid', 'same'}
        See numpy.convolve.
    decim : slice
        The decimation slice: e.g. power[:, decim]
    """
    # Set output type. Use explicit fixed-width dtypes: the `np.float` /
    # `np.complex` aliases were deprecated in NumPy 1.20 and removed in 1.24.
    dtype = np.float64
    if output in ['complex', 'avg_power_itc']:
        dtype = np.complex128
    # Init outputs
    decim = _check_decim(decim)
    n_epochs, n_times = X[:, decim].shape
    n_freqs = len(Ws[0])
    # Averaged metrics accumulate over epochs into a 2D array; per-trial
    # outputs keep the epochs dimension.
    if ('avg_' in output) or ('itc' in output):
        tfrs = np.zeros((n_freqs, n_times), dtype=dtype)
    else:
        tfrs = np.zeros((n_epochs, n_freqs, n_times), dtype=dtype)
    # Loops across tapers.
    for W in Ws:
        coefs = _cwt(X, W, mode, decim=decim, use_fft=use_fft)
        # Inter-trial phase locking is apparently computed per taper...
        if 'itc' in output:
            plf = np.zeros((n_freqs, n_times), dtype=np.complex128)
        # Loop across epochs
        for epoch_idx, tfr in enumerate(coefs):
            # Transform complex values
            if output in ['power', 'avg_power']:
                tfr = (tfr * tfr.conj()).real  # power
            elif output == 'phase':
                tfr = np.angle(tfr)
            elif output == 'avg_power_itc':
                tfr_abs = np.abs(tfr)
                plf += tfr / tfr_abs  # phase
                tfr = tfr_abs ** 2  # power
            elif output == 'itc':
                plf += tfr / np.abs(tfr)  # phase
                continue  # not need to stack anything else than plf
            # Stack or add
            if ('avg_' in output) or ('itc' in output):
                tfrs += tfr
            else:
                tfrs[epoch_idx] += tfr
        # Compute inter trial coherence
        if output == 'avg_power_itc':
            tfrs += 1j * np.abs(plf)
        elif output == 'itc':
            tfrs += np.abs(plf)
    # Normalization of average metrics
    if ('avg_' in output) or ('itc' in output):
        tfrs /= n_epochs
    # Normalization by number of taper
    tfrs /= len(Ws)
    return tfrs
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
    """Compute time freq decomposition with continuous wavelet transform.

    Parameters
    ----------
    X : array, shape (n_signals, n_times)
        The signals.
    Ws : list of array
        Wavelets time series.
    use_fft : bool
        Use FFT for convolutions. Defaults to True.
    mode : 'same' | 'valid' | 'full'
        Convention for convolution. 'full' is currently not implemented with
        `use_fft=False`. Defaults to 'same'.
    decim : int | slice
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].
        .. note:: Decimation may create aliasing artifacts.
        Defaults to 1.

    Returns
    -------
    tfr : array, shape (n_signals, n_freqs, n_times)
        The time-frequency decompositions.

    See Also
    --------
    mne.time_frequency.tfr_morlet : Compute time-frequency decomposition
                                    with Morlet wavelets
    """
    decim = _check_decim(decim)
    n_signals, n_times = X[:, decim].shape
    coefs = _cwt(X, Ws, mode, decim=decim, use_fft=use_fft)
    # `np.complex` was deprecated in NumPy 1.20 and removed in 1.24; use the
    # explicit fixed-width dtype instead.
    tfrs = np.empty((n_signals, len(Ws), n_times), dtype=np.complex128)
    # The _cwt generator reuses a single buffer, so each yielded array must
    # be copied into the output before the next iteration.
    for k, tfr in enumerate(coefs):
        tfrs[k] = tfr
    return tfrs
def _tfr_aux(method, inst, freqs, decim, return_itc, picks, average,
             output=None, **tfr_params):
    """Shared implementation behind tfr_morlet and tfr_multitaper."""
    decim = _check_decim(decim)
    data = _get_data(inst, return_itc)
    info = inst.info
    info, data, picks = _prepare_picks(info, data, picks)
    data = data[:, picks, :]
    # Resolve the low-level `output` string from the high-level options.
    if average:
        if output == 'complex':
            raise ValueError('output must be "power" if average=True')
        output = 'avg_power_itc' if return_itc else 'avg_power'
    else:
        if output is None:
            output = 'power'
        if return_itc:
            raise ValueError('Inter-trial coherence is not supported'
                             ' with average=False')
    out = _compute_tfr(data, freqs, info['sfreq'], method=method,
                       output=output, decim=decim, **tfr_params)
    times = inst.times[decim].copy()
    if not average:
        return EpochsTFR(info, out, times, freqs, method='%s-power' % method)
    nave = len(data)
    if return_itc:
        # Power and ITC were packed as real and imaginary parts.
        power, itc = out.real, out.imag
        avg = AverageTFR(info, power, times, freqs, nave,
                         method='%s-power' % method)
        return (avg, AverageTFR(info, itc, times, freqs, nave,
                                method='%s-itc' % method))
    return AverageTFR(info, out, times, freqs, nave,
                      method='%s-power' % method)
@verbose
def tfr_morlet(inst, freqs, n_cycles, use_fft=False, return_itc=True, decim=1,
               n_jobs=1, picks=None, zero_mean=True, average=True,
               output='power', verbose=None):
    """Compute Time-Frequency Representation (TFR) using Morlet wavelets.

    Parameters
    ----------
    inst : Epochs | Evoked
        The epochs or evoked object.
    freqs : ndarray, shape (n_freqs,)
        The frequencies of interest in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        The number of cycles, globally or for each frequency.
    use_fft : bool, defaults to False
        Whether to run the convolutions through the FFT.
    return_itc : bool, defaults to True
        Whether to also return the inter-trial coherence (ITC).
        Must be ``False`` for evoked data.
    decim : int | slice, defaults to 1
        Decimation factor (or slice) applied after the time-frequency
        decomposition to reduce memory usage.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].
        .. note:: Decimation may create aliasing artifacts.
    n_jobs : int, defaults to 1
        The number of jobs to run in parallel.
    picks : array-like of int | None, defaults to None
        The indices of the channels to decompose. If None, all available
        good data channels are decomposed.
    zero_mean : bool, defaults to True
        Make sure the wavelet has a mean of zero.
        .. versionadded:: 0.13.0
    average : bool, defaults to True
        If True average across Epochs.
        .. versionadded:: 0.13.0
    output : str
        Can be "power" (default) or "complex". If "complex", then
        average must be False.
        .. versionadded:: 0.15.0
    verbose : bool, str, int, or None, defaults to None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    power : AverageTFR | EpochsTFR
        The averaged or single-trial power.
    itc : AverageTFR | EpochsTFR
        The inter-trial coherence (ITC). Only returned if return_itc
        is True.

    See Also
    --------
    mne.time_frequency.tfr_array_morlet
    mne.time_frequency.tfr_multitaper
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell
    """
    # Delegate to the shared helper with the method pinned to 'morlet'.
    return _tfr_aux('morlet', inst, freqs, decim, return_itc, picks, average,
                    n_cycles=n_cycles, n_jobs=n_jobs, use_fft=use_fft,
                    zero_mean=zero_mean, output=output)
@verbose
def tfr_array_morlet(epoch_data, sfreq, freqs, n_cycles=7.0,
                     zero_mean=False, use_fft=True, decim=1, output='complex',
                     n_jobs=1, verbose=None):
    """Compute a time-frequency transform of epoch arrays via Morlet wavelets.

    Convolves epoch data with selected Morlet wavelets.

    Parameters
    ----------
    epoch_data : array of shape (n_epochs, n_channels, n_times)
        The epochs.
    sfreq : float | int
        Sampling frequency of the data.
    freqs : array-like of floats, shape (n_freqs)
        The frequencies.
    n_cycles : float | array of float, defaults to 7.0
        Number of cycles in the Morlet wavelet. Fixed number or one per
        frequency.
    zero_mean : bool, defaults to False
        If True, make sure the wavelets have a mean of zero.
    use_fft : bool, defaults to True
        Use the FFT for convolutions or not.
    decim : int | slice, defaults to 1
        Decimation factor (or slice) applied after the time-frequency
        decomposition to reduce memory usage.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].
        .. note::
            Decimation may create aliasing artifacts, yet decimation
            is done after the convolutions.
    output : str, defaults to 'complex'
        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    n_jobs : int, defaults to 1
        The number of epochs to process at the same time. The parallelization
        is implemented across channels.
    verbose : bool, str, int, or None, defaults to None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    out : array
        Time frequency transform of epoch_data. If output is in ['complex',
        'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
        n_times), else it is (n_chans, n_freqs, n_times). If output is
        'avg_power_itc', the real values code for 'avg_power' and the
        imaginary values code for the 'itc': out = avg_power + i * itc

    See Also
    --------
    mne.time_frequency.tfr_morlet
    mne.time_frequency.tfr_multitaper
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell

    Notes
    -----
    .. versionadded:: 0.14.0
    """
    # Thin wrapper over the generic machinery: the method is pinned to
    # 'morlet', hence no time_bandwidth.
    return _compute_tfr(epoch_data=epoch_data, freqs=freqs, sfreq=sfreq,
                        method='morlet', n_cycles=n_cycles,
                        zero_mean=zero_mean, time_bandwidth=None,
                        use_fft=use_fft, decim=decim, output=output,
                        n_jobs=n_jobs, verbose=verbose)
@verbose
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0,
                   use_fft=True, return_itc=True, decim=1,
                   n_jobs=1, picks=None, average=True, verbose=None):
    """Compute Time-Frequency Representation (TFR) using DPSS tapers.

    Parameters
    ----------
    inst : Epochs | Evoked
        The epochs or evoked object.
    freqs : ndarray, shape (n_freqs,)
        The frequencies of interest in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        The number of cycles, globally or for each frequency.
        The time-window length is thus T = n_cycles / freq.
    time_bandwidth : float, (optional), defaults to 4.0 (3 good tapers).
        Time x (Full) Bandwidth product. Should be >= 2.0.
        Choose this along with n_cycles to get desired frequency resolution.
        The number of good tapers (least leakage from far away frequencies)
        is chosen automatically based on this to floor(time_bandwidth - 1).
        E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
        If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
    use_fft : bool, defaults to True
        Whether to run the convolutions through the FFT.
    return_itc : bool, defaults to True
        Whether to also return the inter-trial coherence (ITC) alongside the
        averaged (or single-trial) power.
    decim : int | slice, defaults to 1
        Decimation factor (or slice) applied after the time-frequency
        decomposition to reduce memory usage.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].
        .. note:: Decimation may create aliasing artifacts.
    n_jobs : int, defaults to 1
        The number of jobs to run in parallel.
    picks : array-like of int | None, defaults to None
        The indices of the channels to decompose. If None, all available
        good data channels are decomposed.
    average : bool, defaults to True
        If True average across Epochs.
        .. versionadded:: 0.13.0
    verbose : bool, str, int, or None, defaults to None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    power : AverageTFR | EpochsTFR
        The averaged or single-trial power.
    itc : AverageTFR | EpochsTFR
        The inter-trial coherence (ITC). Only returned if return_itc
        is True.

    See Also
    --------
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell
    mne.time_frequency.tfr_morlet
    mne.time_frequency.tfr_array_morlet

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # Multitaper wavelets are always zero-mean in this code path.
    return _tfr_aux('multitaper', inst, freqs, decim, return_itc, picks,
                    average, n_cycles=n_cycles, n_jobs=n_jobs,
                    use_fft=use_fft, zero_mean=True,
                    time_bandwidth=time_bandwidth)
# TFR(s) class
class _BaseTFR(ContainsMixin, UpdateChannelsMixin, SizeMixin):
    """Shared functionality for TFR containers."""

    @property
    def data(self):
        # The TFR array itself; kept behind a property so subclasses can
        # hook validation into the setter if needed.
        return self._data

    @data.setter
    def data(self, data):
        self._data = data

    @property
    def ch_names(self):
        """Channel names."""
        return self.info['ch_names']

    def crop(self, tmin=None, tmax=None):
        """Crop data to a given time interval in place.

        Parameters
        ----------
        tmin : float | None
            Start time of selection in seconds.
        tmax : float | None
            End time of selection in seconds.

        Returns
        -------
        inst : instance of AverageTFR
            The modified instance.
        """
        keep = _time_mask(self.times, tmin, tmax, sfreq=self.info['sfreq'])
        self.times = self.times[keep]
        self.data = self.data[..., keep]
        return self

    def copy(self):
        """Return a deep copy of the instance."""
        return deepcopy(self)

    @verbose
    def apply_baseline(self, baseline, mode='mean', verbose=None):
        """Baseline correct the data in place.

        Parameters
        ----------
        baseline : tuple or list of length 2
            The (a, b) time interval, in seconds, used for rescaling /
            baseline correction. If None, no correction is applied. A
            ``None`` bound means the start (for a) or end (for b) of the
            data; ``(None, None)`` uses the whole time range.
        mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
            How the baseline statistics are applied: subtract the baseline
            mean ('mean'); divide by it ('ratio'); divide by it and take the
            log ('logratio'); subtract then divide by it ('percent');
            subtract the mean and divide by the baseline standard deviation
            ('zscore'); or divide by the mean, take the log, and divide by
            the standard deviation of the log baseline values ('zlogratio').
        verbose : bool, str, int, or None
            If not None, override default verbose level (see
            :func:`mne.verbose`).

        Returns
        -------
        inst : instance of AverageTFR
            The modified instance.
        """  # noqa: E501
        rescale(self.data, self.times, baseline, mode, copy=False)
        return self

    def save(self, fname, overwrite=False):
        """Save TFR object to hdf5 file.

        Parameters
        ----------
        fname : str
            The file name, which should end with -tfr.h5 .
        overwrite : bool
            If True, overwrite file (if it exists). Defaults to false
        """
        write_tfrs(fname, self, overwrite=overwrite)
class AverageTFR(_BaseTFR):
"""Container for Time-Frequency data.
Can for example store induced power at sensor level or inter-trial
coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None, defaults to None
Comment on the data, e.g., the experimental condition.
method : str | None, defaults to None
Comment on the method used to compute the data, e.g., morlet wavelet.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Attributes
----------
info : instance of Info
Measurement info.
ch_names : list
The names of the channels.
nave : int
Number of averaged epochs.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data array.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
comment : string
Comment on dataset. Can be the condition.
method : str | None, defaults to None
Comment on the method used to compute the data, e.g., morlet wavelet.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, comment=None,
method=None, verbose=None): # noqa: D102
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = np.array(times, dtype=float)
self.freqs = np.array(freqs, dtype=float)
self.nave = nave
self.comment = comment
self.method = method
self.preload = True
    @verbose
    def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
             tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
             cmap='RdBu_r', dB=False, colorbar=True, show=True, title=None,
             axes=None, layout=None, yscale='auto', mask=None,
             mask_style=None, mask_cmap="Greys", mask_alpha=0.1, combine=None,
             exclude=[], verbose=None):
        """Plot TFRs as a two-dimensional image(s).
        Parameters
        ----------
        picks : None | array-like of int
            The indices of the channels to plot, one figure per channel. If
            None, plot the across-channel average.
        baseline : None (default) or tuple of length 2
            The time interval to apply baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
            Perform baseline correction by
            - subtracting the mean of baseline values ('mean')
            - dividing by the mean of baseline values ('ratio')
            - dividing by the mean of baseline values and taking the log
              ('logratio')
            - subtracting the mean of baseline values followed by dividing by
              the mean of baseline values ('percent')
            - subtracting the mean of baseline values and dividing by the
              standard deviation of baseline values ('zscore')
            - dividing by the mean of baseline values, taking the log, and
              dividing by the standard deviation of log baseline values
              ('zlogratio')
        tmin : None | float
            The first time instant to display. If None the first time point
            available is used.
        tmax : None | float
            The last time instant to display. If None the last time point
            available is used.
        fmin : None | float
            The first frequency to display. If None the first frequency
            available is used.
        fmax : None | float
            The last frequency to display. If None the last frequency
            available is used.
        vmin : float | None
            The minimum value an the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value an the color scale. If vmax is None, the data
            maximum value is used.
        cmap : matplotlib colormap | 'interactive' | (colormap, bool)
            The colormap to use. If tuple, the first value indicates the
            colormap to use and the second value is a boolean defining
            interactivity. In interactive mode the colors are adjustable by
            clicking and dragging the colorbar with left and right mouse
            button. Left mouse button moves the scale up and down and right
            mouse button adjusts the range. Hitting space bar resets the range.
            Up and down arrows can be used to change the colormap. If
            'interactive', translates to ('RdBu_r', True). Defaults to
            'RdBu_r'.
            .. warning:: Interactive mode works smoothly only for a small
                amount of images.
        dB : bool
            If True, 20*log10 is applied to the data to get dB.
        colorbar : bool
            If true, colorbar will be added to the plot. For user defined axes,
            the colorbar cannot be drawn. Defaults to True.
        show : bool
            Call pyplot.show() at the end.
        title : str | 'auto' | None
            String for title. Defaults to None (blank/no title). If 'auto',
            automatically create a title that lists up to 6 of the channels
            used in the figure.
        axes : instance of Axes | list | None
            The axes to plot to. If list, the list must be a list of Axes of
            the same length as the number of channels. If instance of Axes,
            there must be only one channel plotted.
        layout : Layout | None
            Layout instance specifying sensor positions. Used for interactive
            plotting of topographies on rectangle selection. If possible, the
            correct layout is inferred from the data.
        yscale : 'auto' (default) | 'linear' | 'log'
            The scale of y (frequency) axis. 'linear' gives linear y axis,
            'log' leads to log-spaced y axis and 'auto' detects if frequencies
            are log-spaced and only then sets the y axis to 'log'.
            .. versionadded:: 0.14.0
        mask : ndarray | None
            An array of booleans of the same shape as the data. Entries of the
            data that correspond to False in the mask are plotted
            transparently. Useful for, e.g., masking for statistical
            significance.
            .. versionadded:: 0.16.0
        mask_style: None | 'both' | 'contour' | 'mask'
            If `mask` is not None: if 'contour', a contour line is drawn around
            the masked areas (``True`` in `mask`). If 'mask', entries not
            ``True`` in `mask` are shown transparently. If 'both', both a contour
            and transparency are used.
            If ``None``, defaults to 'both' if `mask` is not None, and is ignored
            otherwise.
            .. versionadded:: 0.17
        mask_cmap : matplotlib colormap | (colormap, bool) | 'interactive'
            The colormap chosen for masked parts of the image (see below), if
            `mask` is not ``None``. If None, `cmap` is reused. Defaults to
            ``Greys``. Not interactive. Otherwise, as `cmap`.
            .. versionadded:: 0.17
        mask_alpha : float
            A float between 0 and 1. If ``mask`` is not None, this sets the
            alpha level (degree of transparency) for the masked-out segments.
            I.e., if 0, masked-out segments are not visible at all.
            Defaults to 0.1.
            .. versionadded:: 0.16.0
        combine : 'mean' | 'rms' | None
            Type of aggregation to perform across selected channels. If
            None, plot one figure per selected channel.
        exclude : list of str | 'bads'
            Channels names to exclude from being shown. If 'bads', the
            bad channels are excluded. Defaults to an empty list.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see
            :func:`mne.verbose`).
        Returns
        -------
        fig : matplotlib.figure.Figure
            The figure containing the topography.
        """  # noqa: E501
        # NOTE(review): ``exclude=[]`` is a mutable default argument. It is
        # never mutated here (only forwarded), so it is benign, but a None
        # sentinel would be safer — confirm the downstream helpers treat
        # None and [] identically before changing it.
        # Thin public wrapper: all drawing logic lives in self._plot();
        # this method only forwards its arguments unchanged.
        return self._plot(picks=picks, baseline=baseline, mode=mode,
                          tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                          vmin=vmin, vmax=vmax, cmap=cmap, dB=dB,
                          colorbar=colorbar, show=show, title=title,
                          axes=axes, layout=layout, yscale=yscale, mask=mask,
                          mask_style=mask_style, mask_cmap=mask_cmap,
                          mask_alpha=mask_alpha, combine=combine,
                          exclude=exclude, verbose=verbose)
    @verbose
    def _plot(self, picks=None, baseline=None, mode='mean', tmin=None,
              tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
              cmap='RdBu_r', dB=False, colorbar=True, show=True, title=None,
              axes=None, layout=None, yscale='auto', mask=None,
              mask_style=None, mask_cmap="Greys", mask_alpha=.25,
              combine=None, exclude=None, copy=True,
              source_plot_joint=False, topomap_args=dict(), ch_type=None,
              verbose=None):
        """Plot TFRs as a two-dimensional image(s).
        See self.plot() for parameters description.
        """
        import matplotlib.pyplot as plt
        from ..viz.topo import _imshow_tfr
        # channel selection
        # simply create a new tfr object(s) with the desired channel selection
        tfr = _preproc_tfr_instance(
            self, picks, tmin, tmax, fmin, fmax, vmin, vmax, dB, mode,
            baseline, exclude, copy)
        data = tfr.data
        # When `combine` is given all picked channels collapse to one image.
        n_picks = len(tfr.ch_names) if combine is None else 1
        if combine == 'mean':
            data = data.mean(axis=0, keepdims=True)
        elif combine == 'rms':
            data = np.sqrt((data ** 2).mean(axis=0, keepdims=True))
        elif combine is not None:
            raise ValueError('combine must be None, mean or rms.')
        # User-supplied axes must match the number of images to draw.
        if isinstance(axes, list) or isinstance(axes, np.ndarray):
            if len(axes) != n_picks:
                raise RuntimeError('There must be an axes for each picked '
                                   'channel.')
        tmin, tmax = tfr.times[[0, -1]]
        # Default to color limits symmetric around zero.
        if vmax is None:
            vmax = np.abs(data).max()
        if vmin is None:
            vmin = -np.abs(data).max()
        if isinstance(axes, plt.Axes):
            axes = [axes]
        cmap = _setup_cmap(cmap)
        for idx in range(len(data)):
            if axes is None:
                fig = plt.figure()
                ax = fig.add_subplot(111)
            else:
                ax = axes[idx]
                fig = ax.get_figure()
            # Rubber-band selection callback; strip color-related kwargs so
            # they do not clash with the ones set explicitly in _onselect.
            onselect_callback = partial(
                tfr._onselect, cmap=cmap, source_plot_joint=source_plot_joint,
                topomap_args={k: v for k, v in topomap_args.items()
                              if k not in {"vmin", "vmax", "cmap", "axes"}})
            t_end = _imshow_tfr(
                ax, 0, tmin, tmax, vmin, vmax, onselect_callback, ylim=None,
                tfr=data[idx: idx + 1], freq=tfr.freqs, x_label='Time (s)',
                y_label='Frequency (Hz)', colorbar=colorbar, cmap=cmap,
                yscale=yscale, mask=mask, mask_style=mask_style,
                mask_cmap=mask_cmap, mask_alpha=mask_alpha)
            # NOTE(review): `title` is computed on the first iteration (always
            # from ch_names[0]) and then reused for every later figure —
            # confirm this is intended when plotting multiple channels.
            if title is None:
                if combine is None or len(tfr.info['ch_names']) == 1:
                    title = tfr.info['ch_names'][0] + t_end
                else:
                    title = _set_title_multiple_electrodes(
                        title, combine, tfr.info["ch_names"], all=True,
                        ch_type=ch_type) + t_end
            if title:
                fig.suptitle(title)
        plt_show(show)
        return fig
@verbose
def plot_joint(self, timefreqs=None, picks=None, baseline=None,
mode='mean', tmin=None, tmax=None, fmin=None, fmax=None,
vmin=None, vmax=None, cmap='RdBu_r', dB=False,
colorbar=True, show=True, title=None, layout=None,
yscale='auto', combine='mean', exclude=[],
topomap_args=None, image_args=None, verbose=None):
"""Plot TFRs as a two-dimensional image with topomaps.
Parameters
----------
timefreqs : None | list of tuples | dict of tuples
The time-frequency point(s) for which topomaps will be plotted.
See Notes.
picks : None | array-like of int
The indices of the channels to plot, one figure per channel. If
None, plot the across-channel aggregation (defaults to "mean").
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None, the beginning of the data is used.
If b is None, then b is set to the end of the interval.
If baseline is equal to (None, None), the entire time
interval is used.
mode : None | str
If str, must be one of 'ratio', 'zscore', 'mean', 'percent',
'logratio' and 'zlogratio'.
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)),
mean simply subtracts the mean power, percent is the same as
applying ratio then mean, logratio is the same as mean but then
rendered in log-scale, zlogratio is the same as zscore but data
is rendered in log-scale first.
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The minimum value of the color scale for the image (for
topomaps, see `topomap_args`). If vmin is None, the data
absolute minimum value is used.
vmax : float | None
The maximum value of the color scale for the image (for
topomaps, see `topomap_args`). If vmax is None, the data
absolute maximum value is used.
cmap : matplotlib colormap
The colormap to use.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot (relating to the
topomaps). For user defined axes, the colorbar cannot be drawn.
Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
layout : Layout | None
Layout instance specifying sensor positions. Used for interactive
plotting of topographies on rectangle selection. If possible, the
correct layout is inferred from the data.
yscale : 'auto' (default) | 'linear' | 'log'
The scale of y (frequency) axis. 'linear' gives linear y axis,
'log' leads to log-spaced y axis and 'auto' detects if frequencies
are log-spaced and only then sets the y axis to 'log'.
combine : 'mean' | 'rms'
Type of aggregation to perform across selected channels.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded. Defaults to an empty list, i.e., `[]`.
topomap_args : None | dict
A dict of `kwargs` that are forwarded to
:func:`mne.viz.plot_topomap` to style the topomaps. `axes` and
`show` are ignored. If `times` is not in this dict, automatic
peak detection is used. Beyond that, if ``None``, no customizable
arguments will be passed.
Defaults to ``None``.
image_args : None | dict
A dict of `kwargs` that are forwarded to :meth:`AverageTFR.plot`
to style the image. `axes` and `show` are ignored. Beyond that,
if ``None``, no customizable arguments will be passed.
Defaults to ``None``.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose`).
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
Notes
-----
`timefreqs` has three different modes: tuples, dicts, and auto.
For (list of) tuple(s) mode, each tuple defines a pair
(time, frequency) in s and Hz on the TFR plot. For example, to
look at 10 Hz activity 1 second into the epoch and 3 Hz activity
300 msec into the epoch,::
timefreqs=((1, 10), (.3, 3))
If provided as a dictionary, (time, frequency) tuples are keys and
(time_window, frequency_window) tuples are the values - indicating the
width of the windows (centered on the time and frequency indicated by
the key) to be averaged over. For example,::
timefreqs={(1, 10): (0.1, 2)}
would translate into a window that spans 0.95 to 1.05 seconds, as
well as 9 to 11 Hz. If None, a single topomap will be plotted at the
absolute peak across the time-frequency representation.
.. versionadded:: 0.16.0
""" # noqa: E501
from ..viz.topomap import _set_contour_locator
from ..channels.layout import (find_layout, _merge_grad_data,
_pair_grad_sensors)
import matplotlib.pyplot as plt
#####################################
# Handle channels (picks and types) #
#####################################
# it would be nicer to let this happen in self._plot,
# but we need it here to do the loop over the remaining channel
# types in case a user supplies `picks` that pre-select only one
# channel type.
# Nonetheless, it should be refactored for code reuse.
copy = any(var is not None for var in (exclude, picks, baseline))
tfr = _pick_inst(self, picks, exclude, copy=copy)
ch_types = _get_channel_types(tfr.info)
# if multiple sensor types: one plot per channel type, recursive call
if len(ch_types) > 1:
logger.info("Multiple channel types selected, returning one "
"figure per type.")
figs = list()
for this_type in ch_types: # pick corresponding channel type
type_picks = [idx for idx in range(tfr.info['nchan'])
if channel_type(tfr.info, idx) == this_type]
tf_ = _pick_inst(tfr, type_picks, None, copy=True)
if len(_get_channel_types(tf_.info)) > 1:
raise RuntimeError(
'Possibly infinite loop due to channel selection '
'problem. This should never happen! Please check '
'your channel types.')
figs.append(
tf_.plot_joint(
timefreqs=timefreqs, picks=None, baseline=baseline,
mode=mode, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
vmin=vmin, vmax=vmax, cmap=cmap, dB=dB,
colorbar=colorbar, show=False, title=title,
layout=layout, yscale=yscale, combine=combine,
exclude=None, topomap_args=topomap_args,
verbose=verbose))
return figs
else:
ch_type = ch_types.pop()
# Handle timefreqs
timefreqs = _get_timefreqs(tfr, timefreqs)
n_timefreqs = len(timefreqs)
if topomap_args is None:
topomap_args = dict()
topomap_args_pass = {k: v for k, v in topomap_args.items() if
k not in ('axes', 'show', 'colorbar')}
topomap_args_pass['outlines'] = topomap_args.get('outlines', 'skirt')
topomap_args_pass["contours"] = topomap_args.get('contours', 6)
##############
# Image plot #
##############
fig, tf_ax, map_ax, cbar_ax = _prepare_joint_axes(n_timefreqs)
cmap = _setup_cmap(cmap)
# image plot
# we also use this to baseline and truncate (times and freqs)
# (a copy of) the instance
if image_args is None:
image_args = dict()
fig = tfr._plot(
picks=None, baseline=baseline, mode=mode, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, vmin=vmin, vmax=vmax, cmap=cmap, dB=dB,
colorbar=False, show=False, title=title, axes=tf_ax, layout=layout,
yscale=yscale, combine=combine, exclude=None, copy=False,
source_plot_joint=True, topomap_args=topomap_args_pass,
ch_type=ch_type, **image_args)
# set and check time and freq limits ...
# can only do this after the tfr plot because it may change these
# parameters
tmax, tmin = tfr.times.max(), tfr.times.min()
fmax, fmin = tfr.freqs.max(), tfr.freqs.min()
for time, freq in timefreqs.keys():
if not (tmin <= time <= tmax):
error_value = "time point (" + str(time) + " s)"
elif not (fmin <= freq <= fmax):
error_value = "frequency (" + str(freq) + " Hz)"
else:
continue
raise ValueError("Requested " + error_value + " exceeds the range"
"of the data. Choose different `timefreqs`.")
############
# Topomaps #
############
from ..viz import plot_topomap
titles, all_data, all_pos, vlims = [], [], [], []
# the structure here is a bit complicated to allow aggregating vlims
# over all topomaps. First, one loop over all timefreqs to collect
# vlims. Then, find the max vlims and in a second loop over timefreqs,
# do the actual plotting.
timefreqs_array = np.array([np.array(keys) for keys in timefreqs])
order = timefreqs_array[:, 0].argsort() # sort by time
for ii, (time, freq) in enumerate(timefreqs_array[order]):
avg = timefreqs[(time, freq)]
# set up symmetric windows
time_half_range, freq_half_range = avg / 2.
if time_half_range == 0:
time = tfr.times[np.argmin(np.abs(tfr.times - time))]
if freq_half_range == 0:
freq = tfr.freqs[np.argmin(np.abs(tfr.freqs - freq))]
if (time_half_range == 0) and (freq_half_range == 0):
sub_map_title = '(%.2f s,\n%.1f Hz)' % (time, freq)
else:
sub_map_title = \
'(%.1f \u00B1 %.1f s,\n%.1f \u00B1 %.1f Hz)' % \
(time, time_half_range, freq, freq_half_range)
tmin = time - time_half_range
tmax = time + time_half_range
fmin = freq - freq_half_range
fmax = freq + freq_half_range
data = tfr.data
pos = find_layout(tfr.info).pos if layout is None else layout.pos
# merging grads here before rescaling makes ERDs visible
if ch_type == 'grad':
picks, new_pos = _pair_grad_sensors(tfr.info,
find_layout(tfr.info))
if layout is None:
pos = new_pos
method = combine or 'rms'
data = _merge_grad_data(data[picks], method=method)
all_pos.append(pos)
data, times, freqs, _, _ = _preproc_tfr(
data, tfr.times, tfr.freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, None, tfr.info['sfreq'])
vlims.append(np.abs(data).max())
titles.append(sub_map_title)
all_data.append(data)
new_t = tfr.times[np.abs(tfr.times - np.median([times])).argmin()]
new_f = tfr.freqs[np.abs(tfr.freqs - np.median([freqs])).argmin()]
timefreqs_array[ii] = (new_t, new_f)
# passing args to the topomap calls
max_lim = max(vlims)
topomap_args_pass["vmin"] = vmin = topomap_args.get('vmin', -max_lim)
topomap_args_pass["vmax"] = vmax = topomap_args.get('vmax', max_lim)
locator, contours = _set_contour_locator(
vmin, vmax, topomap_args_pass["contours"])
topomap_args_pass['contours'] = contours
for ax, title, data, pos in zip(map_ax, titles, all_data, all_pos):
ax.set_title(title)
plot_topomap(data.mean(axis=(-1, -2)), pos,
cmap=cmap[0], axes=ax, show=False,
**topomap_args_pass)
#############
# Finish up #
#############
if colorbar:
from matplotlib import ticker
cbar = plt.colorbar(ax.images[0], cax=cbar_ax)
if locator is None:
locator = ticker.MaxNLocator(nbins=5)
cbar.locator = locator
cbar.update_ticks()
plt.subplots_adjust(left=.12, right=.925, bottom=.14,
top=1. if title is not None else 1.2)
# draw the connection lines between time series and topoplots
lines = [_connection_line(time_, fig, tf_ax, map_ax_, y=freq_,
y_source_transform="transData")
for (time_, freq_), map_ax_ in zip(timefreqs_array, map_ax)]
fig.lines.extend(lines)
plt_show(show)
return fig
    def _onselect(self, eclick, erelease, baseline=None, mode=None,
                  layout=None, cmap=None, source_plot_joint=False,
                  topomap_args=None):
        """Handle rubber band selector in channel tfr.
        Pops up a topomap figure for the selected time-frequency window.
        ``eclick``/``erelease`` are the press/release events from the
        matplotlib RectangleSelector.
        """
        from ..viz.topomap import plot_tfr_topomap, plot_topomap, _add_colorbar
        # Ignore tiny drags. NOTE(review): .x/.y are pixel coordinates while
        # .xdata/.ydata below are data coordinates — a 0.1-pixel threshold is
        # presumably just a "zero-size selection" guard; confirm upstream.
        if abs(eclick.x - erelease.x) < .1 or abs(eclick.y - erelease.y) < .1:
            return
        # Round the dragged corners, then snap them to the nearest sampled
        # time/frequency values.
        tmin = round(min(eclick.xdata, erelease.xdata), 5)  # s
        tmax = round(max(eclick.xdata, erelease.xdata), 5)
        fmin = round(min(eclick.ydata, erelease.ydata), 5)  # Hz
        fmax = round(max(eclick.ydata, erelease.ydata), 5)
        tmin = min(self.times, key=lambda x: abs(x - tmin))  # find closest
        tmax = min(self.times, key=lambda x: abs(x - tmax))
        fmin = min(self.freqs, key=lambda x: abs(x - fmin))
        fmax = min(self.freqs, key=lambda x: abs(x - fmax))
        if tmin == tmax or fmin == fmax:
            logger.info('The selected area is too small. '
                        'Select a larger time-frequency window.')
            return
        # Collect the channel types present in the instance; grads only count
        # when at least one sensor pair can be formed.
        types = list()
        if 'eeg' in self:
            types.append('eeg')
        if 'mag' in self:
            types.append('mag')
        if 'grad' in self:
            if len(_pair_grad_sensors(self.info, topomap_coords=False,
                                      raise_error=False)) >= 2:
                types.append('grad')
        elif len(types) == 0:
            return  # Don't draw a figure for nothing.
        fig = figure_nobar()
        fig.suptitle('{0:.2f} s - {1:.2f} s, {2:.2f} Hz - {3:.2f} Hz'.format(
            tmin, tmax, fmin, fmax), y=0.04)
        if source_plot_joint:
            # Joint-plot context: average the window and draw one topomap
            # with a manually added colorbar.
            ax = fig.add_subplot(111)
            data = _preproc_tfr(
                self.data, self.times, self.freqs, tmin, tmax, fmin, fmax,
                None, None, None, None, None, self.info['sfreq'])[0]
            data = data.mean(-1).mean(-1)
            vmax = np.abs(data).max()
            im, _ = plot_topomap(data, self.info, vmin=-vmax, vmax=vmax,
                                 cmap=cmap[0], axes=ax, show=False,
                                 **topomap_args)
            _add_colorbar(ax, im, cmap, title="AU", pad=.1)
            fig.show()
        else:
            # Standard context: one TFR topomap subplot per channel type.
            for idx, ch_type in enumerate(types):
                ax = fig.add_subplot(1, len(types), idx + 1)
                plot_tfr_topomap(self, ch_type=ch_type, tmin=tmin, tmax=tmax,
                                 fmin=fmin, fmax=fmax, layout=layout,
                                 baseline=baseline, mode=mode, cmap=None,
                                 title=ch_type, vmin=None, vmax=None, axes=ax)
    def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
                  tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
                  layout=None, cmap='RdBu_r', title=None, dB=False,
                  colorbar=True, layout_scale=0.945, show=True,
                  border='none', fig_facecolor='k', fig_background=None,
                  font_color='w', yscale='auto'):
        """Plot TFRs in a topography with images.
        Parameters
        ----------
        picks : array-like of int | None
            The indices of the channels to plot. If None, all available
            channels are displayed.
        baseline : None (default) or tuple of length 2
            The time interval to apply baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
            Perform baseline correction by
            - subtracting the mean of baseline values ('mean')
            - dividing by the mean of baseline values ('ratio')
            - dividing by the mean of baseline values and taking the log
              ('logratio')
            - subtracting the mean of baseline values followed by dividing by
              the mean of baseline values ('percent')
            - subtracting the mean of baseline values and dividing by the
              standard deviation of baseline values ('zscore')
            - dividing by the mean of baseline values, taking the log, and
              dividing by the standard deviation of log baseline values
              ('zlogratio')
        tmin : None | float
            The first time instant to display. If None the first time point
            available is used.
        tmax : None | float
            The last time instant to display. If None the last time point
            available is used.
        fmin : None | float
            The first frequency to display. If None the first frequency
            available is used.
        fmax : None | float
            The last frequency to display. If None the last frequency
            available is used.
        vmin : float | None
            The minimum value of the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value of the color scale. If vmax is None, the data
            maximum value is used.
        layout : Layout | None
            Layout instance specifying sensor positions. If possible, the
            correct layout is inferred from the data.
        cmap : matplotlib colormap | str
            The colormap to use. Defaults to 'RdBu_r'.
        title : str
            Title of the figure.
        dB : bool
            If True, 20*log10 is applied to the data to get dB.
        colorbar : bool
            If true, colorbar will be added to the plot
        layout_scale : float
            Scaling factor for adjusting the relative size of the layout
            on the canvas.
        show : bool
            Call pyplot.show() at the end.
        border : str
            matplotlib borders style to be used for each sensor plot.
        fig_facecolor : str | obj
            The figure face color. Defaults to black.
        fig_background : None | array
            A background image for the figure. This must be a valid input to
            `matplotlib.pyplot.imshow`. Defaults to None.
        font_color: str | obj
            The color of tick labels in the colorbar. Defaults to white.
        yscale : 'auto' (default) | 'linear' | 'log'
            The scale of y (frequency) axis. 'linear' gives linear y axis,
            'log' leads to log-spaced y axis and 'auto' detects if frequencies
            are log-spaced and only then sets the y axis to 'log'.
        Returns
        -------
        fig : matplotlib.figure.Figure
            The figure containing the topography.
        """  # noqa: E501
        from ..viz.topo import _imshow_tfr, _plot_topo, _imshow_tfr_unified
        from ..viz import add_background_image
        # Work on a copy of the time axis; preprocessing below crops/rescales.
        times = self.times.copy()
        freqs = self.freqs
        data = self.data
        info = self.info
        info, data, picks = _prepare_picks(info, data, picks)
        data = data[picks]
        # Apply baseline correction, dB scaling and time/freq cropping.
        data, times, freqs, vmin, vmax = \
            _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
                         mode, baseline, vmin, vmax, dB, info['sfreq'])
        if layout is None:
            from mne import find_layout
            layout = find_layout(self.info)
        # Clicking a sensor image opens the selection topomap (_onselect).
        onselect_callback = partial(self._onselect, baseline=baseline,
                                    mode=mode, layout=layout)
        click_fun = partial(_imshow_tfr, tfr=data, freq=freqs, yscale=yscale,
                            cmap=(cmap, True), onselect=onselect_callback)
        imshow = partial(_imshow_tfr_unified, tfr=data, freq=freqs, cmap=cmap,
                         onselect=onselect_callback)
        fig = _plot_topo(info=info, times=times, show_func=imshow,
                         click_func=click_fun, layout=layout,
                         colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
                         layout_scale=layout_scale, title=title, border=border,
                         x_label='Time (s)', y_label='Frequency (Hz)',
                         fig_facecolor=fig_facecolor, font_color=font_color,
                         unified=True, img=True)
        add_background_image(fig, fig_background)
        plt_show(show)
        return fig
    def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
                     ch_type=None, baseline=None, mode='mean', layout=None,
                     vmin=None, vmax=None, cmap=None, sensors=True,
                     colorbar=True, unit=None, res=64, size=2,
                     cbar_fmt='%1.1e', show_names=False, title=None,
                     axes=None, show=True, outlines='head', head_pos=None,
                     contours=6):
        """Plot topographic maps of time-frequency intervals of TFR data.
        Parameters
        ----------
        tmin : None | float
            The first time instant to display. If None the first time point
            available is used.
        tmax : None | float
            The last time instant to display. If None the last time point
            available is used.
        fmin : None | float
            The first frequency to display. If None the first frequency
            available is used.
        fmax : None | float
            The last frequency to display. If None the last frequency
            available is used.
        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
            The channel type to plot. For 'grad', the gradiometers are
            collected in pairs and the RMS for each pair is plotted.
            If None, then first available channel type from order given
            above is used. Defaults to None.
        baseline : tuple or list of length 2
            The time interval to apply rescaling / baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
            Perform baseline correction by
            - subtracting the mean of baseline values ('mean')
            - dividing by the mean of baseline values ('ratio')
            - dividing by the mean of baseline values and taking the log
              ('logratio')
            - subtracting the mean of baseline values followed by dividing by
              the mean of baseline values ('percent')
            - subtracting the mean of baseline values and dividing by the
              standard deviation of baseline values ('zscore')
            - dividing by the mean of baseline values, taking the log, and
              dividing by the standard deviation of log baseline values
              ('zlogratio')
        layout : None | Layout
            Layout instance specifying sensor positions (does not need to
            be specified for Neuromag data). If possible, the correct layout
            file is inferred from the data; if no appropriate layout file was
            found, the layout is automatically generated from the sensor
            locations.
        vmin : float | callable | None
            The value specifying the lower bound of the color range. If None,
            and vmax is None, -vmax is used. Else np.min(data) or in case
            data contains only positive values 0. If callable, the output
            equals vmin(data). Defaults to None.
        vmax : float | callable | None
            The value specifying the upper bound of the color range. If None,
            the maximum value is used. If callable, the output equals
            vmax(data). Defaults to None.
        cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
            Colormap to use. If tuple, the first value indicates the colormap
            to use and the second value is a boolean defining interactivity. In
            interactive mode the colors are adjustable by clicking and dragging
            the colorbar with left and right mouse button. Left mouse button
            moves the scale up and down and right mouse button adjusts the
            range. Hitting space bar resets the range. Up and down arrows can
            be used to change the colormap. If None (default), 'Reds' is used
            for all positive data, otherwise defaults to 'RdBu_r'. If
            'interactive', translates to (None, True).
        sensors : bool | str
            Add markers for sensor locations to the plot. Accepts matplotlib
            plot format string (e.g., 'r+' for red plusses). If True, a circle
            will be used (via .add_artist). Defaults to True.
        colorbar : bool
            Plot a colorbar.
        unit : dict | str | None
            The unit of the channel type used for colorbar label. If
            scale is None the unit is automatically determined.
        res : int
            The resolution of the topomap image (n pixels along each side).
        size : float
            Side length per topomap in inches.
        cbar_fmt : str
            String format for colorbar values.
        show_names : bool | callable
            If True, show channel names on top of the map. If a callable is
            passed, channel names will be formatted using the callable; e.g.,
            to delete the prefix 'MEG ' from all channel names, pass the
            function lambda x: x.replace('MEG ', ''). If `mask` is not None,
            only significant sensors will be shown.
        title : str | None
            Title. If None (default), no title is displayed.
        axes : instance of Axes | None
            The axes to plot to. If None the axes is defined automatically.
        show : bool
            Call pyplot.show() at the end.
        outlines : 'head' | 'skirt' | dict | None
            The outlines to be drawn. If 'head', the default head scheme will
            be drawn. If 'skirt' the head scheme will be drawn, but sensors are
            allowed to be plotted outside of the head circle. If dict, each key
            refers to a tuple of x and y positions, the values in 'mask_pos'
            will serve as image mask, and the 'autoshrink' (bool) field will
            trigger automated shrinking of the positions due to points outside
            the outline. Alternatively, a matplotlib patch object can be passed
            for advanced masking options, either directly or as a function that
            returns patches (required for multi-axis plots). If None, nothing
            will be drawn. Defaults to 'head'.
        head_pos : dict | None
            If None (default), the sensors are positioned such that they span
            the head circle. If dict, can have entries 'center' (tuple) and
            'scale' (tuple) for what the center and scale of the head should be
            relative to the electrode locations.
        contours : int | array of float
            The number of contour lines to draw. If 0, no contours will be
            drawn. When an integer, matplotlib ticker locator is used to find
            suitable values for the contour thresholds (may sometimes be
            inaccurate, use array for accuracy). If an array, the values
            represent the levels for the contours. If colorbar=True, the ticks
            in colorbar correspond to the contour levels. Defaults to 6.
        Returns
        -------
        fig : matplotlib.figure.Figure
            The figure containing the topography.
        """  # noqa: E501
        from ..viz import plot_tfr_topomap
        # Thin wrapper: the actual drawing is implemented in
        # mne.viz.plot_tfr_topomap; all arguments are forwarded unchanged.
        return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
                                fmax=fmax, ch_type=ch_type, baseline=baseline,
                                mode=mode, layout=layout, vmin=vmin, vmax=vmax,
                                cmap=cmap, sensors=sensors, colorbar=colorbar,
                                unit=unit, res=res, size=size,
                                cbar_fmt=cbar_fmt, show_names=show_names,
                                title=title, axes=axes, show=show,
                                outlines=outlines, head_pos=head_pos,
                                contours=contours)
def _check_compat(self, tfr):
"""Check that self and tfr have the same time-frequency ranges."""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr): # noqa: D105
"""Add instances."""
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
    def __iadd__(self, tfr):  # noqa: D105
        """Add another instance's data to this one in place."""
        self._check_compat(tfr)
        # ``+=`` mutates the existing array (no rebinding), so any external
        # views of ``self.data`` observe the change.
        self.data += tfr.data
        return self
def __sub__(self, tfr): # noqa: D105
"""Subtract instances."""
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
    def __isub__(self, tfr):  # noqa: D105
        """Subtract another instance's data from this one in place."""
        self._check_compat(tfr)
        # ``-=`` mutates the existing array (no rebinding), so any external
        # views of ``self.data`` observe the change.
        self.data -= tfr.data
        return self
def __repr__(self): # noqa: D105
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
s += ', channels : %d' % self.data.shape[0]
s += ', ~%s' % (sizeof_fmt(self._size),)
return "<AverageTFR | %s>" % s
class EpochsTFR(_BaseTFR):
    """Container for Time-Frequency data on epochs.

    Can for example store induced power at sensor level.

    Parameters
    ----------
    info : Info
        The measurement info.
    data : ndarray, shape (n_epochs, n_channels, n_freqs, n_times)
        The data.
    times : ndarray, shape (n_times,)
        The time values in seconds.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    comment : str | None, defaults to None
        Comment on the data, e.g., the experimental condition.
    method : str | None, defaults to None
        Comment on the method used to compute the data, e.g., morlet wavelet.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Attributes
    ----------
    info : instance of Info
        Measurement info.
    ch_names : list
        The names of the channels.
    data : ndarray, shape (n_epochs, n_channels, n_freqs, n_times)
        The data array.
    times : ndarray, shape (n_times,)
        The time values in seconds.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    comment : string
        Comment on dataset. Can be the condition.
    method : str | None, defaults to None
        Comment on the method used to compute the data, e.g., morlet wavelet.

    Notes
    -----
    .. versionadded:: 0.13.0
    """

    @verbose
    def __init__(self, info, data, times, freqs, comment=None,
                 method=None, verbose=None):  # noqa: D102
        self.info = info
        if data.ndim != 4:
            raise ValueError('data should be 4d. Got %d.' % data.ndim)
        # Validate each data axis against the matching metadata container.
        _, n_channels, n_freqs, n_times = data.shape
        checks = (('channels', n_channels, len(info['chs'])),
                  ('frequencies', n_freqs, len(freqs)),
                  ('times', n_times, len(times)))
        for name, got, want in checks:
            if got != want:
                raise ValueError("Number of %s and data size don't match"
                                 " (%d != %d)." % (name, got, want))
        self.data = data
        self.times = np.array(times, dtype=float)
        self.freqs = np.array(freqs, dtype=float)
        self.comment = comment
        self.method = method
        self.preload = True

    def __repr__(self):  # noqa: D105
        fields = ["time : [%f, %f]" % (self.times[0], self.times[-1]),
                  "freq : [%f, %f]" % (self.freqs[0], self.freqs[-1]),
                  "epochs : %d" % self.data.shape[0],
                  'channels : %d' % self.data.shape[1],
                  '~%s' % (sizeof_fmt(self._size),)]
        return "<EpochsTFR | %s>" % ", ".join(fields)

    def _clone_with_data(self, data):
        """Build a new EpochsTFR carrying copies of this instance's metadata."""
        return EpochsTFR(info=self.info.copy(), data=data,
                         times=self.times.copy(), freqs=self.freqs.copy(),
                         method=self.method, comment=self.comment)

    def __abs__(self):
        """Take the absolute value."""
        return self._clone_with_data(np.abs(self.data))

    def copy(self):
        """Give a copy of the EpochsTFR.

        Returns
        -------
        tfr : instance of EpochsTFR
            The copy.
        """
        return self._clone_with_data(self.data.copy())

    def average(self):
        """Average the data across epochs.

        Returns
        -------
        ave : instance of AverageTFR
            The averaged data.
        """
        mean_data = self.data.mean(axis=0)
        return AverageTFR(info=self.info.copy(), data=mean_data,
                          times=self.times.copy(), freqs=self.freqs.copy(),
                          nave=self.data.shape[0], method=self.method,
                          comment=self.comment)
def combine_tfr(all_tfr, weights='nave'):
    """Merge AverageTFR data by weighted addition.

    Create a new AverageTFR instance, using a combination of the supplied
    instances as its data. By default, the mean (weighted by trials) is used.
    Subtraction can be performed by passing negative weights (e.g., [1, -1]).
    Data must have the same channels and the same time instants.

    Parameters
    ----------
    all_tfr : list of AverageTFR
        The tfr datasets.
    weights : list of float | str
        The weights to apply to the data of each AverageTFR instance.
        Can also be ``'nave'`` to weight according to tfr.nave,
        or ``'equal'`` to use equal weighting (each weighted as ``1/N``).

    Returns
    -------
    tfr : AverageTFR
        The new TFR data.

    Notes
    -----
    .. versionadded:: 0.11.0
    """
    tfr = all_tfr[0].copy()
    if isinstance(weights, string_types):
        if weights not in ('nave', 'equal'):
            raise ValueError('Weights must be a list of float, or "nave" or '
                             '"equal"')
        if weights == 'nave':
            weights = np.array([e.nave for e in all_tfr], float)
            weights /= weights.sum()
        else:  # == 'equal'
            weights = [1. / len(all_tfr)] * len(all_tfr)
    weights = np.array(weights, float)
    if weights.ndim != 1 or weights.size != len(all_tfr):
        raise ValueError('Weights must be the same size as all_tfr')
    ch_names = tfr.ch_names
    for t_ in all_tfr[1:]:
        # Raise explicitly (not ``assert``) so the checks survive
        # ``python -O``; the old asserts also misused a ValueError instance
        # as the assertion message.
        if t_.ch_names != ch_names:
            raise ValueError("%s and %s do not contain "
                             "the same channels" % (tfr, t_))
        if np.max(np.abs(t_.times - tfr.times)) >= 1e-7:
            raise ValueError("%s and %s do not contain the same time instants"
                             % (tfr, t_))
    # use union of bad channels
    bads = list(set(tfr.info['bads']).union(*(t_.info['bads']
                                              for t_ in all_tfr[1:])))
    tfr.info['bads'] = bads
    # XXX : should be refactored with combined_evoked function
    tfr.data = sum(w * t_.data for w, t_ in zip(weights, all_tfr))
    # Effective number of averages of a weighted sum of independent
    # estimates; never report fewer than one trial.
    tfr.nave = max(int(1. / sum(w ** 2 / e.nave
                                for w, e in zip(weights, all_tfr))), 1)
    return tfr
# Utils
def _get_data(inst, return_itc):
    """Get data from Epochs or Evoked instance as epochs x ch x time."""
    from ..epochs import BaseEpochs
    from ..evoked import Evoked
    if not isinstance(inst, (BaseEpochs, Evoked)):
        raise TypeError('inst must be Epochs or Evoked')
    if isinstance(inst, BaseEpochs):
        return inst.get_data()
    # Evoked: ITC needs multiple trials, so it cannot be computed here.
    if return_itc:
        raise ValueError('return_itc must be False for evoked data')
    # Prepend a singleton epochs axis so both inputs share a 3D layout.
    return inst.data[np.newaxis].copy()
def _prepare_picks(info, data, picks):
    """Prepare the picks."""
    if picks is None:
        picks = _pick_data_channels(info, with_ref_meg=True, exclude='bads')
    keeps_everything = np.array_equal(picks, np.arange(len(data)))
    if keeps_everything:
        # All channels selected: a full slice avoids copying downstream.
        picks = slice(None)
    else:
        info = pick_info(info, picks)
    return info, data, picks
def _centered(arr, newsize):
"""Aux Function to center data."""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
                 baseline, vmin, vmax, dB, sfreq, copy=None):
    """Aux Function to prepare tfr computation.

    Baseline-corrects ``data``, crops it to the requested time and
    frequency windows, optionally converts to dB, and resolves the color
    limits.  Returns ``(data, times, freqs, vmin, vmax)``.
    """
    # Only copy by default when a baseline correction would modify the input.
    if copy is None:
        copy = baseline is not None
    data = rescale(data, times, baseline, mode, copy=copy)

    # crop time: convert the boolean mask to [itmin, itmax) slice bounds,
    # leaving a bound open (None) when the corresponding limit is None.
    itmin, itmax = None, None
    idx = np.where(_time_mask(times, tmin, tmax, sfreq=sfreq))[0]
    if tmin is not None:
        itmin = idx[0]
    if tmax is not None:
        itmax = idx[-1] + 1

    times = times[itmin:itmax]

    # crop freqs: same scheme as for times.
    ifmin, ifmax = None, None
    idx = np.where(_time_mask(freqs, fmin, fmax, sfreq=sfreq))[0]
    if fmin is not None:
        ifmin = idx[0]
    if fmax is not None:
        ifmax = idx[-1] + 1

    freqs = freqs[ifmin:ifmax]

    # crop data (channels x freqs x times)
    data = data[:, ifmin:ifmax, itmin:itmax]

    if dB:
        # Power in dB; ``data * data.conj()`` also handles complex input.
        data = 10 * np.log10((data * data.conj()).real)

    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
    return data, times, freqs, vmin, vmax
def _check_decim(decim):
"""Aux function checking the decim parameter."""
if isinstance(decim, int):
decim = slice(None, None, decim)
elif not isinstance(decim, slice):
raise(TypeError, '`decim` must be int or slice, got %s instead'
% type(decim))
return decim
# i/o
def write_tfrs(fname, tfr, overwrite=False):
    """Write a TFR dataset to hdf5.

    Parameters
    ----------
    fname : string
        The file name, which should end with -tfr.h5
    tfr : AverageTFR instance, or list of AverageTFR instances
        The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is not None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed
    overwrite : bool
        If True, overwrite file (if it exists). Defaults to False.

    See Also
    --------
    read_tfrs

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    tfrs = tfr if isinstance(tfr, (list, tuple)) else [tfr]
    # Unnamed datasets fall back to their list position as the condition.
    payload = [_prepare_write_tfr(
                   t, condition=(idx if t.comment is None else t.comment))
               for idx, t in enumerate(tfrs)]
    write_hdf5(fname, payload, overwrite=overwrite, title='mnepython')
def _prepare_write_tfr(tfr, condition):
"""Aux function."""
attributes = dict(times=tfr.times, freqs=tfr.freqs, data=tfr.data,
info=tfr.info, comment=tfr.comment, method=tfr.method)
if hasattr(tfr, 'nave'):
attributes['nave'] = tfr.nave
return (condition, attributes)
def read_tfrs(fname, condition=None):
    """Read TFR datasets from hdf5 file.

    Parameters
    ----------
    fname : string
        The file name, which should end with -tfr.h5 .
    condition : int or str | list of int or str | None
        The condition to load. If None, all conditions will be returned.
        Defaults to None.

    See Also
    --------
    write_tfrs

    Returns
    -------
    tfrs : list of instances of AverageTFR | instance of AverageTFR
        Depending on `condition` either the TFR object or a list of multiple
        TFR objects.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    check_fname(fname, 'tfr', ('-tfr.h5', '_tfr.h5'))

    logger.info('Reading %s ...' % fname)
    tfr_data = read_hdf5(fname, title='mnepython')
    for k, tfr in tfr_data:
        # Stored info is a plain dict; restore the Info type in place.
        tfr['info'] = Info(tfr['info'])
    # 'nave' is only written for averaged data (see _prepare_write_tfr), so
    # its presence distinguishes AverageTFR from EpochsTFR records.
    # NOTE(review): this inspects only the loop's *last* record and assumes
    # all records in the file are of the same kind -- confirm.
    is_average = 'nave' in tfr
    if condition is not None:
        if not is_average:
            raise NotImplementedError('condition not supported when reading '
                                      'EpochsTFR.')
        tfr_dict = dict(tfr_data)
        if condition not in tfr_dict:
            keys = ['%s' % k for k in tfr_dict]
            raise ValueError('Cannot find condition ("{0}") in this file. '
                             'The file contains "{1}""'
                             .format(condition, " or ".join(keys)))
        out = AverageTFR(**tfr_dict[condition])
    else:
        # No condition requested: rebuild every stored record.
        inst = AverageTFR if is_average else EpochsTFR
        out = [inst(**d) for d in list(zip(*tfr_data))[1]]
    return out
def _get_timefreqs(tfr, timefreqs):
"""Find and/or setup timefreqs for `tfr.plot_joint`."""
# Input check
timefreq_error_msg = (
"Supplied `timefreqs` are somehow malformed. Please supply None, "
"a list of tuple pairs, or a dict of such tuple pairs, not: ")
if isinstance(timefreqs, dict):
for k, v in timefreqs.items():
for item in (k, v):
if len(item) != 2 or any((not _is_numeric(n) for n in item)):
raise ValueError(timefreq_error_msg, item)
elif timefreqs is not None:
if not hasattr(timefreqs, "__len__"):
raise ValueError(timefreq_error_msg, timefreqs)
if len(timefreqs) == 2 and all((_is_numeric(v) for v in timefreqs)):
timefreqs = [tuple(timefreqs)] # stick a pair of numbers in a list
else:
for item in timefreqs:
if (hasattr(item, "__len__") and len(item) == 2 and
all((_is_numeric(n) for n in item))):
pass
else:
raise ValueError(timefreq_error_msg, item)
# If None, automatic identification of max peak
else:
from scipy.signal import argrelmax
order = max((1, tfr.data.shape[2] // 30))
peaks_idx = argrelmax(tfr.data, order=order, axis=2)
if peaks_idx[0].size == 0:
_, p_t, p_f = np.unravel_index(tfr.data.argmax(), tfr.data.shape)
timefreqs = [(tfr.times[p_t], tfr.freqs[p_f])]
else:
peaks = [tfr.data[0, f, t] for f, t in
zip(peaks_idx[1], peaks_idx[2])]
peakmax_idx = np.argmax(peaks)
peakmax_time = tfr.times[peaks_idx[2][peakmax_idx]]
peakmax_freq = tfr.freqs[peaks_idx[1][peakmax_idx]]
timefreqs = [(peakmax_time, peakmax_freq)]
timefreqs = {
tuple(k): np.asarray(timefreqs[k]) if isinstance(timefreqs, dict)
else np.array([0, 0]) for k in timefreqs}
return timefreqs
def _preproc_tfr_instance(tfr, picks, tmin, tmax, fmin, fmax, vmin, vmax, dB,
                          mode, baseline, exclude, copy=True):
    """Baseline and truncate (times and freqs) a TFR instance."""
    tfr = tfr.copy() if copy else tfr

    exclude = None if picks is None else exclude
    picks = _picks_to_idx(tfr.info, picks, exclude='bads')
    pick_names = [tfr.info['ch_names'][pick] for pick in picks]
    tfr.pick_channels(pick_names)

    if exclude == 'bads':
        exclude = [ch for ch in tfr.info['bads']
                   if ch in tfr.info['ch_names']]
    if exclude is not None:
        tfr.drop_channels(exclude)

    data, times, freqs, _, _ = _preproc_tfr(
        tfr.data, tfr.times, tfr.freqs, tmin, tmax, fmin, fmax, mode,
        baseline, vmin, vmax, dB, tfr.info['sfreq'], copy=False)

    tfr.times = times
    tfr.freqs = freqs
    tfr.data = data
    return tfr
| {
"repo_name": "teonlamont/mne-python",
"path": "mne/time_frequency/tfr.py",
"copies": "2",
"size": "95190",
"license": "bsd-3-clause",
"hash": -3097065776452647000,
"line_mean": 39.1137800253,
"line_max": 81,
"alpha_frac": 0.5697132052,
"autogenerated": false,
"ratio": 3.9023490345590948,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5472062239759095,
"avg_score": null,
"num_lines": null
} |
"""A module which implements the time frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
# Clement Moutard <clement.moutard@polytechnique.org>
#
# License : BSD (3-clause)
from copy import deepcopy
from math import sqrt
import numpy as np
from scipy import linalg
from scipy.fftpack import fftn, ifftn
from ..fixes import partial
from ..baseline import rescale
from ..parallel import parallel_func
from ..utils import (logger, verbose, _time_mask, warn, check_fname,
_check_copy_dep)
from ..channels.channels import ContainsMixin, UpdateChannelsMixin
from ..io.pick import pick_info, pick_types
from ..io.meas_info import Info
from .multitaper import dpss_windows
from ..viz.utils import figure_nobar, plt_show
from ..externals.h5io import write_hdf5, read_hdf5
from ..externals.six import string_types
def _get_data(inst, return_itc):
    """Get data from Epochs or Evoked instance as epochs x ch x time"""
    from ..epochs import _BaseEpochs
    from ..evoked import Evoked
    if not isinstance(inst, (_BaseEpochs, Evoked)):
        raise TypeError('inst must be Epochs or Evoked')
    if isinstance(inst, _BaseEpochs):
        return inst.get_data()
    # Evoked: ITC needs multiple trials, so it cannot be computed here.
    if return_itc:
        raise ValueError('return_itc must be False for evoked data')
    # Prepend a singleton epochs axis so both inputs share a 3D layout.
    return inst.data[np.newaxis, ...].copy()
def morlet(sfreq, freqs, n_cycles=7, sigma=None, zero_mean=False):
    """Compute Morlet wavelets for the given frequency range

    Parameters
    ----------
    sfreq : float
        Sampling Frequency
    freqs : array
        frequency range of interest (1 x Frequencies)
    n_cycles: float | array of float
        Number of cycles. Fixed number or one per frequency.
    sigma : float, (optional)
        Controls the width of the wavelet and hence its temporal resolution.
        If None, the temporal resolution adapts with frequency (standard
        wavelet transform); if fixed, the temporal resolution is constant as
        in the short-time Fourier transform, and the number of oscillations
        grows with frequency.
    zero_mean : bool
        Make sure the wavelet is zero mean

    Returns
    -------
    Ws : list of array
        Wavelets time series

    See Also
    --------
    mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
                                    with Morlet wavelets
    """
    n_cycles = np.atleast_1d(n_cycles)
    if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
        raise ValueError("n_cycles should be fixed or defined for "
                         "each frequency.")

    def _one_wavelet(f, cycles):
        # Gaussian envelope width: frequency-adaptive by default,
        # fixed (STFT-like) when ``sigma`` is given.
        if sigma is None:
            sigma_t = cycles / (2.0 * np.pi * f)
        else:
            sigma_t = cycles / (2.0 * np.pi * sigma)
        # Support of +/- 5 sigma around t == 0, sampled at 1/sfreq.
        t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
        t = np.r_[-t[::-1], t[1:]]
        oscillation = np.exp(2.0 * 1j * np.pi * f * t)
        gaussian_enveloppe = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
        if zero_mean:  # subtract the DC component of the oscillation
            real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
            oscillation -= real_offset
        W = oscillation * gaussian_enveloppe
        W /= sqrt(0.5) * linalg.norm(W.ravel())
        return W

    if n_cycles.size == 1:
        cycles_per_freq = [n_cycles[0]] * len(freqs)
    else:
        cycles_per_freq = list(n_cycles)
    return [_one_wavelet(f, c) for f, c in zip(freqs, cycles_per_freq)]
def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0,
                  zero_mean=False):
    """Compute DPSS-tapered wavelets for the given frequency range

    Parameters
    ----------
    sfreq : float
        Sampling Frequency.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        The number of cycles globally or for each frequency.
        Defaults to 7.
    time_bandwidth : float, (optional)
        Time x Bandwidth product.
        The number of good tapers (low-bias) is chosen automatically based on
        this to equal floor(time_bandwidth - 1).
        Default is 4.0, giving 3 good tapers.

    Returns
    -------
    Ws : list of list of array
        One list of per-frequency wavelet arrays per taper.
    """
    Ws = list()
    if time_bandwidth < 2.0:
        raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
    # floor(time_bandwidth - 1) low-bias tapers are usable.
    n_taps = int(np.floor(time_bandwidth - 1))
    n_cycles = np.atleast_1d(n_cycles)

    if n_cycles.size != 1 and n_cycles.size != len(freqs):
        raise ValueError("n_cycles should be fixed or defined for "
                         "each frequency.")

    # Outer loop over tapers, inner loop over frequencies.
    for m in range(n_taps):
        Wm = list()
        for k, f in enumerate(freqs):
            if len(n_cycles) != 1:
                this_n_cycles = n_cycles[k]
            else:
                this_n_cycles = n_cycles[0]

            # Window length in seconds for this frequency.
            t_win = this_n_cycles / float(f)
            t = np.arange(0., t_win, 1.0 / sfreq)
            # Making sure wavelets are centered before tapering
            oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))

            # Get dpss tapers
            # NOTE(review): ``conc`` (taper concentrations) is discarded;
            # tapers are recomputed for every (taper, freq) pair -- a cache
            # per window length would avoid redundant work.
            tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
                                        n_taps)

            Wk = oscillation * tapers[m]
            if zero_mean:  # to make it zero mean
                real_offset = Wk.mean()
                Wk -= real_offset
            Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())

            Wm.append(Wk)

        Ws.append(Wm)

    return Ws
def _centered(arr, newsize):
"""Aux Function to center data"""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _cwt(X, Ws, mode="same", decim=1, use_fft=True):
    """Compute cwt with fft based convolutions or temporal convolutions.

    Parameters
    ----------
    X : array, shape (n_signals, n_times)
        The signals.
    Ws : list of array
        Wavelet time series.
    mode : 'same' | 'valid' | 'full'
        Convolution convention ('full' requires ``use_fft=True``).
    decim : int | slice
        Decimation applied along the time axis.
    use_fft : bool
        Convolve via FFT products instead of ``np.convolve``.

    Yields
    ------
    tfr : array, shape (n_freqs, n_times_out), complex
        Decomposition of one signal. NOTE: the same buffer is reused for
        every yielded signal, so consumers must copy before advancing the
        generator.
    """
    if mode not in ['same', 'valid', 'full']:
        raise ValueError("`mode` must be 'same', 'valid' or 'full', "
                         "got %s instead." % mode)
    if mode == 'full' and (not use_fft):
        # XXX JRK: full wavelet decomposition needs to be implemented
        raise ValueError('`full` decomposition with convolution is currently' +
                         ' not supported.')
    decim = _check_decim(decim)
    X = np.asarray(X)

    # Precompute wavelets for given frequency range to save time
    n_signals, n_times = X.shape
    n_times_out = X[:, decim].shape[1]
    n_freqs = len(Ws)

    Ws_max_size = max(W.size for W in Ws)
    size = n_times + Ws_max_size - 1
    # Always use 2**n-sized FFT
    fsize = 2 ** int(np.ceil(np.log2(size)))

    # precompute FFTs of Ws
    if use_fft:
        fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
    for i, W in enumerate(Ws):
        if len(W) > n_times:
            raise ValueError('Wavelet is too long for such a short signal. '
                             'Reduce the number of cycles.')
        if use_fft:
            fft_Ws[i] = fftn(W, [fsize])

    # Make generator looping across signals
    tfr = np.zeros((n_freqs, n_times_out), dtype=np.complex128)
    for x in X:
        if use_fft:
            fft_x = fftn(x, [fsize])

        # Loop across wavelets
        for ii, W in enumerate(Ws):
            if use_fft:
                ret = ifftn(fft_x * fft_Ws[ii])[:n_times + W.size - 1]
            else:
                ret = np.convolve(x, W, mode=mode)

            # Center and decimate decomposition
            if mode == "valid":
                sz = abs(W.size - n_times) + 1
                # Bug fix: integer floor division -- on Python 3 ``/ 2``
                # yields a float, and floats are not valid slice bounds.
                offset = (n_times - sz) // 2
                this_slice = slice(offset // decim.step,
                                   (offset + sz) // decim.step)
                if use_fft:
                    ret = _centered(ret, sz)
                tfr[ii, this_slice] = ret[decim]
            else:
                if use_fft:
                    ret = _centered(ret, n_times)
                tfr[ii, :] = ret[decim]
        yield tfr
def cwt_morlet(X, sfreq, freqs, use_fft=True, n_cycles=7.0, zero_mean=False,
               decim=1):
    """Compute time freq decomposition with Morlet wavelets

    This function operates directly on numpy arrays. Consider using
    `tfr_morlet` to process `Epochs` or `Evoked` instances.

    Parameters
    ----------
    X : array, shape (n_signals, n_times)
        Signals (one per line)
    sfreq : float
        Sampling frequency.
    freqs : array
        Array of frequencies of interest
    use_fft : bool
        Compute convolution with FFT or temporal convolution.
    n_cycles: float | array of float
        Number of cycles. Fixed number or one per frequency.
    zero_mean : bool
        Make sure the wavelets are zero mean.
    decim : int | slice
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice` returns tfr[..., decim].
        Note that decimation may create aliasing artifacts.
        Defaults to 1.

    Returns
    -------
    tfr : 3D array
        Time Frequency Decompositions (n_signals x n_frequencies x n_times)

    See Also
    --------
    tfr.cwt : Compute time-frequency decomposition with user-provided wavelets
    """
    mode = 'same'
    # mode = "valid"
    decim = _check_decim(decim)
    n_signals, n_times = X[:, decim].shape

    # Precompute wavelets for given frequency range to save time
    Ws = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)

    coefs = cwt(X, Ws, use_fft=use_fft, mode=mode, decim=decim)

    # ``np.complex`` was a deprecated alias of builtin ``complex`` (removed
    # in NumPy 1.24); use the explicit dtype.
    tfrs = np.empty((n_signals, len(freqs), n_times), dtype=np.complex128)
    # Copy each yielded decomposition: the generator reuses its buffer.
    for k, tfr in enumerate(coefs):
        tfrs[k] = tfr

    return tfrs
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
    """Compute time freq decomposition with continuous wavelet transform

    Parameters
    ----------
    X : array, shape (n_signals, n_times)
        The signals.
    Ws : list of array
        Wavelets time series.
    use_fft : bool
        Use FFT for convolutions. Defaults to True.
    mode : 'same' | 'valid' | 'full'
        Convention for convolution. 'full' is currently not implemented with
        `use_fft=False`. Defaults to 'same'.
    decim : int | slice
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice` returns tfr[..., decim].
        Note that decimation may create aliasing artifacts.
        Defaults to 1.

    Returns
    -------
    tfr : array, shape (n_signals, n_frequencies, n_times)
        The time frequency decompositions.

    See Also
    --------
    mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
                                    with Morlet wavelets
    """
    decim = _check_decim(decim)
    n_signals, n_times = X[:, decim].shape

    coefs = _cwt(X, Ws, mode, decim=decim, use_fft=use_fft)

    # ``np.complex`` was a deprecated alias of builtin ``complex`` (removed
    # in NumPy 1.24); use the explicit dtype.
    tfrs = np.empty((n_signals, len(Ws), n_times), dtype=np.complex128)
    # Copy each yielded decomposition: the generator reuses its buffer.
    for k, tfr in enumerate(coefs):
        tfrs[k] = tfr

    return tfrs
def _time_frequency(X, Ws, use_fft, decim):
    """Aux of time_frequency for parallel computing over channels

    Accumulates, over epochs, the power spectral density and the (complex)
    phase-locking sum for one channel, and returns their epoch averages.
    """
    decim = _check_decim(decim)
    n_epochs, n_times = X[:, decim].shape
    n_frequencies = len(Ws)
    psd = np.zeros((n_frequencies, n_times))  # PSD
    # ``np.complex`` was a deprecated alias of builtin ``complex`` (removed
    # in NumPy 1.24); use the explicit dtype.
    plf = np.zeros((n_frequencies, n_times), np.complex128)  # phase lock

    mode = 'same'
    tfrs = _cwt(X, Ws, mode, decim=decim, use_fft=use_fft)

    for tfr in tfrs:
        tfr_abs = np.abs(tfr)
        psd += tfr_abs ** 2          # power accumulates as |coef|^2
        plf += tfr / tfr_abs         # unit phasors accumulate coherence
    psd /= n_epochs
    plf = np.abs(plf) / n_epochs
    return psd, plf
@verbose
def single_trial_power(data, sfreq, frequencies, use_fft=True, n_cycles=7,
                       baseline=None, baseline_mode='ratio', times=None,
                       decim=1, n_jobs=1, zero_mean=False, verbose=None):
    """Compute time-frequency power on single epochs

    Parameters
    ----------
    data : array of shape [n_epochs, n_channels, n_times]
        The epochs
    sfreq : float
        Sampling rate
    frequencies : array-like
        The frequencies
    use_fft : bool
        Use the FFT for convolutions or not.
    n_cycles : float | array of float
        Number of cycles in the Morlet wavelet. Fixed number
        or one per frequency.
    baseline : None (default) or tuple of length 2
        The time interval to apply baseline correction.
        If None do not apply it. If baseline is (a, b)
        the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used
        and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
        interval is used.
    baseline_mode : None | 'ratio' | 'zscore'
        Do baseline correction with ratio (power is divided by mean
        power during baseline) or zscore (power is divided by standard
        deviation of power during baseline after subtracting the mean,
        power = [power - mean(power_baseline)] / std(power_baseline))
    times : array
        Required to define baseline
    decim : int | slice
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice` returns tfr[..., decim].
        Note that decimation may create aliasing artifacts.
        Defaults to 1.
    n_jobs : int
        The number of epochs to process at the same time
    zero_mean : bool
        Make sure the wavelets are zero mean.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    power : 4D array
        Power estimate (Epochs x Channels x Frequencies x Timepoints).
    """
    decim = _check_decim(decim)
    mode = 'same'
    n_frequencies = len(frequencies)
    n_epochs, n_channels, n_times = data[:, :, decim].shape

    # Precompute wavelets for given frequency range to save time
    Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)

    parallel, my_cwt, _ = parallel_func(cwt, n_jobs)

    logger.info("Computing time-frequency power on single epochs...")

    # ``np.float`` was a deprecated alias of builtin ``float`` (removed in
    # NumPy 1.24); use the explicit dtype.
    power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
                     dtype=np.float64)

    # Package arguments for `cwt` here to minimize omissions where only one of
    # the two calls below is updated with new function arguments.
    cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
    if n_jobs == 1:
        for k, e in enumerate(data):
            x = cwt(e, **cwt_kw)
            power[k] = (x * x.conj()).real
    else:
        # Precompute tf decompositions in parallel
        tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
        for k, tfr in enumerate(tfrs):
            power[k] = (tfr * tfr.conj()).real

    # Run baseline correction. Be sure to decimate the times array as well if
    # needed.
    if times is not None:
        times = times[decim]
    power = rescale(power, times, baseline, baseline_mode, copy=False)
    return power
def _induced_power_cwt(data, sfreq, frequencies, use_fft=True, n_cycles=7,
                       decim=1, n_jobs=1, zero_mean=False):
    """Compute time induced power and inter-trial phase-locking factor

    The time frequency decomposition is done with Morlet wavelets

    Parameters
    ----------
    data : array
        3D array of shape [n_epochs, n_channels, n_times]
    sfreq : float
        Sampling frequency.
    frequencies : array
        Array of frequencies of interest
    use_fft : bool
        Compute transform with fft based convolutions or temporal
        convolutions.
    n_cycles : float | array of float
        Number of cycles. Fixed number or one per frequency.
    decim : int | slice
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice` returns tfr[..., decim].
        Note that decimation may create aliasing artifacts.
        Defaults to 1.
    n_jobs : int
        The number of CPUs used in parallel. All CPUs are used in -1.
        Requires joblib package.
    zero_mean : bool
        Make sure the wavelets are zero mean.

    Returns
    -------
    power : array, shape (n_channels, n_frequencies, n_times)
        Induced power. Squared amplitude of time-frequency coefficients.
    phase_lock : array, shape (n_channels, n_frequencies, n_times)
        Phase locking factor in [0, 1].
    """
    decim = _check_decim(decim)
    n_frequencies = len(frequencies)
    n_epochs, n_channels, n_times = data[:, :, decim].shape

    # Precompute wavelets for given frequency range to save time
    Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)

    psd = np.empty((n_channels, n_frequencies, n_times))
    plf = np.empty((n_channels, n_frequencies, n_times))
    # Separate to save memory for n_jobs=1
    # Parallelism is over channels; each worker reduces across epochs.
    parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
    psd_plf = parallel(my_time_frequency(data[:, c, :], Ws, use_fft, decim)
                       for c in range(n_channels))
    for c, (psd_c, plf_c) in enumerate(psd_plf):
        psd[c, :, :], plf[c, :, :] = psd_c, plf_c
    return psd, plf
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
                 baseline, vmin, vmax, dB, sfreq):
    """Aux Function to prepare tfr computation

    Baseline-corrects ``data``, crops it to the requested time/frequency
    windows, converts times to milliseconds, optionally converts to dB,
    and resolves the color limits.
    """
    from ..viz.utils import _setup_vmin_vmax

    # Only copy when a baseline correction would modify the input.
    copy = baseline is not None
    data = rescale(data, times, baseline, mode, copy=copy)
    # crop time: convert the boolean mask to [itmin, itmax) slice bounds,
    # leaving a bound open (None) when the corresponding limit is None.
    itmin, itmax = None, None
    idx = np.where(_time_mask(times, tmin, tmax, sfreq=sfreq))[0]
    if tmin is not None:
        itmin = idx[0]
    if tmax is not None:
        itmax = idx[-1] + 1

    times = times[itmin:itmax]

    # crop freqs: same scheme as for times.
    ifmin, ifmax = None, None
    idx = np.where(_time_mask(freqs, fmin, fmax, sfreq=sfreq))[0]
    if fmin is not None:
        ifmin = idx[0]
    if fmax is not None:
        ifmax = idx[-1] + 1

    freqs = freqs[ifmin:ifmax]

    # crop data (channels x freqs x times)
    data = data[:, ifmin:ifmax, itmin:itmax]

    # Convert seconds -> milliseconds.
    # NOTE(review): ``*=`` scales the (possibly sliced view of the) times
    # array in place, so the caller's array may be mutated -- confirm
    # callers always pass a throwaway copy.
    times *= 1e3
    if dB:
        # Power in dB; ``data * data.conj()`` also handles complex input.
        data = 10 * np.log10((data * data.conj()).real)

    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
    return data, times, freqs, vmin, vmax
class AverageTFR(ContainsMixin, UpdateChannelsMixin):
"""Container for Time-Frequency data
Can for example store induced power at sensor level or intertrial
coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None
Comment on the data, e.g., the experimental condition.
Defaults to None.
method : str | None
Comment on the method used to compute the data, e.g., morlet wavelet.
Defaults to None.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
ch_names : list
The names of the channels.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, comment=None,
method=None, verbose=None):
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = np.asarray(times)
self.freqs = np.asarray(freqs)
self.nave = nave
self.comment = comment
self.method = method
    @property
    def ch_names(self):
        """List of channel names, read from ``info``."""
        return self.info['ch_names']
def crop(self, tmin=None, tmax=None, copy=None):
"""Crop data to a given time interval
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
copy : bool
This parameter has been deprecated and will be removed in 0.13.
Use inst.copy() instead.
Whether to return a new instance or modify in place.
Returns
-------
inst : instance of AverageTFR
The modified instance.
"""
inst = _check_copy_dep(self, copy)
mask = _time_mask(inst.times, tmin, tmax, sfreq=self.info['sfreq'])
inst.times = inst.times[mask]
inst.data = inst.data[:, :, mask]
return inst
@verbose
def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
cmap='RdBu_r', dB=False, colorbar=True, show=True,
title=None, axes=None, layout=None, verbose=None):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal ot (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The mininum value an the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
The maxinum value an the color scale. If vmax is None, the data
maximum value is used.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot. For user defined axes,
the colorbar cannot be drawn. Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channels. If instance of Axes,
there must be only one channel plotted.
layout : Layout | None
Layout instance specifying sensor positions. Used for interactive
plotting of topographies on rectangle selection. If possible, the
correct layout is inferred from the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr
import matplotlib.pyplot as plt
times, freqs = self.times.copy(), self.freqs.copy()
info = self.info
data = self.data
n_picks = len(picks)
info, data, picks = _prepare_picks(info, data, picks)
data = data[picks]
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB, info['sfreq'])
tmin, tmax = times[0], times[-1]
if isinstance(axes, plt.Axes):
axes = [axes]
if isinstance(axes, list) or isinstance(axes, np.ndarray):
if len(axes) != n_picks:
raise RuntimeError('There must be an axes for each picked '
'channel.')
for idx in range(len(data)):
if axes is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes[idx]
fig = ax.get_figure()
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode, layout=layout)
_imshow_tfr(ax, 0, tmin, tmax, vmin, vmax, onselect_callback,
ylim=None, tfr=data[idx: idx + 1], freq=freqs,
x_label='Time (ms)', y_label='Frequency (Hz)',
colorbar=colorbar, picker=False, cmap=cmap)
if title:
fig.suptitle(title)
colorbar = False # only one colorbar for multiple axes
plt_show(show)
return fig
def _onselect(self, eclick, erelease, baseline, mode, layout):
"""Callback function called by rubber band selector in channel tfr."""
import matplotlib.pyplot as plt
from ..viz import plot_tfr_topomap
if abs(eclick.x - erelease.x) < .1 or abs(eclick.y - erelease.y) < .1:
return
plt.ion() # turn interactive mode on
tmin = round(min(eclick.xdata, erelease.xdata) / 1000., 5) # ms to s
tmax = round(max(eclick.xdata, erelease.xdata) / 1000., 5)
fmin = round(min(eclick.ydata, erelease.ydata), 5) # Hz
fmax = round(max(eclick.ydata, erelease.ydata), 5)
tmin = min(self.times, key=lambda x: abs(x - tmin)) # find closest
tmax = min(self.times, key=lambda x: abs(x - tmax))
fmin = min(self.freqs, key=lambda x: abs(x - fmin))
fmax = min(self.freqs, key=lambda x: abs(x - fmax))
if tmin == tmax or fmin == fmax:
logger.info('The selected area is too small. '
'Select a larger time-frequency window.')
return
types = list()
if 'eeg' in self:
types.append('eeg')
if 'mag' in self:
types.append('mag')
if 'grad' in self:
types.append('grad')
fig = figure_nobar()
fig.suptitle('{:.2f} s - {:.2f} s, {:.2f} Hz - {:.2f} Hz'.format(tmin,
tmax,
fmin,
fmax),
y=0.04)
for idx, ch_type in enumerate(types):
ax = plt.subplot(1, len(types), idx + 1)
plot_tfr_topomap(self, ch_type=ch_type, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, layout=layout,
baseline=baseline, mode=mode, cmap=None,
title=ch_type, vmin=None, vmax=None,
axes=ax)
def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
layout=None, cmap='RdBu_r', title=None, dB=False,
colorbar=True, layout_scale=0.945, show=True,
border='none', fig_facecolor='k', font_color='w'):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal ot (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The mininum value an the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
The maxinum value an the color scale. If vmax is None, the data
maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
title : str
Title of the figure.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
show : bool
Call pyplot.show() at the end.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color: str | obj
The color of tick labels in the colorbar. Defaults to white.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr, _plot_topo, _imshow_tfr_unified
times = self.times.copy()
freqs = self.freqs
data = self.data
info = self.info
info, data, picks = _prepare_picks(info, data, picks)
data = data[picks]
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, dB, info['sfreq'])
if layout is None:
from mne import find_layout
layout = find_layout(self.info)
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode, layout=layout)
click_fun = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap,
onselect=onselect_callback)
imshow = partial(_imshow_tfr_unified, tfr=data, freq=freqs, cmap=cmap,
onselect=onselect_callback)
fig = _plot_topo(info=info, times=times, show_func=imshow,
click_func=click_fun, layout=layout,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title, border=border,
x_label='Time (ms)', y_label='Frequency (Hz)',
fig_facecolor=fig_facecolor, font_color=font_color,
unified=True, img=True)
plt_show(show)
return fig
    def _check_compat(self, tfr):
        """checks that self and tfr have the same time-frequency ranges"""
        # Both axes must match element-wise for the arithmetic operators
        # (+, -, +=, -=) below to be meaningful.
        assert np.all(tfr.times == self.times)
        assert np.all(tfr.freqs == self.freqs)
    def __add__(self, tfr):
        """Return a new AverageTFR whose data is the element-wise sum."""
        self._check_compat(tfr)
        out = self.copy()
        out.data += tfr.data
        return out
    def __iadd__(self, tfr):
        """Add another TFR's data to this instance in place."""
        self._check_compat(tfr)
        self.data += tfr.data
        return self
    def __sub__(self, tfr):
        """Return a new AverageTFR whose data is the element-wise difference."""
        self._check_compat(tfr)
        out = self.copy()
        out.data -= tfr.data
        return out
    def __isub__(self, tfr):
        """Subtract another TFR's data from this instance in place."""
        self._check_compat(tfr)
        self.data -= tfr.data
        return self
    def copy(self):
        """Return a copy of the instance."""
        # deepcopy so data/info of the copy are independent of the original
        return deepcopy(self)
    def __repr__(self):
        """Summarize time/frequency range, nave and channel count."""
        s = "time : [%f, %f]" % (self.times[0], self.times[-1])
        s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
        s += ", nave : %d" % self.nave
        s += ', channels : %d' % self.data.shape[0]
        return "<AverageTFR | %s>" % s
@verbose
def apply_baseline(self, baseline, mode='mean', verbose=None):
"""Baseline correct the data
Parameters
----------
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
If None, baseline no correction will be performed.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
self.data = rescale(self.data, self.times, baseline, mode,
copy=False)
def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean',
layout=None, vmin=None, vmax=None, cmap=None,
sensors=True, colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head', head_pos=None):
"""Plot topographic maps of time-frequency intervals of TFR data
Parameters
----------
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
If None, baseline no correction will be performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
vmin : float | callable | None
The value specifying the lower bound of the color range. If None,
and vmax is None, -vmax is used. Else np.min(data) or in case
data contains only positive values 0. If callable, the output
equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None,
the maximum value is used. If callable, the output equals
vmax(data). Defaults to None.
cmap : matplotlib colormap | None
Colormap. If None and the plotted data is all positive, defaults to
'Reds'. If None and data contains also negative values, defaults to
'RdBu_r'. Defaults to None.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Call pyplot.show() at the end.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz import plot_tfr_topomap
return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, ch_type=ch_type, baseline=baseline,
mode=mode, layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, sensors=sensors, colorbar=colorbar,
unit=unit, res=res, size=size,
cbar_fmt=cbar_fmt, show_names=show_names,
title=title, axes=axes, show=show,
outlines=outlines, head_pos=head_pos)
    def save(self, fname, overwrite=False):
        """Save TFR object to hdf5 file

        Parameters
        ----------
        fname : str
            The file name, which should end with -tfr.h5 .
        overwrite : bool
            If True, overwrite file (if it exists). Defaults to False.

        See Also
        --------
        write_tfrs, read_tfrs
        """
        write_tfrs(fname, self, overwrite=overwrite)
def _prepare_write_tfr(tfr, condition):
    """Return a ``(condition, state-dict)`` pair for HDF5 serialization."""
    state = dict(times=tfr.times, freqs=tfr.freqs, data=tfr.data,
                 info=tfr.info, nave=tfr.nave, comment=tfr.comment,
                 method=tfr.method)
    return condition, state
def write_tfrs(fname, tfr, overwrite=False):
    """Write a TFR dataset to hdf5.

    Parameters
    ----------
    fname : string
        The file name, which should end with -tfr.h5
    tfr : AverageTFR instance, or list of AverageTFR instances
        The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is not None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed
    overwrite : bool
        If True, overwrite file (if it exists). Defaults to False.

    See Also
    --------
    read_tfrs

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    tfrs = tfr if isinstance(tfr, (list, tuple)) else [tfr]
    # A missing comment is replaced by the list position as condition name.
    out = [_prepare_write_tfr(one, condition=(index if one.comment is None
                                              else one.comment))
           for index, one in enumerate(tfrs)]
    write_hdf5(fname, out, overwrite=overwrite, title='mnepython')
def read_tfrs(fname, condition=None):
    """Read TFR datasets from hdf5 file.

    Parameters
    ----------
    fname : string
        The file name, which should end with -tfr.h5 .
    condition : int or str | list of int or str | None
        The condition to load. If None, all conditions will be returned.
        Defaults to None.

    Returns
    -------
    tfrs : list of instances of AverageTFR | instance of AverageTFR
        Depending on `condition` either the TFR object or a list of multiple
        TFR objects.

    See Also
    --------
    write_tfrs

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    check_fname(fname, 'tfr', ('-tfr.h5',))
    logger.info('Reading %s ...' % fname)
    tfr_data = read_hdf5(fname, title='mnepython')
    # The stored info dict must be restored to a proper Info instance.
    for k, tfr in tfr_data:
        tfr['info'] = Info(tfr['info'])
    if condition is not None:
        tfr_dict = dict(tfr_data)
        if condition not in tfr_dict:
            keys = ['%s' % k for k in tfr_dict]
            # Bug fix: message previously ended with a stray extra double
            # quote ('"{1}""').
            raise ValueError('Cannot find condition ("{0}") in this file. '
                             'I can give you "{1}"'
                             .format(condition, " or ".join(keys)))
        out = AverageTFR(**tfr_dict[condition])
    else:
        out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]]
    return out
@verbose
def tfr_morlet(inst, freqs, n_cycles, use_fft=False, return_itc=True, decim=1,
               n_jobs=1, picks=None, verbose=None):
    """Compute Time-Frequency Representation (TFR) using Morlet wavelets

    Parameters
    ----------
    inst : Epochs | Evoked
        The epochs or evoked object.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        The number of cycles globally or for each frequency.
    use_fft : bool
        Use FFT-based convolution if True.
    return_itc : bool
        Return intertrial coherence (ITC) as well as averaged power.
        Must be ``False`` for evoked data.
    decim : int | slice
        Decimation factor applied after the time-frequency decomposition
        to reduce memory usage (``tfr[..., ::decim]`` for an int,
        ``tfr[..., decim]`` for a slice). Note that decimation may create
        aliasing artifacts. Defaults to 1.
    n_jobs : int
        The number of jobs to run in parallel.
    picks : array-like of int | None
        The indices of the channels to use. If None all available
        channels are used.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    power : instance of AverageTFR
        The averaged power.
    itc : instance of AverageTFR
        The intertrial coherence (ITC). Only returned if return_itc
        is True.

    See Also
    --------
    tfr_multitaper, tfr_stockwell
    """
    decim = _check_decim(decim)
    data = _get_data(inst, return_itc)
    info, data, picks = _prepare_picks(inst.info, data, picks)
    data = data[:, picks, :]
    power, itc = _induced_power_cwt(data, sfreq=info['sfreq'],
                                    frequencies=freqs,
                                    n_cycles=n_cycles, n_jobs=n_jobs,
                                    use_fft=use_fft, decim=decim,
                                    zero_mean=True)
    times = inst.times[decim].copy()
    nave = len(data)
    power_out = AverageTFR(info, power, times, freqs, nave,
                           method='morlet-power')
    if not return_itc:
        return power_out
    return power_out, AverageTFR(info, itc, times, freqs, nave,
                                 method='morlet-itc')
def _prepare_picks(info, data, picks):
    """Resolve ``picks`` and subset ``info`` accordingly.

    Returns ``(info, data, picks)`` where picks is ``slice(None)`` when
    every channel is selected (so callers can index cheaply).
    """
    if picks is None:
        picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
                           exclude='bads')
    if np.array_equal(picks, np.arange(len(data))):
        return info, data, slice(None)
    return pick_info(info, picks), data, picks
@verbose
def _induced_power_mtm(data, sfreq, frequencies, time_bandwidth=4.0,
                       use_fft=True, n_cycles=7, decim=1, n_jobs=1,
                       zero_mean=True, verbose=None):
    """Compute time induced power and inter-trial phase-locking factor
    The time frequency decomposition is done with DPSS wavelets
    Parameters
    ----------
    data : np.ndarray, shape (n_epochs, n_channels, n_times)
        The input data.
    sfreq : float
        Sampling frequency.
    frequencies : np.ndarray, shape (n_frequencies,)
        Array of frequencies of interest
    time_bandwidth : float
        Time x (Full) Bandwidth product.
        The number of good tapers (low-bias) is chosen automatically based on
        this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers).
    use_fft : bool
        Compute transform with fft based convolutions or temporal
        convolutions. Defaults to True.
    n_cycles : float | np.ndarray shape (n_frequencies,)
        Number of cycles. Fixed number or one per frequency. Defaults to 7.
    decim : int | slice
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice` returns tfr[..., decim].
        Note that decimation may create aliasing artifacts.
        Defaults to 1.
    n_jobs : int
        The number of CPUs used in parallel. All CPUs are used in -1.
        Requires joblib package. Defaults to 1.
    zero_mean : bool
        Make sure the wavelets are zero mean. Defaults to True.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    Returns
    -------
    power : np.ndarray, shape (n_channels, n_frequencies, n_times)
        Induced power. Squared amplitude of time-frequency coefficients.
    itc : np.ndarray, shape (n_channels, n_frequencies, n_times)
        Phase locking value.
    """
    decim = _check_decim(decim)
    # n_times is the *decimated* length, matching the output arrays below.
    n_epochs, n_channels, n_times = data[:, :, decim].shape
    logger.info('Data is %d trials and %d channels', n_epochs, n_channels)
    n_frequencies = len(frequencies)
    logger.info('Multitaper time-frequency analysis for %d frequencies',
                n_frequencies)
    # Precompute wavelets for given frequency range to save time
    Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
                       time_bandwidth=time_bandwidth, zero_mean=zero_mean)
    n_taps = len(Ws)
    logger.info('Using %d tapers', n_taps)
    n_times_wavelets = Ws[0][0].shape[0]
    if data.shape[2] <= n_times_wavelets:
        warn('Time windows are as long or longer than the epoch. Consider '
             'reducing n_cycles.')
    psd = np.zeros((n_channels, n_frequencies, n_times))
    itc = np.zeros((n_channels, n_frequencies, n_times))
    parallel, my_time_frequency, _ = parallel_func(_time_frequency,
                                                   n_jobs)
    # Accumulate power/ITC across tapers; each taper's contribution is
    # computed per channel in parallel ...
    for m in range(n_taps):
        psd_itc = parallel(my_time_frequency(data[:, c, :], Ws[m], use_fft,
                                             decim)
                           for c in range(n_channels))
        for c, (psd_c, itc_c) in enumerate(psd_itc):
            psd[c, :, :] += psd_c
            itc[c, :, :] += itc_c
    # ... then average over tapers.
    psd /= n_taps
    itc /= n_taps
    return psd, itc
@verbose
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0,
                   use_fft=True, return_itc=True, decim=1,
                   n_jobs=1, picks=None, verbose=None):
    """Compute Time-Frequency Representation (TFR) using DPSS wavelets

    Parameters
    ----------
    inst : Epochs | Evoked
        The epochs or evoked object.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    n_cycles : float | ndarray, shape (n_freqs,)
        The number of cycles globally or for each frequency.
        The time-window length is thus T = n_cycles / freq.
    time_bandwidth : float, (optional)
        Time x (Full) Bandwidth product. Should be >= 2.0.
        Choose this along with n_cycles to get desired frequency resolution.
        The number of good tapers (least leakage from far away frequencies)
        is chosen automatically based on this to floor(time_bandwidth - 1).
        Default is 4.0 (3 good tapers).
        E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
        If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
    use_fft : bool
        The fft based convolution or not. Defaults to True.
    return_itc : bool
        Return intertrial coherence (ITC) as well as averaged power.
        Defaults to True.
    decim : int | slice
        Decimation factor applied after the time-frequency decomposition
        to reduce memory usage (``tfr[..., ::decim]`` for an int,
        ``tfr[..., decim]`` for a slice). Note that decimation may create
        aliasing artifacts. Defaults to 1.
    n_jobs : int
        The number of jobs to run in parallel. Defaults to 1.
    picks : array-like of int | None
        The indices of the channels to use. If None all available
        channels are used.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    power : AverageTFR
        The averaged power.
    itc : AverageTFR
        The intertrial coherence (ITC). Only returned if return_itc
        is True.

    See Also
    --------
    tfr_morlet, tfr_stockwell

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    decim = _check_decim(decim)
    data = _get_data(inst, return_itc)
    info = inst.info
    info, data, picks = _prepare_picks(info, data, picks)
    data = data[:, picks, :]  # bug fix: was ``data = data = data[...]``
    # Bug fix: verbose was hardcoded to 'INFO', ignoring the documented
    # ``verbose`` parameter; forward it instead.
    power, itc = _induced_power_mtm(data, sfreq=info['sfreq'],
                                    frequencies=freqs, n_cycles=n_cycles,
                                    time_bandwidth=time_bandwidth,
                                    use_fft=use_fft, decim=decim,
                                    n_jobs=n_jobs, zero_mean=True,
                                    verbose=verbose)
    times = inst.times[decim].copy()
    nave = len(data)
    # Bug fix: method labels were misspelled as 'mutlitaper-*'.
    out = AverageTFR(info, power, times, freqs, nave,
                     method='multitaper-power')
    if return_itc:
        out = (out, AverageTFR(info, itc, times, freqs, nave,
                               method='multitaper-itc'))
    return out
def combine_tfr(all_tfr, weights='nave'):
    """Merge AverageTFR data by weighted addition

    Create a new AverageTFR instance, using a combination of the supplied
    instances as its data. By default, the mean (weighted by trials) is used.
    Subtraction can be performed by passing negative weights (e.g., [1, -1]).
    Data must have the same channels and the same time instants.

    Parameters
    ----------
    all_tfr : list of AverageTFR
        The tfr datasets.
    weights : list of float | str
        The weights to apply to the data of each AverageTFR instance.
        Can also be ``'nave'`` to weight according to tfr.nave,
        or ``'equal'`` to use equal weighting (each weighted as ``1/N``).

    Returns
    -------
    tfr : AverageTFR
        The new TFR data.

    Raises
    ------
    ValueError
        If the weights are invalid, or the instances do not share the same
        channels or time instants.

    Notes
    -----
    .. versionadded:: 0.11.0
    """
    tfr = all_tfr[0].copy()
    if isinstance(weights, string_types):
        if weights not in ('nave', 'equal'):
            raise ValueError('Weights must be a list of float, or "nave" or '
                             '"equal"')
        if weights == 'nave':
            weights = np.array([e.nave for e in all_tfr], float)
            weights /= weights.sum()
        else:  # == 'equal'
            weights = [1. / len(all_tfr)] * len(all_tfr)
    weights = np.array(weights, float)
    if weights.ndim != 1 or weights.size != len(all_tfr):
        raise ValueError('Weights must be the same size as all_tfr')
    ch_names = tfr.ch_names
    for t_ in all_tfr[1:]:
        # Bug fix: these compatibility checks used
        # ``assert cond, ValueError(...)`` which is stripped under
        # ``python -O`` and raised AssertionError otherwise; raise the
        # intended ValueError explicitly.
        if t_.ch_names != ch_names:
            raise ValueError("%s and %s do not contain "
                             "the same channels" % (tfr, t_))
        if np.max(np.abs(t_.times - tfr.times)) >= 1e-7:
            raise ValueError("%s and %s do not contain the same time instants"
                             % (tfr, t_))
    # use union of bad channels
    bads = list(set(tfr.info['bads']).union(*(t_.info['bads']
                                              for t_ in all_tfr[1:])))
    tfr.info['bads'] = bads
    tfr.data = sum(w * t_.data for w, t_ in zip(weights, all_tfr))
    # Effective number of averages of the weighted combination, floored at 1.
    tfr.nave = max(int(1. / sum(w ** 2 / e.nave
                                for w, e in zip(weights, all_tfr))), 1)
    return tfr
def _check_decim(decim):
    """Validate ``decim`` and normalize it to a slice.

    Parameters
    ----------
    decim : int | slice
        Decimation specification; an int ``d`` becomes ``slice(None, None, d)``.

    Returns
    -------
    decim : slice
        The normalized decimation slice.

    Raises
    ------
    TypeError
        If ``decim`` is neither an int nor a slice.
    """
    if isinstance(decim, int):
        return slice(None, None, decim)
    if not isinstance(decim, slice):
        # Bug fix: the original ``raise(TypeError, msg)`` raised a tuple,
        # which is invalid in Python 3 and loses the message.
        raise TypeError('`decim` must be int or slice, got %s instead'
                        % type(decim))
    return decim
| {
"repo_name": "wronk/mne-python",
"path": "mne/time_frequency/tfr.py",
"copies": "1",
"size": "57568",
"license": "bsd-3-clause",
"hash": -12549370799085928,
"line_mean": 37.7663299663,
"line_max": 79,
"alpha_frac": 0.5754585881,
"autogenerated": false,
"ratio": 3.8831703204047217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9958628908504722,
"avg_score": 0,
"num_lines": 1485
} |
# A module which manages a queue of images to render in the background
# and a UI for the same
import copy
import gobject
import gtk
import gtkfractal
import dialog
import preferences
class QueueEntry:
    """One pending render job: a fractal plus output filename and size."""
    def __init__(self, f, name, w, h):
        # keep everything needed to (re)start this render later
        self.f, self.name, self.w, self.h = f, name, w, h
# the underlying queue object
class T(gobject.GObject):
    """Background render queue.

    Renders one image at a time via gtkfractal.HighResolution, emitting
    'changed' when the queue contents change, 'progress-changed' while
    rendering and 'done' when the queue empties.
    """
    __gsignals__ = {
        'done' : (
            (gobject.SIGNAL_RUN_FIRST | gobject.SIGNAL_NO_RECURSE),
            gobject.TYPE_NONE, ()),
        'changed' : (
            (gobject.SIGNAL_RUN_FIRST | gobject.SIGNAL_NO_RECURSE),
            gobject.TYPE_NONE, ()),
        'progress-changed' : (
            (gobject.SIGNAL_RUN_FIRST | gobject.SIGNAL_NO_RECURSE),
            gobject.TYPE_NONE, (gobject.TYPE_FLOAT,))
    }
    def __init__(self):
        gobject.GObject.__init__(self)
        self.queue = []
        self.current = None
    def add(self, f, name, w, h):
        """Snapshot fractal *f* and append it as a pending job."""
        self.queue.append(QueueEntry(copy.copy(f), name, w, h))
        self.emit('changed')
    def start(self):
        """Kick off rendering unless a job is already running."""
        if self.current is None:
            self.next()
    def empty(self):
        return self.queue == []
    def next(self):
        """Start the job at the head of the queue, or signal 'done'."""
        if self.empty():
            self.current = None
            self.emit('done')
            return
        entry = self.queue[0]
        self.current = gtkfractal.HighResolution(
            entry.f.compiler, entry.w, entry.h)
        self.current.set_fractal(entry.f)
        self.current.connect('status-changed', self.onImageComplete)
        self.current.connect('progress-changed', self.onProgressChanged)
        self.current.set_nthreads(
            preferences.userPrefs.getint("general", "threads"))
        self.current.draw_image(entry.name)
    def onImageComplete(self, f, status):
        # status 0 == render finished; pop it and move to the next job
        if status == 0:
            self.queue.pop(0)
            self.emit('changed')
            self.next()
    def onProgressChanged(self, f, progress):
        # forward the renderer's progress to our own signal
        self.emit('progress-changed', progress)
# explain our existence to GTK's object system
# (required so the custom signals declared in __gsignals__ are created)
gobject.type_register(T)
def show(parent, alt_parent, f):
    """Open (or reveal) the render-queue dialog."""
    QueueDialog.show(parent, alt_parent, f)
instance = T()
class CellRendererProgress(gtk.GenericCellRenderer):
    """A pygtk cell renderer drawing a horizontal progress bar.

    Exposes a single ``progress`` gobject property in the range 0.0-100.0.
    """
    __gproperties__ = {
        "progress": (gobject.TYPE_FLOAT, "Progress",
                     "Progress (0.0-100.0)", 0.0, 100.0, 0,
                     gobject.PARAM_READWRITE),
    }
    def __init__(self):
        self.__gobject_init__()
        self.progress = 0.0
    def do_set_property(self, pspec, value):
        # generic property support: store under the gobject property name
        setattr(self, pspec.name, value)
    def do_get_property(self, pspec):
        return getattr(self, pspec.name)
    def on_render(self, window, widget, background_area,
                  cell_area, expose_area, flags):
        # Draw the sunken trough covering the whole cell ...
        x_offset, y_offset, width, height = self.on_get_size(widget, cell_area)
        widget.style.paint_box(window, gtk.STATE_NORMAL, gtk.SHADOW_IN,
                               None, widget, "",
                               cell_area.x+x_offset, cell_area.y+y_offset,
                               width, height)
        xt = widget.style.xthickness
        xpad = self.get_property("xpad")
        # ... then the raised bar, scaled horizontally by self.progress.
        space = (width-2*xt-2*xpad)*(self.progress/100.)
        widget.style.paint_box(window, gtk.STATE_PRELIGHT, gtk.SHADOW_OUT,
                               None, widget, "bar",
                               cell_area.x+x_offset+xt,
                               cell_area.y+y_offset+xt,
                               int(space), height-2*xt)
    def on_get_size(self, widget, cell_area):
        """Return (x_offset, y_offset, width, height) for the cell."""
        xpad = self.get_property("xpad")
        ypad = self.get_property("ypad")
        if cell_area:
            width = cell_area.width
            height = cell_area.height
            x_offset = xpad
            y_offset = ypad
        else:
            # no cell area yet: fall back to requested or default size
            width = self.get_property("width")
            height = self.get_property("height")
            if width == -1: width = 100
            if height == -1: height = 30
            width += xpad*2
            height += ypad*2
            x_offset = 0
            y_offset = 0
        return x_offset, y_offset, width, height
gobject.type_register(CellRendererProgress)
class QueueDialog(dialog.T):
    """Dialog listing the pending render jobs with their progress."""

    @staticmethod
    def show(parent, alt_parent, f):
        dialog.T.reveal(QueueDialog, True, parent, alt_parent, f)

    def __init__(self, main_window, f):
        dialog.T.__init__(
            self,
            _("Render Queue"),
            main_window,
            gtk.DIALOG_DESTROY_WITH_PARENT,
            (gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
        self.main_window = main_window
        # mirror the shared module-level queue
        self.q = instance
        self.q.connect('changed', self.onQueueChanged)
        self.q.connect('progress-changed', self.onProgressChanged)
        self.controls = gtk.VBox()
        # columns: name, size string, percent complete
        self.store = gtk.ListStore(
            gobject.TYPE_STRING,
            gobject.TYPE_STRING,
            gobject.TYPE_FLOAT,
        )
        self.view = gtk.TreeView(self.store)
        columns = (
            (_('_Name'), gtk.CellRendererText(), 'text'),
            (_('_Size'), gtk.CellRendererText(), 'text'),
            (_('_Progress'), CellRendererProgress(), 'progress'),
        )
        for pos, (label, renderer, attr) in enumerate(columns):
            self.view.append_column(
                gtk.TreeViewColumn(label, renderer, **{attr: pos}))
        self.controls.add(self.view)
        self.vbox.add(self.controls)

    def onQueueChanged(self, q):
        """Rebuild the list so it mirrors the queue contents."""
        self.store.clear()
        for item in self.q.queue:
            self.store.append((item.name, "%dx%d" % (item.w, item.h), 0.0))

    def onProgressChanged(self, f, progress):
        """Update the progress cell of the job at the head of the queue."""
        row = self.store.get_iter_first()
        if row:
            self.store.set_value(row, 2, progress)
| {
"repo_name": "ericchill/gnofract4d",
"path": "fract4dgui/renderqueue.py",
"copies": "1",
"size": "5931",
"license": "bsd-3-clause",
"hash": 5025205982180082000,
"line_mean": 29.4153846154,
"line_max": 84,
"alpha_frac": 0.553532288,
"autogenerated": false,
"ratio": 3.725502512562814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9710436921375359,
"avg_score": 0.013719575837490797,
"num_lines": 195
} |
'''A module which models finite state machines/automata.'''
from util.weighted_digraph import WeightedDigraph
import util.dot
class Automaton(WeightedDigraph):
    """A finite automaton built on top of a weighted digraph.

    Each state may have at most one outgoing transition per symbol.  Since
    a state may have *no* transition on some symbol, this is technically
    not a DFA.
    """

    def add_transition(self, source, symbol, dest):
        """Add the transition source --symbol--> dest.

        Both endpoint states are created if missing; an existing
        transition on the same symbol at source is overwritten.
        """
        self.add_edge(source, symbol, dest)

    def has_transition(self, source, symbol):
        """Return whether source has an outgoing transition on symbol."""
        return self.has_edge(source, symbol)

    def next_state(self, source, symbol):
        """Return the state reached from source on symbol."""
        return self.get_edge(source, symbol)

    def add_state(self, s):
        """Add a state to the automaton."""
        self.add_vertex(s)

    @property
    def states(self):
        """The set of all states appearing in the automaton."""
        found = set(self._edges.keys())
        for src, _, dst in self.transitions:
            found.update((src, dst))
        return found

    @property
    def transitions(self):
        """All transitions as (source, symbol, destination) triples."""
        return self.edges

    def _dot_str(self, tostring, shape):
        """Render the automaton in DOT syntax, labelling each state with
        the callable *tostring*."""
        node_lines = ['q%s [label=%s]' % (id(q), tostring(q))
                      for q in self.states]
        edge_lines = ['q%s -> q%s [label="%s"]'
                      % (id(qi), id(qj), util.dot.escape(str(a)))
                      for qi, a, qj in self.transitions]
        return '''\
digraph {
node [shape=%s];
%s
}
''' % (shape, ';\n\t'.join(node_lines + edge_lines))

    def dot_str(self, shape='circle'):
        """DOT source with plain-text (escaped, quoted) state labels."""
        return self._dot_str(
            lambda s: '"%s"' % util.dot.escape(self.state_dot_label(s)),
            shape)

    def dot_html(self, shape='circle'):
        """DOT source with HTML state labels."""
        return self._dot_str(
            lambda s: '<%s>' % self.state_dot_html_label(s), shape)

    def state_dot_label(self, s):
        """Plain-text label for a state; subclasses may override."""
        return str(s)

    def state_dot_html_label(self, s):
        """HTML label for a state; subclasses may override."""
        return str(s)
| {
"repo_name": "bdusell/pycfg",
"path": "src/cfg/automaton.py",
"copies": "1",
"size": "2433",
"license": "mit",
"hash": 8230293499414346000,
"line_mean": 34.2608695652,
"line_max": 122,
"alpha_frac": 0.6214549938,
"autogenerated": false,
"ratio": 3.6586466165413536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47801016103413535,
"avg_score": null,
"num_lines": null
} |
"""A module with a few helpful diagnostic tools.
"""
import pandas as pd
def unique_summary(df, subset):
    """Given a dataframe and a subset of columns, print a summary of unique rows

    :param df: The dataframe to inspect.
    :type df: Dataframe.
    :param subset: The subset of columns to consider.
    :type subset: array-like.
    """
    total = len(df)
    unique = len(df.drop_duplicates(subset=subset))
    # Single-argument print() is valid in both Python 2 and 3 (the original
    # used Python-2-only print statements).
    print("Total rows: {}".format(total))
    print("Unique rows: {}".format(unique))
    print("Unaccounted-for rows: {}".format(total - unique))
def null_summary(df):
    """Given a dataframe, print a summary of null values for each column

    :param df: The dataframe to inspect.
    :type df: Dataframe.
    """
    for column in df.columns:
        # print() calls instead of Python-2-only print statements.
        print(column)
        print(count_nulls(df[column]))
def count_nulls(s):
    """Given a series, return a summary of null values for that series

    :param s: The series to inspect.
    :type s: Series.
    :returns: a markdown-ish string "**NaNs:** <count> / <percent>%"
    """
    # Compute the null mask once (the original evaluated it twice).
    count = int(pd.isnull(s).sum())
    # Guard against division by zero for an empty series.
    percent = 100.0 * count / len(s) if len(s) else 0.0
    return "**NaNs:** " + str(count) + " / " + str(int(percent)) + "%"
def generate_sankey(df, left, right):
    """Given a dataframe, a left column, and a right column, print the input for `SankeyMATIC <http://sankeymatic.com>`_.

    :param df: The dataframe to use.
    :type df: Dataframe.
    :param left: The left column to use.
    :type left: str.
    :param right: The right column to use.
    :type right: str.
    """
    grouped = df[[left, right]].dropna().groupby([left, right])
    # .items() works on Python 2 and 3; iteritems() was removed in Python 3.
    for group, rows in grouped.groups.items():
        print("{} [{}] {}".format(group[0], len(rows), group[1]))
| {
"repo_name": "dssg/homelessness-public",
"path": "diagnostics.py",
"copies": "2",
"size": "1677",
"license": "mit",
"hash": -3426860802498867700,
"line_mean": 28.9464285714,
"line_max": 121,
"alpha_frac": 0.6285032797,
"autogenerated": false,
"ratio": 3.5305263157894737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003099334558072335,
"num_lines": 56
} |
""" A module with classes for aggregation.
An Aggregator has two methods: process and finish.
process(group, val) is called to "add" val to the aggregation of the set of
values identified by the value of group. The value in group (which could be any
hashable type, also a tuple as ('A', 'B')) thus corresponds to the GROUP BY
attributes in SQL.
finish(group, default) is called to get the final result for group. If no such
results exists, default is returned.
"""
# Copyright (c) 2011, Christian Thomsen (chr@cs.aau.dk)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "Christian Thomsen"
__maintainer__ = "Christian Thomsen"
__version__ = '0.2.0'
__all__ = ['Aggregator', 'SimpleAggregator', 'Sum', 'Count', 'CountDistinct',
'Max', 'Min', 'Avg']
class Aggregator(object):
    """Abstract base class for aggregators.

    process(group, val) adds val to the aggregation for group;
    finish(group, default) returns the final result for group, or
    default if no result exists.
    """

    def process(self, group, val):
        """Add val to the aggregation of the values identified by group."""
        raise NotImplementedError

    def finish(self, group, default=None):
        """Return the final result for group, or default if absent."""
        raise NotImplementedError


class SimpleAggregator(Aggregator):
    """An Aggregator keeping one result per group in a dict."""

    def __init__(self):
        # Maps group key -> accumulated result.
        self._results = {}

    def process(self, group, val):
        pass

    def finish(self, group, default=None):
        return self._results.get(group, default)


class Sum(SimpleAggregator):
    """Aggregator computing the sum of the values in each group."""

    def process(self, group, val):
        tmp = self._results.get(group, 0)
        tmp += val
        self._results[group] = tmp


class Count(SimpleAggregator):
    """Aggregator counting the number of values in each group."""

    def process(self, group, val):
        tmp = self._results.get(group, 0)
        tmp += 1
        self._results[group] = tmp


class CountDistinct(SimpleAggregator):
    """Aggregator counting the number of distinct values in each group."""

    def process(self, group, val):
        if group not in self._results:
            self._results[group] = set()
        self._results[group].add(val)

    def finish(self, group, default=None):
        if group not in self._results:
            return default
        return len(self._results[group])


class Max(SimpleAggregator):
    """Aggregator computing the maximum value in each group."""

    def process(self, group, val):
        if group not in self._results:
            self._results[group] = val
        else:
            tmp = self._results[group]
            if val > tmp:
                self._results[group] = val


class Min(SimpleAggregator):
    """Aggregator computing the minimum value in each group."""

    def process(self, group, val):
        if group not in self._results:
            self._results[group] = val
        else:
            tmp = self._results[group]
            if val < tmp:
                self._results[group] = val


class Avg(Aggregator):
    """Aggregator computing the average of the values in each group."""

    def __init__(self):
        self.__sum = Sum()
        self.__count = Count()

    def process(self, group, val):
        self.__sum.process(group, val)
        self.__count.process(group, val)

    def finish(self, group, default=None):
        tmp = self.__sum.finish(group, None)
        if tmp is None:
            return default
        else:
            # Bug fix: the Count instance is not callable; its result must
            # be obtained via finish() (was: self.__count(group)).
            return float(tmp) / self.__count.finish(group)
| {
"repo_name": "mattharrison/pygrametl",
"path": "pygrametl/aggregators.py",
"copies": "2",
"size": "4071",
"license": "bsd-2-clause",
"hash": -2168604232220288800,
"line_mean": 31.568,
"line_max": 80,
"alpha_frac": 0.663718988,
"autogenerated": false,
"ratio": 4.124620060790273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5788339048790274,
"avg_score": null,
"num_lines": null
} |
"""A module with classes for aggregation.
An Aggregator has two methods: process and finish.
process(group, val) is called to "add" val to the aggregation of the set of
values identified by the value of group. The value in group (which could be
any hashable type, also a tuple as ('A', 'B')) thus corresponds to the
GROUP BY attributes in SQL.
finish(group, default) is called to get the final result for group.
If no such results exists, default is returned.
"""
# Copyright (c) 2011-2014, Aalborg University (chr@cs.aau.dk)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "Christian Thomsen"
__maintainer__ = "Christian Thomsen"
__version__ = '2.2'
__all__ = ['Aggregator', 'SimpleAggregator', 'Sum', 'Count', 'CountDistinct',
'Max', 'Min', 'Avg']
class Aggregator(object):
    """Abstract base class for aggregators.

    process(group, val) adds val to the aggregation for group;
    finish(group, default) returns the final result for group, or
    default if no result exists.
    """

    def process(self, group, val):
        """Add val to the aggregation of the values identified by group."""
        raise NotImplementedError

    def finish(self, group, default=None):
        """Return the final result for group, or default if absent."""
        raise NotImplementedError


class SimpleAggregator(Aggregator):
    """An Aggregator keeping one result per group in a dict."""

    def __init__(self):
        # Maps group key -> accumulated result.
        self._results = {}

    def process(self, group, val):
        pass

    def finish(self, group, default=None):
        return self._results.get(group, default)


class Sum(SimpleAggregator):
    """Aggregator computing the sum of the values in each group."""

    def process(self, group, val):
        tmp = self._results.get(group, 0)
        tmp += val
        self._results[group] = tmp


class Count(SimpleAggregator):
    """Aggregator counting the number of values in each group."""

    def process(self, group, val):
        tmp = self._results.get(group, 0)
        tmp += 1
        self._results[group] = tmp


class CountDistinct(SimpleAggregator):
    """Aggregator counting the number of distinct values in each group."""

    def process(self, group, val):
        if group not in self._results:
            self._results[group] = set()
        self._results[group].add(val)

    def finish(self, group, default=None):
        if group not in self._results:
            return default
        return len(self._results[group])


class Max(SimpleAggregator):
    """Aggregator computing the maximum value in each group."""

    def process(self, group, val):
        if group not in self._results:
            self._results[group] = val
        else:
            tmp = self._results[group]
            if val > tmp:
                self._results[group] = val


class Min(SimpleAggregator):
    """Aggregator computing the minimum value in each group."""

    def process(self, group, val):
        if group not in self._results:
            self._results[group] = val
        else:
            tmp = self._results[group]
            if val < tmp:
                self._results[group] = val


class Avg(Aggregator):
    """Aggregator computing the average of the values in each group."""

    def __init__(self):
        self.__sum = Sum()
        self.__count = Count()

    def process(self, group, val):
        self.__sum.process(group, val)
        self.__count.process(group, val)

    def finish(self, group, default=None):
        tmp = self.__sum.finish(group, None)
        if tmp is None:
            return default
        else:
            # Bug fix: the Count instance is not callable; its result must
            # be obtained via finish() (was: self.__count(group)).
            return float(tmp) / self.__count.finish(group)
| {
"repo_name": "haleemur/pygrametl-python3",
"path": "pygrametl/aggregators.py",
"copies": "1",
"size": "4092",
"license": "bsd-2-clause",
"hash": -302535253796786370,
"line_mean": 32.5409836066,
"line_max": 80,
"alpha_frac": 0.6612903226,
"autogenerated": false,
"ratio": 4.158536585365853,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002282432705929974,
"num_lines": 122
} |
"""A module with classes for aggregation.
An Aggregator has two methods: process and finish.
process(group, val) is called to "add" val to the aggregation of the set of
values identified by the value of group. The value in group (which could be
any hashable type, also a tuple as ('A', 'B')) thus corresponds to the
GROUP BY attributes in SQL.
finish(group, default) is called to get the final result for group.
If no such results exists, default is returned.
"""
# Copyright (c) 2011-2014, Aalborg University (chr@cs.aau.dk)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "Christian Thomsen"
__maintainer__ = "Christian Thomsen"
__version__ = '2.2'
__all__ = ['Aggregator', 'SimpleAggregator', 'Sum', 'Count', 'CountDistinct',
'Max', 'Min', 'Avg']
class Aggregator(object):
    """Abstract base class for aggregators.

    process(group, val) adds val to the aggregation for group;
    finish(group, default) returns the final result for group, or
    default if no result exists.
    """

    def process(self, group, val):
        """Add val to the aggregation of the values identified by group."""
        raise NotImplementedError()

    def finish(self, group, default=None):
        """Return the final result for group, or default if absent."""
        raise NotImplementedError()


class SimpleAggregator(Aggregator):
    """An Aggregator keeping one result per group in a dict."""

    def __init__(self):
        # Maps group key -> accumulated result.
        self._results = {}

    def process(self, group, val):
        pass

    def finish(self, group, default=None):
        return self._results.get(group, default)


class Sum(SimpleAggregator):
    """Aggregator computing the sum of the values in each group."""

    def process(self, group, val):
        tmp = self._results.get(group, 0)
        tmp += val
        self._results[group] = tmp


class Count(SimpleAggregator):
    """Aggregator counting the number of values in each group."""

    def process(self, group, val):
        tmp = self._results.get(group, 0)
        tmp += 1
        self._results[group] = tmp


class CountDistinct(SimpleAggregator):
    """Aggregator counting the number of distinct values in each group."""

    def process(self, group, val):
        if group not in self._results:
            self._results[group] = set()
        self._results[group].add(val)

    def finish(self, group, default=None):
        if group not in self._results:
            return default
        return len(self._results[group])


class Max(SimpleAggregator):
    """Aggregator computing the maximum value in each group."""

    def process(self, group, val):
        if group not in self._results:
            self._results[group] = val
        else:
            tmp = self._results[group]
            if val > tmp:
                self._results[group] = val


class Min(SimpleAggregator):
    """Aggregator computing the minimum value in each group."""

    def process(self, group, val):
        if group not in self._results:
            self._results[group] = val
        else:
            tmp = self._results[group]
            if val < tmp:
                self._results[group] = val


class Avg(Aggregator):
    """Aggregator computing the average of the values in each group."""

    def __init__(self):
        self.__sum = Sum()
        self.__count = Count()

    def process(self, group, val):
        self.__sum.process(group, val)
        self.__count.process(group, val)

    def finish(self, group, default=None):
        tmp = self.__sum.finish(group, None)
        if tmp is None:
            return default
        else:
            # Bug fix: the Count instance is not callable; its result must
            # be obtained via finish() (was: self.__count(group)).
            return float(tmp) / self.__count.finish(group)
| {
"repo_name": "jpercent/pygrametl",
"path": "pygrametl/aggregators.py",
"copies": "2",
"size": "4098",
"license": "bsd-2-clause",
"hash": -8389048834220121000,
"line_mean": 30.2824427481,
"line_max": 80,
"alpha_frac": 0.6603221083,
"autogenerated": false,
"ratio": 4.168870803662259,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5829192911962259,
"avg_score": null,
"num_lines": null
} |
"""A module with commonly- and uncommonly-used feature sets for modeling.

To be used primarily with :mod:`pipeline`.
"""

# Identifier, bookkeeping, and raw columns excluded from modeling.
unused = ['ClientUniqueID',
          'ClientID',
          'HouseholdID',
          'Head Of Household?',
          'Relationship to HoH',
          'Entry Exit GroupID',
          'EntryID',
          'ProviderID',
          'ProgramEntryDate',
          'ProgramExitDate',
          'PrimaryRace',
          'Ethnicity',
          'Veteran?',
          'YearOfBirth',
          'AgeEntered',
          'AgeEnteredBucketDFSS',
          'SecondaryRace',
          'PrimaryLanguageSpoken',
          'ZipCodeOfLastPermanentAddress',
          'ZipDataQuality',
          'ExitReason',
          'Anonymous',
          'DateCreated',
          'DateUpdated',
          'Provider',
          'ProviderLevel',
          'ParentProvider',
          'CurrentlyOperational',
          'AddressLine1',
          'AddressLine2',
          'City',
          'State',
          'NumActiveUsers',
          'ProgramTypeCode',
          'AltProgramType',
          'Last30DayIncome',
          'ProgramEntryDateOfFirstEntry',
          'ProgramTypeOfFirstEntry',
          'ProgramEntryDateOfFirstEntryHomelessnessProgram',
          'ProgramTypeOfFirstEntryHomelessnessProgram',
          'DaysSinceFirstEntry',
          'DaysSinceFirstEntryHomelessnessProgram',
          'DaysSinceFirstEntryHomelessnessProgramBucket',
          'ValidZipCodeOfLastPermanentAddress?',
          'ProgramEntryDateReentry',
          'ProgramTypeReentry']

# Basic client demographics.
demographics = ['Race/Ethnicity (4-way)',
                'Veteran?Imputed',
                'Refused',
                'AgeEnteredBucket']

# Housing-history demographics.
housing_demographics = ['PreviousLivingSituation',
                        'LengthOfStayInPreviousLivingSituation',
                        'DaysSinceFirstEntryBucket']

# Household-composition indicator (boolean) features.
family_demographics = ['Family?',
                       'SingleAdult?',
                       'Children?',
                       'SingleAdultWithChildren?',
                       'Under 6 years?',
                       '6 to 17 years?',
                       '18 to 64 years?',
                       '65 years and over?']

# Household-composition count features.
family_counts = ['Under 6 years',
                 '6 to 17 years',
                 '18 to 64 years',
                 '65 years and over',
                 'OtherFamilyMembers']

# Columns describing the program exit.
exit_stuff = ['DestinationAtExit',
              'LengthOfStay']

# Program-level features.
program = ['ProgramType',
           'Reviewed?']

program_aggs = ['ProgramTypeAggregate',
                'HomelessnessProgram?']

program_location = ['Zip']

# Disability indicators recorded at program entry.
disabilities_entry = ['Alcohol Abuse (HUD 40118) Entry',
                      'Both alcohol and drug abuse (HUD 40118) Entry',
                      'Developmental (HUD 40118) Entry',
                      'Drug Abuse (HUD 40118) Entry',
                      'HIV/AIDS (HUD 40118) Entry',
                      'Mental Health Problem (HUD 40118) Entry',
                      'Physical (HUD 40118) Entry',
                      'Disabled? Entry']

# Disability indicators recorded at review.
disabilities_review = ['Alcohol Abuse (HUD 40118) Review',
                       'Both alcohol and drug abuse (HUD 40118) Review',
                       'Developmental (HUD 40118) Review',
                       'Drug Abuse (HUD 40118) Review',
                       'HIV/AIDS (HUD 40118) Review',
                       'Mental Health Problem (HUD 40118) Review',
                       'Physical (HUD 40118) Review',
                       'Disabled? Review']

disabilities_breakout = ['Mental Health Problem (HUD 40118)',
                         'Physical (HUD 40118)',
                         'Drug Abuse (HUD 40118)',
                         'HIV/AIDS (HUD 40118)',
                         'Alcohol Abuse (HUD 40118)',
                         'Both alcohol and drug abuse (HUD 40118)',
                         'Developmental (HUD 40118)']

disabilities = ['Disabled?']

# Income sources broken out by type.
income_breakout = ['Alimony or Other Spousal Support (HUD)',
                   'Child Support (HUD)',
                   'Contributions From Other People',
                   'Earned Income (HUD)',
                   'General Assistance (HUD)',
                   'Pension From a Former Job (HUD)',
                   'Private Disability Insurance (HUD)',
                   'Private Health Insurance',
                   'Rental Income',
                   'Retirement Disability',
                   'Retirement Income From Social Security (HUD)',
                   'SSDI (HUD)',
                   'SSI (HUD)',
                   'TANF (HUD)',
                   'Unemployment Insurance (HUD)',
                   "Veteran's Disability Payment (HUD)",
                   "Veteran's Pension (HUD)",
                   "Worker's Compensation (HUD)"]

income = ['Last30DayIncomeBucket']

# Income sources recorded at program exit.
income_exit = ['Alimony or Other Spousal Support (HUD) Exit',
               'Child Support (HUD) Exit',
               'Contributions From Other People Exit',
               'Dividends (Investments) Exit',
               'Earned Income (HUD) Exit',
               'General Assistance (HUD) Exit',
               'Pension From a Former Job (HUD) Exit',
               'Private Disability Insurance (HUD) Exit',
               'Private Health Insurance Exit',
               'Rental Income Exit',
               'Retirement Disability Exit',
               'Retirement Income From Social Security (HUD) Exit',
               'SSDI (HUD) Exit',
               'SSI (HUD) Exit',
               'TANF (HUD) Exit',
               'Unemployment Insurance (HUD) Exit',
               "Veteran's Disability Payment (HUD) Exit",
               "Veteran's Pension (HUD) Exit",
               "Worker's Compensation (HUD) Exit",
               'Last30DayIncome Exit',
               'EarnedIncomeExitChange',
               'CashIncomeExitChange']

income_outcomes = ['EarnedIncomeExitHas',
                   'CashIncomeExitHas']

# Non-cash benefits (NCB) broken out by type.
ncb_breakout = ['MEDICAID (HUD)',
                'MEDICARE (HUD)',
                'SCHIP (HUD)',
                'Section 8, Public Housing or rental assistance (HUD)',
                'Special Supplemental Nutrition Program for WIC (HUD)',
                'Supplemental Nutrition Assistance Program (Food Stamps) (HUD)',
                'Temporary rental assistance (HUD)',
                "Veteran's Administration (VA) Medical Services (HUD)"]

ncb = ['NCBIncomeHas']

# Non-cash benefits recorded at program exit.
ncb_exit = ['MEDICAID (HUD) Exit',
            'MEDICARE (HUD) Exit',
            'SCHIP (HUD) Exit',
            'Section 8, Public Housing or rental assistance (HUD) Exit',
            'Special Supplemental Nutrition Program for WIC (HUD) Exit',
            'Supplemental Nutrition Assistance Program (Food Stamps) (HUD) Exit',
            'Temporary rental assistance (HUD) Exit',
            "Veteran's Administration (VA) Medical Services (HUD) Exit"]

ncb_outcomes = ['NCBIncomeExitHas']

# Service codes broken out by type.
services_breakout = ['B Service',
                     'D Service',
                     'F Service',
                     'H Service',
                     'L Service',
                     'N Service',
                     'P Service',
                     'R Service',
                     'T Service']

services = ['Services?']

# Outcome / label columns.
housing_outcomes = ['CaseOutcome',
                    'CaseSuccess',
                    'Reentered6Month',
                    'Reentered12Month',
                    'Reentered6MonthFromPermanent',
                    'Reentered12MonthFromPermanent']

# A hand-picked reduced feature set.
reduced = ['ProgramType',
           'Reviewed?',
           'Veteran?Imputed',
           'PreviousLivingSituation',
           'DaysSinceFirstEntryBucket',
           'AgeEnteredBucket',
           'Last30DayIncomeBucket',
           'NCBIncomeHas']

# The default union of feature groups used for modeling.
all_sets = demographics + housing_demographics + family_counts + program + disabilities + income + ncb + services
| {
"repo_name": "math4youbyusgroupillinois/homelessness-public",
"path": "features.py",
"copies": "2",
"size": "7872",
"license": "mit",
"hash": -1727505506999504400,
"line_mean": 35.785046729,
"line_max": 113,
"alpha_frac": 0.5067327236,
"autogenerated": false,
"ratio": 4.057731958762886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5564464682362886,
"avg_score": null,
"num_lines": null
} |
"""A module with functions to aid generating MCOE."""
import pandas as pd
import pudl
def heat_rate_by_unit(pudl_out):
    """
    Calculate heat rates (mmBTU/MWh) within separable generation units.

    Assumes a "good" Boiler Generator Association (bga) i.e. one that only
    contains boilers and generators which have been completely associated at
    some point in the past.

    The BGA dataframe needs to have the following columns:

    - report_date (annual)
    - plant_id_eia
    - unit_id_pudl
    - generator_id
    - boiler_id

    The unit_id is associated with generation records based on report_date,
    plant_id_eia, and generator_id. Analogously, the unit_id is associtated
    with boiler fuel consumption records based on report_date, plant_id_eia,
    and boiler_id.

    Then the total net generation and fuel consumption per unit per time period
    are calculated, allowing the calculation of a per unit heat rate. That
    per unit heat rate is returned in a dataframe containing:

    - report_date
    - plant_id_eia
    - unit_id_pudl
    - net_generation_mwh
    - total_heat_content_mmbtu
    - heat_rate_mmbtu_mwh

    :raises ValueError: if pudl_out has no frequency set.
    """
    # pudl_out must have a freq, otherwise capacity factor will fail and merges
    # between tables with different frequencies will fail
    if pudl_out.freq is None:
        raise ValueError(
            "pudl_out must include a frequency for heat rate calculation")
    # Create a dataframe containing only the unit-generator mappings:
    bga_gens = pudl_out.bga()[['report_date',
                               'plant_id_eia',
                               'generator_id',
                               'unit_id_pudl']].drop_duplicates()
    gen = pudl_out.gen_eia923()
    # Merge those unit ids into the generation data:
    gen_w_unit = pudl.helpers.merge_on_date_year(
        gen, bga_gens, on=['plant_id_eia', 'generator_id'])
    # Sum up the net generation per unit for each time period:
    gen_gb = gen_w_unit.groupby(['report_date',
                                 'plant_id_eia',
                                 'unit_id_pudl'])
    # sum_na preserves NA semantics (all-NA groups stay NA) — see pudl.helpers.
    gen_by_unit = gen_gb.agg({'net_generation_mwh': pudl.helpers.sum_na})
    gen_by_unit = gen_by_unit.reset_index()
    # Create a dataframe containing only the unit-boiler mappings:
    bga_boils = pudl_out.bga()[['report_date', 'plant_id_eia',
                                'boiler_id', 'unit_id_pudl']].drop_duplicates()
    # Merge those unit ids into the boiler fuel consumption data:
    bf_w_unit = pudl.helpers.merge_on_date_year(
        pudl_out.bf_eia923(), bga_boils, on=['plant_id_eia', 'boiler_id'])
    # Sum up all the fuel consumption per unit for each time period:
    bf_gb = bf_w_unit.groupby(['report_date',
                               'plant_id_eia',
                               'unit_id_pudl'])
    bf_by_unit = bf_gb.agg({'total_heat_content_mmbtu': pudl.helpers.sum_na})
    bf_by_unit = bf_by_unit.reset_index()
    # Merge together the per-unit generation and fuel consumption data so we
    # can calculate a per-unit heat rate:
    hr_by_unit = pd.merge(gen_by_unit, bf_by_unit,
                          on=['report_date', 'plant_id_eia', 'unit_id_pudl'],
                          validate='one_to_one')
    hr_by_unit['heat_rate_mmbtu_mwh'] = \
        hr_by_unit.total_heat_content_mmbtu / hr_by_unit.net_generation_mwh
    return hr_by_unit
def heat_rate_by_gen(pudl_out):
    """Convert by-unit heat rate to by-generator, adding fuel type & count.

    Maps each generation unit's heat rate onto its member generators via
    the boiler-generator association, then annotates each generator with
    its primary fuel type and the plant's fuel-type count.

    :raises ValueError: if pudl_out has no frequency set.
    """
    # pudl_out must have a freq, otherwise capacity factor will fail and merges
    # between tables with different frequencies will fail
    if pudl_out.freq is None:
        raise ValueError(
            "pudl_out must include a frequency for heat rate calculation")
    # Unit -> generator mapping from the boiler-generator association.
    unit_to_gen = pudl_out.bga()[['report_date',
                                  'plant_id_eia',
                                  'unit_id_pudl',
                                  'generator_id']].drop_duplicates()
    # Per-unit heat rates; net generation and fuel consumption are dropped.
    unit_rates = pudl_out.hr_by_unit()[
        ['report_date', 'plant_id_eia', 'unit_id_pudl', 'heat_rate_mmbtu_mwh']]
    gen_rates = pudl.helpers.merge_on_date_year(
        unit_rates, unit_to_gen, on=['plant_id_eia', 'unit_id_pudl'])
    gen_rates = gen_rates.drop('unit_id_pudl', axis=1)
    # Attach generator fuel type & count from EIA 860.
    gen_info = pudl_out.gens_eia860()[
        ['report_date', 'plant_id_eia', 'generator_id',
         'fuel_type_code_pudl', 'fuel_type_count']]
    gen_rates = pudl.helpers.merge_on_date_year(
        gen_rates, gen_info, on=['plant_id_eia', 'generator_id'])
    return gen_rates
def fuel_cost(pudl_out):
    """
    Calculate fuel costs per MWh on a per generator basis for MCOE.

    Fuel costs are reported on a per-plant basis, but we want to estimate them
    at the generator level. This is complicated by the fact that some plants
    have several different types of generators, using different fuels. We have
    fuel costs broken out by type of fuel (coal, oil, gas), and we know which
    generators use which fuel based on their energy_source_code and reported
    prime_mover. Coal plants use a little bit of natural gas or diesel to get
    started, but based on our analysis of the "pure" coal plants, this amounts
    to only a fraction of a percent of their overall fuel consumption on a
    heat content basis, so we're ignoring it for now.

    For plants whose generators all rely on the same fuel source, we simply
    attribute the fuel costs proportional to the fuel heat content consumption
    associated with each generator.

    For plants with more than one type of generator energy source, we need to
    split out the fuel costs according to fuel type -- so the gas fuel costs
    are associated with generators that have energy_source_code gas, and the
    coal fuel costs are associated with the generators that have
    energy_source_code coal.

    :raises ValueError: if pudl_out has no frequency set.
    """
    # pudl_out must have a freq, otherwise capacity factor will fail and merges
    # between tables with different frequencies will fail
    if pudl_out.freq is None:
        raise ValueError(
            "pudl_out must include a frequency for fuel cost calculation")
    # Split up the plants on the basis of how many different primary energy
    # sources the component generators have:
    hr_by_gen = pudl_out.hr_by_gen()[['plant_id_eia',
                                      'report_date',
                                      'generator_id',
                                      'heat_rate_mmbtu_mwh']]
    gens = pudl_out.gens_eia860()[['plant_id_eia',
                                   'report_date',
                                   'plant_name_eia',
                                   'plant_id_pudl',
                                   'generator_id',
                                   'utility_id_eia',
                                   'utility_name_eia',
                                   'utility_id_pudl',
                                   'fuel_type_count',
                                   'fuel_type_code_pudl']]
    # We are inner merging here, which means that we don't get every generator
    # in this output... we only get the ones that show up in hr_by_gen.
    # See Issue #608
    gen_w_ft = pudl.helpers.merge_on_date_year(
        hr_by_gen, gens,
        on=['plant_id_eia', 'generator_id'],
        how='inner')
    one_fuel = gen_w_ft[gen_w_ft.fuel_type_count == 1]
    multi_fuel = gen_w_ft[gen_w_ft.fuel_type_count > 1]
    # Bring the single fuel cost & generation information together for just
    # the one fuel plants:
    one_fuel = pd.merge(one_fuel,
                        pudl_out.frc_eia923()[['plant_id_eia',
                                               'report_date',
                                               'fuel_cost_per_mmbtu',
                                               'fuel_type_code_pudl',
                                               'total_fuel_cost',
                                               'total_heat_content_mmbtu',
                                               'fuel_cost_from_eiaapi',
                                               ]],
                        how='left', on=['plant_id_eia', 'report_date'])
    # We need to retain the different energy_source_code information from the
    # generators (primary for the generator) and the fuel receipts (which is
    # per-delivery), and in the one_fuel case, there will only be a single
    # generator getting all of the fuels:
    one_fuel.rename(columns={'fuel_type_code_pudl_x': 'ftp_gen',
                             'fuel_type_code_pudl_y': 'ftp_frc'},
                    inplace=True)
    # Do the same thing for the multi fuel plants, but also merge based on
    # the different fuel types within the plant, so that we keep that info
    # as separate records:
    multi_fuel = pd.merge(multi_fuel,
                          pudl_out.frc_eia923()[['plant_id_eia',
                                                 'report_date',
                                                 'fuel_cost_per_mmbtu',
                                                 'fuel_type_code_pudl',
                                                 'fuel_cost_from_eiaapi', ]],
                          how='left', on=['plant_id_eia', 'report_date',
                                          'fuel_type_code_pudl'])
    # At this point, within each plant, we should have one record per
    # combination of generator & fuel type, which includes the heat rate of
    # each generator, as well as *plant* level fuel cost per unit heat input
    # for *each* fuel, which we can combine to figure out the fuel cost per
    # unit net electricity generation on a generator basis.

    # We have to do these calculations separately for the single and multi-fuel
    # plants because in the case of the one fuel plants we need to sum up all
    # of the fuel costs -- including both primary and secondary fuel
    # consumption -- whereas in the multi-fuel plants we are going to look at
    # fuel costs on a per-fuel basis (this is very close to being correct,
    # since secondary fuels are typically a fraction of a percent of the
    # plant's overall costs).
    one_fuel_gb = one_fuel.groupby(by=['report_date', 'plant_id_eia'])
    one_fuel_agg = one_fuel_gb.agg({
        'total_fuel_cost': pudl.helpers.sum_na,
        'total_heat_content_mmbtu': pudl.helpers.sum_na,
        'fuel_cost_from_eiaapi': 'any',
    })
    one_fuel_agg['fuel_cost_per_mmbtu'] = \
        one_fuel_agg['total_fuel_cost'] / \
        one_fuel_agg['total_heat_content_mmbtu']
    one_fuel_agg = one_fuel_agg.reset_index()
    one_fuel = pd.merge(
        one_fuel[['plant_id_eia', 'report_date', 'generator_id',
                  'heat_rate_mmbtu_mwh', 'fuel_cost_from_eiaapi']],
        one_fuel_agg[['plant_id_eia', 'report_date', 'fuel_cost_per_mmbtu']],
        on=['plant_id_eia', 'report_date'])
    one_fuel = one_fuel.drop_duplicates(
        subset=['plant_id_eia', 'report_date', 'generator_id'])
    multi_fuel = multi_fuel[['plant_id_eia', 'report_date', 'generator_id',
                             'fuel_cost_per_mmbtu', 'heat_rate_mmbtu_mwh',
                             'fuel_cost_from_eiaapi', ]]
    # NOTE(review): DataFrame.append is deprecated in newer pandas; if this
    # codebase upgrades, this should become pd.concat — confirm pandas pin.
    fuel_cost = one_fuel.append(multi_fuel, sort=True)
    # $/MWh = ($/mmBTU) * (mmBTU/MWh)
    fuel_cost['fuel_cost_per_mwh'] = \
        fuel_cost['fuel_cost_per_mmbtu'] * fuel_cost['heat_rate_mmbtu_mwh']
    fuel_cost = \
        fuel_cost.sort_values(['report_date', 'plant_id_eia', 'generator_id'])
    out_df = gen_w_ft.drop('heat_rate_mmbtu_mwh', axis=1)
    out_df = pd.merge(out_df.drop_duplicates(), fuel_cost,
                      on=['report_date', 'plant_id_eia', 'generator_id'])
    return out_df
def capacity_factor(pudl_out, min_cap_fact=0, max_cap_fact=1.5):
    """
    Calculate the capacity factor for each generator.

    Net generation (EIA 923) is divided by nameplate capacity (EIA 860)
    times the number of hours in each reporting period, where the period
    length is derived from pudl_out.freq. Records whose capacity factor
    falls outside [min_cap_fact, max_cap_fact] are replaced with NaN.
    """
    # A reporting frequency is required to know how many hours each
    # record spans; without it the calculation is meaningless.
    if pudl_out.freq is None:
        raise ValueError(
            "pudl_out must include a frequency for capacity factor calculation"
        )
    # Pull only the columns needed from each table.
    gens_eia860 = pudl_out.gens_eia860()[
        ['plant_id_eia', 'report_date', 'generator_id', 'capacity_mw']]
    gen = pudl_out.gen_eia923()[
        ['plant_id_eia', 'report_date', 'generator_id', 'net_generation_mwh']]
    # Combine generation and capacity on generator + (year-tolerant) date.
    cap_fact = pudl.helpers.merge_on_date_year(
        gen, gens_eia860,
        on=['plant_id_eia', 'generator_id'],
        how='inner')

    def _hours_in_period(start):
        # Length of one reporting period starting at `start`, in hours.
        window = pd.date_range(start, periods=2, freq=pudl_out.freq)
        return (window[1] - window[0]) / pd.Timedelta(hours=1)

    # Compute hours once per unique report_date, then merge back in.
    unique_dates = cap_fact['report_date'].drop_duplicates()
    dates_to_hours = pd.DataFrame({
        'report_date': unique_dates,
        'hours': unique_dates.apply(_hours_in_period),
    })
    cap_fact = cap_fact.merge(dates_to_hours, on=['report_date'])
    # capacity factor = energy produced / energy producible at nameplate.
    cap_fact['capacity_factor'] = (
        cap_fact['net_generation_mwh']
        / (cap_fact['capacity_mw'] * cap_fact['hours']))
    # NaN-out implausible values rather than dropping rows outright.
    cap_fact = pudl.helpers.oob_to_nan(
        cap_fact, ['capacity_factor'], lb=min_cap_fact, ub=max_cap_fact)
    # The hours column was only needed for the calculation above.
    return cap_fact.drop(['hours'], axis=1)
def mcoe(pudl_out,
         min_heat_rate=5.5, min_fuel_cost_per_mwh=0.0,
         min_cap_fact=0.0, max_cap_fact=1.5):
    """
    Compile marginal cost of electricity (MCOE) at the generator level.

    Use data from EIA 923, EIA 860, and (eventually) FERC Form 1 to estimate
    the MCOE of individual generating units. The calculation is performed at
    the time resolution, and for the period, indicated by the pudl_out object
    that is passed in.

    Args:
        pudl_out: a PudlTabl object, specifying the time resolution and
            date range for which the calculations should be performed.
        min_heat_rate: lowest plausible heat rate, in mmBTU/MWh. Any MCOE
            records with lower heat rates are presumed to be invalid, and are
            discarded before returning.
        min_cap_fact, max_cap_fact: minimum & maximum generator capacity
            factor. Generator records outside this range are NaN-ed out
            before returning. This allows the user to exclude generators
            that aren't being used enough to have valid results.
        min_fuel_cost_per_mwh: minimum fuel cost on a per MWh basis that is
            required for a generator record to be considered valid. For some
            reason there are now a large number of $0 fuel cost records, which
            previously would have been NaN.

    Returns:
        pandas.DataFrame: a dataframe organized by date and generator,
        with lots of juicy information about the generators -- including fuel
        cost on a per MWh and MMBTU basis, heat rates, and net generation.
    """
    # because lots of these input dfs include same info columns, this generates
    # drop columns for fuel_cost. This avoids needing to hard code columns.
    merge_cols = ['plant_id_eia', 'generator_id', 'report_date']
    drop_cols = [x for x in pudl_out.gens_eia860().columns
                 if x in pudl_out.fuel_cost().columns and x not in merge_cols]
    # start with the generators table so we have all of the generators
    mcoe_out = pudl.helpers.merge_on_date_year(
        pudl_out.fuel_cost().drop(drop_cols, axis=1),
        pudl_out.gens_eia860(),
        on=[x for x in merge_cols if x != 'report_date'],
        how='inner',
    )
    # Bring together the fuel cost and capacity factor dataframes, which
    # also include heat rate information.
    # NOTE: an outer merge, so generators appearing in only one of the two
    # inputs are retained (with NaNs in the missing columns).
    mcoe_out = pd.merge(
        mcoe_out,
        pudl_out.capacity_factor(min_cap_fact=min_cap_fact,
                                 max_cap_fact=max_cap_fact)
        [['report_date', 'plant_id_eia', 'generator_id',
          'capacity_factor', 'net_generation_mwh']],
        on=['report_date', 'plant_id_eia', 'generator_id'],
        how='outer')
    # Bring the PUDL Unit IDs into the output dataframe so we can see how
    # the generators are really grouped.
    mcoe_out = pudl.helpers.merge_on_date_year(
        mcoe_out,
        pudl_out.bga()[['report_date',
                        'plant_id_eia',
                        'unit_id_pudl',
                        'generator_id']].drop_duplicates(),
        how='left',
        on=['plant_id_eia', 'generator_id'])
    # Instead of getting the total MMBTU through this multiplication... we
    # could also calculate the total fuel consumed on a per-unit basis, from
    # the boiler_fuel table, and then determine what proportion should be
    # distributed to each generator based on its heat-rate and net generation.
    mcoe_out['total_mmbtu'] = \
        mcoe_out.net_generation_mwh * mcoe_out.heat_rate_mmbtu_mwh
    mcoe_out['total_fuel_cost'] = \
        mcoe_out.total_mmbtu * mcoe_out.fuel_cost_per_mmbtu
    # Identifying columns moved to the front of the dataframe for readability.
    first_cols = ['report_date',
                  'plant_id_eia',
                  'plant_id_pudl',
                  'unit_id_pudl',
                  'generator_id',
                  'plant_name_eia',
                  'utility_id_eia',
                  'utility_id_pudl',
                  'utility_name_eia']
    mcoe_out = pudl.helpers.organize_cols(mcoe_out, first_cols)
    mcoe_out = mcoe_out.sort_values(
        ['plant_id_eia', 'unit_id_pudl', 'generator_id', 'report_date']
    )
    # Filter the output based on the range of validity supplied by the user:
    # out-of-bounds values become NaN rather than dropping whole rows.
    mcoe_out = pudl.helpers.oob_to_nan(mcoe_out, ['heat_rate_mmbtu_mwh'],
                                       lb=min_heat_rate, ub=None)
    mcoe_out = pudl.helpers.oob_to_nan(mcoe_out, ['fuel_cost_per_mwh'],
                                       lb=min_fuel_cost_per_mwh, ub=None)
    mcoe_out = pudl.helpers.oob_to_nan(mcoe_out, ['capacity_factor'],
                                       lb=min_cap_fact, ub=max_cap_fact)
    return mcoe_out
| {
"repo_name": "catalyst-cooperative/pudl",
"path": "src/pudl/analysis/mcoe.py",
"copies": "1",
"size": "19422",
"license": "mit",
"hash": 6970634905143909000,
"line_mean": 45.5755395683,
"line_max": 79,
"alpha_frac": 0.5877355576,
"autogenerated": false,
"ratio": 3.63639767833739,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47241332359373894,
"avg_score": null,
"num_lines": null
} |
"""A module with functions to aid in data analysis using the PUDL database.
NOTE: Currently the functions here are for reference only. They are not
currently being tested and maintained, but are being retained because we are
working on similar things right now, and may update or re-purpose them.
"""
import logging
# Useful high-level external modules.
import numpy as np
import pandas as pd
import sqlalchemy as sa
# Our own code...
import pudl
import pudl.helpers
logger = logging.getLogger(__name__)
def fuel_ferc1_by_pudl(pudl_plant_ids, pudl_engine,
                       fuels=('gas', 'oil', 'coal'),
                       cols=('fuel_consumed_total_mmbtu',
                             'fuel_consumed_total_cost_mmbtu',
                             'fuel_consumed_total_cost_unit')):
    """
    Aggregate FERC Form 1 fuel data by PUDL plant id and, optionally, fuel.

    Args:
        pudl_plant_ids (list-like): which PUDL plants should we retain for
            aggregation?
        pudl_engine: SQLAlchemy engine connected to the PUDL database.
        fuels (list-like): Should the columns listed in cols be broken out by
            each individual fuel? If so, which fuels do we want totals for? If
            you want all fuels lumped together, pass in 'all'.
        cols (list-like): which columns from the fuel_ferc1 table should be
            summed.

    Returns:
        pandas.DataFrame: with plant_id_pudl, report_year, and the summed
        values specified in cols. If fuels is not 'all' then it also has a
        column specifying fuel type.
    """
    fuel_df = pudl.output.ferc1.fuel_ferc1_df(pudl_engine)
    # Calculate the total fuel heat content for the plant by fuel
    fuel_df = fuel_df[fuel_df.plant_id_pudl.isin(pudl_plant_ids)]
    if fuels == 'all':
        cols_to_gb = ['plant_id_pudl', 'report_year']
    else:
        # Limit to records that pertain to our fuels of interest.
        fuel_df = fuel_df[fuel_df['fuel'].isin(fuels)]
        # Group by fuel as well, so we get individual fuel totals.
        cols_to_gb = ['plant_id_pudl', 'report_year', 'fuel']
    # BUG FIX: cols defaults to a tuple, and selecting groupby columns with a
    # tuple is deprecated/rejected by pandas -- normalize to a list.
    fuel_df = fuel_df.groupby(cols_to_gb)[list(cols)].sum()
    fuel_df = fuel_df.reset_index()
    return fuel_df
def steam_ferc1_by_pudl(pudl_plant_ids, pudl_engine,
                        cols=('net_generation_mwh', )):
    """
    Aggregate and return data from the steam_ferc1 table by pudl_plant_id.

    Args:
        pudl_plant_ids (list-like): A list of ids to include in the output.
        pudl_engine: SQLAlchemy engine connected to the PUDL database.
        cols (list-like): The data columns that you want to aggregate and
            return.

    Returns:
        pandas.DataFrame: a dataframe with columns for report_year,
        plant_id_pudl and cols, with the values in cols aggregated by plant
        and year.
    """
    steam_df = pudl.output.ferc1.plants_steam_ferc1_df(pudl_engine)
    steam_df = steam_df[steam_df.plant_id_pudl.isin(pudl_plant_ids)]
    # BUG FIX: cols defaults to a tuple; selecting groupby columns with a
    # tuple is deprecated/rejected by pandas -- normalize to a list.
    steam_df = steam_df.groupby(
        ['plant_id_pudl', 'report_year'])[list(cols)].sum()
    steam_df = steam_df.reset_index()
    return steam_df
def frc_by_pudl(pudl_plant_ids, pudl_engine,
                fuels=('gas', 'oil', 'coal'),
                cols=('total_fuel_cost', )):
    """
    Aggregate fuel_receipts_costs_eia923 table for comparison with FERC Form 1.

    In order to correlate information between EIA 923 and FERC Form 1, we need
    to aggregate the EIA data annually, and potentially by fuel. This function
    groups fuel_receipts_costs_eia923 by pudl_plant_id, fuel, and year, and
    sums the columns of interest specified in cols, and returns a dataframe
    with the totals by pudl_plant_id, fuel, and year.

    Args:
        pudl_plant_ids: list of plant IDs to keep.
        pudl_engine: SQLAlchemy engine connected to the PUDL database.
        fuels: list of fuel strings that we want to group by. Alternatively,
            this can be set to 'all' in which case fuel is not grouped by.
        cols: List of data columns which we are summing.

    Returns:
        pandas.DataFrame: A dataframe with the sums of cols, as grouped by
        pudl ID, year, and (optionally) fuel.
    """
    md = sa.MetaData(bind=pudl_engine)
    md.reflect()
    # Get all the EIA info from fuel_receipts_costs_eia923
    frc_df = pudl.output.eia923.fuel_receipts_costs_eia923(pudl_engine,
                                                           md.tables)
    # Limit just to the plants we're looking at
    frc_df = frc_df[frc_df.plant_id_pudl.isin(pudl_plant_ids)]
    # BUG FIX: cols defaults to a tuple, and ``list + tuple`` raises a
    # TypeError -- normalize to a list up front.
    cols = list(cols)
    # Just keep the columns we need for output:
    cols_to_keep = ['plant_id_pudl', 'report_date'] + cols
    cols_to_gb = [pd.Grouper(freq='A'), 'plant_id_pudl']
    if fuels != 'all':
        frc_df = frc_df[frc_df.fuel.isin(fuels)]
        cols_to_keep = cols_to_keep + ['fuel']
        cols_to_gb = cols_to_gb + ['fuel']
    # Pare down the dataframe to make it easier to play with:
    frc_df = frc_df[cols_to_keep]
    # Prepare to group annually: the Grouper above works on the index.
    frc_df['report_date'] = pd.to_datetime(frc_df['report_date'])
    frc_df.index = frc_df.report_date
    frc_df.drop('report_date', axis=1, inplace=True)
    # Group and sum of the columns of interest:
    frc_gb = frc_df.groupby(by=cols_to_gb)
    frc_totals_df = frc_gb[cols].sum()
    # Simplify and clean the DF for return:
    frc_totals_df = frc_totals_df.reset_index()
    frc_totals_df['report_year'] = frc_totals_df.report_date.dt.year
    frc_totals_df = frc_totals_df.drop('report_date', axis=1)
    frc_totals_df = frc_totals_df.dropna()
    return frc_totals_df
def gen_fuel_by_pudl(pudl_plant_ids, pudl_engine,
                     fuels=('gas', 'oil', 'coal'),
                     cols=('fuel_consumed_mmbtu',
                           'net_generation_mwh')):
    """
    Aggregate generation_fuel_eia923 table for comparison with FERC Form 1.

    In order to correlate information between EIA 923 and FERC Form 1, we need
    to aggregate the EIA data annually, and potentially by fuel. This function
    groups generation_fuel_eia923 by pudl_plant_id, fuel, and year, and sums
    the columns of interest specified in cols, and returns a dataframe with
    the totals by pudl_plant_id, fuel, and year.

    Args:
        pudl_plant_ids (list-like): list of plant IDs to keep.
        pudl_engine: SQLAlchemy engine connected to the PUDL database.
        fuels (list-like): list of fuel strings that we want to group by.
            Alternatively, this can be set to 'all' in which case fuel is not
            grouped by.
        cols (list-like): List of data columns which we are summing.

    Returns:
        pandas.DataFrame: A dataframe with the sums of cols, as grouped by
        pudl ID, year, and (optionally) fuel.
    """
    md = sa.MetaData(bind=pudl_engine)
    md.reflect()
    # Get all the EIA info from generation_fuel_eia923
    gf_df = pudl.output.eia923.generation_fuel_eia923(pudl_engine, md.tables)
    # Standardize the fuel codes (need to fix this in the DB!!!!)
    gf_df = gf_df.rename(columns={'fuel_type_code_pudl': 'fuel'})
    # Select only the records that pertain to our target IDs
    gf_df = gf_df[gf_df.plant_id_pudl.isin(pudl_plant_ids)]
    # BUG FIX: cols defaults to a tuple, and ``list + tuple`` raises a
    # TypeError -- normalize to a list up front.
    cols = list(cols)
    cols_to_keep = ['plant_id_pudl', 'report_date'] + cols
    cols_to_gb = [pd.Grouper(freq='A'), 'plant_id_pudl']
    if fuels != 'all':
        gf_df = gf_df[gf_df.fuel.isin(fuels)]
        cols_to_keep = cols_to_keep + ['fuel']
        cols_to_gb = cols_to_gb + ['fuel']
    # Pare down the dataframe to make it easier to play with:
    gf_df = gf_df[cols_to_keep]
    # Prepare to group annually: the Grouper above works on the index.
    gf_df['report_date'] = pd.to_datetime(gf_df['report_date'])
    gf_df.index = gf_df.report_date
    gf_df.drop('report_date', axis=1, inplace=True)
    gf_gb = gf_df.groupby(by=cols_to_gb)
    gf_totals_df = gf_gb[cols].sum()
    gf_totals_df = gf_totals_df.reset_index()
    # Simplify date info for easy comparison with FERC.
    gf_totals_df['report_year'] = gf_totals_df.report_date.dt.year
    gf_totals_df = gf_totals_df.drop('report_date', axis=1)
    gf_totals_df = gf_totals_df.dropna()
    return gf_totals_df
def generator_proportion_eia923(g, id_col='plant_id_eia'):
    """
    Generate a dataframe with the proportion of generation for each generator.

    Args:
        g (pandas.DataFrame): a dataframe from either all of generation_eia923
            or some subset of records from generation_eia923. The dataframe
            needs the following columns to be present: report_year (date-like),
            generator_id, net_generation_mwh, and the id_col.
        id_col (str): either plant_id_eia (default) or plant_id_pudl.

    Returns:
        pandas.DataFrame: containing report_year, id_col, generator_id,
        proportion_of_generation.
    """
    # Index by date so pd.Grouper can bucket records into years.
    g = g.set_index(pd.DatetimeIndex(g['report_year']))
    # Annual net generation per generator, with generator_id kept as a
    # column so the plant-level totals can be joined on (year, plant).
    per_generator = pd.DataFrame(
        g.groupby(
            [pd.Grouper(freq='A'), id_col, 'generator_id']
        ).net_generation_mwh.sum()
    ).reset_index(level=['generator_id'])
    # Annual net generation per plant (same (year, plant) index).
    per_plant = pd.DataFrame(
        g.groupby([pd.Grouper(freq='A'), id_col]).net_generation_mwh.sum())
    # Join generator totals (x) with plant totals (y) on the shared index.
    merged = per_generator.merge(
        per_plant, how="left", left_index=True, right_index=True)
    merged['proportion_of_generation'] = (
        merged.net_generation_mwh_x / merged.net_generation_mwh_y)
    # The raw totals are no longer needed once the ratio is computed.
    merged = merged.drop(
        ['net_generation_mwh_x', 'net_generation_mwh_y'], axis=1)
    return merged.reset_index()
def capacity_proportion_eia923(g, id_col='plant_id_eia',
                               capacity='capacity_mw'):
    """
    Generate dataframe with proportion of plant capacity for each generator.

    Args:
        g (pandas.DataFrame): a dataframe from either all of generation_eia923
            or some subset of records from generation_eia923. The dataframe
            needs the following columns to be present: generator_id,
            report_year, capacity_mw, and the id_col.
        id_col (str): either plant_id_eia (default) or plant_id_pudl.
        capacity (str): currently unused; the calculation always reads the
            capacity_mw column.

    Returns:
        pandas.DataFrame: the input records plus capacity_gen_mw,
        capacity_plant_mw, and proportion_of_plant_capacity columns.
    """
    # Total nameplate capacity of each plant, per year.
    plant_totals = pd.DataFrame(
        g.groupby(['report_year', id_col]).capacity_mw.sum())
    plant_totals.reset_index(inplace=True)
    # Attach the plant total to every generator record; the duplicated
    # capacity_mw column picks up _x (generator) / _y (plant) suffixes.
    proportioned = g.merge(
        plant_totals, on=[id_col, 'report_year'], how="left")
    proportioned['proportion_of_plant_capacity'] = (
        proportioned.capacity_mw_x / proportioned.capacity_mw_y)
    # Give the suffixed columns meaningful names before returning.
    return proportioned.rename(
        columns={'capacity_mw_x': 'capacity_gen_mw',
                 'capacity_mw_y': 'capacity_plant_mw'})
def values_by_generator_eia923(table_eia923, column_name, g):
    """
    Generate a dataframe with a plant value proportioned out by generator.

    Args:
        table_eia923 (pandas.DataFrame): an EIA923 table (this has been tested
            with fuel_receipts_costs_eia923 and generation_fuel_eia923).
        column_name: a column name from the table_eia923.
        g (pandas.DataFrame): a dataframe from either all of generation_eia923
            or some subset of records from generation_eia923. The dataframe
            needs the following columns to be present: plant_id_eia,
            generator_id, report_date, and net_generation_mwh.

    Returns:
        pandas.DataFrame: with report_date, plant_id_eia, generator_id, and the
        proportioned value from the column_name.
    """
    # Set the datetimeindex
    table_eia923 = table_eia923.set_index(
        pd.DatetimeIndex(table_eia923['report_date']))
    # groupby plant_id_eia and by year
    table_eia923_gb = table_eia923.groupby(
        [pd.Grouper(freq='A'), 'plant_id_eia'])
    # sum fuel cost by year by plant
    table_eia923_sr = table_eia923_gb[column_name].sum()
    # Convert back into a dataframe
    table_eia923_df = pd.DataFrame(table_eia923_sr)
    column_name_by_plant = f"{column_name}_plant"
    table_eia923_df = table_eia923_df.rename(
        columns={column_name: column_name_by_plant})
    # get the generator proportions
    g_gens_proportion = generator_proportion_eia923(g)
    # merge the per generator proportions with the summed fuel cost
    # NOTE(review): g_gens_proportion comes back with a plain RangeIndex
    # (generator_proportion_eia923 calls reset_index), while table_eia923_df
    # is indexed by (report_date, plant_id_eia). Merging on both indexes here
    # looks unlikely to align as intended -- verify against callers before
    # relying on this function.
    g_generator = g_gens_proportion.merge(
        table_eia923_df, how="left", right_index=True, left_index=True)
    # calculate the proportional fuel costs
    g_generator[f"{column_name}_generator"] = (
        g_generator[column_name_by_plant] *
        g_generator.proportion_of_generation)
    # drop the unneccessary columns
    g_generator = g_generator.drop(
        ['proportion_of_generation', column_name_by_plant], axis=1)
    return g_generator
def primary_fuel_ferc1(fuel_df, fuel_thresh=0.5):
    """
    Determine the primary fuel for plants listed in the PUDL fuel_ferc1 table.

    Given a selection of records from the PUDL fuel_ferc1 table, determine
    the primary fuel type for each plant (as identified by a unique
    combination of report_year, respondent_id, and plant_name).

    Args:
        fuel_df (DataFrame): a DataFrame selected from the PUDL fuel_ferc1
            table, with columns including report_year, respondent_id,
            plant_name, fuel, fuel_qty_burned, and fuel_avg_mmbtu_per_unit.
        fuel_thresh (float): minimum proportion of a plant's annual fuel
            consumption, in terms of heat content, that a fuel must account
            for in order to be considered the primary fuel.

    Returns:
        pandas.DataFrame: containing report_year, respondent_id, plant_name,
        and primary_fuel (null where no fuel crosses the threshold).
    """
    # Per-fuel heat-content proportions, keyed by (year, respondent, plant).
    proportions = (
        plant_fuel_proportions_ferc1(fuel_df)
        .set_index(['report_year', 'respondent_id', 'plant_name'])
        .drop('total_mmbtu', axis=1)
    )
    # Blank out any fuel whose share falls below the threshold, then take
    # the largest surviving share as the primary fuel (NaN if none survive).
    dominant = proportions.where(proportions >= fuel_thresh)
    dominant['primary_fuel'] = dominant.idxmax(axis=1)
    return dominant[['primary_fuel']].reset_index()
def plant_fuel_proportions_ferc1(fuel_df):
    """
    Calculate annual fuel proportions by plant based on FERC data.

    Args:
        fuel_df (pandas.DataFrame): FERC 1 Fuel table, or some subset of it.
            Must include report_year, respondent_id, plant_name, fuel,
            fuel_qty_burned, and fuel_avg_mmbtu_per_unit columns.

    Returns:
        pandas.DataFrame: one row per (report_year, respondent_id,
        plant_name), with one column per fuel giving that fuel's share of
        total heat content, plus a total_mmbtu column.
    """
    fuel_df = fuel_df.copy()
    # Heat content actually burned, per record.
    fuel_df['total_mmbtu'] = \
        fuel_df['fuel_qty_burned'] * fuel_df['fuel_avg_mmbtu_per_unit']
    heat_df = fuel_df[['report_year',
                       'respondent_id',
                       'plant_name',
                       'fuel',
                       'total_mmbtu']]
    # One column per fuel, summed heat content per plant-year.
    heat_pivot = heat_df.pivot_table(
        index=['report_year', 'respondent_id', 'plant_name'],
        columns='fuel',
        values='total_mmbtu')
    heat_pivot['total'] = heat_pivot.sum(axis=1, numeric_only=True)
    # Keep the absolute totals aside before converting to proportions.
    mmbtu_total = heat_pivot[['total']].copy()
    heat_pivot = heat_pivot.fillna(value=0)
    heat_pivot = heat_pivot.divide(heat_pivot.total, axis='index')
    heat_pivot = heat_pivot.drop('total', axis=1)
    heat_pivot = heat_pivot.reset_index()
    # Re-attach the absolute totals via the shared key columns.
    heat_pivot = heat_pivot.merge(mmbtu_total.reset_index())
    heat_pivot.rename(columns={'total': 'total_mmbtu'},
                      inplace=True)
    # BUG FIX: ``del heat_pivot.columns.name`` raises AttributeError on
    # modern pandas (Index.name has no deleter); assign None instead.
    heat_pivot.columns.name = None
    return heat_pivot
def plant_fuel_proportions_frc_eia923(frc_df, id_col='plant_id_eia'):
    """Calculate annual fuel proportions by plant from EIA923 fuel receipts.

    Args:
        frc_df (pandas.DataFrame): fuel_receipts_costs_eia923 records; must
            include report_date, plant_id_eia, plant_id_pudl,
            fuel_group_code, fuel_qty_units and average_heat_content.
        id_col (str): plant_id_eia (default) or plant_id_pudl.

    Returns:
        pandas.DataFrame: one row per (year, id_col), with one column per
        fuel_group_code giving that fuel's share of total heat content.
    """
    frc_df = frc_df.copy()
    # Add a column with total fuel heat content per delivery
    frc_df['total_mmbtu'] = frc_df.fuel_qty_units * frc_df.average_heat_content
    # Drop everything but the identifying columns and total_mmbtu
    frc_df = frc_df[['report_date', 'plant_id_eia',
                     'plant_id_pudl', 'fuel_group_code', 'total_mmbtu']]
    # BUG FIX: pd.Grouper(freq='A') groups on the *index*, so report_date
    # must be a DatetimeIndex before grouping (the original grouped on a
    # RangeIndex, which raises). Mirrors plant_fuel_proportions_gf_eia923.
    frc_df = frc_df.set_index(pd.DatetimeIndex(frc_df['report_date']))
    frc_df = frc_df.drop('report_date', axis=1)
    # Group by plant, year (annual), and fuel_group_code
    frc_gb = frc_df.groupby(
        [id_col, pd.Grouper(freq='A'), 'fuel_group_code'])
    # Add up all the MMBTU for each plant & year. At this point each record
    # in the dataframe contains only information about a single fuel.
    heat_df = frc_gb.sum()
    # Simplify the DF a little before we turn it into a pivot table.
    heat_df = heat_df.reset_index()
    heat_df['year'] = pd.DatetimeIndex(heat_df['report_date']).year
    heat_df = heat_df.drop('report_date', axis=1)
    # Take the individual rows organized by fuel_group_code, and turn them into
    # columns, each with the total MMBTU for that fuel, year, and plant.
    heat_pivot = heat_df.pivot_table(
        index=['year', id_col],
        columns='fuel_group_code',
        values='total_mmbtu')
    # Add a column that has the *total* heat content of all fuels:
    heat_pivot['total'] = heat_pivot.sum(axis=1, numeric_only=True)
    # Replace any NaN values we got from pivoting with zeros.
    heat_pivot = heat_pivot.fillna(value=0)
    # Divide all columns by the total heat content, giving us the proportions
    # for each fuel instead of the heat content.
    heat_pivot = heat_pivot.divide(heat_pivot.total, axis='index')
    # Drop the total column (it's nothing but 1.0 values) and clean up the
    # index and columns a bit before returning the DF.
    heat_pivot = heat_pivot.drop('total', axis=1)
    heat_pivot = heat_pivot.reset_index()
    # BUG FIX: assign None rather than ``del`` -- Index.name has no deleter
    # on modern pandas.
    heat_pivot.columns.name = None
    return heat_pivot
def primary_fuel_frc_eia923(frc_df, id_col='plant_id_eia', fuel_thresh=0.5):
    """Determine a plant's primary fuel from EIA923 fuel receipts table.

    A fuel is considered primary when it supplies at least ``fuel_thresh``
    of a plant's annual heat content; otherwise primary_fuel is left null.
    """
    # Per-fuel heat-content shares, keyed by (plant, year).
    shares = plant_fuel_proportions_frc_eia923(frc_df.copy())
    shares = shares.set_index([id_col, 'year'])
    # Blank out sub-threshold fuels, then take the largest surviving share.
    dominant = shares.where(shares >= fuel_thresh)
    dominant['primary_fuel'] = dominant.idxmax(axis=1)
    return dominant[['primary_fuel']].reset_index()
def plant_fuel_proportions_gf_eia923(gf_df):
    """Calculate annual fuel proportions by plant from EIA923 gen fuel.

    Args:
        gf_df (pandas.DataFrame): generation_fuel_eia923 records; must
            include report_date, plant_id_eia, fuel_type_code_pudl and
            fuel_consumed_mmbtu.

    Returns:
        pandas.DataFrame: one row per (year, plant_id_eia), with one column
        per fuel_type_code_pudl giving that fuel's share of heat content.
    """
    gf_df = gf_df.copy()
    # Drop everything but report_date, plant_id_eia, fuel_type_code_pudl,
    # fuel_consumed_mmbtu
    gf_df = gf_df[['report_date',
                   'plant_id_eia',
                   'fuel_type_code_pudl',
                   'fuel_consumed_mmbtu']]
    # Set report_date as a DatetimeIndex so pd.Grouper can bucket by year.
    gf_df = gf_df.set_index(pd.DatetimeIndex(gf_df['report_date']))
    # BUG FIX: drop the redundant report_date *column* -- summing a datetime
    # column raises on modern pandas, and it would collide with the
    # report_date index level on reset_index below.
    gf_df = gf_df.drop('report_date', axis=1)
    # Group by plant_id_eia, report_date(annual), fuel_type_code_pudl
    gf_gb = gf_df.groupby(
        ['plant_id_eia', pd.Grouper(freq='A'), 'fuel_type_code_pudl'])
    # Add up all the MMBTU for each plant & year. At this point each record
    # in the dataframe contains only information about a single fuel.
    heat_df = gf_gb.sum()
    # Simplify the DF a little before we turn it into a pivot table.
    heat_df = heat_df.reset_index()
    heat_df['year'] = pd.DatetimeIndex(heat_df['report_date']).year
    heat_df = heat_df.drop('report_date', axis=1)
    # Take the individual rows organized by fuel_type_code_pudl, and turn them
    # into columns, each with the total MMBTU for that fuel, year, and plant.
    heat_pivot = heat_df.pivot_table(
        index=['year', 'plant_id_eia'],
        columns='fuel_type_code_pudl',
        values='fuel_consumed_mmbtu')
    # Add a column that has the *total* heat content of all fuels:
    heat_pivot['total'] = heat_pivot.sum(axis=1, numeric_only=True)
    # Replace any NaN values we got from pivoting with zeros.
    heat_pivot = heat_pivot.fillna(value=0)
    # Divide all columns by the total heat content, giving us the proportions
    # for each fuel instead of the heat content.
    heat_pivot = heat_pivot.divide(heat_pivot.total, axis='index')
    # Drop the total column (it's nothing but 1.0 values) and clean up the
    # index and columns a bit before returning the DF.
    heat_pivot = heat_pivot.drop('total', axis=1)
    heat_pivot = heat_pivot.reset_index()
    # BUG FIX: assign None rather than ``del`` -- Index.name has no deleter
    # on modern pandas.
    heat_pivot.columns.name = None
    return heat_pivot
def primary_fuel_gf_eia923(gf_df, id_col='plant_id_eia', fuel_thresh=0.5):
    """Determine a plant's primary fuel from EIA923 generation fuel table.

    A fuel is considered primary when it supplies at least ``fuel_thresh``
    of a plant's annual heat content; otherwise primary_fuel is left null.
    """
    gf_df = gf_df.copy()
    # Figure out the heat content proportions of each fuel consumed:
    gf_by_heat = plant_fuel_proportions_gf_eia923(gf_df)
    # On a per plant, per year basis, identify the fuel that made the largest
    # contribution to the plant's overall heat content consumed. If that
    # proportion is greater than fuel_thresh, set the primary_fuel to be
    # that fuel. Otherwise, leave it None.
    # BUG FIX: plant_fuel_proportions_gf_eia923 returns a 'year' column (not
    # 'report_year'); indexing on 'report_year' raised a KeyError. This now
    # matches the frc_eia923 variant of this function.
    gf_by_heat = gf_by_heat.set_index([id_col, 'year'])
    mask = gf_by_heat >= fuel_thresh
    gf_by_heat = gf_by_heat.where(mask)
    gf_by_heat['primary_fuel'] = gf_by_heat.idxmax(axis=1)
    return gf_by_heat[['primary_fuel', ]].reset_index()
| {
"repo_name": "catalyst-cooperative/pudl",
"path": "src/pudl/analysis/analysis.py",
"copies": "1",
"size": "22581",
"license": "mit",
"hash": 3605611282541193000,
"line_mean": 38.7552816901,
"line_max": 79,
"alpha_frac": 0.6492626544,
"autogenerated": false,
"ratio": 3.1979889534060333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43472516078060336,
"avg_score": null,
"num_lines": null
} |
"""A module with platform-specific extended precision `numpy.number` subclasses.
The subclasses are defined here (instead of ``__init__.pyi``) such
that they can be imported conditionally via the numpy's mypy plugin.
"""
from typing import TYPE_CHECKING
import numpy as np
from . import (
_80Bit,
_96Bit,
_128Bit,
_256Bit,
)
if TYPE_CHECKING:
uint128 = np.unsignedinteger[_128Bit]
uint256 = np.unsignedinteger[_256Bit]
int128 = np.signedinteger[_128Bit]
int256 = np.signedinteger[_256Bit]
float80 = np.floating[_80Bit]
float96 = np.floating[_96Bit]
float128 = np.floating[_128Bit]
float256 = np.floating[_256Bit]
complex160 = np.complexfloating[_80Bit, _80Bit]
complex192 = np.complexfloating[_96Bit, _96Bit]
complex256 = np.complexfloating[_128Bit, _128Bit]
complex512 = np.complexfloating[_256Bit, _256Bit]
else:
uint128 = NotImplemented
uint256 = NotImplemented
int128 = NotImplemented
int256 = NotImplemented
float80 = NotImplemented
float96 = NotImplemented
float128 = NotImplemented
float256 = NotImplemented
complex160 = NotImplemented
complex192 = NotImplemented
complex256 = NotImplemented
complex512 = NotImplemented
| {
"repo_name": "madphysicist/numpy",
"path": "numpy/typing/_extended_precision.py",
"copies": "4",
"size": "1243",
"license": "bsd-3-clause",
"hash": 6822020524361552000,
"line_mean": 28.5952380952,
"line_max": 80,
"alpha_frac": 0.7103781175,
"autogenerated": false,
"ratio": 3.872274143302181,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.6582652260802181,
"avg_score": null,
"num_lines": null
} |
"""a module with some custom QtWidget Classes"""
from PyQt5 import QtWidgets, QtCore, QtGui
class MdiArea(QtWidgets.QMdiArea):
    """An MDI area that accepts drag-and-dropped URLs.

    Emits ``urlsDropped`` with the drop's QMimeData whenever a payload
    containing URLs is dropped onto the area.
    """

    urlsDropped = QtCore.pyqtSignal(object)

    def __init__(self, parent=None):
        super(MdiArea, self).__init__(parent)
        self.setAcceptDrops(True)

    def dragEnterEvent(self, event):
        # BUG FIX: hasUrls is a method; the original tested the bound-method
        # object itself (``hasUrls`` without parentheses), which is always
        # truthy, so every drag was accepted.
        if event.mimeData().hasUrls():
            event.acceptProposedAction()
        else:
            super(MdiArea, self).dragEnterEvent(event)

    def dragMoveEvent(self, event):
        super(MdiArea, self).dragMoveEvent(event)

    def dropEvent(self, dropEvent):
        """Emit urlsDropped(mime) when the dropped payload contains URLs.

        :type dropEvent: QtGui.QDropEvent
        """
        mime = dropEvent.mimeData()
        if mime.hasUrls():
            self.urlsDropped.emit(mime)
class TextWarning(QtWidgets.QDialog):
    """Modal dialog titled "Warning" showing text with Ok/Cancel buttons."""

    def __init__(self, text, parent=None):
        super(TextWarning, self).__init__(parent)
        self.resize(400, 300)
        # Build the widget tree: a text browser above a button box, both in
        # a vertical layout nested inside a grid layout on the dialog.
        self.gridLayout = QtWidgets.QGridLayout(self)
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.textBrowser = QtWidgets.QTextBrowser(self)
        self.buttonBox = QtWidgets.QDialogButtonBox(self)
        buttons = self.buttonBox
        buttons.setOrientation(QtCore.Qt.Horizontal)
        buttons.setStandardButtons(
            QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
        self.verticalLayout.addWidget(self.textBrowser)
        self.verticalLayout.addWidget(buttons)
        self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
        # Wire the standard buttons to the dialog's accept/reject slots.
        buttons.accepted.connect(self.accept)
        buttons.rejected.connect(self.reject)
        # A list/tuple of lines is joined into one newline-separated body.
        if type(text) in (list, tuple):
            text = "\n".join(text)
        self.textBrowser.setText(text)
        self.setWindowTitle("Warning")
class WarningMsgBox(QtWidgets.QDialog):
    """Modal dialog showing warning text with a single Ok button."""

    def __init__(self, text, title="Warning", parent=None):
        super(WarningMsgBox, self).__init__(parent)
        self.resize(500, 400)
        # A single vertical layout holds the text browser and the button box.
        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, -1)
        self.verticalLayout = layout
        self.textBrowser = QtWidgets.QTextBrowser(self)
        layout.addWidget(self.textBrowser)
        buttons = QtWidgets.QDialogButtonBox(self)
        buttons.setOrientation(QtCore.Qt.Horizontal)
        buttons.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)
        layout.addWidget(buttons)
        self.buttonBox = buttons
        # Wire the standard buttons to the dialog's accept/reject slots.
        buttons.accepted.connect(self.accept)
        buttons.rejected.connect(self.reject)
        self.setWindowTitle(title)
        # A list/tuple of lines is joined into one newline-separated body.
        if type(text) in (list, tuple):
            text = "\n".join(text)
        self.textBrowser.setText(text)
class Spoiler(QtWidgets.QWidget):
def __init__(self, parent=None, title='', animationDuration=300):
"""
References:
# Adapted from c++ version
http://stackoverflow.com/questions/32476006/how-to-make-an-expandable-collapsable-section-widget-in-qt
"""
super(Spoiler, self).__init__(parent=parent)
self.animationDuration = 300
self.toggleAnimation = QtCore.QParallelAnimationGroup()
self.contentArea = QtWidgets.QScrollArea()
self.headerLine = QtWidgets.QFrame()
self.toggleButton = QtWidgets.QToolButton()
self.mainLayout = QtWidgets.QGridLayout()
toggleButton = self.toggleButton
toggleButton.setStyleSheet("QToolButton { border: none; }")
toggleButton.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
toggleButton.setArrowType(QtCore.Qt.RightArrow)
toggleButton.setText(str(title))
toggleButton.setCheckable(True)
toggleButton.setChecked(False)
headerLine = self.headerLine
headerLine.setFrameShape(QtWidgets.QFrame.HLine)
headerLine.setFrameShadow(QtWidgets.QFrame.Sunken)
headerLine.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum)
self.contentArea.setStyleSheet("QScrollArea { background-color: white; border: none; }")
self.contentArea.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
# start out collapsed
self.contentArea.setMaximumHeight(0)
self.contentArea.setMinimumHeight(0)
# let the entire widget grow and shrink with its content
toggleAnimation = self.toggleAnimation
toggleAnimation.addAnimation(QtCore.QPropertyAnimation(self, "minimumHeight"))
toggleAnimation.addAnimation(QtCore.QPropertyAnimation(self, "maximumHeight"))
toggleAnimation.addAnimation(QtCore.QPropertyAnimation(self.contentArea, "maximumHeight"))
# don't waste space
mainLayout = self.mainLayout
mainLayout.setVerticalSpacing(0)
mainLayout.setContentsMargins(0, 0, 0, 0)
row = 0
mainLayout.addWidget(self.toggleButton, row, 0, 1, 1, QtCore.Qt.AlignLeft)
mainLayout.addWidget(self.headerLine, row, 2, 1, 1)
row += 1
mainLayout.addWidget(self.contentArea, row, 0, 1, 3)
self.setLayout(self.mainLayout)
def start_animation(checked):
arrow_type = QtCore.Qt.DownArrow if checked else QtCore.Qt.RightArrow
direction = QtCore.QAbstractAnimation.Forward if checked else QtCore.QAbstractAnimation.Backward
toggleButton.setArrowType(arrow_type)
self.toggleAnimation.setDirection(direction)
self.toggleAnimation.start()
self.toggleButton.clicked.connect(start_animation)
    def setContentLayout(self, contentLayout):
        """Install *contentLayout* as the collapsible body and size the
        expand/collapse animations to match its preferred height."""
        # Not sure if this is equivalent to self.contentArea.destroy()
        self.contentArea.destroy()
        self.contentArea.setLayout(contentLayout)
        # Height of the whole widget when the content area is collapsed.
        collapsedHeight = self.sizeHint().height() - self.contentArea.maximumHeight()
        contentHeight = contentLayout.sizeHint().height()
        # All animations except the last drive this widget's own
        # minimumHeight/maximumHeight between collapsed and expanded.
        for i in range(self.toggleAnimation.animationCount()-1):
            spoilerAnimation = self.toggleAnimation.animationAt(i)
            spoilerAnimation.setDuration(self.animationDuration)
            spoilerAnimation.setStartValue(collapsedHeight)
            spoilerAnimation.setEndValue(collapsedHeight + contentHeight)
        # The last animation grows the scroll area itself from 0 up to the
        # content's preferred height.
        contentAnimation = self.toggleAnimation.animationAt(self.toggleAnimation.animationCount() - 1)
        contentAnimation.setDuration(self.animationDuration)
        contentAnimation.setStartValue(0)
        contentAnimation.setEndValue(contentHeight)
class LineEdit(QtWidgets.QLineEdit):
    """A QLineEdit whose preferred size is scaled by optional factors.

    heightx / widthx are multipliers applied to the base-class size hint;
    both default to 1 (no scaling).
    """

    def __init__(self, heightx=None, widthx=None, parent=None):
        super(LineEdit, self).__init__(parent)
        self.heightx = 1 if heightx is None else heightx
        self.widthx = 1 if widthx is None else widthx
        # Cache the scaled hint under a private name.  The original code
        # assigned to self.size, which shadowed QWidget.size() for any
        # caller invoking .size() on this widget.
        self._size_hint = super(LineEdit, self).sizeHint()
        # Cast to int: fractional factors (e.g. widthx=0.5) would otherwise
        # pass floats to QSize setters, which strict bindings reject.
        self._size_hint.setHeight(int(self._size_hint.height() * self.heightx))
        self._size_hint.setWidth(int(self._size_hint.width() * self.widthx))

    def sizeHint(self):
        """Return the pre-computed, scaled size hint."""
        return self._size_hint
class LineEditNumeric(LineEdit):
    """A half-width LineEdit that only accepts floating-point input."""

    def __init__(self, parent=None):
        super(LineEditNumeric, self).__init__(widthx=0.5, parent=parent)
        # Restrict keyboard input to values parseable as doubles.
        self.setValidator(QtGui.QDoubleValidator())

    def value(self):
        """Return the current text parsed as a float."""
        return float(self.text())

    def setValue(self, val):
        """Display *val* formatted with up to 8 significant digits."""
        self.setText("{:0.08g}".format(val))
| {
"repo_name": "vincentchevrier/dataquick",
"path": "dataquick/qt/classes.py",
"copies": "1",
"size": "7327",
"license": "mit",
"hash": -2620503415611130000,
"line_mean": 40.395480226,
"line_max": 114,
"alpha_frac": 0.6833629043,
"autogenerated": false,
"ratio": 4.02803738317757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.521140028747757,
"avg_score": null,
"num_lines": null
} |
"""A MongoDB-based caching core for cachier."""
# This file is part of Cachier.
# https://github.com/shaypal5/cachier
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2016, Shay Palachy <shaypal5@gmail.com>
import sys # to make sure that pymongo was imported
import pickle # for serialization of python objects
from datetime import datetime
import time  # to sleep when waiting on Mongo cache
import warnings # to warn if pymongo is missing
try:
from pymongo import (
IndexModel,
ASCENDING
)
from pymongo.errors import OperationFailure
from bson.binary import Binary # to save binary data to mongodb
except ImportError: # pragma: no cover
pass
from .base_core import _BaseCore
MONGO_SLEEP_DURATION_IN_SEC = 1
class RecalculationNeeded(Exception):
    """Raised by wait_on_entry_calc when the awaited cache entry vanishes,
    signalling the caller that the value must be recomputed."""
    pass
class _MongoCore(_BaseCore):
    """Cachier core that persists cached values in a MongoDB collection."""

    # Name of the compound (func, key) index used for cache lookups.
    _INDEX_NAME = 'func_1_key_1'

    def __init__(self, mongetter, stale_after, next_time):
        """Create a core backed by the collection returned by *mongetter*."""
        if 'pymongo' not in sys.modules:
            warnings.warn((
                "Cachier warning: pymongo was not found. "
                "MongoDB cores will not function."))
        _BaseCore.__init__(self, stale_after, next_time)
        self.mongetter = mongetter
        self.mongo_collection = self.mongetter()
        # Create the (func, key) index on first use, if it is missing.
        index_inf = self.mongo_collection.index_information()
        if _MongoCore._INDEX_NAME not in index_inf:
            func1key1 = IndexModel(
                keys=[('func', ASCENDING), ('key', ASCENDING)],
                name=_MongoCore._INDEX_NAME)
            self.mongo_collection.create_indexes([func1key1])

    @staticmethod
    def _get_func_str(func):
        """Return the string identifying *func* inside the collection."""
        return '.{}.{}'.format(func.__module__, func.__name__)

    def get_entry_by_key(self, key):
        """Return (key, entry) for *key*, or (key, None) when not cached.

        The entry dict carries 'value', 'time', 'stale' and
        'being_calculated' fields.
        """
        res = self.mongo_collection.find_one({
            'func': _MongoCore._get_func_str(self.func),
            'key': key
        })
        if not res:
            return key, None
        try:
            value = pickle.loads(res['value'])
        except KeyError:
            # The document may exist without a value while another process
            # is still computing it (see mark_entry_being_calculated).
            value = None
        entry = {
            'value': value,
            'time': res.get('time', None),
            'stale': res.get('stale', False),
            'being_calculated': res.get('being_calculated', False)
        }
        return key, entry

    def get_entry(self, args, kwds, hash_params):
        """Return the cache entry for the given call arguments.

        The key is the pickled (args + sorted kwds) tuple, or the result of
        the user-supplied hash_params callable when one is given.
        """
        key = pickle.dumps(
            args + tuple(sorted(kwds.items()))
            if hash_params is None else hash_params(args, kwds))
        return self.get_entry_by_key(key)

    def set_entry(self, key, func_res):
        """Upsert the cached value for *key*, marking it fresh."""
        thebytes = pickle.dumps(func_res)
        self.mongo_collection.update_one(
            filter={
                'func': _MongoCore._get_func_str(self.func),
                'key': key
            },
            update={
                '$set': {
                    'func': _MongoCore._get_func_str(self.func),
                    'key': key,
                    'value': Binary(thebytes),
                    'time': datetime.now(),
                    'stale': False,
                    'being_calculated': False
                }
            },
            upsert=True
        )

    def mark_entry_being_calculated(self, key):
        """Flag the entry for *key* as currently being computed."""
        self.mongo_collection.update_one(
            filter={
                'func': _MongoCore._get_func_str(self.func),
                'key': key
            },
            update={
                '$set': {'being_calculated': True}
            },
            upsert=True
        )

    def mark_entry_not_calculated(self, key):
        """Clear the being-calculated flag for *key*, if the entry exists."""
        try:
            self.mongo_collection.update_one(
                filter={
                    'func': _MongoCore._get_func_str(self.func),
                    'key': key
                },
                update={
                    '$set': {'being_calculated': False}
                },
                upsert=False  # should not insert in this case
            )
        except OperationFailure:
            pass  # don't care in this case

    def wait_on_entry_calc(self, key):
        """Poll until the entry for *key* is calculated; return its value.

        Raises RecalculationNeeded if the entry disappears while waiting.
        """
        while True:
            time.sleep(MONGO_SLEEP_DURATION_IN_SEC)
            key, entry = self.get_entry_by_key(key)
            if entry is None:
                raise RecalculationNeeded()
            # entry is known to be non-None here; the original repeated the
            # None check redundantly.
            if not entry['being_calculated']:
                return entry['value']

    def clear_cache(self):
        """Delete every cached entry belonging to this function."""
        self.mongo_collection.delete_many(
            filter={'func': _MongoCore._get_func_str(self.func)}
        )

    def clear_being_calculated(self):
        """Reset the being-calculated flag on all of this function's entries."""
        self.mongo_collection.update_many(
            filter={
                'func': _MongoCore._get_func_str(self.func),
                'being_calculated': True
            },
            update={
                '$set': {'being_calculated': False}
            }
        )
| {
"repo_name": "shaypal5/cachier",
"path": "cachier/mongo_core.py",
"copies": "1",
"size": "5054",
"license": "mit",
"hash": 1581484316163732500,
"line_mean": 31.3974358974,
"line_max": 114,
"alpha_frac": 0.5197863079,
"autogenerated": false,
"ratio": 4.075806451612904,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5095592759512904,
"avg_score": null,
"num_lines": null
} |
# A monkey patch of the base distutils.ccompiler to use parallel builds
# Tested on 2.7, looks to be identical to 3.3.
from multiprocessing import Pool, cpu_count
from distutils.ccompiler import CCompiler
import os, sys
# Worker-pool size: honour the MAX_CONCURRENCY environment variable,
# otherwise use up to 4 processes (capped by the machine's CPU count).
try:
    MAX_PROCS = int(os.environ.get('MAX_CONCURRENCY', min(4, cpu_count())))
except NotImplementedError:
    # cpu_count() may be unavailable on some platforms; None lets
    # multiprocessing.Pool pick its own default.
    MAX_PROCS = None
# hideous monkeypatching. but. but. but.
def _mp_compile_one(tp):
(self, obj, build, cc_args, extra_postargs, pp_opts) = tp
try:
src, ext = build[obj]
except KeyError:
return
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
return
def _mp_compile(self, sources, output_dir=None, macros=None,
                include_dirs=None, debug=0, extra_preargs=None,
                extra_postargs=None, depends=None):
    """Compile one or more source files using a multiprocessing pool.

    Drop-in replacement for distutils.ccompiler.CCompiler.compile;
    see that method for the meaning of the parameters.
    """
    # A concrete compiler class can either override this method
    # entirely or implement _compile().
    macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
        output_dir, macros, include_dirs, sources, depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)

    pool = Pool(MAX_PROCS)
    try:
        print("Building using %d processes" % pool._processes)
    except Exception:
        # _processes is a private Pool attribute; ignore if absent.
        # (Narrowed from a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        pass
    arr = [(self, obj, build, cc_args, extra_postargs, pp_opts)
           for obj in objects]
    # NOTE(review): map_async discards worker results, so compile errors
    # inside workers are silently dropped here and only surface later at
    # link time.  Kept as-is to preserve the original best-effort behavior.
    pool.map_async(_mp_compile_one, arr)
    pool.close()
    pool.join()
    # Return *all* object filenames, not just the ones we just built.
    return objects
# explicitly don't enable if environment says 1 processor
if MAX_PROCS != 1 and not sys.platform.startswith('win'):
    try:
        # bug, only enable if we can make a Pool. see issue #790 and
        # http://stackoverflow.com/questions/6033599/oserror-38-errno-38-with-multiprocessing
        # NOTE(review): this probe Pool is never closed/joined -- it leaks
        # two idle worker processes for the duration of the build.
        pool = Pool(2)
        CCompiler.compile = _mp_compile
    except Exception as msg:
        print("Exception installing mp_compile, proceeding without: %s" % msg)
else:
    print("Single threaded build, not installing mp_compile: %s processes" %
        MAX_PROCS)
| {
"repo_name": "1upon0/rfid-auth-system",
"path": "GUI/printer/Pillow-2.7.0/mp_compile.py",
"copies": "1",
"size": "2192",
"license": "apache-2.0",
"hash": 1990287201610395000,
"line_mean": 33.7936507937,
"line_max": 93,
"alpha_frac": 0.6656021898,
"autogenerated": false,
"ratio": 3.6533333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4818935523133333,
"avg_score": null,
"num_lines": null
} |
'''A month, as in 200703; immutable
Copyright 2017 Roy E. Lowrance
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import datetime
import pdb
import unittest
class Month(object):
    """A calendar month such as 200703 (March 2007); treat as immutable."""

    def __init__(self, value1, value2=None):
        '''Constructors

        Month('200703')
        Month(200703)
        Month(2007, 3)
        Month(datetime.date)

        Raises TypeError for unsupported argument types.
        '''
        if value2 is None:
            if isinstance(value1, str):
                # 'YYYYMM' string
                self.year = int(value1[:4])
                self.month = int(value1[4:])
            elif isinstance(value1, int):
                # YYYYMM integer; floor division keeps year an int under
                # Python 3 (true division '/' produced a float year)
                self.month = value1 % 100
                self.year = (value1 - self.month) // 100
            elif isinstance(value1, Month):
                # copy constructor
                self.month = value1.month
                self.year = value1.year
            elif isinstance(value1, datetime.date):
                self.month = value1.month
                self.year = value1.year
            else:
                # was a bare print(), which left the instance half-built
                # and produced a confusing AttributeError later
                raise TypeError(
                    'construction error: value1 is of type %s (%r)' %
                    (type(value1), value1))
        else:
            # Month(year, month)
            self.year = int(value1)
            self.month = int(value2)
        # enforce invariant (other methods depend on this)
        assert self.year > 0, self
        assert 1 <= self.month <= 12, self

    def __repr__(self):
        return 'Month(year=%d, month=%d)' % (self.year, self.month)

    def increment(self, by=1):
        'return new Month *by* months after self'
        assert by >= 0, by
        month = self.month + by
        if month > 12:
            delta_years = month // 12
            month = month - 12 * delta_years
            year = self.year + delta_years
            return Month(year, month)
        else:
            return Month(self.year, month)

    def decrement(self, by=1):
        'return new Month *by* months before self'
        assert by >= 0, by
        month = self.month - by
        year = self.year
        while month <= 0:
            month += 12
            year -= 1
        return Month(year, month)

    def as_str(self):
        'return the YYYYMM string form'
        return '%04d%02d' % (self.year, self.month)

    def as_int(self):
        'return the YYYYMM integer form'
        return self.year * 100 + self.month

    def equal(self, other):
        'legacy equality helper; prefer =='
        return self.year == other.year and self.month == other.month

    def __eq__(self, other):
        result = self.year == other.year and self.month == other.month
        return result

    def __ne__(self, other):
        # explicit for Python 2, where != does not fall back to __eq__
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.year, self.month))  # hash of tuple of properties
class TestMonth(unittest.TestCase):
    """Unit tests for Month."""

    def test_from_datetimedate(self):
        # removed leftover pdb.set_trace(), which halted every test run
        dt = datetime.date(2007, 5, 2)
        m = Month(dt)
        self.assertTrue(m.year == 2007)
        self.assertTrue(m.month == 5)

    def test_eq_2_with_same_content(self):
        a = Month(2003, 1)
        b = Month(2003, 1)
        self.assertTrue(a == b)

    def test_eq_copy(self):
        a = Month(2003, 1)
        b = a
        self.assertTrue(a == b)

    def test_neq_different(self):
        a = Month(2003, 1)
        b = Month(2003, 2)
        self.assertFalse(a == b)
        self.assertFalse(b == a)
        c = Month(2004, 1)
        self.assertFalse(a == c)
        self.assertFalse(c == a)

    def test_eq_same(self):
        a = Month(2003, 1)
        self.assertTrue(a == a)

    def test_set_with_same_element_len_2b(self):
        a = Month(2003, 1)
        b = Month(2003, 2)
        c = Month(2003, 1)
        assert a == c, (a, c)
        s = set([a, b, c])
        assert len(s) == 2
        self.assertEqual(len(s), 2)
        return

    def test_set_with_2_element_len_2(self):
        a = Month(2003, 1)
        b = Month(2003, 2)
        s = set([a, b])
        self.assertEqual(len(s), 2)
        return

    def test_set_with_same_element_len_1b(self):
        a = Month(2003, 1)
        a2 = a
        s = set([a, a2])
        self.assertEqual(len(s), 1)
        return

    def test_set_with_same_element_len_1(self):
        a = Month(2003, 1)
        s = set([a, a])
        self.assertEqual(len(s), 1)
        return

    def test_eq_based_on_content(self):
        a = Month(2003, 1)
        b = Month(2003, 1)
        c = Month(2003, 2)
        self.assertTrue(a == b)
        self.assertFalse(a == c)
        self.assertTrue(a == a)

    def test_constructor(self):
        self.assertTrue(Month('200703').equal(Month(2007, 0o3)))
        self.assertTrue(Month(200703).equal(Month(2007, 0o3)))
        self.assertTrue(Month(200712).equal(Month(2007, 12)))
        m1 = Month(2007, 3)
        m2 = Month(m1)
        # The copy constructor yields an equal but distinct object.
        # (The original asserted m1 != m2, which fails under Python 3's
        # reflected __ne__ since the copy compares equal.)
        self.assertTrue(m1 == m2)
        self.assertIsNot(m1, m2)

    def test_as_str(self):
        self.assertTrue(Month(200703).as_str() == '200703')

    def test_as_int(self):
        self.assertTrue(Month(200703).as_int() == 200703)

    def test_equal(self):
        self.assertTrue(Month('200703').equal(Month(2007, 0o3)))

    def test_increment(self):
        self.assertTrue(Month(200612).increment().equal(Month(200701)))
        self.assertTrue(Month(200612).increment(1).equal(Month(200701)))
        self.assertTrue(Month(200612).increment(2).equal(Month(200702)))
        self.assertTrue(Month(200612).increment(14).equal(Month(200802)))
        self.assertTrue(Month(200701).increment().equal(Month(200702)))
        self.assertTrue(Month(200712).increment().equal(Month(200801)))

    def test_decrement(self):
        self.assertTrue(Month(200701).decrement().equal(Month(200612)))
        self.assertTrue(Month(200701).decrement(1).equal(Month(200612)))
        self.assertTrue(Month(200701).decrement(2).equal(Month(200611)))
        self.assertTrue(Month(200701).decrement(14).equal(Month(200511)))
        self.assertTrue(Month(200712).decrement().equal(Month(200711)))
        self.assertTrue(Month(200701).decrement(1).equal(Month(200612)))
        self.assertTrue(Month(200701).decrement(2).equal(Month(200611)))
        self.assertTrue(Month(200701).decrement(12).equal(Month(200601)))
        self.assertTrue(Month(200701).decrement(13).equal(Month(200512)))
        self.assertTrue(Month(200701).decrement(120).equal(Month(199701)))
if __name__ == '__main__':
    # Removed the dead `if False: pdb.set_trace()` debugging leftover.
    unittest.main()
| {
"repo_name": "rlowrance/python_lib",
"path": "applied_data_science/Month.py",
"copies": "2",
"size": "6644",
"license": "apache-2.0",
"hash": -2165888901364996900,
"line_mean": 31.0966183575,
"line_max": 88,
"alpha_frac": 0.5782661048,
"autogenerated": false,
"ratio": 3.5836030204962244,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5161869125296225,
"avg_score": null,
"num_lines": null
} |
""" A monthly/quarterly/yearly report of incomes and expenses
"""
from reports import Report
class Books(Report):
    """A monthly/quarterly/yearly report of incomes and expenses.

    Fetches paid, non-canceled invoices and expenses from the FastBill-style
    client for the selected date scope and renders them as CSV-like rows,
    either combined or as separate income/expense sections.
    """

    description = "A monthly/quarterly/yearly report of incomes and " \
                  "expenses"
    # This report registers its own extra CLI arguments and supports CSV
    # output plus a date scope (see the Report base class).
    special_args = True
    want_csv = True
    want_scope = True

    def get_args(self, parser):
        """Register this report's command-line arguments on *parser*."""
        super(Books, self).get_args(parser)
        parser.add_argument(
            "-c",
            "--combine",
            dest="combine",
            action="store_true",
            help="Combine expenses and income in one report"
        )
        parser.add_argument(
            "--sort",
            dest="sort",
            default="date",
            help="Key to sort report (date, paid_date, name, title, subtotal, "
            "total or a VAT value like 19.00) [date]"
        )
        parser.add_argument(
            "--reverse",
            dest="reverse",
            action="store_true",
            help="Reverse sort?"
        )

    def report(self):
        """Build and return the report as a list of delimiter-joined lines."""
        # Create scope filter
        scope_filters = self.get_scope_filters()
        # Get invoices of current scope
        incomes = []
        income_vats = []   # distinct VAT percentages seen on invoices
        expenses = []
        expense_vats = []  # distinct VAT percentages seen on expenses
        for scope_filter in scope_filters:
            tmp = self.client.invoice_get(
                filter=scope_filter
            )
            for invoice in tmp["INVOICES"]:
                if invoice["PAID_DATE"] == "0000-00-00 00:00:00":
                    # Skip unpaid invoices
                    continue
                if invoice["IS_CANCELED"] == "1":
                    # Skip canceled invoices
                    continue
                # Display name: "Organization (First Last)" when available.
                name = " ".join([invoice["FIRST_NAME"], invoice["LAST_NAME"]])
                if "ORGANIZATION" in invoice and invoice["ORGANIZATION"] != "":
                    name = "%s (%s)" % (invoice["ORGANIZATION"], name)
                # Aggregate VAT values per (rounded) VAT percentage.
                vat_sum = {
                }
                for vat_item in invoice["VAT_ITEMS"]:
                    vat_item["VAT_PERCENT"] = str(round(float(vat_item["VAT_PERCENT"])))
                    if vat_item["VAT_PERCENT"] not in vat_sum:
                        vat_sum[vat_item["VAT_PERCENT"]] = 0
                    if vat_item["VAT_PERCENT"] not in income_vats:
                        income_vats.append(vat_item["VAT_PERCENT"])
                    vat_sum[vat_item["VAT_PERCENT"]] += vat_item["VAT_VALUE"]
                # NOTE(review): SUB_TOTAL/TOTAL are stored as returned by the
                # API here, while the expense branch below casts them to
                # float -- presumably the invoice API already returns
                # numbers; confirm, otherwise the summation below breaks.
                incomes.append({
                    "date": invoice["INVOICE_DATE"],
                    "paid_date": invoice["PAID_DATE"],
                    "name": name,
                    "title": invoice["INVOICE_TITLE"],
                    "subtotal": invoice["SUB_TOTAL"],
                    "total": invoice["TOTAL"],
                    "vat": vat_sum,
                    "note": invoice["NOTE"]
                })
            tmp = self.client.expense_get(filter=scope_filter)
            for expense in tmp["EXPENSES"]:
                vat_sum = {
                }
                for vat_item in expense["VAT_ITEMS"]:
                    vat_item["VAT_PERCENT"] = str(round(float(vat_item["VAT_PERCENT"])))
                    if vat_item["VAT_PERCENT"] not in vat_sum:
                        vat_sum[vat_item["VAT_PERCENT"]] = 0
                    if vat_item["VAT_PERCENT"] not in expense_vats:
                        expense_vats.append(vat_item["VAT_PERCENT"])
                    vat_sum[vat_item["VAT_PERCENT"]] += float(
                        vat_item["VAT_VALUE"]
                    )
                expenses.append({
                    "date": expense["INVOICE_DATE"],
                    "paid_date": expense["PAID_DATE"],
                    "name": expense["ORGANIZATION"],
                    "title": "",
                    "subtotal": float(expense["SUB_TOTAL"]),
                    "total": float(expense["TOTAL"]),
                    "vat": vat_sum,
                    "note": expense["NOTE"]
                })
        # Output report
        # Incomes
        report = []
        if self.args.combine:
            # One merged, sorted section of incomes and expenses.
            combined = incomes + expenses
            combined.sort(key=lambda sort_row: sort_row[self.args.sort],
                          reverse=self.args.reverse)
            report_data = {
                _("INCOME/EXPENSES"): {
                    "data": combined,
                    "vats": list(set(income_vats + expense_vats))
                }
            }
        else:
            # Two separate sections, each sorted independently.
            incomes.sort(key=lambda sort_row: sort_row[self.args.sort],
                         reverse=self.args.reverse)
            expenses.sort(key=lambda sort_row: sort_row[self.args.sort],
                          reverse=self.args.reverse)
            report_data = {
                _("INCOME"): {
                    "data": incomes,
                    "vats": income_vats
                },
                _("EXPENSES"): {
                    "data": expenses,
                    "vats": expense_vats
                }
            }
        # NOTE(review): iteritems() is Python-2-only; _() is presumably a
        # gettext translation function installed globally by the framework.
        for key, data in report_data.iteritems():
            report.append(key)
            # Header row: fixed columns, then one column per VAT percentage.
            header = self.report_args["csv_delimiter"].join(
                [_("date"), _("paid_date"), _("name"), _("title")] +
                data["vats"] +
                [_("subtotal"), _("total"), _("note")]
            )
            report.append(header)
            # Running totals for the final sum row of the section.
            data_sum = {
                "subtotal": 0,
                "total": 0
            }
            for row in data["data"]:
                columns = [
                    row["date"],
                    row["paid_date"],
                    row["name"],
                    row["title"]
                ]
                for vat in data["vats"]:
                    if not vat in data_sum:
                        data_sum[vat] = 0
                    if vat in row["vat"]:
                        data_sum[vat] += row["vat"][vat]
                        columns.append(str(self.moneyfmt(float(row["vat"][vat]))))
                    else:
                        columns.append("0")
                columns.append(str(self.moneyfmt(float(row["subtotal"]))))
                data_sum["subtotal"] += row["subtotal"]
                columns.append(str(self.moneyfmt(float(row["total"]))))
                data_sum["total"] += float(row["total"])
                columns.append(row["note"])
                report.append(self.report_args["csv_delimiter"].join(columns))
            # Sum row: blank fixed columns, then VAT and money totals.
            sum_row = [
                "",
                "",
                "",
                ""
            ]
            for vat in data["vats"]:
                if vat in data_sum:
                    sum_row.append(str(self.moneyfmt(float(data_sum[vat]))))
            sum_row.append(str(self.moneyfmt(float(data_sum["subtotal"]))))
            sum_row.append(str(self.moneyfmt(float(data_sum["total"]))))
            sum_row.append("")
            report.append(self.report_args["csv_delimiter"].join(sum_row))
            report.append("")
        return report
| {
"repo_name": "dploeger/fastbillreport",
"path": "reports/books.py",
"copies": "1",
"size": "7082",
"license": "mit",
"hash": -2434888576352505300,
"line_mean": 26.7725490196,
"line_max": 88,
"alpha_frac": 0.4286924598,
"autogenerated": false,
"ratio": 4.307785888077859,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006602293596934422,
"num_lines": 255
} |
# a more accurate name for this would be timer.py, however the "spec"
# calls for the pi based timer to be called piMC.py. Who am I to argue.
import RPi.GPIO as GPIO, time, os, random
from vhsled_spi import *
from vhsled_text import *
from vhsled_colour import *
from vhsled_rain import *
def countdown_timer(pixels, c, time_s):
    # Run a countdown lasting roughly time_s seconds by clearing the grid
    # one pixel per tick (time_s / (width*height) seconds each).
    # NOTE(review): the first pass computes a red/green checkerboard but
    # never calls writestrip, so it is never displayed; the 'c' parameter
    # is immediately shadowed by the local assignment -- TODO confirm
    # whether an initial writestrip was intended here.
    for i in range (0,width):
        for j in range(0,height):
            c = Color(255,0,0) if (j+i)%2>0 else Color(0,255,0)
            setpixelcolor(pixels,i,j,c)
    for i in range (0,width):
        for j in range(0,height):
            setpixelcolor(pixels,i,j,Color(0,0,0))
            writestrip(pixels,spidev)
            try:
                time.sleep(time_s/(width*height))
            except:
                # interrupted (e.g. Ctrl-C): abort the countdown early
                return
GPIO.setmode(GPIO.BCM)
width = 42
height = 10
ledpixels = []
for i in range(0,width):
ledpixels.append([0]*height)
spidev = file("/dev/spidev0.0", "w")
random.seed()
c = randomColor()
setFullColor(ledpixels,spidev,c)
# a few nice and bright colours with at least one channel at full.
bright_colors = [Color(255,0,0),Color(0,255,0),Color(0,0,255),Color(255,255,255),Color(255,255,0),Color(255,0,255),Color(0,255,255)]
while True:
text = raw_input("display string (blank to start countdown, 'exit' to quit):")
if len(text) > 0:
if text == "flash": #undocumented mode to strobe the display
try:
while True:
colorFlashMode(ledpixels,spidev,10,0.1)
except:
print "done flashing"
elif text =="exit":
break
else:
scrollText(ledpixels,spidev,characters, text, random.choice(bright_colors),Color(0,0,0),0.01)
else:
countdown_timer(ledpixels, random.choice(bright_colors),90.0)
#setFullColor(ledpixels,spidev,Color(0,0,0))
try:
rain(ledpixels,spidev,randomColor(),0.05,1000000000)
except:
print "ready"
spidev.close
| {
"repo_name": "ScienceWorldCA/domelights",
"path": "backend/examples/VHSled/piMC.py",
"copies": "1",
"size": "1730",
"license": "apache-2.0",
"hash": -782133927828071600,
"line_mean": 25.6153846154,
"line_max": 132,
"alpha_frac": 0.6936416185,
"autogenerated": false,
"ratio": 2.6132930513595167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38069346698595163,
"avg_score": null,
"num_lines": null
} |
# a more accurate name for this would be timer.py, however the "spec"
# calls for the pi based timer to be called piMC.py. Who am I to argue.
import RPi.GPIO as GPIO, time, os, random
import datetime
from vhsled_spi import *
from vhsled_text import *
from vhsled_colour import *
GPIO.setmode(GPIO.BCM)
width = 42
height = 10
strings = ["VHS ! VHS !", "Welcome to the Bunker","drink beer", "hack the planet", "42", "feed donatio", "go hack something", "the cake is a lie !"]
oddstrings = ["subliminal message","They Live","fight the power","buy our stuff!"]
ledpixels = []
for i in range(0,width):
ledpixels.append([0]*height)
spidev = file("/dev/spidev0.0", "w")
random.seed()
debug = 0
###c = randomColor()
setFullColor(ledpixels,spidev,Color(0,0,0))
# a few nice and bright colours with at least one channel at full.
bright_colors = [Color(0,255,0),Color(0,0,255),Color(255,255,0),Color(255,0,255),Color(0,255,255)]
# clock interval
clockInterval = 0.125
brightnessmod = 0.1
stepinterval = 8
red_color = random.randint(0,255)
green_color = random.randint(0,255)
blue_color = random.randint(0,255)
# modifiers
red_mod = random.randint(1,stepinterval)
green_mod = random.randint(1,stepinterval)
blue_mod = random.randint(1,stepinterval)
# shift modifiers
if random.randint(0,1) == 1:
red_mod = 0 - random.randint(1,stepinterval)
if random.randint(0,1) == 1:
green_mod = 0 - random.randint(1,stepinterval)
if random.randint(0,1) == 1:
green_mod = 0 - random.randint(1,stepinterval)
while True:
if debug == 1:
print "===== Run ====="
print "red_color: %s" % red_color
print "green_color: %s" % green_color
print "blue_color: %s" % blue_color
print "red_mod: %s" % red_mod
print "green_mod: %s" % green_mod
print "blue_mod: %s" % blue_mod
## Shift red
if (red_color+red_mod) < 0:
red_mod = random.randint(1,stepinterval)
elif (red_color+red_mod) > 255:
red_mod = 0 - random.randint(1,stepinterval)
red_color = red_color + red_mod;
## Shift green
if (green_color+green_mod) < 0:
green_mod = random.randint(1,stepinterval)
elif (green_color+green_mod) > 255:
green_mod = 0 - random.randint(1,stepinterval)
green_color = green_color + green_mod;
## Shift blue
if (blue_color+blue_mod) < 0:
blue_mod = random.randint(1,stepinterval)
elif (blue_color+blue_mod) > 255:
blue_mod = 0 - random.randint(1,stepinterval)
blue_color = blue_color + blue_mod;
###background_color = Color( int((255-red_color)*brightnessmod), int((255-green_color)*brightnessmod), int((255-blue_color)*brightnessmod) )
background_color = Color( 0, 0, int((255-green_color)*brightnessmod) )
clockTextOnce(ledpixels,spidev,characters,":",Color(red_color,green_color,blue_color),background_color)
###clockTextOnce(ledpixels,spidev,characters,":",Color(0,0,0),Color(red_color,green_color,blue_color))
time.sleep(0.250)
###clockText(ledpixels,spidev,characters,":",Color(0,255,0),Color(0,0,0),0.01)
| {
"repo_name": "ScienceWorldCA/domelights",
"path": "backend/examples/VHSled/vhsled_fadeclock2.py",
"copies": "1",
"size": "2965",
"license": "apache-2.0",
"hash": -4581882270612198000,
"line_mean": 28.9494949495,
"line_max": 148,
"alpha_frac": 0.6863406408,
"autogenerated": false,
"ratio": 2.7428307123034226,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3929171353103422,
"avg_score": null,
"num_lines": null
} |
"""A more complex example of do/undo capability with PyTables.
Here, names has been assigned to the marks, and jumps are done between
marks.
"""
import tables
# Create an HDF5 file
fileh = tables.open_file('tutorial3-2.h5', 'w', title='Undo/Redo demo 2')

#'-**-**-**-**-**-**- enable undo/redo log -**-**-**-**-**-**-**-'
fileh.enable_undo()

# Start undoable operations
fileh.create_array('/', 'otherarray1', [3, 4], 'Another array 1')
fileh.create_group('/', 'agroup', 'Group 1')
# Create a 'first' mark
fileh.mark('first')
fileh.create_array('/agroup', 'otherarray2', [4, 5], 'Another array 2')
fileh.create_group('/agroup', 'agroup2', 'Group 2')
# Create a 'second' mark
fileh.mark('second')
fileh.create_array('/agroup/agroup2', 'otherarray3', [5, 6], 'Another array 3')
# Create a 'third' mark
fileh.mark('third')
fileh.create_array('/', 'otherarray4', [6, 7], 'Another array 4')
fileh.create_array('/agroup', 'otherarray5', [7, 8], 'Another array 5')


def _check_nodes(present, absent):
    """Assert that each path in *present* exists in the file and none of
    the paths in *absent* do."""
    for path in present:
        assert path in fileh
    for path in absent:
        assert path not in fileh


# Now go to mark 'first': only nodes created before that mark survive.
fileh.goto('first')
_check_nodes(
    present=['/otherarray1', '/agroup'],
    absent=['/agroup/agroup2', '/agroup/otherarray2',
            '/agroup/agroup2/otherarray3', '/otherarray4',
            '/agroup/otherarray5'])

# Go forward to mark 'third'.
fileh.goto('third')
_check_nodes(
    present=['/otherarray1', '/agroup', '/agroup/agroup2',
             '/agroup/otherarray2', '/agroup/agroup2/otherarray3'],
    absent=['/otherarray4', '/agroup/otherarray5'])

# Now go back to mark 'second'.
fileh.goto('second')
_check_nodes(
    present=['/otherarray1', '/agroup', '/agroup/agroup2',
             '/agroup/otherarray2'],
    absent=['/agroup/agroup2/otherarray3', '/otherarray4',
            '/agroup/otherarray5'])

# Go to the end of the log: every node is restored.
fileh.goto(-1)
_check_nodes(
    present=['/otherarray1', '/agroup', '/agroup/agroup2',
             '/agroup/otherarray2', '/agroup/agroup2/otherarray3',
             '/otherarray4', '/agroup/otherarray5'],
    absent=[])

# Check that objects have come back to life in a sane state
assert fileh.root.otherarray1.read() == [3, 4]
assert fileh.root.agroup.otherarray2.read() == [4, 5]
assert fileh.root.agroup.agroup2.otherarray3.read() == [5, 6]
assert fileh.root.otherarray4.read() == [6, 7]
assert fileh.root.agroup.otherarray5.read() == [7, 8]

#'-**-**-**-**-**-**- disable undo/redo log -**-**-**-**-**-**-**-'
fileh.disable_undo()

# Close the file
fileh.close()
| {
"repo_name": "jennolsen84/PyTables",
"path": "examples/tutorial3-2.py",
"copies": "13",
"size": "2611",
"license": "bsd-3-clause",
"hash": -8551970128654826000,
"line_mean": 32.0506329114,
"line_max": 79,
"alpha_frac": 0.6966679433,
"autogenerated": false,
"ratio": 2.8566739606126914,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011423175980138005,
"num_lines": 79
} |
"""A more liberal autolinker
Inspired by Django's urlize function.
Positive examples:
>>> import markdown
>>> md = markdown.Markdown(extensions=['urlize'])
>>> md.convert('http://example.com/')
u'<p><a href="http://example.com/">http://example.com/</a></p>'
>>> md.convert('go to http://example.com')
u'<p>go to <a href="http://example.com">http://example.com</a></p>'
>>> md.convert('example.com')
u'<p><a href="http://example.com">example.com</a></p>'
>>> md.convert('example.net')
u'<p><a href="http://example.net">example.net</a></p>'
>>> md.convert('www.example.us')
u'<p><a href="http://www.example.us">www.example.us</a></p>'
>>> md.convert('(www.example.us/path/?name=val)')
u'<p>(<a href="http://www.example.us/path/?name=val">www.example.us/path/?name=val</a>)</p>'
>>> md.convert('go to <http://example.com> now!')
u'<p>go to <a href="http://example.com">http://example.com</a> now!</p>'
Negative examples:
>>> md.convert('del.icio.us')
u'<p>del.icio.us</p>'
"""
import markdown
# Global Vars
# Alternatives are tried left to right by the inline pattern:
URLIZE_RE = '(%s)' % '|'.join([
    r'<(?:f|ht)tps?://[^>]*>',                # <http://...> in angle brackets
    r'\b(?:f|ht)tps?://[^)<>\s]+[^.,)<>\s]',  # bare http(s)/ftp(s) URLs
    r'\bwww\.[^)<>\s]+[^.,)<>\s]',            # www.-prefixed hosts, no scheme
    r'[^(<\s]+\.(?:com|net|org)\b',           # bare .com/.net/.org domains
])
class UrlizePattern(markdown.inlinepatterns.Pattern):
    """ Return a link Element given an autolink (`http://example/com`). """

    def handleMatch(self, m):
        url = m.group(2)
        # Strip the angle brackets from <http://...> style autolinks.
        if url.startswith('<'):
            url = url[1:-1]
        text = url
        scheme = url.split('://')[0]
        if scheme not in ('http', 'https', 'ftp'):
            # No recognised scheme: bare addresses become mailto links,
            # anything else defaults to plain http.
            if '@' in url and '/' not in url:
                url = 'mailto:' + url
            else:
                url = 'http://' + url
        el = markdown.util.etree.Element("a")
        el.set('href', url)
        el.text = markdown.util.AtomicString(text)
        return el
class UrlizeExtension(markdown.Extension):
    """ Urlize Extension for Python-Markdown. """

    def extendMarkdown(self, md, md_globals):
        """ Replace autolink with UrlizePattern """
        # Reuse the built-in 'autolink' slot so our more liberal pattern
        # takes its place in the inline-pattern chain.
        md.inlinePatterns['autolink'] = UrlizePattern(URLIZE_RE, md)
def makeExtension(configs=None):
    """Entry point used by markdown.Markdown(extensions=['urlize'])."""
    return UrlizeExtension(configs=configs)


if __name__ == "__main__":
    # Run the doctests in the module docstring.
    import doctest
    doctest.testmod()
| {
"repo_name": "dreikanter/public-static",
"path": "publicstatic/urlize.py",
"copies": "1",
"size": "2224",
"license": "bsd-3-clause",
"hash": -6062903256282514000,
"line_mean": 26.4567901235,
"line_max": 92,
"alpha_frac": 0.5737410072,
"autogenerated": false,
"ratio": 3.0217391304347827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9084747774490645,
"avg_score": 0.002146472628827517,
"num_lines": 81
} |
"""A more or less complete dictionary like interface for the RISC OS environment."""
import riscos
class _Environ:
    """A dictionary-like wrapper around the RISC OS environment variables.

    All reads go straight to the live environment via the riscos module;
    the object itself holds no state.
    """

    def __init__(self, initial = None):
        # *initial* is accepted for interface compatibility but ignored;
        # the environment itself is the backing store.
        pass

    def __repr__(self):
        return repr(riscos.getenvdict())

    def __cmp__(self, dict):
        return cmp(riscos.getenvdict(), dict)

    def __len__(self):
        return len(riscos.getenvdict())

    def __getitem__(self, key):
        # Modernized: '<>' replaced with '!='/'is not' (removed in Python 3).
        ret = riscos.getenv(key)
        if ret is not None:
            return ret
        else:
            # include the missing key in the exception for easier debugging
            raise KeyError(key)

    def __setitem__(self, key, item):
        riscos.putenv(key, item)

    def __delitem__(self, key):
        riscos.delenv(key)

    def clear(self):
        # too dangerous on RISC OS
        pass

    def copy(self):
        return riscos.getenvdict()

    def keys(self): return riscos.getenvdict().keys()
    def items(self): return riscos.getenvdict().items()
    def values(self): return riscos.getenvdict().values()

    def has_key(self, key):
        value = riscos.getenv(key)
        return value is not None

    def update(self, dict):
        for k, v in dict.items():
            riscos.putenv(k, v)

    def get(self, key, failobj=None):
        value = riscos.getenv(key)
        if value is not None:
            return value
        else:
            return failobj
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-2.2/Lib/plat-riscos/riscosenviron.py",
"copies": "1",
"size": "1293",
"license": "mit",
"hash": 2431785879574984700,
"line_mean": 29.0697674419,
"line_max": 84,
"alpha_frac": 0.5715390565,
"autogenerated": false,
"ratio": 3.6732954545454546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47448345110454543,
"avg_score": null,
"num_lines": null
} |
"""A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
    """Mapping class that wraps a real dict kept in the ``data`` attribute.

    Subclass this (instead of ``dict``) to override mapping behaviour in
    plain Python; every operation funnels through the methods below.
    """
    # NOTE: __init__ uses the *args "descriptor style" deliberately, so a
    # caller may still pass keyword arguments literally named 'self' or
    # 'dict'; the instance and the optional mapping are unpacked by hand.
    def __init__(*args, **kwargs):
        if not args:
            raise TypeError("descriptor '__init__' of 'UserDict' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        if args:
            dict = args[0]
        elif 'dict' in kwargs:
            # Legacy spelling UserDict(dict=...) still works but warns.
            dict = kwargs.pop('dict')
            import warnings
            warnings.warn("Passing 'dict' as keyword argument is "
                          "deprecated", PendingDeprecationWarning,
                          stacklevel=2)
        else:
            dict = None
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)
    def __repr__(self): return repr(self.data)
    def __cmp__(self, dict):
        if isinstance(dict, UserDict):
            return cmp(self.data, dict.data)
        else:
            return cmp(self.data, dict)
    __hash__ = None # Avoid Py3k warning
    def __len__(self): return len(self.data)
    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        # Give subclasses a __missing__ hook, mirroring dict's behaviour.
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)
    def __setitem__(self, key, item): self.data[key] = item
    def __delitem__(self, key): del self.data[key]
    def clear(self): self.data.clear()
    def copy(self):
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        data = self.data
        try:
            # Temporarily detach self.data so copy.copy() cannot share it,
            # then refill the copy through update() to respect overrides.
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c
    def keys(self): return self.data.keys()
    def items(self): return self.data.items()
    def iteritems(self): return self.data.iteritems()
    def iterkeys(self): return self.data.iterkeys()
    def itervalues(self): return self.data.itervalues()
    def values(self): return self.data.values()
    def has_key(self, key): return key in self.data
    # update() uses the same descriptor-style signature as __init__, and for
    # the same reason: 'self'/'dict' must remain usable as keyword names.
    def update(*args, **kwargs):
        if not args:
            raise TypeError("descriptor 'update' of 'UserDict' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        if args:
            dict = args[0]
        elif 'dict' in kwargs:
            dict = kwargs.pop('dict')
            import warnings
            warnings.warn("Passing 'dict' as keyword argument is deprecated",
                          PendingDeprecationWarning, stacklevel=2)
        else:
            dict = None
        if dict is None:
            pass
        elif isinstance(dict, UserDict):
            self.data.update(dict.data)
        elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
            self.data.update(dict)
        else:
            # Generic mapping: go through __setitem__ so overrides apply.
            for k, v in dict.items():
                self[k] = v
        if len(kwargs):
            self.data.update(kwargs)
    def get(self, key, failobj=None):
        if key not in self:
            return failobj
        return self[key]
    def setdefault(self, key, failobj=None):
        if key not in self:
            self[key] = failobj
        return self[key]
    def pop(self, key, *args):
        return self.data.pop(key, *args)
    def popitem(self):
        return self.data.popitem()
    def __contains__(self, key):
        return key in self.data
    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
class IterableUserDict(UserDict):
    """UserDict variant whose instances support direct key iteration."""

    def __iter__(self):
        # Delegate iteration straight to the wrapped dictionary.
        return self.data.__iter__()
import _abcoll
# Register IterableUserDict as a virtual subclass of MutableMapping so that
# isinstance()/issubclass() checks against the ABC succeed.
_abcoll.MutableMapping.register(IterableUserDict)
class DictMixin:
    """Mixin supplying the full dict API for minimal mapping classes.

    The host class must provide __getitem__, __setitem__, __delitem__ and
    keys(); everything else below is derived from those four methods.
    """
    # Mixin defining all dictionary methods for classes that already have
    # a minimum dictionary interface including getitem, setitem, delitem,
    # and keys. Without knowledge of the subclass constructor, the mixin
    # does not define __init__() or copy(). In addition to the four base
    # methods, progressively more efficiency comes with defining
    # __contains__(), __iter__(), and iteritems().
    # second level definitions support higher levels
    def __iter__(self):
        for k in self.keys():
            yield k
    def has_key(self, key):
        # EAFP membership test built on __getitem__.
        try:
            self[key]
        except KeyError:
            return False
        return True
    def __contains__(self, key):
        return self.has_key(key)
    # third level takes advantage of second level definitions
    def iteritems(self):
        for k in self:
            yield (k, self[k])
    def iterkeys(self):
        return self.__iter__()
    # fourth level uses definitions from lower levels
    def itervalues(self):
        for _, v in self.iteritems():
            yield v
    def values(self):
        return [v for _, v in self.iteritems()]
    def items(self):
        return list(self.iteritems())
    def clear(self):
        for key in self.keys():
            del self[key]
    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
    def pop(self, key, *args):
        # The optional single extra positional argument is the default value.
        if len(args) > 1:
            raise TypeError, "pop expected at most 2 arguments, got "\
                              + repr(1 + len(args))
        try:
            value = self[key]
        except KeyError:
            if args:
                return args[0]
            raise
        del self[key]
        return value
    def popitem(self):
        try:
            k, v = self.iteritems().next()
        except StopIteration:
            raise KeyError, 'container is empty'
        del self[k]
        return (k, v)
    def update(self, other=None, **kwargs):
        # Make progressively weaker assumptions about "other"
        if other is None:
            pass
        elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
            for k, v in other.iteritems():
                self[k] = v
        elif hasattr(other, 'keys'):
            for k in other.keys():
                self[k] = other[k]
        else:
            # Last resort: assume an iterable of (key, value) pairs.
            for k, v in other:
                self[k] = v
        if kwargs:
            self.update(kwargs)
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def __repr__(self):
        return repr(dict(self.iteritems()))
    def __cmp__(self, other):
        if other is None:
            return 1
        if isinstance(other, DictMixin):
            other = dict(other.iteritems())
        return cmp(dict(self.iteritems()), other)
    def __len__(self):
        return len(self.keys())
| {
"repo_name": "JVenberg/PokemonGo-Bot-Desktop",
"path": "pywin/Lib/UserDict.py",
"copies": "62",
"size": "7060",
"license": "mit",
"hash": 5384610070043947000,
"line_mean": 32.1455399061,
"line_max": 79,
"alpha_frac": 0.5402266289,
"autogenerated": false,
"ratio": 4.278787878787878,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005861008251014284,
"num_lines": 213
} |
"""A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
    """Dict wrapper class keeping its contents in the ``data`` attribute."""
    def __init__(self, dict=None, **kwargs):
        self.data = {}
        if dict is not None:
            if not hasattr(dict,'keys'):
                dict = type({})(dict) # make mapping from a sequence
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)
    def __repr__(self): return repr(self.data)
    def __cmp__(self, dict):
        if isinstance(dict, UserDict):
            return cmp(self.data, dict.data)
        else:
            return cmp(self.data, dict)
    def __len__(self): return len(self.data)
    def __getitem__(self, key): return self.data[key]
    def __setitem__(self, key, item): self.data[key] = item
    def __delitem__(self, key): del self.data[key]
    def clear(self): self.data.clear()
    def copy(self):
        if self.__class__ is UserDict:
            return UserDict(self.data)
        import copy
        data = self.data
        try:
            # Detach self.data so copy.copy() cannot share the dict, then
            # repopulate the copy through update() so overrides are honoured.
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c
    def keys(self): return self.data.keys()
    def items(self): return self.data.items()
    def iteritems(self): return self.data.iteritems()
    def iterkeys(self): return self.data.iterkeys()
    def itervalues(self): return self.data.itervalues()
    def values(self): return self.data.values()
    def has_key(self, key): return self.data.has_key(key)
    def update(self, dict):
        # Fast bulk paths for UserDict and real dicts; generic fallback
        # goes through __setitem__ so subclass overrides apply.
        if isinstance(dict, UserDict):
            self.data.update(dict.data)
        elif isinstance(dict, type(self.data)):
            self.data.update(dict)
        else:
            for k, v in dict.items():
                self[k] = v
    def get(self, key, failobj=None):
        if not self.has_key(key):
            return failobj
        return self[key]
    def setdefault(self, key, failobj=None):
        if not self.has_key(key):
            self[key] = failobj
        return self[key]
    def pop(self, key, *args):
        return self.data.pop(key, *args)
    def popitem(self):
        return self.data.popitem()
    def __contains__(self, key):
        return key in self.data
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    # Pre-decorator spelling of @classmethod (this file targets old Pythons).
    fromkeys = classmethod(fromkeys)
class IterableUserDict(UserDict):
    """UserDict variant whose instances support direct key iteration."""

    def __iter__(self):
        # Delegate iteration straight to the wrapped dictionary.
        return self.data.__iter__()
class DictMixin:
    """Mixin supplying the full dict API for minimal mapping classes.

    The host class must provide __getitem__, __setitem__, __delitem__ and
    keys(); every other mapping method below is derived from those four.
    Defining __contains__, __iter__ and iteritems as well makes the derived
    methods progressively more efficient.
    """
    # second level definitions support higher levels
    def __iter__(self):
        for k in self.keys():
            yield k
    def has_key(self, key):
        # EAFP membership test built on __getitem__.
        try:
            value = self[key]
        except KeyError:
            return False
        return True
    def __contains__(self, key):
        return self.has_key(key)
    # third level takes advantage of second level definitions
    def iteritems(self):
        for k in self:
            yield (k, self[k])
    def iterkeys(self):
        return self.__iter__()
    # fourth level uses definitions from lower levels
    def itervalues(self):
        for _, v in self.iteritems():
            yield v
    def values(self):
        return [v for _, v in self.iteritems()]
    def items(self):
        return list(self.iteritems())
    def clear(self):
        for key in self.keys():
            del self[key]
    def setdefault(self, key, default):
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
    def pop(self, key, *args):
        # The optional single extra positional argument is the default value.
        # (raise converted from py2 statement form to the equivalent call
        # form; behaviour is identical.)
        if len(args) > 1:
            raise TypeError("pop expected at most 2 arguments, got "
                            + repr(1 + len(args)))
        try:
            value = self[key]
        except KeyError:
            if args:
                return args[0]
            raise
        del self[key]
        return value
    def popitem(self):
        try:
            k, v = self.iteritems().next()
        except StopIteration:
            raise KeyError('container is empty')
        del self[k]
        return (k, v)
    def update(self, other):
        # Make progressively weaker assumptions about "other"
        if hasattr(other, 'iteritems'): # iteritems saves memory and lookups
            for k, v in other.iteritems():
                self[k] = v
        elif hasattr(other, '__iter__'): # iter saves memory
            for k in other:
                self[k] = other[k]
        else:
            for k in other.keys():
                self[k] = other[k]
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def __repr__(self):
        return repr(dict(self.iteritems()))
    def __cmp__(self, other):
        if other is None:
            return 1
        if isinstance(other, DictMixin):
            other = dict(other.iteritems())
        return cmp(dict(self.iteritems()), other)
    def __len__(self):
        return len(self.keys())
    def __nonzero__(self):
        # Bug fix: the original returned bool(self.iteritems()), which is
        # ALWAYS True because a generator object is truthy even when it
        # would yield nothing.  Probe the iterator instead, so empty
        # mappings are falsy, matching real dicts.
        for _ in self.iteritems():
            return True
        return False
| {
"repo_name": "ilc/imgserv",
"path": "paste/util/UserDict24.py",
"copies": "28",
"size": "5516",
"license": "mit",
"hash": 5416914502308920000,
"line_mean": 32.0299401198,
"line_max": 77,
"alpha_frac": 0.5549311095,
"autogenerated": false,
"ratio": 4.150489089541009,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008526898636754475,
"num_lines": 167
} |
"""A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
    """Dict wrapper class keeping its contents in the ``data`` attribute."""
    def __init__(self, dict=None):
        self.data = {}
        if dict is not None: self.update(dict)
    def __repr__(self): return repr(self.data)
    def __cmp__(self, dict):
        if isinstance(dict, UserDict):
            return cmp(self.data, dict.data)
        else:
            return cmp(self.data, dict)
    def __len__(self): return len(self.data)
    def __getitem__(self, key): return self.data[key]
    def __setitem__(self, key, item): self.data[key] = item
    def __delitem__(self, key): del self.data[key]
    def clear(self): self.data.clear()
    def copy(self):
        if self.__class__ is UserDict:
            return UserDict(self.data)
        import copy
        data = self.data
        try:
            # Detach self.data so copy.copy() cannot share the dict, then
            # repopulate the copy through update() so overrides are honoured.
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c
    def keys(self): return self.data.keys()
    def items(self): return self.data.items()
    def iteritems(self): return self.data.iteritems()
    def iterkeys(self): return self.data.iterkeys()
    def itervalues(self): return self.data.itervalues()
    def values(self): return self.data.values()
    def has_key(self, key): return self.data.has_key(key)
    def update(self, dict):
        # Fast bulk paths for UserDict and real dicts; generic fallback
        # goes through __setitem__ so subclass overrides apply.
        if isinstance(dict, UserDict):
            self.data.update(dict.data)
        elif isinstance(dict, type(self.data)):
            self.data.update(dict)
        else:
            for k, v in dict.items():
                self[k] = v
    def get(self, key, failobj=None):
        if not self.has_key(key):
            return failobj
        return self[key]
    def setdefault(self, key, failobj=None):
        if not self.has_key(key):
            self[key] = failobj
        return self[key]
    def popitem(self):
        return self.data.popitem()
    def __contains__(self, key):
        return key in self.data
class IterableUserDict(UserDict):
    """UserDict variant whose instances support direct key iteration."""

    def __iter__(self):
        # Delegate iteration straight to the wrapped dictionary.
        return self.data.__iter__()
| {
"repo_name": "aptana/Pydev",
"path": "bundles/org.python.pydev.jython/Lib/UserDict.py",
"copies": "9",
"size": "2065",
"license": "epl-1.0",
"hash": -1940962510418171400,
"line_mean": 33.4166666667,
"line_max": 77,
"alpha_frac": 0.5791767554,
"autogenerated": false,
"ratio": 3.761384335154827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008299928451172098,
"num_lines": 60
} |
"""A more or less complete user-defined wrapper around dictionary objects."""
class UserDict(object):
    """Mapping class that wraps a real dict kept in the ``data`` attribute.

    New-style-class variant; subclass this (instead of ``dict``) to override
    mapping behaviour in plain Python.
    """
    # NOTE: __init__ uses the *args "descriptor style" deliberately, so a
    # caller may still pass keyword arguments literally named 'self' or
    # 'dict'; the instance and the optional mapping are unpacked by hand.
    def __init__(*args, **kwargs):
        if not args:
            raise TypeError("descriptor '__init__' of 'UserDict' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        if args:
            dict = args[0]
        elif 'dict' in kwargs:
            # Legacy spelling UserDict(dict=...) still works but warns.
            dict = kwargs.pop('dict')
            import warnings
            warnings.warn("Passing 'dict' as keyword argument is "
                          "deprecated", PendingDeprecationWarning,
                          stacklevel=2)
        else:
            dict = None
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)
    def __repr__(self): return repr(self.data)
    def __cmp__(self, dict):
        if isinstance(dict, UserDict):
            return cmp(self.data, dict.data)
        else:
            return cmp(self.data, dict)
    __hash__ = None # Avoid Py3k warning
    def __len__(self): return len(self.data)
    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        # Give subclasses a __missing__ hook, mirroring dict's behaviour.
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)
    def __setitem__(self, key, item): self.data[key] = item
    def __delitem__(self, key): del self.data[key]
    def clear(self): self.data.clear()
    def copy(self):
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        data = self.data
        try:
            # Temporarily detach self.data so copy.copy() cannot share it,
            # then refill the copy through update() to respect overrides.
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c
    def keys(self): return self.data.keys()
    def items(self): return self.data.items()
    def iteritems(self): return self.data.iteritems()
    def iterkeys(self): return self.data.iterkeys()
    def itervalues(self): return self.data.itervalues()
    def values(self): return self.data.values()
    def has_key(self, key): return key in self.data
    # update() uses the same descriptor-style signature as __init__, and for
    # the same reason: 'self'/'dict' must remain usable as keyword names.
    def update(*args, **kwargs):
        if not args:
            raise TypeError("descriptor 'update' of 'UserDict' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        if args:
            dict = args[0]
        elif 'dict' in kwargs:
            dict = kwargs.pop('dict')
            import warnings
            warnings.warn("Passing 'dict' as keyword argument is deprecated",
                          PendingDeprecationWarning, stacklevel=2)
        else:
            dict = None
        if dict is None:
            pass
        elif isinstance(dict, UserDict):
            self.data.update(dict.data)
        elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
            self.data.update(dict)
        else:
            # Generic mapping: go through __setitem__ so overrides apply.
            for k, v in dict.items():
                self[k] = v
        if len(kwargs):
            self.data.update(kwargs)
    def get(self, key, failobj=None):
        if key not in self:
            return failobj
        return self[key]
    def setdefault(self, key, failobj=None):
        if key not in self:
            self[key] = failobj
        return self[key]
    def pop(self, key, *args):
        return self.data.pop(key, *args)
    def popitem(self):
        return self.data.popitem()
    def __contains__(self, key):
        return key in self.data
    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
class IterableUserDict(UserDict):
    """UserDict variant whose instances support direct key iteration."""

    def __iter__(self):
        # Delegate iteration straight to the wrapped dictionary.
        return self.data.__iter__()
import _abcoll
# Register IterableUserDict as a virtual subclass of MutableMapping so that
# isinstance()/issubclass() checks against the ABC succeed.
_abcoll.MutableMapping.register(IterableUserDict)
class DictMixin(object):
    """Mixin supplying the full dict API for minimal mapping classes.

    The host class must provide __getitem__, __setitem__, __delitem__ and
    keys(); everything else below is derived from those four methods.
    """
    # Mixin defining all dictionary methods for classes that already have
    # a minimum dictionary interface including getitem, setitem, delitem,
    # and keys. Without knowledge of the subclass constructor, the mixin
    # does not define __init__() or copy(). In addition to the four base
    # methods, progressively more efficiency comes with defining
    # __contains__(), __iter__(), and iteritems().
    # second level definitions support higher levels
    def __iter__(self):
        for k in self.keys():
            yield k
    def has_key(self, key):
        # EAFP membership test built on __getitem__.
        try:
            self[key]
        except KeyError:
            return False
        return True
    def __contains__(self, key):
        return self.has_key(key)
    # third level takes advantage of second level definitions
    def iteritems(self):
        for k in self:
            yield (k, self[k])
    def iterkeys(self):
        return self.__iter__()
    # fourth level uses definitions from lower levels
    def itervalues(self):
        for _, v in self.iteritems():
            yield v
    def values(self):
        return [v for _, v in self.iteritems()]
    def items(self):
        return list(self.iteritems())
    def clear(self):
        for key in self.keys():
            del self[key]
    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
    def pop(self, key, *args):
        # The optional single extra positional argument is the default value.
        if len(args) > 1:
            raise TypeError, "pop expected at most 2 arguments, got "\
                              + repr(1 + len(args))
        try:
            value = self[key]
        except KeyError:
            if args:
                return args[0]
            raise
        del self[key]
        return value
    def popitem(self):
        try:
            k, v = self.iteritems().next()
        except StopIteration:
            raise KeyError, 'container is empty'
        del self[k]
        return (k, v)
    def update(self, other=None, **kwargs):
        # Make progressively weaker assumptions about "other"
        if other is None:
            pass
        elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
            for k, v in other.iteritems():
                self[k] = v
        elif hasattr(other, 'keys'):
            for k in other.keys():
                self[k] = other[k]
        else:
            # Last resort: assume an iterable of (key, value) pairs.
            for k, v in other:
                self[k] = v
        if kwargs:
            self.update(kwargs)
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def __repr__(self):
        return repr(dict(self.iteritems()))
    def __cmp__(self, other):
        if other is None:
            return 1
        if isinstance(other, DictMixin):
            other = dict(other.iteritems())
        return cmp(dict(self.iteritems()), other)
    def __len__(self):
        return len(self.keys())
| {
"repo_name": "AlexEKoren/grumpy",
"path": "third_party/stdlib/UserDict.py",
"copies": "1",
"size": "7076",
"license": "apache-2.0",
"hash": -593545378568599700,
"line_mean": 32.220657277,
"line_max": 79,
"alpha_frac": 0.540700961,
"autogenerated": false,
"ratio": 4.2781136638452235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005763199174331967,
"num_lines": 213
} |
"""A more or less complete user-defined wrapper around dictionary objects."""
import riscos
class _Environ:
    """Dictionary-like view of the RISC OS environment variables.

    Every operation delegates to the ``riscos`` module, so instances keep
    no state of their own and always reflect the live environment.
    """

    def __init__(self, initial=None):
        # ``initial`` is accepted for dict-constructor compatibility but is
        # deliberately ignored: the environment cannot be replaced wholesale.
        pass

    def __repr__(self):
        return repr(riscos.getenvdict())

    def __cmp__(self, dict):
        # Bug fix: the original guarded this with isinstance(dict, UserDict),
        # but UserDict is never defined in this module (NameError) and any
        # non-matching comparison fell through to return None.  Compare a
        # snapshot of the environment directly, as the py2.2 version does.
        return cmp(riscos.getenvdict(), dict)

    def __len__(self):
        return len(riscos.getenvdict())

    def __getitem__(self, key):
        ret = riscos.getenv(key)
        if ret is not None:
            return ret
        # Include the missing key in the exception for easier debugging.
        raise KeyError(key)

    def __setitem__(self, key, item):
        riscos.putenv(key, item)

    def __delitem__(self, key):
        riscos.delenv(key)

    def clear(self):
        # Wiping the whole environment is too dangerous on RISC OS.
        pass

    def copy(self):
        # Returns a plain dict snapshot; later environment changes not seen.
        return riscos.getenvdict()

    def keys(self): return riscos.getenvdict().keys()
    def items(self): return riscos.getenvdict().items()
    def values(self): return riscos.getenvdict().values()

    def has_key(self, key):
        # ``is not None`` replaces the removed ``<>`` operator; getenv
        # returns None for unset variables.
        value = riscos.getenv(key)
        return value is not None

    def update(self, dict):
        for k, v in dict.items():
            riscos.putenv(k, v)

    def get(self, key, failobj=None):
        value = riscos.getenv(key)
        if value is not None:
            return value
        return failobj
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-2.1/Lib/plat-riscos/riscosenviron.py",
"copies": "1",
"size": "1330",
"license": "mit",
"hash": -4022340047597947000,
"line_mean": 28.5555555556,
"line_max": 77,
"alpha_frac": 0.569924812,
"autogenerated": false,
"ratio": 3.7150837988826817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9573086371977662,
"avg_score": 0.042384447781003885,
"num_lines": 45
} |
"""A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
    """Mapping class that wraps a real dict kept in the ``data`` attribute.

    Subclass this (instead of ``dict``) to override mapping behaviour in
    plain Python; every operation funnels through the methods below.
    """
    def __init__(self, dict=None, **kwargs):
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)
    def __repr__(self):
        return repr(self.data)
    def __cmp__(self, dict):
        if isinstance(dict, UserDict):
            return cmp(self.data, dict.data)
        else:
            return cmp(self.data, dict)
    __hash__ = None # Avoid Py3k warning
    def __len__(self):
        return len(self.data)
    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        # Give subclasses a __missing__ hook, mirroring dict's behaviour.
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)
    def __setitem__(self, key, item):
        self.data[key] = item
    def __delitem__(self, key):
        del self.data[key]
    def clear(self):
        self.data.clear()
    def copy(self):
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        data = self.data
        try:
            # Temporarily detach self.data so copy.copy() cannot share it,
            # then refill the copy through update() to respect overrides.
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c
    def keys(self):
        return self.data.keys()
    def items(self):
        return self.data.items()
    def iteritems(self):
        return self.data.iteritems()
    def iterkeys(self):
        return self.data.iterkeys()
    def itervalues(self):
        return self.data.itervalues()
    def values(self):
        return self.data.values()
    def has_key(self, key):
        return key in self.data
    def update(self, dict=None, **kwargs):
        if dict is None:
            pass
        elif isinstance(dict, UserDict):
            self.data.update(dict.data)
        elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
            self.data.update(dict)
        else:
            # Generic mapping: go through __setitem__ so overrides apply.
            for k, v in dict.items():
                self[k] = v
        if len(kwargs):
            self.data.update(kwargs)
    def get(self, key, failobj=None):
        if key not in self:
            return failobj
        return self[key]
    def setdefault(self, key, failobj=None):
        if key not in self:
            self[key] = failobj
        return self[key]
    def pop(self, key, *args):
        return self.data.pop(key, *args)
    def popitem(self):
        return self.data.popitem()
    def __contains__(self, key):
        return key in self.data
    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
class IterableUserDict(UserDict):
    """UserDict variant whose instances support direct key iteration."""

    def __iter__(self):
        # Delegate iteration straight to the wrapped dictionary.
        return self.data.__iter__()
import _abcoll
# Register IterableUserDict as a virtual subclass of MutableMapping so that
# isinstance()/issubclass() checks against the ABC succeed.
_abcoll.MutableMapping.register(IterableUserDict)
class DictMixin:
    """Mixin supplying the full dict API for minimal mapping classes.

    The host class must provide __getitem__, __setitem__, __delitem__ and
    keys(); everything else below is derived from those four methods.
    """
    # Mixin defining all dictionary methods for classes that already have
    # a minimum dictionary interface including getitem, setitem, delitem,
    # and keys. Without knowledge of the subclass constructor, the mixin
    # does not define __init__() or copy(). In addition to the four base
    # methods, progressively more efficiency comes with defining
    # __contains__(), __iter__(), and iteritems().
    # second level definitions support higher levels
    def __iter__(self):
        for k in self.keys():
            yield k
    def has_key(self, key):
        # EAFP membership test built on __getitem__.
        try:
            self[key]
        except KeyError:
            return False
        return True
    def __contains__(self, key):
        return self.has_key(key)
    # third level takes advantage of second level definitions
    def iteritems(self):
        for k in self:
            yield (k, self[k])
    def iterkeys(self):
        return self.__iter__()
    # fourth level uses definitions from lower levels
    def itervalues(self):
        for _, v in self.iteritems():
            yield v
    def values(self):
        return [v for _, v in self.iteritems()]
    def items(self):
        return list(self.iteritems())
    def clear(self):
        for key in self.keys():
            del self[key]
    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
    def pop(self, key, *args):
        # The optional single extra positional argument is the default value.
        if len(args) > 1:
            raise TypeError, "pop expected at most 2 arguments, got " \
                              + repr(1 + len(args))
        try:
            value = self[key]
        except KeyError:
            if args:
                return args[0]
            raise
        del self[key]
        return value
    def popitem(self):
        try:
            k, v = self.iteritems().next()
        except StopIteration:
            raise KeyError, 'container is empty'
        del self[k]
        return (k, v)
    def update(self, other=None, **kwargs):
        # Make progressively weaker assumptions about "other"
        if other is None:
            pass
        elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
            for k, v in other.iteritems():
                self[k] = v
        elif hasattr(other, 'keys'):
            for k in other.keys():
                self[k] = other[k]
        else:
            # Last resort: assume an iterable of (key, value) pairs.
            for k, v in other:
                self[k] = v
        if kwargs:
            self.update(kwargs)
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def __repr__(self):
        return repr(dict(self.iteritems()))
    def __cmp__(self, other):
        if other is None:
            return 1
        if isinstance(other, DictMixin):
            other = dict(other.iteritems())
        return cmp(dict(self.iteritems()), other)
    def __len__(self):
        return len(self.keys())
| {
"repo_name": "ppyordanov/HCI_4_Future_Cities",
"path": "Server/src/virtualenv/Lib/UserDict.py",
"copies": "1",
"size": "5950",
"license": "mit",
"hash": 3342588567657734000,
"line_mean": 24.4273504274,
"line_max": 79,
"alpha_frac": 0.5401680672,
"autogenerated": false,
"ratio": 4.213881019830028,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005609690133499657,
"num_lines": 234
} |
"""A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
    """Mapping class that wraps a real dict kept in the ``data`` attribute.

    Subclass this (instead of ``dict``) to override mapping behaviour in
    plain Python; every operation funnels through the methods below.
    """
    # NOTE: __init__ uses the *args "descriptor style" deliberately, so a
    # caller may still pass keyword arguments literally named 'self' or
    # 'dict'; the instance and the optional mapping are unpacked by hand.
    def __init__(*args, **kwargs):
        if not args:
            raise TypeError("descriptor '__init__' of 'UserDict' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        if args:
            dict = args[0]
        elif 'dict' in kwargs:
            # Legacy spelling UserDict(dict=...) still works but warns.
            dict = kwargs.pop('dict')
            import warnings
            warnings.warn("Passing 'dict' as keyword argument is "
                          "deprecated", PendingDeprecationWarning,
                          stacklevel=2)
        else:
            dict = None
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)
    def __repr__(self): return repr(self.data)
    def __cmp__(self, dict):
        if isinstance(dict, UserDict):
            return cmp(self.data, dict.data)
        else:
            return cmp(self.data, dict)
    __hash__ = None # Avoid Py3k warning
    def __len__(self): return len(self.data)
    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        # Give subclasses a __missing__ hook, mirroring dict's behaviour.
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)
    def __setitem__(self, key, item): self.data[key] = item
    def __delitem__(self, key): del self.data[key]
    def clear(self): self.data.clear()
    def copy(self):
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        data = self.data
        try:
            # Temporarily detach self.data so copy.copy() cannot share it,
            # then refill the copy through update() to respect overrides.
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c
    def keys(self): return self.data.keys()
    def items(self): return self.data.items()
    def iteritems(self): return self.data.iteritems()
    def iterkeys(self): return self.data.iterkeys()
    def itervalues(self): return self.data.itervalues()
    def values(self): return self.data.values()
    def has_key(self, key): return key in self.data
    # update() uses the same descriptor-style signature as __init__, and for
    # the same reason: 'self'/'dict' must remain usable as keyword names.
    def update(*args, **kwargs):
        if not args:
            raise TypeError("descriptor 'update' of 'UserDict' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        if args:
            dict = args[0]
        elif 'dict' in kwargs:
            dict = kwargs.pop('dict')
            import warnings
            warnings.warn("Passing 'dict' as keyword argument is deprecated",
                          PendingDeprecationWarning, stacklevel=2)
        else:
            dict = None
        if dict is None:
            pass
        elif isinstance(dict, UserDict):
            self.data.update(dict.data)
        elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
            self.data.update(dict)
        else:
            # Generic mapping: go through __setitem__ so overrides apply.
            for k, v in dict.items():
                self[k] = v
        if len(kwargs):
            self.data.update(kwargs)
    def get(self, key, failobj=None):
        if key not in self:
            return failobj
        return self[key]
    def setdefault(self, key, failobj=None):
        if key not in self:
            self[key] = failobj
        return self[key]
    def pop(self, key, *args):
        return self.data.pop(key, *args)
    def popitem(self):
        return self.data.popitem()
    def __contains__(self, key):
        return key in self.data
    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
class IterableUserDict(UserDict):
    """UserDict variant whose instances support direct key iteration."""

    def __iter__(self):
        # Delegate iteration straight to the wrapped dictionary.
        return self.data.__iter__()
import _abcoll
# Register IterableUserDict as a virtual subclass of MutableMapping so that
# isinstance()/issubclass() checks against the ABC succeed.
_abcoll.MutableMapping.register(IterableUserDict)
class DictMixin:
    """Mixin supplying the full dict API for minimal mapping classes.

    The host class must provide __getitem__, __setitem__, __delitem__ and
    keys(); everything else below is derived from those four methods.
    """
    # Mixin defining all dictionary methods for classes that already have
    # a minimum dictionary interface including getitem, setitem, delitem,
    # and keys. Without knowledge of the subclass constructor, the mixin
    # does not define __init__() or copy(). In addition to the four base
    # methods, progressively more efficiency comes with defining
    # __contains__(), __iter__(), and iteritems().
    # second level definitions support higher levels
    def __iter__(self):
        for k in self.keys():
            yield k
    def has_key(self, key):
        # EAFP membership test built on __getitem__.
        try:
            self[key]
        except KeyError:
            return False
        return True
    def __contains__(self, key):
        return self.has_key(key)
    # third level takes advantage of second level definitions
    def iteritems(self):
        for k in self:
            yield (k, self[k])
    def iterkeys(self):
        return self.__iter__()
    # fourth level uses definitions from lower levels
    def itervalues(self):
        for _, v in self.iteritems():
            yield v
    def values(self):
        return [v for _, v in self.iteritems()]
    def items(self):
        return list(self.iteritems())
    def clear(self):
        for key in self.keys():
            del self[key]
    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
    def pop(self, key, *args):
        # The optional single extra positional argument is the default value.
        if len(args) > 1:
            raise TypeError, "pop expected at most 2 arguments, got "\
                              + repr(1 + len(args))
        try:
            value = self[key]
        except KeyError:
            if args:
                return args[0]
            raise
        del self[key]
        return value
    def popitem(self):
        try:
            k, v = self.iteritems().next()
        except StopIteration:
            raise KeyError, 'container is empty'
        del self[k]
        return (k, v)
    def update(self, other=None, **kwargs):
        # Make progressively weaker assumptions about "other"
        if other is None:
            pass
        elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
            for k, v in other.iteritems():
                self[k] = v
        elif hasattr(other, 'keys'):
            for k in other.keys():
                self[k] = other[k]
        else:
            # Last resort: assume an iterable of (key, value) pairs.
            for k, v in other:
                self[k] = v
        if kwargs:
            self.update(kwargs)
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def __repr__(self):
        return repr(dict(self.iteritems()))
    def __cmp__(self, other):
        if other is None:
            return 1
        if isinstance(other, DictMixin):
            other = dict(other.iteritems())
        return cmp(dict(self.iteritems()), other)
    def __len__(self):
        return len(self.keys())
| {
"repo_name": "nmercier/linux-cross-gcc",
"path": "win32/bin/Lib/UserDict.py",
"copies": "5",
"size": "7273",
"license": "bsd-3-clause",
"hash": 1512766881308138000,
"line_mean": 32.1455399061,
"line_max": 79,
"alpha_frac": 0.5244053348,
"autogenerated": false,
"ratio": 4.394561933534743,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005631977013070057,
"num_lines": 213
} |
"""A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
    """Mapping class that wraps a real dict kept in the ``data`` attribute."""
    def __init__(self, dict=None, **kwargs):
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)
    def __repr__(self): return repr(self.data)
    def __cmp__(self, dict):
        if isinstance(dict, UserDict):
            return cmp(self.data, dict.data)
        else:
            return cmp(self.data, dict)
    def __len__(self): return len(self.data)
    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        # Give subclasses a __missing__ hook, mirroring dict's behaviour.
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)
    def __setitem__(self, key, item): self.data[key] = item
    def __delitem__(self, key): del self.data[key]
    def clear(self): self.data.clear()
    def copy(self):
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        data = self.data
        try:
            # Temporarily detach self.data so copy.copy() cannot share it,
            # then refill the copy through update() to respect overrides.
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c
    def keys(self): return self.data.keys()
    def items(self): return self.data.items()
    def iteritems(self): return self.data.iteritems()
    def iterkeys(self): return self.data.iterkeys()
    def itervalues(self): return self.data.itervalues()
    def values(self): return self.data.values()
    def has_key(self, key): return self.data.has_key(key)
    def update(self, dict=None, **kwargs):
        if dict is None:
            pass
        elif isinstance(dict, UserDict):
            self.data.update(dict.data)
        elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
            self.data.update(dict)
        else:
            # Generic mapping: go through __setitem__ so overrides apply.
            for k, v in dict.items():
                self[k] = v
        if len(kwargs):
            self.data.update(kwargs)
    def get(self, key, failobj=None):
        if not self.has_key(key):
            return failobj
        return self[key]
    def setdefault(self, key, failobj=None):
        if not self.has_key(key):
            self[key] = failobj
        return self[key]
    def pop(self, key, *args):
        return self.data.pop(key, *args)
    def popitem(self):
        return self.data.popitem()
    def __contains__(self, key):
        return key in self.data
    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
class IterableUserDict(UserDict):
    """UserDict that also supports iteration over its keys, like dict."""
    def __iter__(self):
        return iter(self.data)
class DictMixin:
    # Mixin defining all dictionary methods for classes that already have
    # a minimum dictionary interface including getitem, setitem, delitem,
    # and keys. Without knowledge of the subclass constructor, the mixin
    # does not define __init__() or copy(). In addition to the four base
    # methods, progressively more efficiency comes with defining
    # __contains__(), __iter__(), and iteritems().
    # NOTE: Python 2 only (raise-with-comma syntax, iterator .next()).

    # second level definitions support higher levels
    def __iter__(self):
        for k in self.keys():
            yield k
    def has_key(self, key):
        # EAFP membership test built on top of __getitem__.
        try:
            value = self[key]
        except KeyError:
            return False
        return True
    def __contains__(self, key):
        return self.has_key(key)
    # third level takes advantage of second level definitions
    def iteritems(self):
        for k in self:
            yield (k, self[k])
    def iterkeys(self):
        return self.__iter__()
    # fourth level uses definitions from lower levels
    def itervalues(self):
        for _, v in self.iteritems():
            yield v
    def values(self):
        return [v for _, v in self.iteritems()]
    def items(self):
        return list(self.iteritems())
    def clear(self):
        for key in self.keys():
            del self[key]
    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
    def pop(self, key, *args):
        # args may hold one optional default returned when key is missing.
        if len(args) > 1:
            raise TypeError, "pop expected at most 2 arguments, got "\
                + repr(1 + len(args))
        try:
            value = self[key]
        except KeyError:
            if args:
                return args[0]
            raise
        del self[key]
        return value
    def popitem(self):
        try:
            # Grab an arbitrary (key, value) pair from the iterator.
            k, v = self.iteritems().next()
        except StopIteration:
            raise KeyError, 'container is empty'
        del self[k]
        return (k, v)
    def update(self, other=None, **kwargs):
        # Make progressively weaker assumptions about "other"
        if other is None:
            pass
        elif hasattr(other, 'iteritems'):  # iteritems saves memory and lookups
            for k, v in other.iteritems():
                self[k] = v
        elif hasattr(other, 'keys'):
            for k in other.keys():
                self[k] = other[k]
        else:
            # Assume an iterable of (key, value) pairs.
            for k, v in other:
                self[k] = v
        if kwargs:
            self.update(kwargs)
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def __repr__(self):
        return repr(dict(self.iteritems()))
    def __cmp__(self, other):
        # Any mapping compares greater than None.
        if other is None:
            return 1
        if isinstance(other, DictMixin):
            other = dict(other.iteritems())
        return cmp(dict(self.iteritems()), other)
    def __len__(self):
        return len(self.keys())
| {
"repo_name": "ericlink/adms-server",
"path": "playframework-dist/play-1.1/python/Lib/UserDict.py",
"copies": "2",
"size": "5904",
"license": "mit",
"hash": -8293407340814465000,
"line_mean": 31.7371428571,
"line_max": 79,
"alpha_frac": 0.5337059621,
"autogenerated": false,
"ratio": 4.272069464544139,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5805775426644139,
"avg_score": null,
"num_lines": null
} |
"""A more or less complete user-defined wrapper around list objects."""
class UserList:
    """List work-alike that stores its contents in ``self.data``.

    NOTE: Python 2 code — uses ``apply()`` and raise-with-comma syntax.
    """
    def __init__(self, initlist=None):
        self.data = []
        if initlist is not None:
            # XXX should this accept an arbitrary sequence?
            if type(initlist) == type(self.data):
                self.data[:] = initlist
            elif isinstance(initlist, UserList):
                self.data[:] = initlist.data[:]
            else:
                self.data = list(initlist)
    def __repr__(self): return repr(self.data)
    # Rich comparisons delegate to the wrapped list.
    def __lt__(self, other): return self.data < self.__cast(other)
    def __le__(self, other): return self.data <= self.__cast(other)
    def __eq__(self, other): return self.data == self.__cast(other)
    def __ne__(self, other): return self.data != self.__cast(other)
    def __gt__(self, other): return self.data > self.__cast(other)
    def __ge__(self, other): return self.data >= self.__cast(other)
    def __cast(self, other):
        # Unwrap other UserLists so comparisons work list-to-list.
        if isinstance(other, UserList): return other.data
        else: return other
    def __cmp__(self, other):
        raise RuntimeError, "UserList.__cmp__() is obsolete"
    def __contains__(self, item): return item in self.data
    def __len__(self): return len(self.data)
    def __getitem__(self, i): return self.data[i]
    def __setitem__(self, i, item): self.data[i] = item
    def __delitem__(self, i): del self.data[i]
    def __getslice__(self, i, j):
        # Old-style slice protocol; clamp bounds below zero.
        i = max(i, 0); j = max(j, 0)
        return self.__class__(self.data[i:j])
    def __setslice__(self, i, j, other):
        i = max(i, 0); j = max(j, 0)
        if isinstance(other, UserList):
            self.data[i:j] = other.data
        elif isinstance(other, type(self.data)):
            self.data[i:j] = other
        else:
            self.data[i:j] = list(other)
    def __delslice__(self, i, j):
        i = max(i, 0); j = max(j, 0)
        del self.data[i:j]
    def __add__(self, other):
        if isinstance(other, UserList):
            return self.__class__(self.data + other.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(self.data + other)
        else:
            return self.__class__(self.data + list(other))
    def __radd__(self, other):
        if isinstance(other, UserList):
            return self.__class__(other.data + self.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(other + self.data)
        else:
            return self.__class__(list(other) + self.data)
    def __iadd__(self, other):
        if isinstance(other, UserList):
            self.data += other.data
        elif isinstance(other, type(self.data)):
            self.data += other
        else:
            self.data += list(other)
        return self
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __imul__(self, n):
        self.data *= n
        return self
    # Thin wrappers around the underlying list's methods.
    def append(self, item): self.data.append(item)
    def insert(self, i, item): self.data.insert(i, item)
    def pop(self, i=-1): return self.data.pop(i)
    def remove(self, item): self.data.remove(item)
    def count(self, item): return self.data.count(item)
    def index(self, item): return self.data.index(item)
    def reverse(self): self.data.reverse()
    def sort(self, *args): apply(self.data.sort, args)
    def extend(self, other):
        if isinstance(other, UserList):
            self.data.extend(other.data)
        else:
            self.data.extend(other)
| {
"repo_name": "Integral-Technology-Solutions/ConfigNOW",
"path": "Lib/UserList.py",
"copies": "4",
"size": "3526",
"license": "mit",
"hash": -8729001291695121000,
"line_mean": 40.4823529412,
"line_max": 71,
"alpha_frac": 0.5609756098,
"autogenerated": false,
"ratio": 3.6164102564102563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008004223034525741,
"num_lines": 85
} |
"""A more or less complete user-defined wrapper around list objects."""
#Imported from Python 2.3.5 and added _fixindex
class UserList:
    """List work-alike storing its contents in ``self.data``.

    Variant imported from Python 2.3.5 with an added ``_fixindex`` helper
    that normalizes slice bounds (see the header comment above).
    NOTE: Python 2 code — ``__cmp__`` relies on the builtin ``cmp()``.
    """
    def __init__(self, initlist=None):
        self.data = []
        if initlist is not None:
            # XXX should this accept an arbitrary sequence?
            if type(initlist) == type(self.data):
                self.data[:] = initlist
            elif isinstance(initlist, UserList):
                self.data[:] = initlist.data[:]
            else:
                self.data = list(initlist)
    def __repr__(self): return repr(self.data)
    # Rich comparisons delegate to the wrapped list.
    def __lt__(self, other): return self.data < self.__cast(other)
    def __le__(self, other): return self.data <= self.__cast(other)
    def __eq__(self, other): return self.data == self.__cast(other)
    def __ne__(self, other): return self.data != self.__cast(other)
    def __gt__(self, other): return self.data > self.__cast(other)
    def __ge__(self, other): return self.data >= self.__cast(other)
    def __cast(self, other):
        # Unwrap other UserLists so comparisons work list-to-list.
        if isinstance(other, UserList): return other.data
        else: return other
    def __cmp__(self, other):
        return cmp(self.data, self.__cast(other))
    def __contains__(self, item): return item in self.data
    def __len__(self): return len(self.data)
    def __getitem__(self, i): return self.data[i]
    def __setitem__(self, i, item): self.data[i] = item
    def __delitem__(self, i): del self.data[i]
    def __getslice__(self, i, j):
        # Old-style slice protocol; bounds normalized by _fixindex.
        i = self._fixindex(i); j = self._fixindex(j)
        return self.__class__(self.data[i:j])
    def __setslice__(self, i, j, other):
        i = self._fixindex(i); j = self._fixindex(j)
        if isinstance(other, UserList):
            self.data[i:j] = other.data
        elif isinstance(other, type(self.data)):
            self.data[i:j] = other
        else:
            self.data[i:j] = list(other)
    def __delslice__(self, i, j):
        i = self._fixindex(i); j = self._fixindex(j)
        del self.data[i:j]
    def __add__(self, other):
        if isinstance(other, UserList):
            return self.__class__(self.data + other.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(self.data + other)
        else:
            return self.__class__(self.data + list(other))
    def __radd__(self, other):
        if isinstance(other, UserList):
            return self.__class__(other.data + self.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(other + self.data)
        else:
            return self.__class__(list(other) + self.data)
    def __iadd__(self, other):
        if isinstance(other, UserList):
            self.data += other.data
        elif isinstance(other, type(self.data)):
            self.data += other
        else:
            self.data += list(other)
        return self
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __imul__(self, n):
        self.data *= n
        return self
    # Thin wrappers around the underlying list's methods.
    def append(self, item): self.data.append(item)
    def insert(self, i, item): self.data.insert(i, item)
    def pop(self, i=-1): return self.data.pop(i)
    def remove(self, item): self.data.remove(item)
    def count(self, item): return self.data.count(item)
    def index(self, item, *args): return self.data.index(item, *args)
    def reverse(self): self.data.reverse()
    def sort(self, *args): self.data.sort(*args)
    def extend(self, other):
        if isinstance(other, UserList):
            self.data.extend(other.data)
        else:
            self.data.extend(other)
    def _fixindex(self, index):
        # Translate a negative index relative to the end, then clamp the
        # result into [0, len(self.data)].
        if index < 0:
            index += len(self.data)
        elif index > len(self.data):
            index = len(self.data)
        index = max(index, 0)
        return index
| {
"repo_name": "smkr/pyclipse",
"path": "plugins/org.python.pydev.jython/Lib/UserList.py",
"copies": "8",
"size": "3841",
"license": "epl-1.0",
"hash": -1162657071319461000,
"line_mean": 39.8617021277,
"line_max": 71,
"alpha_frac": 0.5631346004,
"autogenerated": false,
"ratio": 3.630434782608696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008539177151702194,
"num_lines": 94
} |
"""A more or less complete user-defined wrapper around list objects."""
class UserList:
    """List work-alike that stores its contents in ``self.data``.

    NOTE: Python 2 code — uses ``apply()`` and raise-with-comma syntax.
    """
    def __init__(self, initlist=None):
        self.data = []
        if initlist is not None:
            # XXX should this accept an arbitrary sequence?
            if type(initlist) == type(self.data):
                self.data[:] = initlist
            elif isinstance(initlist, UserList):
                self.data[:] = initlist.data[:]
            else:
                self.data = list(initlist)
    def __repr__(self): return repr(self.data)
    # Rich comparisons delegate to the wrapped list.
    def __lt__(self, other): return self.data < self.__cast(other)
    def __le__(self, other): return self.data <= self.__cast(other)
    def __eq__(self, other): return self.data == self.__cast(other)
    def __ne__(self, other): return self.data != self.__cast(other)
    def __gt__(self, other): return self.data > self.__cast(other)
    def __ge__(self, other): return self.data >= self.__cast(other)
    def __cast(self, other):
        # Unwrap other UserLists so comparisons work list-to-list.
        if isinstance(other, UserList): return other.data
        else: return other
    def __cmp__(self, other):
        raise RuntimeError, "UserList.__cmp__() is obsolete"
    def __contains__(self, item): return item in self.data
    def __len__(self): return len(self.data)
    def __getitem__(self, i): return self.data[i]
    def __setitem__(self, i, item): self.data[i] = item
    def __delitem__(self, i): del self.data[i]
    def __getslice__(self, i, j):
        # Old-style slice protocol; clamp bounds below zero.
        i = max(i, 0); j = max(j, 0)
        return self.__class__(self.data[i:j])
    def __setslice__(self, i, j, other):
        i = max(i, 0); j = max(j, 0)
        if isinstance(other, UserList):
            self.data[i:j] = other.data
        elif isinstance(other, type(self.data)):
            self.data[i:j] = other
        else:
            self.data[i:j] = list(other)
    def __delslice__(self, i, j):
        i = max(i, 0); j = max(j, 0)
        del self.data[i:j]
    def __add__(self, other):
        if isinstance(other, UserList):
            return self.__class__(self.data + other.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(self.data + other)
        else:
            return self.__class__(self.data + list(other))
    def __radd__(self, other):
        if isinstance(other, UserList):
            return self.__class__(other.data + self.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(other + self.data)
        else:
            return self.__class__(list(other) + self.data)
    def __iadd__(self, other):
        if isinstance(other, UserList):
            self.data += other.data
        elif isinstance(other, type(self.data)):
            self.data += other
        else:
            self.data += list(other)
        return self
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __imul__(self, n):
        self.data *= n
        return self
    # Thin wrappers around the underlying list's methods.
    def append(self, item): self.data.append(item)
    def insert(self, i, item): self.data.insert(i, item)
    def pop(self, i=-1): return self.data.pop(i)
    def remove(self, item): self.data.remove(item)
    def count(self, item): return self.data.count(item)
    def index(self, item): return self.data.index(item)
    def reverse(self): self.data.reverse()
    def sort(self, *args): apply(self.data.sort, args)
    def extend(self, other):
        if isinstance(other, UserList):
            self.data.extend(other.data)
        else:
            self.data.extend(other)
| {
"repo_name": "ai-ku/langvis",
"path": "dependencies/jython-2.1/Lib/UserList.py",
"copies": "4",
"size": "3611",
"license": "mit",
"hash": 3017948976506187000,
"line_mean": 40.4823529412,
"line_max": 71,
"alpha_frac": 0.5477707006,
"autogenerated": false,
"ratio": 3.7035897435897436,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6251360444189743,
"avg_score": null,
"num_lines": null
} |
"""A more or less complete user-defined wrapper around list objects."""
import collections
class UserList(collections.MutableSequence):
    """List work-alike storing its contents in ``self.data``.

    Python 2.7 stdlib variant registered against the MutableSequence ABC.
    NOTE: still Python 2 code — ``__cmp__`` relies on the builtin ``cmp()``.
    """
    def __init__(self, initlist=None):
        self.data = []
        if initlist is not None:
            # XXX should this accept an arbitrary sequence?
            if type(initlist) == type(self.data):
                self.data[:] = initlist
            elif isinstance(initlist, UserList):
                self.data[:] = initlist.data[:]
            else:
                self.data = list(initlist)
    def __repr__(self): return repr(self.data)
    # Rich comparisons delegate to the wrapped list.
    def __lt__(self, other): return self.data < self.__cast(other)
    def __le__(self, other): return self.data <= self.__cast(other)
    def __eq__(self, other): return self.data == self.__cast(other)
    def __ne__(self, other): return self.data != self.__cast(other)
    def __gt__(self, other): return self.data > self.__cast(other)
    def __ge__(self, other): return self.data >= self.__cast(other)
    def __cast(self, other):
        # Unwrap other UserLists so comparisons work list-to-list.
        if isinstance(other, UserList): return other.data
        else: return other
    def __cmp__(self, other):
        return cmp(self.data, self.__cast(other))
    __hash__ = None # Mutable sequence, so not hashable
    def __contains__(self, item): return item in self.data
    def __len__(self): return len(self.data)
    def __getitem__(self, i): return self.data[i]
    def __setitem__(self, i, item): self.data[i] = item
    def __delitem__(self, i): del self.data[i]
    def __getslice__(self, i, j):
        # Old-style slice protocol; clamp bounds below zero.
        i = max(i, 0); j = max(j, 0)
        return self.__class__(self.data[i:j])
    def __setslice__(self, i, j, other):
        i = max(i, 0); j = max(j, 0)
        if isinstance(other, UserList):
            self.data[i:j] = other.data
        elif isinstance(other, type(self.data)):
            self.data[i:j] = other
        else:
            self.data[i:j] = list(other)
    def __delslice__(self, i, j):
        i = max(i, 0); j = max(j, 0)
        del self.data[i:j]
    def __add__(self, other):
        if isinstance(other, UserList):
            return self.__class__(self.data + other.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(self.data + other)
        else:
            return self.__class__(self.data + list(other))
    def __radd__(self, other):
        if isinstance(other, UserList):
            return self.__class__(other.data + self.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(other + self.data)
        else:
            return self.__class__(list(other) + self.data)
    def __iadd__(self, other):
        if isinstance(other, UserList):
            self.data += other.data
        elif isinstance(other, type(self.data)):
            self.data += other
        else:
            self.data += list(other)
        return self
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __imul__(self, n):
        self.data *= n
        return self
    # Thin wrappers around the underlying list's methods.
    def append(self, item): self.data.append(item)
    def insert(self, i, item): self.data.insert(i, item)
    def pop(self, i=-1): return self.data.pop(i)
    def remove(self, item): self.data.remove(item)
    def count(self, item): return self.data.count(item)
    def index(self, item, *args): return self.data.index(item, *args)
    def reverse(self): self.data.reverse()
    def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
    def extend(self, other):
        if isinstance(other, UserList):
            self.data.extend(other.data)
        else:
            self.data.extend(other)
| {
"repo_name": "google/google-ctf",
"path": "third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Lib/UserList.py",
"copies": "67",
"size": "3732",
"license": "apache-2.0",
"hash": -1574053064305598500,
"line_mean": 40.4090909091,
"line_max": 71,
"alpha_frac": 0.551982851,
"autogenerated": false,
"ratio": 3.7023809523809526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006897019876272493,
"num_lines": 88
} |
""" A more realistic example of exfoliate being used for web scraping.
Exfoliate was designed with web scraping in mind. Although Reddit has an API, it has
generous robots.txt permissions and also makes for a good demonstration because there are multiple
types of requests that can be made (article lists, links to the articles, comment pages per article,
metadata per article, etc).
Specifically, this script starts at HOST of the Python subreddit landing page and uses an exfoliate
client to GET the content. Then, it parses the HTML to extract relevant metadata about the list of
articles returned and requests each of those pages. It also parses out the URL for the next list
page in the subreddit and repeats this process for 5 list pages.
Afterwards, it waits on the articles to resolve. At this point, it's up to you to do something
**amazing** with the scraped data.
"""
import time
import lxml.html
import exfoliate
import pprint
START = time.time()
HOST = 'https://www.reddit.com/r/Python/'
MAX_ARTICLE_LISTS = 5  # note, be considerate!
client = exfoliate.Client()
# keep track of the list futures and the article futures separately
list_futures = exfoliate.Futures()
article_futures = exfoliate.Futures()
article_metadata_by_future = {}
# make first request
list_futures.add(client.get(HOST))
for list_future in list_futures:
    if len(list_futures) >= MAX_ARTICLE_LISTS:
        break
    try:
        response = list_future.response()
        # BUGFIX: the original called response.raise_for_status() and caught
        # ``HTTPError`` — a name that was never imported — so any HTTP error
        # raised NameError while selecting the except clause and crashed the
        # script.  Branch on the status code explicitly instead.
        if response.status_code == 429:
            # guard against a 429 Too Many Requests rate limiting response and
            # attempt to wait at least 10 seconds or the retry-after header,
            # if supplied
            retry_after = response.headers.get('retry-after', 10)
            time.sleep(int(retry_after) + 1)
            list_futures.add(list_future.retry())
            continue
        if response.status_code >= 400:
            # any other HTTP error: just retry the request
            list_futures.add(list_future.retry())
            continue
        # parse html and extract article information
        root = lxml.html.fromstring(response.content)
        article_titles = root.xpath('//*[@class="top-matter"]/p[1]/a/text()')
        article_urls = root.xpath('//*[@class="top-matter"]/p[1]/a/@href')
        article_scores = root.xpath('//*[@class="score unvoted"]/text()')
        datetimes_articles_submitted = root.xpath('//*[@class="top-matter"]/p[2]/time/@datetime')
        # iterate over article details, making requests through client and saving futures and metadata
        for details in zip(article_titles, article_urls, datetimes_articles_submitted, article_scores):
            title, url, datetime, score = details
            # skip relative links
            if not url.startswith(('http://', 'https://')):
                continue
            article_future = client.get(url)
            article_futures.add(article_future)
            article_metadata_by_future[article_future] = {
                'title': title,
                'url': url,
                'when_submitted': datetime,
                'score': score,
            }
        # request next list page
        next_article_list_url, = root.xpath('//*[@class="next-button"]/a/@href')
        list_futures.add(client.get(next_article_list_url))
    except Exception:
        # deliberate best-effort scraping: an unknown error has occurred,
        # so re-queue the request and keep going
        list_futures.add(list_future.retry())
articles = []
for article_future in article_futures:
    try:
        response = article_future.response()
        metadata = article_metadata_by_future[article_future]
        metadata['response'] = response
        articles.append(metadata)
    except Exception:
        # best-effort: retry article fetches that failed
        article_futures.add(article_future.retry())
pprint.pprint(articles)
STOP = time.time()
print(f'{len(articles)} articles scraped in {round(STOP - START, 1)} seconds')
| {
"repo_name": "brianjpetersen/exfoliate",
"path": "example.py",
"copies": "1",
"size": "3722",
"license": "mit",
"hash": -8225017613256445000,
"line_mean": 38.1789473684,
"line_max": 103,
"alpha_frac": 0.6698011822,
"autogenerated": false,
"ratio": 3.877083333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5046884515533333,
"avg_score": null,
"num_lines": null
} |
"""A more realtime netcat"""
import sys
import select
import socket
from curtsies import FullscreenWindow, Input, FSArray
from curtsies.formatstring import linesplit
from curtsies.fmtfuncs import blue, red, green
class Connection:
    """Wraps a socket, accumulating received bytes so they can be rendered."""

    def __init__(self, sock):
        self.sock = sock
        self.received = []

    def fileno(self):
        # Delegate to the socket so select() can poll this object directly.
        return self.sock.fileno()

    def on_read(self):
        # Pull at most 50 bytes per readiness notification.
        chunk = self.sock.recv(50)
        self.received.append(chunk)

    def render(self):
        # Nothing received yet -> render as a single empty line.
        if not self.received:
            return ['']
        text = ''.join(part.decode('latin-1') for part in self.received)
        return linesplit(green(text), 80)
def main(host, port):
    """Connect to host:port and run a fullscreen netcat-style UI.

    Row 0 shows the connection info, rows 2-6 show received data, and
    row 9 echoes not-yet-sent keypresses.  Ctrl-j sends the buffered
    input (plus a newline); ESC exits.
    """
    client = socket.socket()
    client.connect((host, port))
    client.setblocking(False)  # reads are driven by select(), never block
    conn = Connection(client)
    keypresses = []  # characters typed but not yet sent
    with FullscreenWindow() as window:
        with Input() as input_generator:
            while True:
                # Redraw the whole 10x80 screen each pass through the loop.
                a = FSArray(10, 80)
                in_text = ''.join(keypresses)[:80]
                a[9:10, 0:len(in_text)] = [red(in_text)]
                # Received lines fill rows 2..6, bottom-aligned.
                for i, line in zip(reversed(range(2,7)), reversed(conn.render())):
                    a[i:i+1, 0:len(line)] = [line]
                # Status line; long hostnames are truncated to 50 chars.
                text = 'connected to %s:%d' % (host if len(host) < 50 else host[:50]+'...', port)
                a[0:1, 0:len(text)] = [blue(text)]
                window.render_to_terminal(a)
                # Block until either the socket or the keyboard has data.
                ready_to_read, _, _ = select.select([conn, input_generator], [], [])
                for r in ready_to_read:
                    if r is conn:
                        r.on_read()
                    else:
                        e = input_generator.send(0)
                        if e == '<ESC>':
                            return
                        elif e == '<Ctrl-j>':
                            # Enter: send the buffered line and clear it.
                            keypresses.append('\n')
                            client.send((''.join(keypresses)).encode('latin-1'))
                            keypresses = []
                        elif e == '<SPACE>':
                            keypresses.append(' ')
                        elif e in ('<DELETE>', '<BACKSPACE>'):
                            keypresses = keypresses[:-1]
                        elif e is not None:
                            keypresses.append(e)
if __name__ == '__main__':
    # Expect exactly a host and a port on the command line.
    args = sys.argv[1:3]
    if len(args) != 2:
        print('usage: python chat.py google.com 80')
        print('(if you use this example, try typing')
        print('GET /')
        print('and then hitting enter twice.)')
    else:
        host, port = args
        main(host, int(port))
| {
"repo_name": "sebastinas/curtsies",
"path": "examples/chat.py",
"copies": "1",
"size": "2519",
"license": "mit",
"hash": 1486726391699776000,
"line_mean": 35.5072463768,
"line_max": 115,
"alpha_frac": 0.4847161572,
"autogenerated": false,
"ratio": 4.004769475357711,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4989485632557711,
"avg_score": null,
"num_lines": null
} |
# a more robust batch generator that only has one font open at the time.
from robofab.interface.all.dialogs import GetFolder
from robofab.world import RFont, OpenFont
import os
def collectSources(root):
    """Return the full paths of all .vfb font sources directly inside root."""
    wanted = ('.vfb',)
    return [
        os.path.join(root, name)
        for name in os.listdir(root)
        if os.path.splitext(name)[1] in wanted
    ]
# A little function for making folders. we'll need it later.
def makeFolder(path):
    """Create path (including any missing parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def makeDestination(root):
    """Ensure and return the FabFonts/ForMac output folder under root."""
    macPath = os.path.join(root, 'FabFonts', 'ForMac')
    # Create the folder only when it is missing (same as makeFolder).
    if not os.path.exists(macPath):
        os.makedirs(macPath)
    return macPath
def generateOne(f, dstDir):
    # Generate a Mac Type 1 font for one open RoboFab font into dstDir.
    print("generating %s"%f.info.postscriptFullName)
    f.generate('mactype1', dstDir)
# Ask the user for a folder of .vfb sources, then batch-generate each font.
# Only one font is open at a time to limit memory use (hence "robust").
f = GetFolder()
if f is not None:
    paths = collectSources(f)
    dstDir = makeDestination(f)
    for f in paths:
        font = None
        try:
            font = OpenFont(f)
            generateOne(font, dstDir)
        finally:
            # Always close without saving, even if generation failed.
            if font is not None:
                font.close(False)
print('done') | {
"repo_name": "adrientetar/robofab",
"path": "Scripts/RoboFabUtils/RobustBatchGenerate.py",
"copies": "1",
"size": "1024",
"license": "bsd-3-clause",
"hash": 3258946483289881600,
"line_mean": 19.9183673469,
"line_max": 72,
"alpha_frac": 0.70703125,
"autogenerated": false,
"ratio": 2.852367688022284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4059398938022284,
"avg_score": null,
"num_lines": null
} |
"""Amorphous silica surface."""
import mbuild as mb
class AmorphousSilicaSurface(mb.Compound):
    """Amorphous silica surface.

    Loads a pre-built amorphous silica slab and attaches an ``mb.Port``
    to every particle named 'OB' (presumably bridging oxygens — verify
    against the pdb file) so other compounds can bond to the surface.

    Parameters
    ----------
    surface_roughness : float, optional, default=1.0
        Selects the input structure; only 1.0 is currently available and
        any other value raises ValueError.
    """

    def __init__(self, surface_roughness=1.0):
        super(AmorphousSilicaSurface, self).__init__()
        if surface_roughness == 1.0:
            mb.load(
                "amorphous_silica_sr1.0.pdb",
                compound=self,
                relative_to_module=self.__module__,
            )
            # Periodic in x/y only; the slab is finite along z.
            self.periodicity = (True, True, False)
            self.box = mb.Box([5.4366, 4.7082, 1.0])
        else:
            raise ValueError(
                "Amorphous silica input file with surface "
                "roughness of {0:.1f} does not exist. If you have "
                "this structure, please submit a pull request to "
                "add it! ".format(surface_roughness)
            )
        count = 0
        # Attach a numbered port, oriented along +z, above each 'OB' atom.
        for particle in list(self.particles()):
            if particle.name == "OB":
                count += 1
                port = mb.Port(
                    anchor=particle, orientation=[0, 0, 1], separation=0.1
                )
                self.add(port, "port_{}".format(count))
if __name__ == "__main__":
    from mbuild.lib.recipes import TiledCompound

    # Demo: tile the surface 2x1x1 and write it out as a mol2 file.
    single = AmorphousSilicaSurface()
    multiple = TiledCompound(single, n_tiles=(2, 1, 1), name="tiled")
    multiple.save("amorphous_silica_surface.mol2")
| {
"repo_name": "mosdef-hub/mbuild",
"path": "mbuild/lib/surfaces/amorphous_silica_surface.py",
"copies": "2",
"size": "1398",
"license": "mit",
"hash": 8993806892370564,
"line_mean": 33.0975609756,
"line_max": 74,
"alpha_frac": 0.5350500715,
"autogenerated": false,
"ratio": 3.7580645161290325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5293114587629032,
"avg_score": null,
"num_lines": null
} |
""" A mostly direct translation of rdradcp.m to python.
1/3/2013: Updated with DKR changes to rdradcp.m
"""
import sys,os,re,math
from numpy import *
import scipy.stats.stats as sp
import scipy.stats.morestats as ssm
from matplotlib.dates import date2num,num2date
import datetime
import pdb
cfac=180.0/(2**31)
def msg_print(s):
    """Write s to stdout immediately (no trailing newline, flushed)."""
    out = sys.stdout
    out.write(s)
    out.flush()
def get_ens_dtype(sourceprog='WINRIVER'):
    """Return the numpy dtype spec (list of (name, type) pairs) for the
    per-ensemble fields of an ADCP record.

    Parameters
    ----------
    sourceprog : str, optional
        Acquisition program that produced the file.  'WINRIVER' (the
        default) appends navigation time/longitude/latitude fields;
        'VMDAS' appends start/end navigation fields; any other value
        yields only the common fields.

    Returns
    -------
    list of (str, dtype) or (str, (dtype, int)) tuples suitable for
    numpy structured-array construction.
    """
    # Fields common to every source program; 4-element entries are
    # per-beam bottom-track quantities.
    ens_dtype = [('mtime', float64),
                 ('number', int32),
                 ('pitch', float64), ('roll', float64), ('heading', float64),
                 ('pitch_std', float64), ('roll_std', float64), ('heading_std', float64),
                 ('depth', float64),
                 ('temperature', float64),
                 ('salinity', float64),
                 ('pressure', float64), ('pressure_std', float64),
                 ('bt_mode', float64),
                 ('bt_range', (float64, 4)),
                 ('bt_vel', (float64, 4)),
                 ('bt_corr', (float64, 4)),
                 ('bt_ampl', (float64, 4)),
                 ('bt_perc_good', (float64, 4))
                 ]
    if sourceprog == 'WINRIVER':
        ens_dtype += [('nav_mtime', float64),
                      ('nav_longitude', float64),
                      ('nav_latitude', float64)]
    elif sourceprog == 'VMDAS':
        ens_dtype += [('nav_smtime', float64),
                      ('nav_emtime', float64),
                      ('nav_slongitude', float64),
                      ('nav_elongitude', float64),
                      ('nav_slatitude', float64),
                      ('nav_elatitude', float64),
                      ('nav_mtime', float64)]
    return ens_dtype
def get_bin_dtype():
    """Return the dtype spec for per-bin (depth cell) fields.

    These are quantities of shape [n_cells, n]; the 4-element entries
    are per-beam values.
    """
    scalar_names = ['east_vel', 'north_vel', 'vert_vel', 'error_vel']
    per_beam_names = ['corr', 'status', 'intens', 'perc_good']
    spec = [(name, float64) for name in scalar_names]
    spec += [(name, (float64, 4)) for name in per_beam_names]
    return spec
class Adcp(object):
    """Empty container; rdradcp attaches configuration and data arrays to
    instances as plain attributes."""
    pass
#function [adcp,cfg,ens,hdr]=rdradcp(name,varargin);
#
def rdradcp(name,
num_av=5,
nens=-1, # or [start,stop] as 1-based, inclusive
baseyear=2000,
despike='no',
log_fp=None):
"""
RDRADCP Read (raw binary) RDI ADCP files,
ADCP=RDRADCP(NAME) reads the raw binary RDI BB/Workhorse ADCP file NAME and
puts all the relevant configuration and measured data into a data structure
ADCP (which is self-explanatory). This program is designed for handling data
recorded by moored instruments (primarily Workhorse-type but can also read
Broadband) and then downloaded post-deployment. For vessel-mount data I
usually make p-files (which integrate nav info and do coordinate transformations)
and then use RDPADCP.
This current version does have some handling of VMDAS, WINRIVER, and WINRIVER2 output
files, but it is still 'beta'. There are (inadequately documented) timestamps
of various kinds from VMDAS, for example, and caveat emptor on WINRIVER2 NMEA data.
[ADCP,CFG]=RDRADCP(...) returns configuration data in a
separate data structure.
Various options can be specified on input:
[..]=RDRADCP(NAME,NUMAV) averages NUMAV ensembles together in the result.
[..]=RDRADCP(NAME,NUMAV,NENS) reads only NENS ensembles (-1 for all).
[..]=RDRADCP(NAME,NUMAV,[NFIRST NEND]) reads only the specified range
of ensembles. This is useful if you want to get rid of bad data before/after
the deployment period.
Notes- sometimes the ends of files are filled with garbage. In this case you may
have to rerun things explicitly specifying how many records to read (or the
last record to read). I don't handle bad data very well. Also - in Aug/2007
I discovered that WINRIVER-2 files can have a varying number of bytes per
ensemble. Thus the estimated number of ensembles in a file (based on the
length of the first ensemble and file size) can be too high or too low.
- I don't read in absolutely every parameter stored in the binaries;
just the ones that are 'most' useful. Look through the code if
you want to get other things.
- chaining of files does not occur (i.e. read .000, .001, etc.). Sometimes
a ping is split between the end of one file and the beginning of another.
The only way to get this data is to concatentate the files, using
cat file1.000 file1.001 > file1 (unix)
copy file1.000/B+file2.001/B file3.000/B (DOS/Windows)
(as of Dec 2005 we can probably read a .001 file)
- velocity fields are always called east/north/vertical/error for all
coordinate systems even though they should be treated as
1/2/3/4 in beam coordinates etc.
String parameter/option pairs can be added after these initial parameters:
'baseyear' : Base century for BB/v8WH firmware (default to 2000).
'despike' : [ 'no' | 'yes' | 3-element vector ]
Controls ensemble averaging. With 'no' a simple mean is used
(default). With 'yes' a mean is applied to all values that fall
within a window around the median (giving some outlier rejection).
This is useful for noisy data. Window sizes are [.3 .3 .3] m/s
for [ horiz_vel vert_vel error_vel ] values. If you want to
change these values, set 'despike' to the 3-element vector.
R. Pawlowicz (rich@eos.ubc.ca) - 17/09/99
R. Pawlowicz - 17/Oct/99
5/july/00 - handled byte offsets (and mysterious 'extra" bytes) slightly better, Y2K
5/Oct/00 - bug fix - size of ens stayed 2 when NUMAV==1 due to initialization,
hopefully this is now fixed.
10/Mar/02 - #bytes per record changes mysteriously,
tried a more robust workaround. Guess that we have an extra
2 bytes if the record length is even?
28/Mar/02 - added more firmware-dependent changes to format; hopefully this
works for everything now (put previous changes on firmer footing?)
30/Mar/02 - made cfg output more intuitive by decoding things.
- An early version of WAVESMON and PARSE which split out this
data from a wave recorder inserted an extra two bytes per record.
I have removed the code to handle this but if you need it see line 509
29/Nov/02 - A change in the bottom-track block for version 4.05 (very old!).
29/Jan/03 - Status block in v4.25 150khzBB two bytes short?
14/Oct/03 - Added code to at least 'ignore' WinRiver GPS blocks.
11/Nov/03 - VMDAS navigation block, added hooks to output
navigation data.
26/Mar/04 - better decoding of nav blocks
- better handling of weird bytes at beginning and end of file
(code fixes due to Matt Drennan).
25/Aug/04 - fixes to "junk bytes" handling.
27/Jan/05 - even more fixed to junk byte handling (move 1 byte at a time rather than
two for odd lengths.
29/Sep/2005 - median windowing done slightly incorrectly in a way which biases
results in a negative way in data is *very* noisy. Now fixed.
28/Dc/2005 - redid code for recovering from ensembles that mysteriously change length, added
'checkheader' to make a complete check of ensembles.
Feb/2006 - handling of firmware version 9 (navigator)
23/Aug/2006 - more firmware updates (16.27)
23/Aug2006 - ouput some bt QC stiff
29/Oct/2006 - winriver bottom track block had errors in it - now fixed.
30/Oct/2006 - pitch_std, roll_std now uint8 and not int8 (thanks Felipe pimenta)
13/Aug/2007 - added Rio Grande (firmware v 10),
better handling of those cursed winriver ASCII NMEA blocks whose
lengths change unpredictably.
skipping the inadequately documented 2022 WINRIVER-2 NMEA block
13/Mar/2010 - firmware version 50 for WH.
31/Aug/2012 - Rusty Holleman / RMA - ported to python
Python port details:
log_fp: a file-like object - the message are the same as in the matlab code,
but this allows them to be redirected elsewhere.
"""
if log_fp is None:
log_fp = sys.stdout
def msg(s):
log_fp.write(s)
log_fp.flush()
century=baseyear # ADCP clock does not have century prior to firmware 16.05.
vels=despike # Default to simple averaging
# Check file information first
if not os.path.exists(name):
msg("ERROR******* Can't find file %s\n"%name)
return None
msg("\nOpening file %s\n\n"%name)
fd=open(name,'rb') # NB: no support at the file level for 'ieee-le'
# Read first ensemble to initialize parameters
[ens,hdr,cfg,pos]=rd_buffer(fd,-2,msg) # Initialize and read first two records
if ens is None: # ~isstruct(ens) & ens==-1,
msg("No Valid data found\n")
return None
fd.seek(pos) # Rewind
if (cfg.prog_ver<16.05 and cfg.prog_ver>5.999) or cfg.prog_ver<5.55:
msg("***** Assuming that the century begins year %d (info not in this firmware version)\n"%century)
else:
century=0 # century included in clock.
    def ensemble_dates(ensx):
        """Convert each ensemble's real-time-clock fields to datenums.

        ensx.rtc rows hold [year, month, day, hour, minute, second,
        hundredths]; the closed-over `century` is added to the year for
        firmware that does not record it. Returns an array of datenums
        (via date2num), one per rtc row.
        """
        # handle hours, minutes, seconds, 100ths manually, but date with date2num
        # (the weights convert hours/minutes/seconds/hundredths to fractional days)
        dats = [date2num(datetime.date(int(century+ensx.rtc[i,0]),
                                       int(ensx.rtc[i,1]),
                                       int(ensx.rtc[i,2]))) \
                + sum( ensx.rtc[i,3:7] * [1./24, 1./(24*60), 1./86400, 1./8640000 ])
                for i in range(len(ensx.rtc))]
        dats = array(dats)
        return dats
dats = ensemble_dates(ens)
t_int=diff(dats)[0]
msg( "Record begins at %s\n"%( num2date(dats[0]).strftime('%c') ))
msg( "Ping interval appears to be %ss\n\n"%( 86400*t_int ))
# Estimate number of records (since I don't feel like handling EOFs correctly,
# we just don't read that far!)
# Now, this is a puzzle - it appears that this is not necessary in
# a firmware v16.12 sent to me, and I can't find any example for
# which it *is* necessary so I'm not sure why its there. It could be
    # a leftover from dealing with the bad WAVESMON/PARSE problem (now
# fixed) that inserted extra bytes.
# ...So its out for now.
#if cfg.prog_ver>=16.05, extrabytes=2 else extrabytes=0 end # Extra bytes
extrabytes=0
naminfo = os.stat(name)
nensinfile=int(naminfo.st_size/(hdr.nbyte+2+extrabytes))
msg("\nEstimating %d ensembles in this file\n"%nensinfile)
# [python] nens, if a sequence, is taken to be 1-based, inclusive indices.
# This is counter to the normal python interpretation, but instead
# consistent with the original matlab.
if isinstance(nens,int) or isinstance(nens,integer):
if nens==-1:
nens=nensinfile
msg(" Reading %d ensembles, reducing by a factor of %d\n"%(nens,num_av) )
else:
msg(" Reading ensembles %d-%d, reducing by a factor of %d\n"%(nens[0],nens[1],num_av) )
fd.seek((hdr.nbyte+2+extrabytes)*(nens[0]-1),os.SEEK_CUR)
nens=nens[1] - nens[0] + 1
# Number of records after averaging.
n=int(nens/num_av)
msg("Final result %d values\n"%n)
if num_av>1:
if type(vels) == str:
msg("\n Simple mean used for ensemble averaging\n")
else:
msg("\n Averaging after outlier rejection with parameters %s\n"%vels)
# Structure to hold all ADCP data
# Note that I am not storing all the data contained in the raw binary file, merely
# things I think are useful.
# types for the data arrays - first, the fields common to all sourceprog:
adcp = Adcp()
adcp.name = 'adcp'
adcp.config=cfg
# things of the shape [1,n]
ens_dtype = get_ens_dtype(cfg.sourceprog)
# things of the shape [n_cells,n]
bin_dtype = get_bin_dtype()
# NB: we may not actually have n ensembles - don't know until
# the whole file is actually read - so at the end of this function
# these arrays may get truncated
adcp.ensemble_data = zeros(n,dtype=ens_dtype)
adcp.bin_data = zeros((n,cfg.n_cells), dtype=bin_dtype)
# Calibration factors for backscatter data
# Loop for all records
ens = None # force it to reinitialize
for k in range(n): # [python] k switched to zero-based
# Gives display so you know something is going on...
if k%50==0:
msg("%d\n"%(k*num_av))
msg(".")
# Read an ensemble
[ens,hdr,cfg1,pos]=rd_buffer(fd,num_av,msg)
if ens is None: # ~isstruct(ens), # If aborting...
msg("Only %d records found..suggest re-running RDRADCP using this parameter\n"%( (k-1)*num_av ))
msg("(If this message preceded by a POSSIBLE PROGRAM PROBLEM message, re-run using %d)\n"%( (k-1)*num_av-1))
n = k
break
dats = ensemble_dates(ens)
adcp.ensemble_data['mtime'][k] =median(dats)
adcp.ensemble_data['number'][k] =ens.number[0]
adcp.ensemble_data['heading'][k] =ssm.circmean(ens.heading*pi/180.)*180/pi
adcp.ensemble_data['pitch'][k] =mean(ens.pitch)
adcp.ensemble_data['roll'][k] =mean(ens.roll)
adcp.ensemble_data['heading_std'][k] =mean(ens.heading_std)
adcp.ensemble_data['pitch_std'][k] =mean(ens.pitch_std)
adcp.ensemble_data['roll_std'][k] =mean(ens.roll_std)
adcp.ensemble_data['depth'][k] =mean(ens.depth)
adcp.ensemble_data['temperature'][k] =mean(ens.temperature)
adcp.ensemble_data['salinity'][k] =mean(ens.salinity)
adcp.ensemble_data['pressure'][k] =mean(ens.pressure)
adcp.ensemble_data['pressure_std'][k]=mean(ens.pressure_std)
# [python] - order of indices for bin data is opposite matlab -
# adcp.east_vel[ ensemble index, bin_index ]
if type(vels) == str:
adcp.bin_data['east_vel'][k,:] =nmean(ens.east_vel ,0) # [python] axis changed to 0-based, and switched!
adcp.bin_data['north_vel'][k,:] =nmean(ens.north_vel,0) # assume ens.east_vel[sample_index,bin_index]
adcp.bin_data['vert_vel'][k,:] =nmean(ens.vert_vel ,0)
adcp.bin_data['error_vel'][k,:] =nmean(ens.error_vel,0)
else:
adcp.bin_data['east_vel'][k,:] =nmedian(ens.east_vel ,vels[0],0)
adcp.bin_data['north_vel'][k,:] =nmedian(ens.north_vel ,vels[0],0)
adcp.bin_data['vert_vel'][k,:] =nmedian(ens.vert_vel ,vels[1],0)
adcp.bin_data['error_vel'][k,:] =nmedian(ens.error_vel ,vels[2],0)
# per-beam, per bin data -
# adcp.corr[ensemble index, bin_index, beam_index ]
adcp.bin_data['corr'][k,:,:] =nmean(ens.corr,0) # added correlation RKD 9/00
adcp.bin_data['status'][k,:,:] =nmean(ens.status,0)
adcp.bin_data['intens'][k,:,:] =nmean(ens.intens,0)
adcp.bin_data['perc_good'][k,:,:] =nmean(ens.percent,0) # felipe pimenta aug. 2006
adcp.ensemble_data['bt_range'][k,:] =nmean(ens.bt_range,0)
adcp.ensemble_data['bt_mode'][k] = nmedian(ens.bt_mode)
adcp.ensemble_data['bt_vel'][k,:] =nmean(ens.bt_vel,0)
adcp.ensemble_data['bt_corr'][k,:]=nmean(ens.bt_corr,0) # felipe pimenta aug. 2006
adcp.ensemble_data['bt_ampl'][k,:]=nmean(ens.bt_ampl,0) # "
adcp.ensemble_data['bt_perc_good'][k,:]=nmean(ens.bt_perc_good,0)# "
if cfg.sourceprog == 'WINRIVER':
#if cfg.sourceprog in ('instrument','WINRIVER'):
adcp.ensemble_data['nav_mtime'][k]=nmean(ens.smtime)
# these are sometimes nan - and note that nmean
# modifies the input, so it looks like it should
adcp.ensemble_data['nav_longitude'][k]=nmean(ens.slongitude)
adcp.ensemble_data['nav_latitude'][k]=nmean(ens.slatitude)
# DBG
#print "nmean(%s) => %s"%(ens.slongitude,adcp.ensemble_data['nav_longitude'][k])
#print "nmean(%s) => %s"%(ens.slatitude,adcp.ensemble_data['nav_latitude'][k])
# out of curiosity, does this ever happen??
#if cfg.sourceprog=='instrument' and isfinite(adcp.nav_latitude[k]) and adcp.nav_latitude[k]!=0:
# print "##################### instrument has some data ###################"
elif cfg.sourceprog == 'VMDAS':
adcp.ensemble_data['nav_smtime'][k] =ens.smtime[0]
adcp.ensemble_data['nav_emtime'][k] =ens.emtime[0]
adcp.ensemble_data['nav_slatitude'][k]=ens.slatitude[0]
adcp.ensemble_data['nav_elatitude'][k]=ens.elatitude[0]
adcp.ensemble_data['nav_slongitude'][k]=ens.slongitude[0]
adcp.ensemble_data['nav_elongitude'][k]=ens.elongitude[0]
adcp.ensemble_data['nav_mtime'][k]=nmean(ens.nmtime)
##
msg("\nRead to byte %d in a file of size %d bytes\n"%( fd.tell(),naminfo.st_size ) )
if fd.tell()+hdr.nbyte<naminfo.st_size:
msg("-->There may be another %d ensembles unread\n" % int((naminfo.st_size-fd.tell())/(hdr.nbyte+2)) )
fd.close()
if n < len(adcp.ensemble_data):
msg("Truncating data to the valid set of records\n")
adcp.ensemble_data = adcp.ensemble_data[:n]
adcp.bin_data = adcp.bin_data[:n]
# and make the fields show up more like the matlab code:
for name,typ in ens_dtype:
setattr(adcp,name,adcp.ensemble_data[name])
for name,typ in bin_dtype:
setattr(adcp,name,adcp.bin_data[name])
# and normalize the latitude/longitude naming:
adcp.latitude = None
adcp.longitude = None
if cfg:
if cfg.sourceprog == 'VMDAS':
# punting on which lat/lon fields to reference
msg("VMDAS input - assuming nav_s* fields are better than nav_e*\n")
adcp.latitude = adcp.nav_slatitude
adcp.longitude = adcp.nav_slongitude
# the arrays are there, but the values aren't set yet
#print("adcp lat/lon %f %f\n"%(adcp.latitude,adcp.longitude))
elif cfg.sourceprog in ('WINRIVER'):
adcp.latitude = adcp.nav_latitude
adcp.longitude = adcp.nav_longitude
# too early to print
#print("adcp lat/lon %f %f\n"%(adcp.latitude[0],adcp.longitude[0]))
return adcp
#----------------------------------------
#function valid=checkheader(fd)
def checkheader(fd):
    """Peek ahead to verify the ensemble at the current file position.

    Reads the ensemble size word, skips to where the *next* ensemble's
    ID bytes should be, and checks for the 0x7F 0x7F marker. The file
    position is always restored before returning.

    Returns 1 if another valid header follows, else 0.
    """
    origin = fd.tell()
    ok = 0
    try:
        # Size word that follows the two header ID bytes.
        size = fromfile(fd, int16, 1)
        if len(size) == 1 and size[0] > 0:
            # Jump over this ensemble's payload (the size counts the two
            # ID bytes we are already past) and read the next ID pair.
            fd.seek(size[0] - 2, os.SEEK_CUR)
            next_id = fromfile(fd, uint8, 2)
            # fromfile returns a short/empty array at EOF, so the last
            # ensemble in a file is (sloppily) never validated.
            if len(next_id) == 2 and next_id[0] == 0x7F and next_id[1] == 0x7F:
                ok = 1
    finally:
        fd.seek(origin)
    return ok
#-------------------------------------
# function [hdr,pos]=rd_hdr(fd)
def rd_hdr(fd,msg=msg_print):
    """Locate the next ensemble start (0x7F 0x7F) and read its header.

    Skips junk bytes which can appear at the beginning of a file
    (apparently when switching from one flash card to another in moored
    ADCPs - fix due to Matt Brennan), reporting how many were skipped.

    Returns (hdr, pos) where hdr is a Header and pos is the byte offset
    of the ID word; returns (None, pos) if EOF is reached first.
    """
    cfgid=fromfile(fd,uint8,2)
    nread=0
    # departure from matlab code - check to see if cfgid itself was
    # truncated at EOF
    while len(cfgid)<2 or (cfgid[0] != 0x7F or cfgid[1]!=0x7F) or not checkheader(fd):
        nextbyte=fromfile(fd,uint8,1)
        pos=fd.tell()
        nread+=1
        if len(nextbyte)==0: # End of file
            msg('EOF reached before finding valid cfgid\n')
            hdr=None
            return hdr,pos
        # slide the two-byte search window forward one byte
        # (assignment looks backwards, but both target bytes are 0x7F)
        cfgid[1],cfgid[0] = cfgid[0],nextbyte[0]
        if pos % 1000==0:
            msg("Still looking for valid cfgid at file position %d...\n"%pos)
        #end
    #end
    pos=fd.tell()-2
    if nread>0:
        msg("Junk found at BOF...skipping %d bytes until\n"%nread )
        msg("cfgid=%x %x at file pos %d\n"%(cfgid[0],cfgid[1],pos))
    #end
    # BUGFIX: rd_hdrseg returns (hdr, nbyte); the original kept the whole
    # tuple, so callers received a tuple rather than a Header object.
    hdr,_nbyte=rd_hdrseg(fd)
    return hdr,pos
#-------------------------------------
#function cfg=rd_fix(fd)
def rd_fix(fd,msg=msg_print):
    """Check the fixed-leader ID word, then parse the fixed leader.

    Warns (via msg) when the ID word is missing or nonzero, but parses
    the segment regardless. Returns the resulting Config.
    """
    fix_id=fromfile(fd,uint16,1)
    if len(fix_id)==0:
        msg("WARNING: ran into end of file reading Fixed header ID\n")
    elif fix_id[0] != 0: # expected ID is 0x0000
        msg("WARNING: Fixed header ID %x incorrect - data corrupted or not a BB/WH raw file?\n"%fix_id[0])
    cfg,_nbytes=rd_fixseg(fd)
    return cfg
#--------------------------------------
#function [hdr,nbyte]=rd_hdrseg(fd)
class Header(object):
    """Bare attribute holder for an ensemble header (nbyte, dat_offsets)."""
    pass

def rd_hdrseg(fd):
    """Read one header segment at the current file position.

    Layout: int16 byte count, one spare byte, int8 number of data types,
    then one int16 offset per data type.

    Returns (hdr, nbyte) where nbyte is the size of this segment
    (4 fixed bytes plus 2 per offset).
    """
    hdr = Header()
    hdr.nbyte = fromfile(fd, int16, 1)[0]
    fd.seek(1, os.SEEK_CUR)              # spare byte
    n_types = fromfile(fd, int8, 1)[0]
    hdr.dat_offsets = fromfile(fd, int16, n_types)
    return hdr, 4 + 2 * n_types
#-------------------------------------
#function opt=getopt(val,varargin)
def getopt(val,*args):
    """Map a small integer (or boolean) code onto one of the given options.

    val selects args[val] (booleans are coerced to 0/1); any value past
    the end of args yields 'unknown'.
    """
    idx = int(val)  # in case it's a boolean
    if idx < len(args):
        return args[idx]
    return 'unknown'
#
#-------------------------------------
# function [cfg,nbyte]=rd_fixseg(fd)
class Config(object):
    # Plain attribute container for the fixed-leader configuration
    # (fields such as prog_ver, n_cells, ranges are set in rd_fixseg).
    pass
def rd_fixseg(fd):
    """Read the fixed-leader configuration segment.

    Returns (cfg, nbyte): a Config holding the decoded configuration and
    the number of bytes consumed from the segment so far (the caller
    accounts for the 2-byte ID word separately).
    """
    cfg = Config()
    cfg.name='wh-adcp'
    cfg.sourceprog='instrument' # default - depending on what data blocks are
                                # around we can modify this later in rd_buffer.
    # Version + revision bytes. BUGFIX: the second fromfile was missing [0],
    # which made prog_ver a 1-element array instead of a scalar.
    cfg.prog_ver       =fromfile(fd,uint8,1)[0]+fromfile(fd,uint8,1)[0]/100.0
    # 8,9,16 - WH navigator
    # 10 -rio grande
    # 15, 17 - NB
    # 19 - REMUS, or customer specific
    # 11- H-ADCP
    # 31 - Streampro
    # 34 - NEMO
    # 50 - WH, no bottom track (built on 16.31)
    # 51 - WH, w/ bottom track
    # 52 - WH, mariner
    if int(cfg.prog_ver) in (4,5):
        cfg.name='bb-adcp'
    elif int(cfg.prog_ver) in (8,9,10,16,50,51,52):
        cfg.name='wh-adcp'
    elif int(cfg.prog_ver) in (14,23): # phase 1 and phase 2
        cfg.name='os-adcp'
    else:
        cfg.name='unrecognized firmware version'
    #end
    config         =fromfile(fd,uint8,2)  # Coded stuff
    cfg.config          ="%2o-%2o"%(config[1],config[0])
    cfg.beam_angle      =getopt(config[1]&3,15,20,30)
    # the ==16 comparison yields a boolean; getopt coerces it to 0/1
    # to index its argument list
    cfg.numbeams        =getopt( config[1]&16==16,4,5)
    cfg.beam_freq       =getopt(config[0]&7,75,150,300,600,1200,2400,38)
    cfg.beam_pattern    =getopt(config[0]&8==8,'concave','convex') # 1=convex,0=concave
    cfg.orientation     =getopt(config[0]&128==128,'down','up')    # 1=up,0=down
    cfg.simflag         =getopt(fromfile(fd,uint8,1)[0],'real','simulated') # Flag for simulated data
    fd.seek(1,os.SEEK_CUR)   # spare byte
    cfg.n_beams         =fromfile(fd,uint8,1)[0]
    cfg.n_cells         =fromfile(fd,uint8,1)[0]
    cfg.pings_per_ensemble=fromfile(fd,uint16,1)[0]
    cfg.cell_size       =fromfile(fd,uint16,1)[0]*.01  # meters
    cfg.blank           =fromfile(fd,uint16,1)[0]*.01  # meters
    cfg.prof_mode       =fromfile(fd,uint8,1)[0]
    cfg.corr_threshold  =fromfile(fd,uint8,1)[0]
    cfg.n_codereps      =fromfile(fd,uint8,1)[0]
    cfg.min_pgood       =fromfile(fd,uint8,1)[0]
    cfg.evel_threshold  =fromfile(fd,uint16,1)[0]
    cfg.time_between_ping_groups = sum( fromfile(fd,uint8,3) * array([60, 1, .01]) ) # seconds
    coord_sys           =fromfile(fd,uint8,1)[0]  # Lots of bit-mapped info
    cfg.coord="%2o"%coord_sys
    cfg.coord_sys      =getopt( (coord_sys >> 3)&3,'beam','instrument','ship','earth')
    cfg.use_pitchroll  =getopt(coord_sys&4==4,'no','yes')
    cfg.use_3beam      =getopt(coord_sys&2==2,'no','yes')
    cfg.bin_mapping    =getopt(coord_sys&1==1,'no','yes')
    cfg.xducer_misalign=fromfile(fd,int16,1)[0]*.01   # degrees
    cfg.magnetic_var   =fromfile(fd,int16,1)[0]*.01   # degrees
    cfg.sensors_src    ="%2o"%(fromfile(fd,uint8,1)[0])
    cfg.sensors_avail  ="%2o"%(fromfile(fd,uint8,1)[0])
    cfg.bin1_dist      =fromfile(fd,uint16,1)[0]*.01  # meters
    cfg.xmit_pulse     =fromfile(fd,uint16,1)[0]*.01  # meters
    cfg.water_ref_cells=fromfile(fd,uint8,2)
    cfg.fls_target_threshold =fromfile(fd,uint8,1)[0]
    fd.seek(1,os.SEEK_CUR)   # spare byte
    cfg.xmit_lag       =fromfile(fd,uint16,1)[0]*.01  # meters
    nbyte=40
    # Firmware-dependent trailing fields:
    if int(cfg.prog_ver) in (8,10,16,50,51,52):
        if cfg.prog_ver>=8.14:   # Added CPU serial number with v8.14
            cfg.serialnum      =fromfile(fd,uint8,8)
            nbyte+=8
        #end
        if cfg.prog_ver>=8.24:   # Added 2 more bytes with v8.24 firmware
            cfg.sysbandwidth   =fromfile(fd,uint8,2)
            nbyte+=2
        #end
        if cfg.prog_ver>=16.05:  # Added 1 more byte with v16.05 firmware
            cfg.syspower       =fromfile(fd,uint8,1)[0]
            nbyte+=1
        #end
        if cfg.prog_ver>=16.27:  # Added bytes for REMUS, navigators, and HADCP
            cfg.navigator_basefreqindex=fromfile(fd,uint8,1)[0]
            nbyte+=1
            cfg.remus_serialnum=fromfile(fd,uint8,4)
            nbyte+=4
            cfg.h_adcp_beam_angle=fromfile(fd,uint8,1)[0]
            nbyte+=1
        #end
    elif int(cfg.prog_ver)==9:
        if cfg.prog_ver>=9.10:   # Added CPU serial number with v9.10
            cfg.serialnum      =fromfile(fd,uint8,8)
            nbyte+=8
            cfg.sysbandwidth   =fromfile(fd,uint8,2)
            nbyte+=2
        #end
        # BUGFIX: a bare matlab 'end' had survived the port here, raising
        # NameError whenever a v9 (navigator) fixed leader was read.
    elif int(cfg.prog_ver) in (14,16):
        # NOTE(review): 16 is already claimed by the wh-adcp branch above,
        # so this effectively matches only 14; possibly (14,23) was intended.
        cfg.serialnum      =fromfile(fd,uint8,8)  # 8 bytes 'reserved'
        nbyte+=8
    #end
    # It is useful to have this precomputed.
    cfg.ranges=cfg.bin1_dist+arange(cfg.n_cells)*cfg.cell_size
    # NOTE(review): cfg.orientation is a string ('down'/'up'), so this test
    # is never true and ranges are never negated - same behavior as the
    # original matlab; confirm intent before changing.
    if cfg.orientation==1:
        cfg.ranges *= -1
    return cfg,nbyte
#-----------------------------
#function [ens,hdr,cfg,pos]=rd_buffer(fd,num_av)
# Module-level state shared by rd_buffer across calls (ports of the
# matlab 'persistent' variables).
ens_alloc = None         # cached preallocated Ensemble, reused when num_av matches
ens_alloc_num_av = None  # the num_av value that ens_alloc was sized for
hdr = None               # most recent header read by rd_buffer
FIXOFFSET = None         # fudge offset for files whose record length is off
SOURCE = None            # 0=instrument, 1=VMDAS, 2=WINRIVER (set while parsing)
def rd_buffer(fd,num_av,msg=msg_print):
""" RH: return ens=None, hdr=None if there's a problem
returns [ens,hdr,cfg,pos]
"""
# To save it being re-initialized every time.
# [python] cache the preallocated array in ens_alloc, and remember
# what num_av was, so we can reallocate when called with a different num_av.
# otherwise global/local is too confusing, as other parts of the code use
# ens both for a local variable and a global variable, or kind of appear to do
# so.
global ens_alloc,ens_alloc_num_av, hdr
pos = None
# A fudge to try and read files not handled quite right.
global FIXOFFSET, SOURCE
# If num_av<0 we are reading only 1 element and initializing
if num_av<0:
SOURCE=0
class Ensemble(object):
pass
cfg=None
ens=None
if num_av == ens_alloc_num_av:
ens = ens_alloc
# This reinitializes to whatever length of ens we want to average.
if num_av<0 or ens is None:
FIXOFFSET=0
n=abs(num_av)
[hdr,pos]=rd_hdr(fd,msg)
if hdr is None:
return ens,hdr,cfg,pos
cfg=rd_fix(fd,msg)
fd.seek(pos,os.SEEK_SET)
ens_dtype = [('number',float64),
('rtc',(float64,7)),
('BIT',float64),
('ssp',float64),
('depth',float64),
('pitch',float64),
('roll',float64),
('heading',float64),
('temperature',float64),
('salinity',float64),
('mpt',float64),
('heading_std',float64),
('pitch_std',float64),
('roll_std',float64),
('adc',(float64,8)),
('error_status_wd',float64),
('pressure',float64),
('pressure_std',float64),
('east_vel',(float64,cfg.n_cells)),
('north_vel',(float64,cfg.n_cells)),
('vert_vel',(float64,cfg.n_cells)),
('error_vel',(float64,cfg.n_cells)),
('intens',(float64,(cfg.n_cells,4))),
('percent',(float64,(cfg.n_cells,4))),
('corr',(float64,(cfg.n_cells,4))),
('status',(float64,(cfg.n_cells,4))),
('bt_mode',float64),
('bt_range',(float64,4)),
('bt_vel',(float64,4)),
('bt_corr',(float64,4)),
('bt_ampl',(float64,4)),
('bt_perc_good',(float64,4)),
('smtime',float64),
('emtime',float64),
('slatitude',float64),
('slongitude',float64),
('elatitude',float64),
('elongitude',float64),
('nmtime',float64),
('flags',float64) ]
ens = Ensemble()
ens.ensemble_data = zeros( n, dtype=ens_dtype)
for name,typ in ens_dtype:
setattr(ens,name,ens.ensemble_data[name])
ens_alloc = ens
ens_alloc_num_av = num_av
num_av=abs(num_av)
k=-1 # a bit tricky - it gets incremented at the beginning of an ensemble
while k+1<num_av:
# This is in case junk appears in the middle of a file.
num_search=6000
id1=fromfile(fd,uint8,2)
search_cnt=0
while search_cnt<num_search and \
((id1[0]!=0x7F or id1[1]!=0x7F ) or not checkheader(fd)):
search_cnt+=1
nextbyte=fromfile(fd,uint8,1)
if len(nextbyte)==0: # End of file
msg("EOF reached after %d bytes searched for next valid ensemble start\n"%search_cnt)
ens=None
return ens,hdr,cfg,pos
id1[1]=id1[0]
id1[0]=nextbyte[0]
# fprintf([dec2hex(id1(1)) '--' dec2hex(id1(2)) '\n'])
if search_cnt==num_search:
print "ERROR: Searched %d entries..."%search_cnt
print "Not a workhorse/broadband file or bad data encountered: -> %x%x"%(id1[0],id1[1])
ens = None
return ens,hdr,cfg,pos
elif search_cnt>0:
msg("Searched %d bytes to find next valid ensemble start\n"%search_cnt)
startpos=fd.tell()-2 # Starting position.
# Read the # data types.
[hdr,nbyte]=rd_hdrseg(fd)
byte_offset=nbyte+2
## fprintf('# data types = %d\n ',(length(hdr.dat_offsets)))
## fprintf('Blocklen = %d\n ',hdr.nbyte)
# Read all the data types.
for n in range(len(hdr.dat_offsets)): # n: 1 => 0 based
id_="%04X"%fromfile(fd,uint16,1)[0]
# DBG - second time through the loop we're 3 bytes short.
# print "ID=%s ftell=%d n=%d"%(id_,fd.tell(),n)
# DBG:
#if fd.tell() == 3957:
# pdb.set_trace()
# handle all the various segments of data. Note that since I read the IDs as a two
# byte number in little-endian order the high and low bytes are exchanged compared to
# the values given in the manual.
#
winrivprob=0
#print("n,id = %d %s\n"%(n,id_))
if id_ == '0000':
# case '0000', # Fixed leader
[cfg,nbyte]=rd_fixseg(fd)
nbyte+=2
elif id_ == '0080': # Variable Leader
# So I think that we need to increment k here, as this marks the
# beginning of a record, but we want k to remain 0-based, so above
# it was initialized to -1 (just as in the matlab code it is initialized
# to 0).
k+=1
ens.number[k] =fromfile(fd,uint16,1)[0]
ens.rtc[k,:] =fromfile(fd,uint8,7)
ens.number[k] =ens.number[k]+65536*fromfile(fd,uint8,1)[0]
ens.BIT[k] =fromfile(fd,uint16,1)[0]
ens.ssp[k] =fromfile(fd,uint16,1)[0]
ens.depth[k] =fromfile(fd,uint16,1)[0]*.1 # meters
ens.heading[k] =fromfile(fd,uint16,1)[0]*.01 # degrees
ens.pitch[k] =fromfile(fd,int16,1)[0]*.01 # degrees
ens.roll[k] =fromfile(fd,int16,1)[0]*.01 # degrees
ens.salinity[k] =fromfile(fd,int16,1)[0] # PSU
ens.temperature[k] =fromfile(fd,int16,1)[0]*.01 # Deg C
ens.mpt[k] =sum( fromfile(fd,uint8,3) * array([60,1,.01])) # seconds
ens.heading_std[k] =fromfile(fd,uint8,1)[0] # degrees
ens.pitch_std[k] =fromfile(fd,uint8,1)[0]*.1 # degrees
ens.roll_std[k] =fromfile(fd,uint8,1)[0]*.1 # degrees
ens.adc[k,:] =fromfile(fd,uint8,8)
nbyte=2+40
if cfg.name =='bb-adcp':
if cfg.prog_ver>=5.55:
fd.seek(15,os.SEEK_CUR) # 14 zeros and one byte for number WM4 bytes
cent=fromfile(fd,uint8,1)[0] # possibly also for 5.55-5.58 but
ens.rtc[k,:] = fromfile(fd,uint8,7) # I have no data to test.
ens.rtc[k,0] += cent*100
nbyte+=15+8
# end
elif cfg.name == 'wh-adcp': # for WH versions.
ens.error_status_wd[k]=fromfile(fd,uint32,1)[0]
nbyte+=4
if int(cfg.prog_ver) in (8,10,16,50,51,52):
if cfg.prog_ver>=8.13: # Added pressure sensor stuff in 8.13
fd.seek(2,os.SEEK_CUR)
ens.pressure[k] =fromfile(fd,uint32,1)[0]
ens.pressure_std[k] =fromfile(fd,uint32,1)[0]
nbyte+=10
# end
if cfg.prog_ver>=8.24: # Spare byte added 8.24
fd.seek(1,os.SEEK_CUR)
nbyte+=1
# end
if ( cfg.prog_ver>=10.01 and cfg.prog_ver<=10.99 ) or \
cfg.prog_ver>=16.05: # Added more fields with century in clock 16.05
cent=fromfile(fd,uint8,1)[0]
ens.rtc[k,:]=fromfile(fd,uint8,7)
ens.rtc[k,0]+=cent*100
nbyte+=8
# end
elif int(cfg.prog_ver)==9:
fd.seek(2,os.SEEK_CUR)
ens.pressure[k] =fromfile(fd,uint32,1)[0]
ens.pressure_std[k] =fromfile(fd,uint32,1)[0]
nbyte+=10
if cfg.prog_ver>=9.10: # Spare byte added 8.24
fd.seek(1,os.SEEK_CUR)
nbyte+=1
# end
# end
elif cfg.name=='os-adcp':
fd.seek(16,os.SEEK_CUR) # 30 bytes all set to zero, 14 read above
nbyte+=16
if cfg.prog_ver>23:
fd.seek(2,os.SEEK_CUR)
nbyte+=2
#end
#end
elif id_ == '0100': # Velocities
# RCH: will need to check array ordering on these - may have rows/cols
# switched!
vels=fromfile(fd,int16,4*cfg.n_cells).reshape([cfg.n_cells,4]) * 0.001 # m/s
ens.east_vel[k,:] =vels[:,0]
ens.north_vel[k,:] =vels[:,1]
ens.vert_vel[k,:] =vels[:,2]
ens.error_vel[k,:] =vels[:,3]
nbyte=2+4*cfg.n_cells*2
elif id_ == '0200': # Correlations
# RCH check array ordering:
ens.corr[k,:,:] =fromfile(fd,uint8,4*cfg.n_cells).reshape([cfg.n_cells,4])
nbyte=2+4*cfg.n_cells
elif id_ == '0300': # Echo Intensities
# RCH check array ordering:
ens.intens[k,:,:] =fromfile(fd,uint8,4*cfg.n_cells).reshape([cfg.n_cells,4])
nbyte=2+4*cfg.n_cells
elif id_ == '0400': # Percent good
ens.percent[k,:,:] =fromfile(fd,uint8,4*cfg.n_cells).reshape([cfg.n_cells,4])
nbyte=2+4*cfg.n_cells
elif id_ == '0500': # Status
# RESUME TRANSLATION HERE
# Rusty, I was not consistent about retaining "end" statements
# I noticed after deleting several that you had been keeping
# commented out versions.
if cfg.name=='os-adcp':
# fd.seek(00,os.SEEK_CUR) # zero seek is in the original matlab...
nbyte=2 # +00
else:
# Note in one case with a 4.25 firmware SC-BB, it seems like
# this block was actually two bytes short!
ens.status[k,:,:] =fromfile(fd,uint8,4*cfg.n_cells).reshape([cfg.n_cells,4])
nbyte=2+4*cfg.n_cells
elif id_ == '0600': # Bottom track
# In WINRIVER GPS data is tucked into here in odd ways, as long
# as GPS is enabled.
if SOURCE==2:
fd.seek(2,os.SEEK_CUR)
# Rusty, I added the [0] below and in several other places
long1=fromfile(fd,uint16,1)[0]
# added bt mode extraction - ben
fd.seek(3,os.SEEK_CUR)
ens.bt_mode[k] = float64(fromfile(fd,uint8,1)[0]) # fromfile(fd,uint8,1)[0]
fd.seek(2,os.SEEK_CUR)
#fd.seek(6,os.SEEK_CUR)
ens.slatitude[k] =fromfile(fd,int32,1)[0]*cfac
if ens.slatitude[k]==0:
ens.slatitude[k]=nan
else:
#fd.seek(14,os.SEEK_CUR) # Skip over a bunch of stuff
fd.seek(7,os.SEEK_CUR) # Skip over a bunch of stuff
ens.bt_mode[k] = float64(fromfile(fd,uint8,1)[0])
fd.seek(6,os.SEEK_CUR) # Skip over a bunch of stuff
# end
ens.bt_range[k,:]=fromfile(fd,uint16,4)*.01 #
ens.bt_vel[k,:] =fromfile(fd,int16,4)
ens.bt_corr[k,:] =fromfile(fd,uint8,4) # felipe pimenta aug. 2006
ens.bt_ampl[k,:]=fromfile(fd,uint8,4) # "
ens.bt_perc_good[k,:]=fromfile(fd,uint8,4) # "
if SOURCE==2:
fd.seek(2,os.SEEK_CUR)
# The original rdradcp code:
# ens.slongitude[k]=(long1+65536*fromfile(fd,uint16,1)[0])*cfac
# Fix from DKR:
tmp=(long1+65536*fromfile(fd,uint16,1)[0])*cfac
if long1==0:
ens.slongitude[k]=nan #dkr --> else longitudes bad
else:
ens.slongitude[k]=tmp
#end
#fprintf('\n k %d %8.3f %f ',long1,ens.slongitude(k),(ens.slongitude(k)/cfac-long1)/65536)
if ens.slongitude[k]>180:
ens.slongitude[k]=ens.slongitude[k]-360
if ens.slongitude[k]==0:
ens.slongitude[k]=nan
fd.seek(16,os.SEEK_CUR)
qual=fromfile(fd,uint8,1)
if qual==0:
## fprintf('qual==%d,%f %f',qual,ens.slatitude(k),ens.slongitude(k))
ens.slatitude[k]=nan
ens.slongitude[k]=nan
fd.seek(71-45-21,os.SEEK_CUR)
else:
fd.seek(71-45,os.SEEK_CUR)
# end
nbyte=2+68
if cfg.prog_ver>=5.3: # Version 4.05 firmware seems to be missing these last 11 bytes.
fd.seek(78-71,os.SEEK_CUR)
ens.bt_range[k,:]=ens.bt_range[k,:]+fromfile(fd,uint8,4)*655.36
nbyte+=11
if cfg.name == 'wh-adcp':
if cfg.prog_ver>=16.20: # RDI documentation claims these extra bytes were added in v 8.17
fd.seek(4,os.SEEK_CUR) # but they don't appear in my 8.33 data - conversation with
nbyte+=4 # Egil suggests they were added in 16.20
#end
#end
#end
# end # id_==0600 # bottom track
elif id_ == '2000': # Something from VMDAS.
# The raw files produced by VMDAS contain a binary navigation data
# block.
cfg.sourceprog='VMDAS'
if SOURCE != 1:
msg("\n***** Apparently a VMDAS file \n")
#end
SOURCE=1
utim =fromfile(fd,uint8,4)
mtime =datenum(utim[3]+utim[4]*256,utim[2],utim[1])
ens.smtime[k] =mtime+fromfile(fd,uint32,1)[0]/8640000.
fd.seek(4,os.SEEK_CUR) # PC clock offset from UTC
ens.slatitude[k] =fromfile(fd,int32,1)[0]*cfac
ens.slongitude[k] =fromfile(fd,int32,1)[0]*cfac
ens.emtime[k] =mtime+fromfile(fd,uint32,1)[0]/8640000.
ens.elatitude[k] =fromfile(fd,int32,1)[0]*cfac
ens.elongitude[k] =fromfile(fd,int32,1)[0]*cfac
fd.seek(12,os.SEEK_CUR)
ens.flags[k] =fromfile(fd,uint16,1)[0]
fd.seek(6,os.SEEK_CUR)
utim =fromfile(fd,uint8,4)
mtime =datenum(utim(1)+utim(2)*256,utim(4),utim(3))
ens.nmtime[k] =mtime+fromfile(fd,uint32,1)[0]/8640000.
# in here we have 'ADCP clock' (not sure how this
# differs from RTC (in header) and UTC (earlier in this block).
fd.seek(16,os.SEEK_CUR)
nbyte=2+76
elif id_ == '2022': # New NMEA data block from WInRiverII
cfg.sourceprog='WINRIVER2'
if SOURCE != 2:
msg("\n***** Apparently a WINRIVER file - Raw NMEA data handler not yet implemented\n")
#end
SOURCE=2
specID=fromfile(fd,uint16,1)[0]
msgsiz=fromfile(fd,int16,1)[0]
deltaT=fromfile(fd,uchar,8)
nbyte=2+12
fd.seek(msgsiz,os.SEEK_CUR)
nbyte+=msgsiz
# print "post msgsiz, nbyte=%d"%nbyte
## do nothing code on specID
# fprintf(' %d ',specID)
# switch specID,
# case 100,
# case 101,
# case 102,
# case 103,
# end
# The following blocks come from WINRIVER files, they aparently contain
# the raw NMEA data received from a serial port.
#
# Note that for WINRIVER files somewhat decoded data is also available
# tucked into the bottom track block.
#
# I've put these all into their own block because RDI's software apparently completely ignores the
# stated lengths of these blocks and they very often have to be changed. Rather than relying on the
# error coding at the end of the main block to do this (and to produce an error message) I will
# do it here, without an error message to emphasize that I am kludging the WINRIVER blocks only!
elif id_ in ('2100','2101','2102','2103','2104'):
winrivprob=1
if id_ == '2100': # $xxDBT (Winriver addition) 38
cfg.sourceprog='WINRIVER'
if SOURCE != 2:
msg("\n***** Apparently a WINRIVER file - Raw NMEA data handler not yet implemented\n")
SOURCE=2
str_=fd.read(38) # fromfile(fd,uchar,38)
nbyte=2+38
elif id_ == '2101': # $xxGGA (Winriver addition) 94 in manual but 97 seems to work
# Except for a winriver2 file which seems to use 77.
cfg.sourceprog='WINRIVER'
if SOURCE != 2:
msg("\n***** Apparently a WINRIVER file - Raw NMEA data handler not yet implemented\n")
SOURCE=2
str_=fd.read(97) # setstr(fromfile(fd,uchar,97))
nbyte=2+97
l = str_.find('$GPGGA')
if l >= 0:
# original indices: str(l+7:l+8) str(l+9:l+10) str(l+11:l+12)
# but we are both zero-based, and ending index is exclusive...
# but since l is already zero-based instead of 1 based, we only have to change
# the ending indexes in each case.
try:
# occasionally the GPS will have logged an incomplete reading -
# and this may fail.
hh,mm,ss = int(str_[l+7:l+9]), int(str_[l+9:l+11]), float(str_[l+11:l+13])
ens.smtime[k]=(hh+(mm+ss/60.)/60.)/24.
except ValueError:
msg('Corrupt GPS string - skipping')
# disp(['->' setstr_(str_(1:50)) '<-'])
elif id_ == '2102': # $xxVTG (Winriver addition) 45 (but sometimes 46 and 48)
cfg.sourceprog='WINRIVER'
if SOURCE != 2:
msg("\n***** Apparently a WINRIVER file - Raw NMEA data handler not yet implemented\n")
#end
SOURCE=2
str_=fd.read(45)
nbyte=2+45
#disp(setstr(str_))
elif id_ == '2103': # $xxGSA (Winriver addition) 60
cfg.sourceprog='WINRIVER'
if SOURCE != 2:
msg("\n***** Apparently a WINRIVER file - Raw NMEA data handler not yet implemented\n")
#end
SOURCE=2
str_=fd.read(60)
nbyte=2+60
elif id_ == '2104': #xxHDT or HDG (Winriver addition) 38
cfg.sourceprog='WINRIVER'
if SOURCE != 2:
msg("\n***** Apparently a WINRIVER file - Raw NMEA data handler not yet implemented\n")
#end
SOURCE=2
str_=fd.read(38)
nbyte=2+38
elif id_ == '0701': # Number of good pings
fd.seek(4*cfg.n_cells,os.SEEK_CUR)
nbyte=2+4*cfg.n_cells
elif id_ == '0702': # Sum of squared velocities
fd.seek(4*cfg.n_cells,os.SEEK_CUR)
nbyte=2+4*cfg.n_cells
elif id_ == '0703': # Sum of velocities
fd.seek(4*cfg.n_cells,os.SEEK_CUR)
nbyte=2+4*cfg.n_cells
# These blocks were implemented for 5-beam systems
elif id_ == '0A00': # Beam 5 velocity (not implemented)
fd.seek(cfg.n_cells,os.SEEK_CUR)
nbyte=2+cfg.n_cells
elif id_ == '0301': # Beam 5 Number of good pings (not implemented)
fd.seek(cfg.n_cells,os.SEEK_CUR)
nbyte=2+cfg.n_cells
elif id_ == '0302': # Beam 5 Sum of squared velocities (not implemented)
fd.seek(cfg.n_cells,os.SEEK_CUR)
nbyte=2+cfg.n_cells
elif id_ == '0303': # Beam 5 Sum of velocities (not implemented)
fd.seek(cfg.n_cells,os.SEEK_CUR)
nbyte=2+cfg.n_cells
elif id_ == '020C': # Ambient sound profile (not implemented)
fd.seek(4,os.SEEK_CUR)
nbyte=2+4
elif id_ == '3000': # Fixed attitude data format for OS-ADCPs (not implemented)
fd.seek(32,os.SEEK_CUR)
nbyte=2+32
else:
# This is pretty idiotic - for OS-ADCPs (phase 2) they suddenly decided to code
# the number of bytes into the header ID word. And then they don't really
# document what they did! So, this is cruft of a high order, and although
# it works on the one example I have - caveat emptor....
#
# Anyway, there appear to be codes 0340-03FC to deal with. I am not going to
# decode them but I am going to try to figure out how many bytes to
# skip.
#if strcmp(id(1:2),'30'),
if id_[:2] == '30':
# I want to count the number of 1s in the middle 4 bits of the
# 2nd two bytes.
nflds= bin( int(id_[2:4],16) & 0x3C ).count('1')
# I want to count the number of 1s in the highest 2 bits of byte 3
dfac = bin(int(id_[2],16)&0x0C).count('1')
fd.seek(12*nflds*dfac,os.SEEK_CUR)
nbyte=2+12*nflds*dfac
else:
msg( "Unrecognized ID code: %s"%id_ )
# DBG:
#raise Exception,"STOP"
nbyte=2
ens = None
return ens,hdr,cfg,pos
## ens=-1
##
#end
# here I adjust the number of bytes so I am sure to begin
# reading at the next valid offset. If everything is working right I shouldn't have
# to do this but every so often firware changes result in some differences.
# print '#bytes is %d, original offset is %d'%(nbyte,byte_offset)
byte_offset=byte_offset+nbyte
# both n and hdr.dat_offsets are now 0-based, but len() is unchanged - so be
# careful on comparisons to len(hdr.dat_offsets)
if n+1<len(hdr.dat_offsets):
if hdr.dat_offsets[n+1] != byte_offset:
if not winrivprob:
msg("%s: Adjust location by %d\n"%(id_,hdr.dat_offsets[n+1]-byte_offset) )
fd.seek(hdr.dat_offsets[n+1]-byte_offset,os.SEEK_CUR)
#end
byte_offset=hdr.dat_offsets[n+1]
else:
if hdr.nbyte-2 != byte_offset:
if not winrivprob:
msg("%s: Adjust location by %d\n"%(id_,hdr.nbyte-2-byte_offset))
fd.seek(hdr.nbyte-2-byte_offset,os.SEEK_CUR)
#end
byte_offset=hdr.nbyte-2
#end
#end
# Now at the end of the record we have two reserved bytes, followed
# by a two-byte checksum = 4 bytes to skip over.
readbytes=fd.tell()-startpos
offset=(hdr.nbyte+2)-byte_offset # The 2 is for the checksum
if offset !=4 and FIXOFFSET==0:
# in python, no direct test for eof (annoying), so step back one byte,
# and try to read it. not sure that this will do the right thing if the
# last thing we did was a failed read - it definitely works if the last thing
# was a bad seek
fd.seek(-1,os.SEEK_CUR)
feof = len(fd.read(1)) == 0
msg("\n*****************************************************\n")
if feof:
msg("EOF reached unexpectedly - discarding this last ensemble\n")
ens=-1
else:
msg("Adjust location by %d (readbytes=%d, hdr.nbyte=%d)\n"%(offset,readbytes,hdr.nbyte))
msg(" NOTE - If this appears at the beginning of the read, it is\n")
msg(" is a program problem, possibly fixed by a fudge\n")
msg(" PLEASE REPORT TO rich@eos.ubc.ca WITH DETAILS!!\n\n")
msg(" -If this appears at the end of the file it means\n")
msg(" The file is corrupted and only a partial record has \n ")
msg(" has been read\n")
#end
msg("******************************************************\n")
FIXOFFSET=offset-4
#end
fd.seek(4+FIXOFFSET,os.SEEK_CUR)
# An early version of WAVESMON and PARSE contained a bug which stuck an additional two
# bytes in these files, but they really shouldn't be there
#if cfg.prog_ver>=16.05,
# fd.seek(2,os.SEEK_CUR)
#end
#end
# Blank out stuff bigger than error velocity
# big_err=abs(ens.error_vel)>.2
# big_err=0
# Blank out invalid data
# RCH: removed big_err references
ens.east_vel[ens.east_vel==-32.768]=nan
ens.north_vel[ens.north_vel==-32.768]=nan
ens.vert_vel[ens.vert_vel==-32.768]=nan
ens.error_vel[ens.error_vel==-32.768]=nan
return ens,hdr,cfg,pos
#--------------------------------------
#function y=nmedian(x,window,dim)
def nmedian(x, window=inf, dim=None):
    """Windowed, NaN-ignoring median-style average of x along axis dim.

    Along axis ``dim`` (0-based), estimates the median of the finite values,
    discards values farther than ``window`` from that estimate, and returns
    the mean of what remains.  With the default ``window=inf`` this reduces
    to the NaN-ignoring mean of each slice.

    Parameters
    ----------
    x : array_like
    window : float
        Only values within ``window`` of the median estimate contribute.
    dim : int or None
        Axis to reduce; defaults to the first non-unity dimension of x.
        Unlike ordinary reductions, the reduced axis is kept with length 1
        (and extra length-1 axes are appended when dim >= x.ndim).
    """
    x = array(x)
    if dim is None:
        # choose dim to be the first non-unity dimension of x,
        # reverting to 0 if every dimension has length 1
        long_dims = [d for d in range(x.ndim) if x.shape[d] > 1]
        dim = (long_dims + [0])[0]
    # If dim is beyond x's dimensions, grow x by one dimension and reduce
    # along it; any further requested padding is applied to y at the end.
    orig_dim = dim
    if dim >= x.ndim:
        dim = x.ndim
        x = x[..., None]
    n = x.shape[dim]
    # Rotate the dimensions so dim becomes the first axis, then collapse
    # all remaining dimensions into the columns of a 2-D array.
    perm = (arange(x.ndim) - dim) % x.ndim
    unperm = (arange(x.ndim) + dim) % x.ndim
    x = x.transpose(perm)
    dims_permuted = x.shape
    x = x.reshape((n, -1))
    # In-place sort; NaNs sort to the end of each column.
    x.sort(axis=0)
    n1, n2 = x.shape
    if n1 == 1:
        # one row per column - no stats to be taken, just copy
        y = x
    elif n2 == 1:
        kk = sum(isfinite(x))  # summing booleans counts the finite entries
        if kk > 0:
            # x1,x2: the two middle finite elements if kk is even,
            # both the single middle element if kk is odd
            x1 = x[int((kk - 1) / 2)]
            x2 = x[int(kk / 2)]
            deviations = abs(x - (x1 + x2) / 2.)
            x[deviations > window] = nan
        x.sort(axis=0)
        # recount, since values may have been NaN'd just above
        kk = sum(isfinite(x))
        x[isnan(x)] = 0
        # bug fix: use array(nan) instead of the bare float NaN, so that
        # the reshape below still works when the column is entirely NaN
        y = array(nan)
        if kk > 0:
            y = sum(x) / kk
    else:
        # count finite values in each column
        kk = sum(isfinite(x), axis=0)
        ll = kk < n1 - 2  # columns with too many NaNs get an all-NaN answer
        kk[ll] = 0
        x[:, ll] = nan
        # indices of the two middle finite values of each column
        low_median = ((kk - 1) / 2).clip(0, inf).astype(int32)
        high_median = (kk / 2).clip(0, inf).astype(int32)
        x1 = x[low_median, range(n2)]
        x2 = x[high_median, range(n2)]
        # broadcast the per-column median estimate over the rows
        deviations = abs(x - (x1 + x2)[None, ...] / 2.)
        x[deviations > window] = nan
        x.sort(axis=0)
        kk = sum(isfinite(x), axis=0)
        x[isnan(x)] = 0
        y = nan + ones(n2)
        if any(kk):
            valid = kk > 0
            y[valid] = sum(x[:, valid], axis=0) / kk[valid]
    # y now holds one value per column; restore it to x's permuted shape
    # (keeping the reduced axis as length 1) and un-rotate the axes.
    y_dims_permuted = list(dims_permuted)
    y_dims_permuted[0] = 1
    y = y.reshape(y_dims_permuted)
    y = y.transpose(unperm)
    # Pad out length-1 dimensions when the caller requested dim >= x.ndim.
    # (bug fix: the original padded x here instead of y, so the promised
    # output shape was never actually produced.)
    while y.ndim <= orig_dim:
        y = y[..., None]
    return y
#--------------------------------------
#function y=nmean(x,dim)
def nmean(x, dim=None):
    """NaN-ignoring mean of x along axis dim (0-based).

    Parameters
    ----------
    x : ndarray
    dim : int or None
        Axis to reduce; defaults to the first non-unity dimension of x.

    Returns the mean over the finite entries of each slice; slices with no
    finite entries yield nan.  If dim >= x.ndim, x itself is returned (with
    non-finite entries zeroed) - a quirk kept from the matlab-5 original.
    """
    # copy so the caller's array is not mutated (matlab semantics);
    # the unused `xorig` alias from the original was removed
    x = x.copy()
    kk = isfinite(x)
    x[~kk] = 0
    if dim is None:
        # choose dim to be the first non-unity dimension of x,
        # reverting to 0 if every dimension has length 1
        long_dims = [d for d in range(x.ndim) if x.shape[d] > 1]
        dim = (long_dims + [0])[0]
    if dim >= x.ndim:
        y = x  # For matlab 5.0 only!!! Later versions have a fixed 'sum'
    else:
        # count of valid samples per slice; atleast_1d in case x is a
        # vector and the sum collapses to a scalar
        ndat = atleast_1d(sum(kk, axis=dim))
        indat = (ndat == 0)
        # If there are no good data then it doesn't matter what we divide
        # by - and this avoids div-by-zero warnings.
        ndat[indat] = 1
        y = atleast_1d(sum(x, axis=dim)) / ndat.astype(float64)
        y[indat] = nan
    return y
| {
"repo_name": "esatel/ADCPy",
"path": "adcpy/rdradcp.py",
"copies": "1",
"size": "62286",
"license": "mit",
"hash": -3457608987792429600,
"line_mean": 41.6616438356,
"line_max": 120,
"alpha_frac": 0.5280319815,
"autogenerated": false,
"ratio": 3.5476448140342884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45756767955342886,
"avg_score": null,
"num_lines": null
} |
""" A mostly-general Traits UI editor for viewing things in TVTK scenes.
"""
# Authors: Robert Kern <robert.kern [at] gmail.com>
# Prabhu Ramachandran <prabhu [at] aero.iitb.ac.in>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
# Major library imports.
import os
from pyface.qt import QtGui
# Enthought library imports.
from traits.api import Any, Bool, Callable, Dict, Str
from traitsui.qt4.editor import Editor
from traitsui.basic_editor_factory import BasicEditorFactory
from decorated_scene import DecoratedScene
#####################################################################
# `_ActorEditor` class
#####################################################################
class _ActorEditor(Editor):
    """ An editor for TVTK scenes.

    The edited value is a map of model objects to the actors/3D-widgets
    representing them; this editor keeps a TVTK scene widget synchronized
    with that map through trait notifications.
    """

    # The editor is scrollable, so override the default.
    scrollable = Bool(True)

    # Internal GUI traits.
    _scene = Any()

    #### Public 'Editor' interface #############################################

    def init(self, parent):
        """ Finishes initializing the editor by creating the underlying toolkit
        widget.
        """
        # (removed an unused `factory = self.factory` local)
        self.control = QtGui.QWidget()
        lay = QtGui.QVBoxLayout(self.control)
        lay.setContentsMargins(0, 0, 0, 0)
        self._create_scene()

    def update_editor(self):
        """ Updates the editor when the object trait changes external to the
        editor.
        """
        # Everything is handled by the trait notifications hooked up in
        # _setup_scene_notifications(); nothing to do here.
        pass

    def dispose(self):
        """ Disposes of the contents of an editor.
        """
        # Remove notifications.
        self._setup_scene_notifications(remove=True)
        # Remove the current scene.
        if self._scene is not None:
            self._scene.close()
            self._scene = None
        # This will destroy self.control and all of its children, including the
        # scene's control.
        super(_ActorEditor, self).dispose()

    #### Private '_ActorEditor' interface ##################################

    def _create_scene(self):
        """ Create the TVTK scene widget.
        """
        factory = self.factory
        self._scene = factory.scene_class(self.control, **factory.scene_kwds)
        scene = self._scene
        # Disable rendering on the scene until we're finished.
        scene.disable_render = True
        # Add all of the actors in the current actor map.
        for actors in self.value.values():
            self._add_actors_widgets(actors)
        # Set up Traits notifications.
        self._setup_scene_notifications()
        # Re-enable rendering.
        scene.disable_render = False
        self.control.layout().addWidget(scene.control)
        # Force a render.
        scene.render()

    def _setup_scene_notifications(self, remove=False):
        """ Set up or remove all of the Trait notifications that control the
        scene widget.
        """
        self.object.on_trait_change(
            self._set_scene_disable_render,
            name=self.factory.disable_render_name,
            remove=remove,
        )
        self.object.on_trait_event(
            self._scene.render,
            name=self.factory.do_render_name,
            remove=remove,
        )
        # `<name>_items` fires for in-place dict edits, `<name>` when the
        # whole actor map is replaced.
        self.object.on_trait_change(
            self._actors_changed,
            name=self.name + '_items',
            remove=remove,
        )
        self.object.on_trait_change(
            self._actor_map_changed,
            name=self.name,
            remove=remove,
        )

    def _set_scene_disable_render(self, new):
        """ A callback for Traits notifications.
        """
        self._scene.disable_render = new

    def _actors_changed(self, event):
        """ Handle the event of the actors in the actor map changing.
        """
        scene = self._scene
        # Temporarily turn off rendering. We (re)store the old value of
        # disable_render because it may already be True.
        old_disable_render = scene.disable_render
        scene.disable_render = True
        try:
            for actors in event.removed.values():
                self._remove_actors_widgets(actors)
            for actors in event.added.values():
                self._add_actors_widgets(actors)
            for obj, actors in event.changed.items():
                # The actors in the event are the old ones. Grab the new ones
                # from the actor map itself.
                self._remove_actors_widgets(actors)
                self._add_actors_widgets(self.value[obj])
        finally:
            scene.disable_render = old_disable_render
            scene.render()

    def _actor_map_changed(self, object, name, old, new):
        """ Handle the case when the entire actor map is set to something else.
        """
        scene = self._scene
        # Temporarily turn off rendering. We (re)store the old value of
        # disable_render because it may already be True.
        old_disable_render = scene.disable_render
        scene.disable_render = True
        try:
            for actors in old.values():
                self._remove_actors_widgets(actors)
            for actors in new.values():
                self._add_actors_widgets(actors)
        finally:
            scene.disable_render = old_disable_render
            scene.render()

    def _separate_actors_widgets(self, actors_widgets):
        """Given a sequence (or single) of actors or widgets, this returns a
        list of just the actors and another of just the widgets.
        """
        if not hasattr(actors_widgets, '__getitem__'):
            actors_widgets = [actors_widgets]
        actors = []
        widgets = []
        for actor in actors_widgets:
            if actor.is_a('vtk3DWidget'):
                widgets.append(actor)
            else:
                actors.append(actor)
        return actors, widgets

    def _add_actors_widgets(self, actors_widgets):
        """Add actors and widgets to scene."""
        scene = self._scene
        actors, widgets = self._separate_actors_widgets(actors_widgets)
        scene.add_actors(actors)
        scene.add_widgets(widgets)

    def _remove_actors_widgets(self, actors_widgets):
        """Remove actors and widgets from scene."""
        scene = self._scene
        actors, widgets = self._separate_actors_widgets(actors_widgets)
        scene.remove_actors(actors)
        scene.remove_widgets(widgets)
#####################################################################
# `ActorEditor` class
#####################################################################
class ActorEditor(BasicEditorFactory):
    """ An editor factory for TVTK scenes.

    Produces `_ActorEditor` instances that render a mapping of
    object -> actors/widgets into a TVTK scene widget.
    """
    # The class of the editor object to be constructed.
    klass = _ActorEditor
    # The class or factory function for creating the actual scene object.
    # Invoked as scene_class(parent_widget, **scene_kwds) in
    # _ActorEditor._create_scene().
    scene_class = Callable(DecoratedScene)
    # Keyword arguments to pass to the scene factory.
    scene_kwds = Dict()
    # The name of the trait used for ITVTKActorModel.disable_render.
    disable_render_name = Str('disable_render')
    # The name of the trait used for ITVTKActorModel.do_render.
    do_render_name = Str('do_render')
#### EOF #######################################################################
| {
"repo_name": "liulion/mayavi",
"path": "tvtk/pyface/ui/qt4/actor_editor.py",
"copies": "2",
"size": "7376",
"license": "bsd-3-clause",
"hash": -4320403083915985000,
"line_mean": 31.2096069869,
"line_max": 80,
"alpha_frac": 0.5694143167,
"autogenerated": false,
"ratio": 4.377448071216617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5946862387916617,
"avg_score": null,
"num_lines": null
} |
# A motion activated stepper motor
# A combination of switches.py and stepper.py
import RPi.GPIO as GPIO
import time
# Use Broadcom (BCM) pin numbering for all pins below.
GPIO.setmode(GPIO.BCM)
# PIR config:
# red to 5v, black to GND, yellow to #18
pir_pin = 18
# stepper motor config: see adafruit tutorial
# https://learn.adafruit.com/adafruits-raspberry-pi-lesson-10-stepper-motors
# I used a ULN2803 chip, so #18 was free for use by the PIR as input
coil_A_1_pin = 4
coil_A_2_pin = 17
coil_B_1_pin = 23
coil_B_2_pin = 24
# PIR output is a digital input; the four coil pins are outputs that
# drive the ULN2803 darlington array.
GPIO.setup(pir_pin, GPIO.IN)
GPIO.setup(coil_A_1_pin, GPIO.OUT)
GPIO.setup(coil_A_2_pin, GPIO.OUT)
GPIO.setup(coil_B_1_pin, GPIO.OUT)
GPIO.setup(coil_B_2_pin, GPIO.OUT)
def forward(delay, steps):
    """Step the motor forward `steps` cycles, sleeping `delay` seconds
    between coil patterns."""
    # Full-step coil sequence for forward rotation.
    sequence = ((1, 0, 1, 0),
                (0, 1, 1, 0),
                (0, 1, 0, 1),
                (1, 0, 0, 1))
    for _ in range(steps):
        for w1, w2, w3, w4 in sequence:
            setStep(w1, w2, w3, w4)
            time.sleep(delay)
def backwards(delay, steps):
    """Step the motor backwards `steps` cycles, sleeping `delay` seconds
    between coil patterns (forward sequence in reverse order)."""
    sequence = ((1, 0, 0, 1),
                (0, 1, 0, 1),
                (0, 1, 1, 0),
                (1, 0, 1, 0))
    for _ in range(steps):
        for w1, w2, w3, w4 in sequence:
            setStep(w1, w2, w3, w4)
            time.sleep(delay)
def setStep(w1, w2, w3, w4):
    """Drive the four coil pins to the given logic levels."""
    pins = (coil_A_1_pin, coil_A_2_pin, coil_B_1_pin, coil_B_2_pin)
    for pin, level in zip(pins, (w1, w2, w3, w4)):
        GPIO.output(pin, level)
moved = False
# Poll the PIR sensor at ~10 Hz.  Each detected motion event toggles the
# motor state: on a toggle to "moving" we run one burst of forward steps,
# on a toggle to "stopped" we de-energize the coils.
while True:
    if GPIO.input(pir_pin):
        moved = not moved
        print("movement toggled " + str(moved))
        if moved:
            # 2 ms between coil patterns, 100 step cycles
            # (bug fix: dropped the pointless int() casts in
            #  `forward(int(2) / 1000.0, int(100))`)
            forward(0.002, 100)
            print("moved forward")
        else:
            setStep(0, 0, 0, 0)  # release the motor
            print("stopped")
    time.sleep(0.1)
| {
"repo_name": "egocks/ducking-octo-batman",
"path": "motor_switch.py",
"copies": "1",
"size": "1474",
"license": "mit",
"hash": -1716159232744238800,
"line_mean": 21.3333333333,
"line_max": 76,
"alpha_frac": 0.6824966079,
"autogenerated": false,
"ratio": 2.1644640234948604,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8102743300260247,
"avg_score": 0.04884346622692259,
"num_lines": 66
} |
# amounra 0513 : http://www.aumhaa.com
from __future__ import with_statement
import Live
import time
import math
from itertools import chain
from _Framework.ButtonElement import ButtonElement
from _Framework.ButtonMatrixElement import ButtonMatrixElement
from _Framework.ChannelStripComponent import ChannelStripComponent
from _Framework.ClipSlotComponent import ClipSlotComponent
from _Framework.CompoundComponent import CompoundComponent
from _Framework.ControlElement import ControlElement
from _Framework.ControlSurface import ControlSurface
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
from _Framework.DeviceComponent import DeviceComponent
from _Framework.DisplayDataSource import DisplayDataSource
from _Framework.EncoderElement import EncoderElement
from _Framework.InputControlElement import *
from VCM600.MixerComponent import MixerComponent
from _Framework.ModeSelectorComponent import ModeSelectorComponent
from _Framework.NotifyingControlElement import NotifyingControlElement
from _Framework.SceneComponent import SceneComponent
from _Framework.SessionComponent import SessionComponent
from _Framework.SessionZoomingComponent import DeprecatedSessionZoomingComponent as SessionZoomingComponent
from _Framework.SliderElement import SliderElement
from VCM600.TrackFilterComponent import TrackFilterComponent
from _Framework.TransportComponent import TransportComponent
from _Framework.ModesComponent import AddLayerMode, LayerMode, MultiEntryMode, ModesComponent, SetAttributeMode, ModeButtonBehaviour, CancellableBehaviour, AlternativeBehaviour, ReenterBehaviour, DynamicBehaviourMixin, ExcludingBehaviourMixin, ImmediateBehaviour, LatchingBehaviour, ModeButtonBehaviour
from _Framework.Layer import Layer
from _Framework.SubjectSlot import SubjectEvent, subject_slot, subject_slot_group
from _Framework.Task import *
from _Framework.M4LInterfaceComponent import M4LInterfaceComponent
from _Framework.ComboElement import ComboElement, DoublePressElement, MultiElement, DoublePressContext
from _Mono_Framework.MonoBridgeElement import MonoBridgeElement
from _Mono_Framework.MonoEncoderElement import MonoEncoderElement
from _Mono_Framework.MonoBridgeElement import MonoBridgeElement
from _Mono_Framework.DetailViewControllerComponent import DetailViewControllerComponent
from _Mono_Framework.MonoButtonElement import MonoButtonElement
from _Mono_Framework.TranslationComponent import TranslationComponent
from _Mono_Framework.LiveUtils import *
from _Mono_Framework.ModDevices import *
from _Mono_Framework.Mod import *
from _Mono_Framework.MonoDeviceComponent import MonoDeviceComponent
from _Mono_Framework.Debug import *
from _Generic.Devices import *
from Map import *
debug = initialize_debug()
session = None
mixer = None
# Sysex: switch crossfader assignment / universal device-identity request.
switchxfader = (240, 0, 1, 97, 2, 15, 1, 247)
check_model = (240, 126, 127, 6, 1, 247)
# [row, column] grid coordinates of the key pads, covering the full width
# of rows 4, 6, 5 and 7 (in that order).
KEYS = [[row, col] for row in (4, 6, 5, 7) for col in range(8)]
TEMPO_TOP = 200.0
TEMPO_BOTTOM = 60.0
# MIDI message type tags used by the control elements.
MIDI_NOTE_TYPE = 0
MIDI_CC_TYPE = 1
MIDI_PB_TYPE = 2
MIDI_MSG_TYPES = (MIDI_NOTE_TYPE, MIDI_CC_TYPE, MIDI_PB_TYPE)
# MIDI status bytes (channel 0).
MIDI_NOTE_ON_STATUS = 144
MIDI_NOTE_OFF_STATUS = 128
MIDI_CC_STATUS = 176
MIDI_PB_STATUS = 224
# Octave up/down increments, indexed by mode value (0 = down, 1 = up).
INC_DEC = [-1, 1]
class CancellableBehaviourWithRelease(CancellableBehaviour):
    """Cancellable mode-button behaviour that also pops the mode when a
    long press is released, and lights buttons by group membership."""

    def release_delayed(self, component, mode):
        # Long press + release leaves the mode.
        component.pop_mode(mode)

    def update_button(self, component, mode, selected_mode):
        # Bright (10) when this mode is selected or shares a group with the
        # selected mode; dim (3) otherwise.
        btn = component.get_mode_button(mode)
        own_groups = component.get_mode_groups(mode)
        active_groups = component.get_mode_groups(selected_mode)
        is_lit = mode == selected_mode or bool(own_groups & active_groups)
        btn.send_value(10 if is_lit else 3, True)
class OctaveModeComponent(ModeSelectorComponent):
    """Mode selector that shifts the key rows by octaves (index 0-7).

    Note: set_mode() interprets its argument as a direction selector
    (0 = down, 1 = up, via INC_DEC), not as an absolute mode index.
    """

    def __init__(self, script, *a, **k):
        super(OctaveModeComponent, self).__init__(*a, **k)
        self._script = script
        self._set_protected_mode_index(3)  # start in the middle octave

    def set_mode_buttons(self, buttons):
        """Attach the up/down buttons and refresh their LEDs."""
        for button in self._modes_buttons:
            button.remove_value_listener(self._mode_value)
        self._modes_buttons = []
        if (buttons != None):
            for button in buttons:
                # bug fix: `isinstance(x, A or B)` only ever tested A;
                # check against the tuple of both button classes instead.
                assert isinstance(button, (ButtonElement, MonoButtonElement))
                identify_sender = True
                button.add_value_listener(self._mode_value, identify_sender)
                self._modes_buttons.append(button)
            # NOTE(review): update() lights buttons[0] for index < 6 and
            # buttons[1] for index > 0, but here the roles are swapped.
            # Preserved as-is -- confirm which orientation is intended.
            if (self._mode_index < 6):
                self._modes_buttons[1].turn_on()
            else:
                self._modes_buttons[1].turn_off()
            if (self._mode_index > 0):
                self._modes_buttons[0].turn_on()
            else:
                self._modes_buttons[0].turn_off()

    def set_mode(self, mode):
        """Step the octave index by INC_DEC[mode], clamped to 0..7."""
        assert isinstance(mode, int)
        mode = max(min(self._mode_index + INC_DEC[mode], 7), 0)
        if (self._mode_index != mode):
            self._mode_index = mode
            self.update()

    def set_mode_toggle(self, button):
        # bug fix: same `isinstance(A or B)` tuple fix as above.
        assert ((button == None) or isinstance(button, (ButtonElement, MonoButtonElement)))
        if (self._mode_toggle != None):
            self._mode_toggle.remove_value_listener(self._toggle_value)
        self._mode_toggle = button
        if (self._mode_toggle != None):
            self._mode_toggle.add_value_listener(self._toggle_value)

    def number_of_modes(self):
        return 7

    def update(self):
        """Re-map the three key rows for the current scale/octave and
        refresh the up/down button LEDs."""
        if self.is_enabled():
            for column in range(8):
                for row in range(3):
                    self._script._grid[column][row + 4].set_identifier(int(PAGE1_KEYS_MAP[column][row]) + int(PAGE1_MODES_MAP[self._script._scale_mode._mode_index][column]) + int(self._script._octave_mode._mode_index * 12))
            if (self._mode_index < 6):
                self._modes_buttons[0].turn_on()
            else:
                self._modes_buttons[0].turn_off()
            if (self._mode_index > 0):
                self._modes_buttons[1].turn_on()
            else:
                self._modes_buttons[1].turn_off()
class ShiftModeComponent(ModeSelectorComponent):
    __module__ = __name__
    __doc__ = ' Special Class that uses two shift buttons and is lockable '
    # Modes: 0 = base page, 1 = left shift page, 2 = right shift page,
    # 3 = mod page (toggles back to the last page), 4 = translation
    # (both shifts held; locks out mode changes until left again).

    def __init__(self, script, *a, **k):
        super(ShiftModeComponent, self).__init__(*a, **k)
        self._script = script
        self._mode_toggle1 = None
        self._mode_toggle2 = None
        self._mode_toggle3 = None
        self._set_protected_mode_index(0)
        self._last_mode = 0  # last non-mod page, restored when leaving mode 3

    def set_mode_toggle(self, button1, button2, button3):
        """Legacy hookup: attach plain value listeners to the three toggles."""
        # bug fix: `isinstance(x, A or B)` only ever tested A; check against
        # the tuple of both button classes instead (three occurrences).
        assert ((button1 == None) or isinstance(button1, (ButtonElement, MonoButtonElement)))
        if (self._mode_toggle1 != None):
            self._mode_toggle1.remove_value_listener(self._toggle_value_left)
        self._mode_toggle1 = button1
        if (self._mode_toggle1 != None):
            self._mode_toggle1.add_value_listener(self._toggle_value_left)
        assert ((button2 == None) or isinstance(button2, (ButtonElement, MonoButtonElement)))
        if (self._mode_toggle2 != None):
            self._mode_toggle2.remove_value_listener(self._toggle_value_right)
        self._mode_toggle2 = button2
        if (self._mode_toggle2 != None):
            self._mode_toggle2.add_value_listener(self._toggle_value_right)
        assert ((button3 == None) or isinstance(button3, (ButtonElement, MonoButtonElement)))
        if (self._mode_toggle3 != None):
            self._mode_toggle3.remove_value_listener(self._toggle_value_mod)
        self._mode_toggle3 = button3
        if (self._mode_toggle3 != None):
            self._mode_toggle3.add_value_listener(self._toggle_value_mod)
        self._script.request_rebuild_midi_map()

    def set_mode_toggle1(self, button):
        self._toggle_value_left.subject = button

    def set_mode_toggle2(self, button):
        self._toggle_value_right.subject = button

    def set_mode_toggle3(self, button):
        self._toggle_value_mod.subject = button

    @subject_slot('value')
    def _toggle_value_left(self, value):
        # Left shift alone selects mode 1; with right shift held, mode 4.
        if (value > 0):
            if self._toggle_value_right.subject and self._toggle_value_right.subject.is_pressed():
                self._toggle_value(4)
            else:
                self._toggle_value(1)

    @subject_slot('value')
    def _toggle_value_right(self, value):
        # Right shift alone selects mode 2; with left shift held, mode 4.
        if (value > 0):
            if self._toggle_value_left.subject and self._toggle_value_left.subject.is_pressed():
                self._toggle_value(4)
            else:
                self._toggle_value(2)

    @subject_slot('value')
    def _toggle_value_mod(self, value):
        if (value > 0):
            self._toggle_value(3)

    def _toggle_value(self, value):
        # bug fix: replaced `is`/`is not` comparisons on small ints with
        # `==`/`!=`; int identity is a CPython implementation detail and
        # raises SyntaxWarning on Python 3.8+.
        if value == self._mode_index:
            if value == 3:
                # leaving mod mode restores the previously active page
                self.set_mode(self._last_mode)
            else:
                self.set_mode(0)
        elif self._mode_index != 4:
            self.set_mode(value)

    def number_of_modes(self):
        return 5

    def update(self):
        """Light the shift LEDs for the current mode and (deferred by one
        tick) reassign the control surface for that page."""
        self._script.deassign_matrix()
        if self._mode_index == 0:
            self._toggle_value_left.subject and self._toggle_value_left.subject.turn_off()
            self._toggle_value_right.subject and self._toggle_value_right.subject.turn_off()
            self._script.schedule_message(1, self._script.assign_page_0)
        elif self._mode_index == 1:
            self._toggle_value_left.subject and self._toggle_value_left.subject.turn_on()
            self._toggle_value_right.subject and self._toggle_value_right.subject.turn_off()
            self._script.schedule_message(1, self._script.assign_page_1)
        elif self._mode_index == 2:
            self._toggle_value_left.subject and self._toggle_value_left.subject.turn_off()
            self._toggle_value_right.subject and self._toggle_value_right.subject.turn_on()
            self._script.schedule_message(1, self._script.assign_page_2)
        elif self._mode_index == 3:
            self._toggle_value_left.subject and self._toggle_value_left.subject.turn_off()
            self._toggle_value_right.subject and self._toggle_value_right.subject.turn_off()
            self._script.schedule_message(1, self._script.assign_mod)
        elif self._mode_index == 4:
            self._toggle_value_left.subject and self._toggle_value_left.subject.turn_on()
            self._toggle_value_right.subject and self._toggle_value_right.subject.turn_on()
            self._script.schedule_message(1, self._script.assign_translation)

    def set_mode(self, mode):
        assert isinstance(mode, int)
        assert (mode in range(self.number_of_modes()))
        if (self._mode_index != mode):
            if mode != 3:
                # remember the last non-mod page for _toggle_value()
                self._last_mode = mode
            self._mode_index = mode
            self.update()
class SpecialMixerComponent(MixerComponent):
    """Mixer that uses return tracks alongside midi and audio tracks, and
    can lock the selected strip to a fixed track."""
    __module__ = __name__

    def __init__(self, *a, **k):
        # When True, the selected strip ignores track-selection changes.
        self._is_locked = False
        super(SpecialMixerComponent, self).__init__(*a, **k)

    def on_selected_track_changed(self):
        """Follow Live's track selection (unless locked) and refresh the
        prev/next track button LEDs."""
        track = self.song().view.selected_track
        if track is None:
            return
        if self._selected_strip is not None and not self._is_locked:
            self._selected_strip.set_track(track)
        if not self.is_enabled():
            return
        if self._next_track_button is not None:
            # lit while there is a track after the selection
            if track != self.song().master_track:
                self._next_track_button.turn_on()
            else:
                self._next_track_button.turn_off()
        if self._prev_track_button is not None:
            # lit while there is a track before the selection
            if track != self.song().tracks[0]:
                self._prev_track_button.turn_on()
            else:
                self._prev_track_button.turn_off()

    def tracks_to_use(self):
        """Expose the return tracks in addition to the visible tracks."""
        return tuple(self.song().visible_tracks) + tuple(self.song().return_tracks)
class ScaleModeComponent(ModeSelectorComponent):
    """Mode selector for the eight key scales; re-maps the bottom key rows
    and relights its buttons whenever the selected scale changes."""

    def __init__(self, script, *a, **k):
        super(ScaleModeComponent, self).__init__(*a, **k)
        self._script = script
        self._set_protected_mode_index(0)

    def set_mode_buttons(self, buttons):
        """Attach one button per scale and light the selected one."""
        for button in self._modes_buttons:
            button.remove_value_listener(self._mode_value)
        self._modes_buttons = []
        if (buttons != None):
            for button in buttons:
                # bug fix: `isinstance(x, A or B)` only ever tested A;
                # check against the tuple of both button classes instead.
                assert isinstance(button, (ButtonElement, MonoButtonElement))
                identify_sender = True
                button.add_value_listener(self._mode_value, identify_sender)
                self._modes_buttons.append(button)
            for index in range(len(self._modes_buttons)):
                if (index == self._mode_index):
                    self._modes_buttons[index].turn_on()
                else:
                    self._modes_buttons[index].turn_off()

    def set_mode_toggle(self, button):
        # bug fix: same `isinstance(A or B)` tuple fix as above.
        assert ((button == None) or isinstance(button, (ButtonElement, MonoButtonElement)))
        if (self._mode_toggle != None):
            self._mode_toggle.remove_value_listener(self._toggle_value)
        self._mode_toggle = button
        if (self._mode_toggle != None):
            self._mode_toggle.add_value_listener(self._toggle_value)

    def number_of_modes(self):
        return 8

    def update(self):
        """Re-map the three key rows for the current scale/octave and light
        only the selected scale's button."""
        if self.is_enabled():
            for column in range(8):
                for row in range(3):
                    self._script._grid[column][row + 4].set_identifier(int(PAGE1_KEYS_MAP[column][row]) + int(PAGE1_MODES_MAP[self._script._scale_mode._mode_index][column]) + int(self._script._octave_mode._mode_index * 12))
            for index in range(len(self._modes_buttons)):
                if (index == self._mode_index):
                    self._modes_buttons[index].turn_on()
                else:
                    self._modes_buttons[index].turn_off()
class OhmModes(ControlSurface):
__module__ = __name__
__doc__ = ' OhmModes controller script '
def __init__(self, c_instance):
super(OhmModes, self).__init__(c_instance)
self._version_check = 'b996'
self._host_name = 'Ohm'
self._color_type = 'OhmRGB'
self._rgb = 0
self._timer = 0
self._touched = 0
self.flash_status = 1
self._backlight = 127
self._backlight_type = 'static'
self._ohm = 127
self._ohm_type = 'static'
self._pad_translations = PAD_TRANSLATION
self._device_selection_follows_track_selection = FOLLOW
self._keys_octave = 5
self._keys_scale = 0
self._tempo_buttons = None
with self.component_guard():
self._setup_monobridge()
self._setup_controls()
self._setup_m4l_interface()
self._setup_transport_control()
self._setup_mixer_control()
self._setup_session_control()
self._setup_device_control()
self._setup_crossfader()
self._setup_translations()
self._setup_mod()
self._setup_modes()
self._assign_page_constants()
self._last_device = None
self.song().view.add_selected_track_listener(self._update_selected_device)
self.show_message('OhmModes Control Surface Loaded')
self._send_midi(tuple(switchxfader))
if FORCE_TYPE is True:
self._rgb = FORCE_COLOR_TYPE
else:
self.schedule_message(10, self.query_ohm, None)
self.log_message('<<<<<<<<<<<<<<<<<<<<<<<<< OhmModes ' + str(self._version_check) + ' log opened >>>>>>>>>>>>>>>>>>>>>>>>>')
debug('DEBUG ON for OhmModes script.')
def query_ohm(self):
self._send_midi(tuple(check_model))
def update_display(self):
super(OhmModes, self).update_display()
self._timer = (self._timer + 1) % 256
self.flash()
self.strobe()
def _setup_monobridge(self):
self._monobridge = MonoBridgeElement(self)
self._monobridge.name = 'MonoBridge'
def get_device_bank(self):
return self._device._bank_index
def _setup_controls(self):
is_momentary = True
self._fader = [ None for index in range(8) ]
self._dial = [ None for index in range(16) ]
self._button = [ None for index in range(8) ]
self._menu = [ None for index in range(6) ]
for index in range(8):
self._fader[index] = MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, OHM_FADERS[index], Live.MidiMap.MapMode.absolute, 'Fader_' + str(index), index, self)
for index in range(8):
self._button[index] = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, OHM_BUTTONS[index], 'Button_' + str(index), self)
for index in range(16):
self._dial[index] = MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, OHM_DIALS[index], Live.MidiMap.MapMode.absolute, 'Encoder_' + str(index), index, self)
self._knobs = []
for index in range(12):
self._knobs.append(self._dial[index])
for index in range(6):
self._menu[index] = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, OHM_MENU[index], 'Menu_' + str(index), self)
self._crossfader = EncoderElement(MIDI_CC_TYPE, CHANNEL, CROSSFADER, Live.MidiMap.MapMode.absolute)
self._crossfader.name = 'Crossfader'
self._livid = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, LIVID, 'Livid_Button', self)
self._shift_l = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, SHIFT_L, 'Page_Button_Left', self)
self._shift_r = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, SHIFT_R, 'Page_Button_Right', self)
self._matrix = ButtonMatrixElement()
self._matrix.name = 'Matrix'
self._grid = [ None for index in range(8) ]
self._monomod = ButtonMatrixElement()
self._monomod.name = 'Monomod'
for column in range(8):
self._grid[column] = [ None for index in range(8) ]
for row in range(8):
self._grid[column][row] = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, column * 8 + row, 'Grid_' + str(column) + '_' + str(row), self)
for row in range(5):
button_row = []
for column in range(7):
button_row.append(self._grid[column][row])
self._matrix.add_row(tuple(button_row))
for row in range(8):
button_row = []
for column in range(8):
button_row.append(self._grid[column][row])
self._monomod.add_row(tuple(button_row))
self._dial_matrix = ButtonMatrixElement()
for row in range(3):
dial_row = []
for column in range(4):
dial_row.append(self._dial[column + (row*4)])
self._dial_matrix.add_row(tuple(dial_row))
self._menu_matrix = ButtonMatrixElement([self._menu])
	def _setup_m4l_interface(self):
		"""Create the Max4Live interface component and re-export its control accessors on the script."""
		self._m4l_interface = M4LInterfaceComponent(controls=self.controls, component_guard=self.component_guard)
		# Expose the component's control-access API directly on the control surface
		self.get_control_names = self._m4l_interface.get_control_names
		self.get_control = self._m4l_interface.get_control
		self.grab_control = self._m4l_interface.grab_control
		self.release_control = self._m4l_interface.release_control
	def _setup_translations(self):
		"""Collect the grid (and optionally the dials) into a user-channel TranslationComponent."""
		controls = []
		for array in self._grid:
			for button in array:
				controls.append(button)
		if FADER_BANKING:
			# NOTE(review): this adds self._dial, not self._fader — looks like a
			# copy/paste of the DIAL_BANKING branch below; confirm intent.
			controls = controls + self._dial
		if DIAL_BANKING:
			controls = controls + self._dial
		self._translations = TranslationComponent(controls, USER_CHANNEL)
		self._translations.layer = Layer(priority = 7, channel_selector_buttons = self._menu_matrix)
	def _setup_mod(self):
		"""Create the Monomodular switcher and the Ohm-specific mod handler with its layers."""
		self.monomodular = get_monomodular(self)
		self.monomodular.name = 'monomodular_switcher'
		self.modhandler = OhmModHandler(self)
		self.modhandler.name = 'ModHandler'
		# Base layer: full 8x8 grid plus menu-row navigation and the dial matrix
		self.modhandler.layer = Layer(priority = 5,
			grid = self._monomod,
			nav_up_button = self._menu[2],
			nav_down_button = self._menu[5],
			nav_left_button = self._menu[3],
			nav_right_button = self._menu[4],
			shift_button = self._menu[1],
			alt_button = self._menu[0],
			parameter_controls = self._dial_matrix)
		# Extra layers stacked on top while shift is held (legacy mods get channel/nav submatrices)
		self.modhandler.legacy_shift_mode = AddLayerMode(self.modhandler, Layer(priority = 6,
			channel_buttons = self._monomod.submatrix[:, 1:2],
			nav_matrix = self._monomod.submatrix[4:8, 2:6]))
		self.modhandler.shift_mode = AddLayerMode(self.modhandler, Layer(priority = 6,
			device_selector_matrix = self._monomod.submatrix[:, :1],
			lock_button = self._livid,
			key_buttons = self._monomod.submatrix[:, 7:8]))
		self.modhandler.set_enabled(False)
		self.modhandler.set_mod_button(self._livid)
	def _setup_modes(self):
		"""Create the page/shift selector plus the scale and octave selectors for the keys page."""
		self._shift_mode = ShiftModeComponent(self)
		self._shift_mode.name = 'Shift_Mode'
		#self._shift_mode.set_mode_toggle(self._shift_l, self._shift_r, self._livid)
		self._shift_mode.layer = Layer(priority = 4, mode_toggle1 = self._shift_l, mode_toggle2 = self._shift_r, mode_toggle3 = self._livid)
		self._shift_mode.set_enabled(True)
		self._scale_mode = ScaleModeComponent(self)
		self._scale_mode.name = 'Scale_Mode'
		self._octave_mode = OctaveModeComponent(self)
		self._octave_mode.name = 'Octave_Mode'
	def _setup_transport_control(self):
		"""Create the transport component; its buttons are bound per-page, not here."""
		self._transport = TransportComponent()
		self._transport.name = 'Transport'
		#self._transport.layer = Layer(priority = 4, play_button = self._menu[2], stop_button = self._menu[3])
	def _setup_mixer_control(self):
		"""Create the 7-track SpecialMixerComponent, bind faders, and select track 0."""
		global mixer
		# NOTE(review): is_momentary is unused in this method
		is_momentary = True
		self._num_tracks = 7
		mixer = SpecialMixerComponent(7, 0, True, True)
		mixer.name = 'Mixer'
		self._mixer = mixer
		for index in range(7):
			mixer.channel_strip(index).set_volume_control(self._fader[index])
		for index in range(7):
			mixer.channel_strip(index).name = 'Mixer_ChannelStrip_' + str(index)
			mixer.track_eq(index).name = 'Mixer_EQ_' + str(index)
			# invert so the LED is lit when the track is audible
			mixer.channel_strip(index)._invert_mute_feedback = True
		self.song().view.selected_track = mixer.channel_strip(0)._track
	def _setup_session_control(self):
		"""Create the 7x5 SessionComponent, name its scenes/clip slots, and add the zoom overview."""
		global session
		# NOTE(review): is_momentary is unused in this method
		is_momentary = True
		num_tracks = 7
		num_scenes = 5
		session = SessionComponent(num_tracks, num_scenes)
		session.name = 'Session'
		self._session = session
		session.set_offsets(0, 0)
		# Sized 6 although only 5 scenes are populated (index 5 stays None)
		self._scene = [ None for index in range(6) ]
		for row in range(num_scenes):
			self._scene[row] = session.scene(row)
			self._scene[row].name = 'Scene_' + str(row)
			for column in range(num_tracks):
				clip_slot = self._scene[row].clip_slot(column)
				clip_slot.name = str(column) + '_Clip_Slot_' + str(row)
		session.set_mixer(self._mixer)
		session.set_show_highlight(True)
		self._session_zoom = SessionZoomingComponent(session)
		self._session_zoom.name = 'Session_Overview'
		self.set_highlighting_session_component(self._session)
	def _assign_session_colors(self):
		"""Push the current palette (self._rgb selects RGB vs mono) into session, zoom and grid."""
		self.log_message('assign session colors')
		num_tracks = 7
		num_scenes = 5
		self._session.set_stop_clip_value(STOP_CLIP_COLOR[self._rgb])
		for row in range(num_scenes):
			for column in range(num_tracks):
				self._scene[row].clip_slot(column).set_triggered_to_play_value(CLIP_TRIGD_TO_PLAY_COLOR[self._rgb])
				self._scene[row].clip_slot(column).set_triggered_to_record_value(CLIP_TRIGD_TO_RECORD_COLOR[self._rgb])
				self._scene[row].clip_slot(column).set_stopped_value(CLIP_STOPPED_COLOR[self._rgb])
				self._scene[row].clip_slot(column).set_started_value(CLIP_STARTED_COLOR[self._rgb])
				self._scene[row].clip_slot(column).set_recording_value(CLIP_RECORDING_COLOR[self._rgb])
		self._session_zoom.set_stopped_value(ZOOM_STOPPED_COLOR[self._rgb])
		self._session_zoom.set_playing_value(ZOOM_PLAYING_COLOR[self._rgb])
		self._session_zoom.set_selected_value(ZOOM_SELECTED_COLOR[self._rgb])
		# Force every grid LED to resend so the new palette actually reaches the hardware
		for row in range(8):
			for column in range(8):
				self._grid[column][row].set_force_next_value()
		self._session.on_scene_list_changed()
		self._shift_mode.update()
	def _setup_device_control(self):
		"""Create the device component and navigator; follow behavior comes from the FOLLOW constant."""
		self._device = DeviceComponent()
		self._device.name = 'Device_Component'
		self.set_device_component(self._device)
		self._device_navigator = DetailViewControllerComponent()
		self._device_navigator.name = 'Device_Navigator'
		self._device_selection_follows_track_selection = FOLLOW
def device_follows_track(self, val):
self._device_selection_follows_track_selection = val == 1
return self
	def _setup_crossfader(self):
		"""Bind the hardware crossfader to the mixer's crossfade control."""
		self._mixer.set_crossfader_control(self._crossfader)
	def disconnect(self):
		"""clean things up on disconnect"""
		# Stop following track selection and log shutdown before tearing down
		self.song().view.remove_selected_track_listener(self._update_selected_device)
		self.log_message(time.strftime('%d.%m.%Y %H:%M:%S', time.localtime()) + '--------------= OhmModes log closed =--------------')
		super(OhmModes, self).disconnect()
		# rebuild_sys() is defined elsewhere in this module — presumably restores
		# patched framework state on unload; confirm before changing.
		rebuild_sys()
	def _get_num_tracks(self):
		"""Accessor used by Mono_Framework clients for the script's track count."""
		return self.num_tracks
	def flash(self):
		"""Advance the flash animation on every MonoButtonElement, clocked by self._timer."""
		if(self.flash_status > 0):
			for control in self.controls:
				if isinstance(control, MonoButtonElement):
					control.flash(self._timer)
def strobe(self):
if self._backlight_type != 'static':
if self._backlight_type is 'pulse':
self._backlight = int(math.fabs(self._timer * 16 % 64 - 32) + 32)
if self._backlight_type is 'up':
self._backlight = int(self._timer * 8 % 64 + 16)
if self._backlight_type is 'down':
self._backlight = int(math.fabs(int(self._timer * 8 % 64 - 64)) + 16)
self._send_midi(tuple([176, 27, int(self._backlight)]))
if self._ohm_type != 'static':
if self._ohm_type is 'pulse':
self._ohm = int(math.fabs(self._timer * 16 % 64 - 32) + 32)
if self._ohm_type is 'up':
self._ohm = int(self._timer * 8 % 64 + 16)
if self._ohm_type is 'down':
self._ohm = int(math.fabs(int(self._timer * 8 % 64 - 64)) + 16)
self._send_midi(tuple([176, 63, int(self._ohm)]))
self._send_midi(tuple([176, 31, int(self._ohm)]))
	def deassign_matrix(self):
		"""Release every control binding so the next page's assign_* method starts clean.

		Called by ShiftModeComponent.update() before it schedules a page
		assignment; statement order matters because components share controls.
		"""
		with self.component_guard():
			self.modhandler.set_enabled(False)
			self._translations.set_enabled(False)
			#self.assign_alternate_mappings(0)
			self._scale_mode.set_mode_buttons(None)
			self._scale_mode.set_enabled(False)
			self._octave_mode.set_mode_buttons(None)
			self._octave_mode.set_enabled(False)
			self._session_zoom.set_enabled(False)
			self._session_zoom.set_nav_buttons(None, None, None, None)
			self._session.set_track_bank_buttons(None, None)
			self._session.set_scene_bank_buttons(None, None)
			self._transport.set_enabled(False)
			# Only the first four strips carry EQ gain dials (see assign_page_2)
			for column in range(4):
				self._mixer.track_eq(column)._gain_controls = None
				self._mixer.track_eq(column).set_enabled(False)
			for column in range(7):
				self._mixer.channel_strip(column).set_crossfade_toggle(None)
				self._mixer.channel_strip(column).set_mute_button(None)
				self._mixer.channel_strip(column).set_solo_button(None)
				self._mixer.channel_strip(column).set_arm_button(None)
				self._mixer.channel_strip(column).set_send_controls(None)
				self._mixer.channel_strip(column).set_pan_control(None)
				self._mixer.track_eq(column).set_enabled(False)
				for row in range(5):
					self._scene[row].clip_slot(column).set_launch_button(None)
			# Restore default colors/identity on the track buttons and grid
			for column in range(8):
				self._button[column]._on_value = SELECT_COLOR[self._rgb]
				for row in range(8):
					#self._grid[column][row].set_channel(0)
					self._grid[column][row].release_parameter()
					self._grid[column][row].use_default_message()
					self._grid[column][row].set_enabled(True)
					self._grid[column][row].send_value(0, True)
					self._grid[column][row]._on_value = 127
					self._grid[column][row]._off_value = 0
					self._grid[column][row].force_next_send()
			for index in range(6):
				self._menu[index]._on_value = 127
				self._menu[index]._off_value = 0
			for index in range(16):
				self._dial[index].use_default_message()
				self._dial[index].release_parameter()
			self._device.set_parameter_controls(None)
			self._device.set_enabled(False)
			self._device_navigator.set_enabled(False)
			self._mixer.update()
			self._matrix.reset()
			self.request_rebuild_midi_map()
	def _assign_page_constants(self):
		"""Bindings shared by every page: select/volume per strip, master, prehear, play/stop."""
		with self.component_guard():
			self._session_zoom.set_zoom_button(self._grid[7][7])
			self._session_zoom.set_button_matrix(self._matrix)
			for column in range(7):
				self._mixer.channel_strip(column).set_select_button(self._button[column])
				self._mixer.channel_strip(column).set_volume_control(self._fader[column])
			self._mixer.master_strip().set_volume_control(self._fader[7])
			self._mixer.master_strip().set_select_button(self._button[7])
			self._mixer.set_prehear_volume_control(self._dial[15])
			self._transport.set_play_button(self._menu[0])
			self._menu[0].send_value(PLAY_COLOR[self._rgb], True)
			self._menu[0]._on_value = PLAY_COLOR[self._rgb]
			self._transport.set_stop_button(self._menu[1])
			self._menu[1]._off_value = STOP_COLOR[self._rgb]
			self._menu[1]._on_value = STOP_COLOR[self._rgb]
			self._menu[1].send_value(STOP_COLOR[self._rgb], True)
			self._device_navigator.set_device_nav_buttons(self._menu[3], self._menu[4])
	def assign_page_0(self):
		"""Page 0 (Session): clip-launch grid, scene launch column, mute/solo/arm rows, pans and sends."""
		with self.component_guard():
			self._backlight_type = 'static'
			self._session_zoom.set_enabled(True)
			# Rows 5/6/7 of each track column become mute/solo/arm; rows 0-4 launch clips
			for column in range(7):
				self._grid[column][5]._on_value = MUTE_COLOR[self._rgb]
				self._mixer.channel_strip(column).set_mute_button(self._grid[column][5])
				self._grid[column][6]._on_value = SOLO_COLOR[self._rgb]
				self._mixer.channel_strip(column).set_solo_button(self._grid[column][6])
				self._grid[column][7]._on_value = ARM_COLOR[self._rgb]
				self._mixer.channel_strip(column).set_arm_button(self._grid[column][7])
				self._mixer.channel_strip(column).set_pan_control(self._dial[column + 8])
				for row in range(5):
					self._scene[row].clip_slot(column).set_launch_button(self._grid[column][row])
			for column in range(4):
				self._mixer.channel_strip(column).set_send_controls(tuple([self._dial[column], self._dial[column + 4]]))
			# Rightmost grid column launches scenes
			for index in range(5):
				self._grid[7][index]._off_value = SCENE_LAUNCH_COLOR[self._rgb]
				self._scene[index].set_launch_button(self._grid[7][index])
				self._grid[7][index].set_force_next_value()
				self._grid[7][index].turn_off()
			for index in range(4):
				self._menu[2 + index]._on_value = NAV_BUTTON_COLOR[self._rgb]
			self._session.set_track_bank_buttons(self._menu[4], self._menu[3])
			self._session.set_scene_bank_buttons(self._menu[5], self._menu[2])
			self._menu[0]._on_value = PLAY_COLOR[self._rgb]
			self._menu[1]._off_value = STOP_COLOR[self._rgb]
			self._menu[1]._on_value = STOP_COLOR[self._rgb]
			self._transport.set_enabled(True)
			#self._mixer.update_all()
			self.request_rebuild_midi_map()
			#self.log_message('assign_page_0')
	def assign_page_1(self):
		"""Page 1 (Instrument): drum/bass pads on raw MIDI channels, scale keys, device dials."""
		with self.component_guard():
			self._backlight_type = 'pulse'
			self._session_zoom.set_enabled(False)
			# Bottom-left 4x4 = drum pads, bottom-right 4x4 = bass pads; both are
			# disabled as framework buttons and re-channeled to send raw notes.
			for column in range(4):
				for row in range(4):
					self._grid[column][row].send_value(DRUM_COLOR[self._rgb], True)
					self._grid[column + 4][row].send_value(BASS_COLOR[self._rgb], True)
					self._grid[column][row].set_enabled(False)
					self._grid[column][row]._msg_channel = PAGE1_DRUM_CHANNEL
					self._grid[column][row].set_identifier(PAGE1_DRUM_MAP[column][row])
					self._grid[column + 4][row].set_enabled(False)
					self._grid[column + 4][row]._msg_channel = PAGE1_BASS_CHANNEL
					self._grid[column + 4][row].set_identifier(PAGE1_BASS_MAP[column][row])
			scale_mode_buttons = []
			# Rows 4-6 become a playable keyboard; note numbers combine the key map,
			# the current scale and the octave offset (12 semitones per octave step).
			for column in range(8):
				for row in range(3):
					self._grid[column][row + 4].set_enabled(False)
					self._grid[column][row + 4].send_value(KEYS_COLOR[self._rgb], True)
					self._grid[column][row + 4]._msg_channel = PAGE1_KEYS_CHANNEL
					self._grid[column][row + 4].set_identifier(int(PAGE1_KEYS_MAP[column][row]) + int(PAGE1_MODES_MAP[self._scale_mode._mode_index][column]) + int(self._octave_mode._mode_index * 12))
				for row in range(1):
					scale_mode_buttons.append(self._grid[column][7])
			self._scale_mode.set_mode_buttons(tuple(scale_mode_buttons))
			self._scale_mode.set_enabled(True)
			self._octave_mode.set_mode_buttons(tuple([self._menu[5], self._menu[2]]))
			self._octave_mode.set_enabled(True)
			for column in range(7):
				self._mixer.channel_strip(column).set_send_controls(tuple([self._dial[column + 8]]))
				self._mixer.channel_strip(column).set_arm_button(self._button[column])
			self._device.set_enabled(True)
			device_param_controls = []
			for index in range(8):
				device_param_controls.append(self._dial[index])
			self._device.set_parameter_controls(tuple(device_param_controls))
			self._menu[0]._on_value = PLAY_COLOR[self._rgb]
			for index in range(4):
				self._menu[2 + index]._on_value = DEVICE_NAV_COLOR[self._rgb]
			self._device_navigator.set_enabled(True)
			self._menu[0]._on_value = PLAY_COLOR[self._rgb]
			self._menu[1]._off_value = STOP_COLOR[self._rgb]
			self._menu[1]._on_value = STOP_COLOR[self._rgb]
			self._transport.set_enabled(True)
			self.request_rebuild_midi_map()
	def assign_page_2(self):
		"""Page 2 (Mix): clip grid plus crossfade assigns, EQ gain dials and tempo nudge."""
		with self.component_guard():
			self._backlight_type = 'up'
			self._session_zoom.set_enabled(True)
			for column in range(7):
				self._grid[column][5]._on_value = MUTE_COLOR[self._rgb]
				self._mixer.channel_strip(column).set_mute_button(self._grid[column][5])
				self._grid[column][6]._on_value = CROSSFADE_ASSIGN_COLOR[self._rgb]
				self._mixer.channel_strip(column).set_crossfade_toggle(self._grid[column][6])
				# Bottom row is re-channeled to channel 2 as raw user buttons
				self._grid[column][7]._msg_channel = 2
				self._grid[column][7].set_identifier(column)
				self._grid[column][7].reset()
				self._grid[column][7].set_enabled(False)
				self._grid[column][7].send_value(4, True)
				for row in range(5):
					self._scene[row].clip_slot(column).set_launch_button(self._grid[column][row])
			for row in range(5):
				self._grid[7][row]._off_value = SCENE_LAUNCH_COLOR[self._rgb]
				self._scene[row].set_launch_button(self._grid[7][row])
				self._grid[7][row].set_force_next_value()
				self._grid[7][row].turn_off()
			# First four strips: 3-band EQ gains on the dials (high, mid, low)
			for column in range(4):
				self._mixer.track_eq(column).set_gain_controls(tuple([self._dial[column + 8], self._dial[column + 4], self._dial[column]]))
				self._mixer.track_eq(column).set_enabled(True)
			for column in range(3):
				self._mixer.channel_strip(column + 4).set_pan_control(self._dial[column + 12])
			for index in range(4):
				self._menu[2 + index]._on_value = NAV_BUTTON_COLOR[self._rgb]
			self._session.set_track_bank_buttons(self._menu[4], self._menu[3])
			self._session.set_scene_bank_buttons(self._menu[5], self._menu[2])
			self._set_tempo_buttons([self._grid[7][5], self._grid[7][6]])
			self._menu[0]._on_value = PLAY_COLOR[self._rgb]
			self._menu[1]._off_value = STOP_COLOR[self._rgb]
			self._menu[1]._on_value = STOP_COLOR[self._rgb]
			self._transport.set_enabled(True)
			#self._mixer.update()
			self.request_rebuild_midi_map()
	def assign_mod(self):
		"""Page 3: hand the surface over to the mod handler."""
		self.modhandler.set_enabled(True)
	def assign_translation(self):
		"""Enable the user-channel translation layer set up in _setup_translations."""
		self._translations.set_enabled(True)
def assign_alternate_mappings(self, chan):
for column in range(8):
for row in range(8):
self._grid[column][row].set_channel(chan)
for knob in self._dial:
knob.set_channel(chan)
knob.set_enabled(chan is 0)
self.request_rebuild_midi_map()
def _update_selected_device(self):
if self._device_selection_follows_track_selection is True:
self._update_device_selection()
	def handle_sysex(self, midi_bytes):
		"""Detect the hardware model from a device-inquiry reply and switch color maps.

		The first ten bytes are a universal device-inquiry reply header; the
		eleventh byte distinguishes the model: 7 selects the RGB palette,
		2 selects a flat monochrome map (everything full-bright).
		"""
		#self.log_message('sysex: ' + str(midi_bytes))
		if len(midi_bytes) > 10:
			if midi_bytes[:11] == tuple([240,
			 126,
			 0,
			 6,
			 2,
			 0,
			 1,
			 97,
			 1,
			 0,
			 7]):
				self.log_message(str('>>>color detected'))
				# index 0 selects the RGB entries of the *_COLOR tables
				self._rgb = 0
				for button in self._button:
					button._color_map = COLOR_MAP
				for column in self._grid:
					for button in column:
						button._color_map = COLOR_MAP
			elif midi_bytes[:11] == tuple([240,
			 126,
			 0,
			 6,
			 2,
			 0,
			 1,
			 97,
			 1,
			 0,
			 2]):
				self.log_message(str('>>>mono detected'))
				# index 1 selects the mono entries; every color becomes 127
				self._rgb = 1
				for button in self._button:
					button._color_map = [127 for index in range(0, 7)]
				for column in self._grid:
					for button in column:
						button._color_map = [127 for index in range(0, 7)]
				self._assign_session_colors()
	def to_encoder(self, num, val):
		"""M4L hook: drive device parameter control 'num' from a normalized 0..1 value."""
		rv = int(val * 127)
		self._device._parameter_controls[num].receive_value(rv)
		p = self._device._parameter_controls[num]._parameter_to_map_to
		# Scale the normalized value into the parameter's native range
		newval = val * (p.max - p.min) + p.min
		p.value = newval
	def _set_tempo_buttons(self, buttons):
		"""Bind (or with None, unbind) the [up, down] pair of tempo-nudge buttons."""
		# Detach listeners from any previously assigned pair first
		if self._tempo_buttons != None:
			self._tempo_buttons[0].remove_value_listener(self._tempo_value)
			self._tempo_buttons[1].remove_value_listener(self._tempo_value)
		self._tempo_buttons = buttons
		if buttons != None:
			for button in buttons:
				assert isinstance(button, MonoButtonElement)
			self._tempo_buttons[0].set_on_off_values(4, 0)
			self._tempo_buttons[0].add_value_listener(self._tempo_value, True)
			self._tempo_buttons[1].set_on_off_values(4, 0)
			self._tempo_buttons[1].add_value_listener(self._tempo_value, True)
			self._tempo_buttons[0].turn_on()
			self._tempo_buttons[1].turn_on()
def _tempo_value(self, value, sender):
if value > 0 and self._tempo_buttons.index(sender) == 0:
self.song().tempo = round(min(self.song().tempo + 1, 999))
elif value > 0 and self._tempo_buttons.index(sender) == 1:
self.song().tempo = round(max(self.song().tempo - 1, 20))
def generate_strip_string(self, display_string):
NUM_CHARS_PER_DISPLAY_STRIP = 12
if not display_string:
return ' ' * NUM_CHARS_PER_DISPLAY_STRIP
if len(display_string.strip()) > NUM_CHARS_PER_DISPLAY_STRIP - 1 and display_string.endswith('dB') and display_string.find('.') != -1:
display_string = display_string[:-2]
if len(display_string) > NUM_CHARS_PER_DISPLAY_STRIP - 1:
for um in [' ',
'i',
'o',
'u',
'e',
'a']:
while len(display_string) > NUM_CHARS_PER_DISPLAY_STRIP - 1 and display_string.rfind(um, 1) != -1:
um_pos = display_string.rfind(um, 1)
display_string = display_string[:um_pos] + display_string[um_pos + 1:]
else:
display_string = display_string.center(NUM_CHARS_PER_DISPLAY_STRIP - 1)
ret = u''
for i in range(NUM_CHARS_PER_DISPLAY_STRIP - 1):
if ord(display_string[i]) > 127 or ord(display_string[i]) < 0:
ret += ' '
else:
ret += display_string[i]
ret += ' '
return ret
	def notification_to_bridge(self, name, value, sender):
		"""Forward a control's name/value display strings to the MonoBridge LCD."""
		if isinstance(sender, tuple([MonoButtonElement, MonoEncoderElement])):
			self._monobridge._send(sender.name, 'lcd_name', str(self.generate_strip_string(name)))
			self._monobridge._send(sender.name, 'lcd_value', str(self.generate_strip_string(value)))
def touched(self):
if self._touched is 0:
self._monobridge._send('touch', 'on')
self.schedule_message(2, self.check_touch)
self._touched += 1
def check_touch(self):
if self._touched > 5:
self._touched = 5
elif self._touched > 0:
self._touched -= 1
if self._touched is 0:
self._monobridge._send('touch', 'off')
else:
self.schedule_message(2, self.check_touch)
def get_clip_names(self):
clip_names = []
for scene in self._session._scenes:
for clip_slot in scene._clip_slots:
if clip_slot.has_clip() is True:
clip_names.append(clip_slot._clip_slot)
return clip_slot._clip_slot
return clip_names
class OhmModHandler(ModHandler):
	"""Ohm64-flavored ModHandler: adds a shift mode, grid-offset navigation and
	RGB color translation between mod clients and the 8x8 hardware grid."""
	def __init__(self, *a, **k):
		super(OhmModHandler, self).__init__(*a, **k)
		self._shift_mode = ModesComponent()
		self._color_type = 'RGB'
		self._shift_mode.add_mode('shift', tuple([self._enable_shift, self._disable_shift]), behaviour = CancellableBehaviourWithRelease())
		# 16x16 virtual surface viewed through an 8x8 window
		self.nav_box = self.register_component(NavigationBox(self, 16, 16, 8, 8, self.set_offset))
		self._mod_button = None
	def _enable_shift(self):
		# mode-entry hook: emulate a shift press
		self._shift_value(1)
	def _disable_shift(self):
		# mode-exit hook: emulate a shift release
		self._shift_value(0)
	def set_shift_button(self, button):
		self._shift_mode.set_mode_button('shift', button)
	def set_nav_matrix(self, matrix):
		self.nav_box.set_matrix(matrix)
	def _receive_grid(self, x, y, value, *a, **k):
		#self._receive_grid(x, y, value, *a, **k)
		# Mod client LED update -> hardware grid, shifted by the nav offset
		# for legacy (pre-offset-aware) mods; out-of-window cells are dropped.
		legacy = self.active_mod().legacy
		if self._active_mod:
			if not self._grid_value.subject is None:
				if legacy:
					x = x - self.x_offset
					y = y - self.y_offset
				if x in range(8) and y in range(8):
					try:
						self._grid_value.subject.send_value(x, y, self._colors[value], True)
					except:
						# NOTE(review): silently drops failed LED updates
						pass
	def set_device_selector_matrix(self, matrix):
		self._device_selector.set_matrix(matrix)
	@subject_slot('value')
	def _grid_value(self, value, x, y, *a, **k):
		#self.log_message('_base_grid_value ' + str(x) + str(y) + str(value))
		# Hardware grid press -> forward to the active mod (offset for legacy mods)
		if self.active_mod():
			if self.active_mod().legacy:
				x += self.x_offset
				y += self.y_offset
			self._active_mod.send('grid', x, y, value)
	@subject_slot('value')
	def _shift_value(self, value, *a, **k):
		# Track shift state, notify the mod, then swap the shift layers in/out.
		# shift_mode / legacy_shift_mode are AddLayerModes assigned externally
		# by the script's _setup_mod().
		self._is_shifted = not value is 0
		mod = self.active_mod()
		if mod:
			mod.send('shift', value)
		if self._is_shifted:
			self.shift_mode.enter_mode()
			if mod and mod.legacy:
				self.legacy_shift_mode.enter_mode()
		else:
			self.legacy_shift_mode.leave_mode()
			self.shift_mode.leave_mode()
		self.update()
	def set_mod_button(self, button):
		self._mod_button = button
	def update(self, *a, **k):
		# Restore the active mod's LED state, or reset the grid/keys when no
		# mod is active; the mod button's brightness reflects lock/alt state.
		mod = self.active_mod()
		if self.is_enabled():
			if not mod is None:
				mod.restore()
			else:
				if not self._grid_value.subject is None:
					self._grid_value.subject.reset()
				if not self._keys_value.subject is None:
					self._keys_value.subject.reset()
			self._alt_value.subject and self._alt_value.subject.send_value(2 + int(self.is_alted())*7, True)
			if self._on_lock_value.subject:
				self._on_lock_value.subject.send_value(1 + (int(self.is_locked())*4), True)
			else:
				self._mod_button and self._mod_button.send_value(7 + (not self.active_mod() is None)*7, True)
		else:
			self._mod_button and self._mod_button.send_value((not self.active_mod() is None)*3, True)
| {
"repo_name": "LividInstruments/LiveRemoteScripts",
"path": "Livid_OhmModes_2/OhmModes.py",
"copies": "1",
"size": "40399",
"license": "mit",
"hash": 2752653151209535000,
"line_mean": 33.0345408593,
"line_max": 302,
"alpha_frac": 0.6827149187,
"autogenerated": false,
"ratio": 2.9016016663075486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8573029086335263,
"avg_score": 0.10225749973445691,
"num_lines": 1187
} |
# amounra 0513 : http://www.aumhaa.com
from __future__ import with_statement
import Live
import time
import math
from _Framework.ButtonElement import ButtonElement
from _Framework.ButtonMatrixElement import ButtonMatrixElement
from _Framework.ChannelStripComponent import ChannelStripComponent
from _Framework.ClipSlotComponent import ClipSlotComponent
from _Framework.CompoundComponent import CompoundComponent
from _Framework.ControlElement import ControlElement
from _Framework.ControlSurface import ControlSurface
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
from _Framework.DeviceComponent import DeviceComponent
from _Framework.DisplayDataSource import DisplayDataSource
from _Framework.EncoderElement import EncoderElement
from _Framework.InputControlElement import *
from VCM600.MixerComponent import MixerComponent
from _Framework.ModeSelectorComponent import ModeSelectorComponent
from _Framework.NotifyingControlElement import NotifyingControlElement
from _Framework.SceneComponent import SceneComponent
from _Framework.SessionComponent import SessionComponent
from _Framework.SessionZoomingComponent import DeprecatedSessionZoomingComponent as SessionZoomingComponent
from _Framework.SliderElement import SliderElement
from VCM600.TrackFilterComponent import TrackFilterComponent
from _Framework.TransportComponent import TransportComponent
from _Framework.M4LInterfaceComponent import M4LInterfaceComponent
from _Mono_Framework.MonoBridgeElement import MonoBridgeElement
from _Mono_Framework.MonoEncoderElement import MonoEncoderElement
from _Mono_Framework.MonoBridgeElement import MonoBridgeElement
from _Mono_Framework.DetailViewControllerComponent import DetailViewControllerComponent
from _Mono_Framework.MonoButtonElement import MonoButtonElement
from _Mono_Framework.MonoClient import MonoClient
from _Mono_Framework.CodecEncoderElement import CodecEncoderElement
from _Mono_Framework.EncoderMatrixElement import EncoderMatrixElement
from _Mono_Framework.LiveUtils import *
from _Mono_Framework.ModDevices import *
from _Mono_Framework.Debug import *
from MonomodComponent import MonomodComponent
from SwitchboardElement import SwitchboardElement
from MonoDeviceComponent import MonoDeviceComponent
from _Generic.Devices import *
from Map import *
# Module-level handles to the mixer/session components (also reachable from M4L)
session = None
mixer = None
# Sysex: switch the Ohm64's crossfader assignment
switchxfader = (240, 0, 1, 97, 2, 15, 1, 247)
# Sysex: universal device-inquiry request (reply identifies RGB vs mono hardware)
check_model = (240, 126, 127, 6, 1, 247)
# [row, column] grid coordinates laid out as a 32-pad keyboard
KEYS = [[4, 0],
 [4, 1],
 [4, 2],
 [4, 3],
 [4, 4],
 [4, 5],
 [4, 6],
 [4, 7],
 [6, 0],
 [6, 1],
 [6, 2],
 [6, 3],
 [6, 4],
 [6, 5],
 [6, 6],
 [6, 7],
 [5, 0],
 [5, 1],
 [5, 2],
 [5, 3],
 [5, 4],
 [5, 5],
 [5, 6],
 [5, 7],
 [7, 0],
 [7, 1],
 [7, 2],
 [7, 3],
 [7, 4],
 [7, 5],
 [7, 6],
 [7, 7]]
# Tempo slider range in BPM
TEMPO_TOP = 200.0
TEMPO_BOTTOM = 60.0
# MIDI message-type constants mirroring _Framework.InputControlElement
MIDI_NOTE_TYPE = 0
MIDI_CC_TYPE = 1
MIDI_PB_TYPE = 2
MIDI_MSG_TYPES = (MIDI_NOTE_TYPE, MIDI_CC_TYPE, MIDI_PB_TYPE)
MIDI_NOTE_ON_STATUS = 144
MIDI_NOTE_OFF_STATUS = 128
MIDI_CC_STATUS = 176
MIDI_PB_STATUS = 224
# Step directions used by the octave up/down selector
INC_DEC = [-1, 1]
class ModNumModeComponent(ModeSelectorComponent):
	__module__ = __name__
	__doc__ = ' Special Class that selects mode 0 if a mode button thats active is pressed'
	def __init__(self, script, callback, *a, **k):
		"""script -- owning control surface; callback -- replaces this component's update()."""
		super(ModNumModeComponent, self).__init__(*a, **k)
		self._script = script
		# The supplied callback takes over the update hook entirely
		self.update = callback
		self._modes_buttons = []
		self._set_protected_mode_index(0)
		self._last_mode = 0
	def set_mode_buttons(self, buttons):
		"""Attach (or with None, detach) the buttons that select modes 0..5."""
		for button in self._modes_buttons:
			button.remove_value_listener(self._mode_value)
		self._modes_buttons = []
		if (buttons != None):
			for button in buttons:
				# Fixed: 'isinstance(x, A or B)' evaluates to isinstance(x, A)
				# and never tested MonoButtonElement; check both types.
				assert isinstance(button, (ButtonElement, MonoButtonElement))
				identify_sender = True
				button.add_value_listener(self._mode_value, identify_sender)
				self._modes_buttons.append(button)
	def number_of_modes(self):
		"""Six selectable modes (indices 0..5)."""
		return 6
	def set_mode(self, mode):
		"""Switch to the given mode index and refresh via the injected update callback."""
		assert isinstance(mode, int)
		assert (mode in range(self.number_of_modes()))
		if (self._mode_index != mode):
			self._mode_index = mode
			self.update()
class OctaveModeComponent(ModeSelectorComponent):
	"""Two-button up/down selector for the keys page's octave offset (modes 0..7)."""
	def __init__(self, script, *a, **k):
		super(OctaveModeComponent, self).__init__(*a, **k)
		self._script = script
		# Start three octaves up from the bottom of the range
		self._set_protected_mode_index(3)
	def set_mode_buttons(self, buttons):
		"""Attach the (down, up) button pair and light them to show available headroom."""
		for button in self._modes_buttons:
			button.remove_value_listener(self._mode_value)
		self._modes_buttons = []
		if (buttons != None):
			for button in buttons:
				# Fixed: 'isinstance(x, A or B)' evaluates to isinstance(x, A)
				# and never tested MonoButtonElement; check both types.
				assert isinstance(button, (ButtonElement, MonoButtonElement))
				identify_sender = True
				button.add_value_listener(self._mode_value, identify_sender)
				self._modes_buttons.append(button)
			if (self._mode_index < 6):
				self._modes_buttons[1].turn_on()
			else:
				self._modes_buttons[1].turn_off()
			if (self._mode_index > 0):
				self._modes_buttons[0].turn_on()
			else:
				self._modes_buttons[0].turn_off()
	def set_mode(self, mode):
		"""mode is an INC_DEC index (0=down, 1=up); steps the octave and updates."""
		assert isinstance(mode, int)
		# NOTE(review): clamps at 7 although number_of_modes() reports 7
		# (valid indices 0..6) — kept as-is to preserve behavior.
		mode = max(min(self._mode_index + INC_DEC[mode], 7), 0)
		if (self._mode_index != mode):
			self._mode_index = mode
			self.update()
	def set_mode_toggle(self, button):
		"""Attach (or with None, detach) a single toggle button."""
		# Fixed isinstance idiom here as well
		assert ((button == None) or isinstance(button, (ButtonElement, MonoButtonElement)))
		if (self._mode_toggle != None):
			self._mode_toggle.remove_value_listener(self._toggle_value)
		self._mode_toggle = button
		if (self._mode_toggle != None):
			self._mode_toggle.add_value_listener(self._toggle_value)
	def number_of_modes(self):
		return 7
	def update(self):
		"""Re-key the keyboard rows of the grid and refresh the up/down LEDs."""
		if(self.is_enabled() is True):
			for column in range(8):
				for row in range(3):
					self._script._grid[column][row + 4].set_identifier(int(PAGE1_KEYS_MAP[column][row]) + int(PAGE1_MODES_MAP[self._script._scale_mode._mode_index][column]) + int(self._script._octave_mode._mode_index * 12))
			if (self._mode_index < 6):
				self._modes_buttons[0].turn_on()
			else:
				self._modes_buttons[0].turn_off()
			if (self._mode_index > 0):
				self._modes_buttons[1].turn_on()
			else:
				self._modes_buttons[1].turn_off()
class ShiftModeComponent(ModeSelectorComponent):
	__module__ = __name__
	__doc__ = ' Special Class that uses two shift buttons and is lockable '
	def __init__(self, script, *a, **k):
		super(ShiftModeComponent, self).__init__(*a, **k)
		self._script = script
		self._mode_toggle1 = None
		self._mode_toggle2 = None
		self._mode_toggle3 = None
		self._set_protected_mode_index(0)
		# Remembers the last non-mod page so toggling mod (mode 3) returns to it
		self._last_mode = 0
	def set_mode_toggle(self, button1, button2, button3):
		"""Attach the left-page, right-page and mod toggle buttons (any may be None)."""
		# Fixed in all three asserts: 'isinstance(x, A or B)' only tested A.
		assert ((button1 == None) or isinstance(button1, (ButtonElement, MonoButtonElement)))
		if (self._mode_toggle1 != None):
			self._mode_toggle1.remove_value_listener(self._toggle_value_left)
		self._mode_toggle1 = button1
		if (self._mode_toggle1 != None):
			self._mode_toggle1.add_value_listener(self._toggle_value_left)
		assert ((button2 == None) or isinstance(button2, (ButtonElement, MonoButtonElement)))
		if (self._mode_toggle2 != None):
			self._mode_toggle2.remove_value_listener(self._toggle_value_right)
		self._mode_toggle2 = button2
		if (self._mode_toggle2 != None):
			self._mode_toggle2.add_value_listener(self._toggle_value_right)
		assert ((button3 == None) or isinstance(button3, (ButtonElement, MonoButtonElement)))
		if (self._mode_toggle3 != None):
			self._mode_toggle3.remove_value_listener(self._toggle_value_mod)
		self._mode_toggle3 = button3
		if (self._mode_toggle3 != None):
			self._mode_toggle3.add_value_listener(self._toggle_value_mod)
		self._script.request_rebuild_midi_map()
	def _toggle_value_left(self, value):
		# page-left button -> mode 1
		if(value>0):
			self._toggle_value(1)
	def _toggle_value_right(self, value):
		# page-right button -> mode 2
		if(value>0):
			self._toggle_value(2)
	def _toggle_value_mod(self, value):
		# livid/mod button -> mode 3
		if(value>0):
			self._toggle_value(3)
	def _toggle_value(self, value):
		"""Pressing the active mode's button leaves it: mod returns to the last page, pages return to 0."""
		assert (self._mode_toggle1 != None)
		assert (self._mode_toggle2 != None)
		assert (self._mode_toggle3 != None)
		assert isinstance(value, int)
		# Fixed: int comparisons used 'is' (CPython small-int cache); use '=='.
		if(value == self._mode_index):
			if value == 3:
				self.set_mode(self._last_mode)
			else:
				self.set_mode(0)
		else:
			self.set_mode(value)
	def number_of_modes(self):
		return 4
	def update(self):
		"""Tear down all bindings, light the active toggle, and schedule the page assignment."""
		self._script.deassign_matrix()
		# Fixed: '==' instead of 'is' for the int mode index throughout.
		if(self._mode_index == 0):
			self._mode_toggle1.turn_off()
			self._mode_toggle2.turn_off()
			self._mode_toggle3.turn_off()
			self._script.schedule_message(1, self._script.assign_page_0)
			#self._script.assign_page_0()
		elif(self._mode_index == 1):
			self._mode_toggle1.turn_on()
			self._mode_toggle2.turn_off()
			self._mode_toggle3.turn_off()
			self._script.schedule_message(1, self._script.assign_page_1)
			#self._script.assign_page_1()
		elif(self._mode_index == 2):
			self._mode_toggle1.turn_off()
			self._mode_toggle2.turn_on()
			self._mode_toggle3.turn_off()
			self._script.schedule_message(1, self._script.assign_page_2)
			#self._script.assign_page_2()
		elif(self._mode_index == 3):
			self._mode_toggle1.turn_off()
			self._mode_toggle2.turn_off()
			self._mode_toggle3.turn_on()
			self._script.schedule_message(1, self._script.assign_mod)
			#self._script.assign_mod()
	def set_mode(self, mode):
		"""Switch modes, remembering the last non-mod page for the mod toggle."""
		assert isinstance(mode, int)
		assert (mode in range(self.number_of_modes()))
		if (self._mode_index != mode):
			if mode < 3:
				self._last_mode = mode
			self._mode_index = mode
			self.update()
class SpecialMixerComponent(MixerComponent):
	' Special mixer class that uses return tracks alongside midi and audio tracks'
	__module__ = __name__
	def __init__(self, *a, **k):
		# When locked, the selected strip stops following Live's track selection
		self._is_locked = False #added
		super(SpecialMixerComponent, self).__init__(*a, **k)
	def on_selected_track_changed(self):
		"""Follow Live's track selection (unless locked) and refresh the nav-button LEDs."""
		selected_track = self.song().view.selected_track
		# Idiom fixes: 'is not None' / 'not self._is_locked' instead of
		# '!= None' / '== False'; behavior unchanged.
		if selected_track is not None:
			if (self._selected_strip != None):
				if not self._is_locked: #added
					self._selected_strip.set_track(selected_track)
			if self.is_enabled():
				if (self._next_track_button != None):
					if (selected_track != self.song().master_track):
						self._next_track_button.turn_on()
					else:
						self._next_track_button.turn_off()
				if (self._prev_track_button != None):
					if (selected_track != self.song().tracks[0]):
						self._prev_track_button.turn_on()
					else:
						self._prev_track_button.turn_off()
	def tracks_to_use(self):
		"""Include return tracks after the visible tracks so they get strips too."""
		return tuple(self.song().visible_tracks) + tuple(self.song().return_tracks)
class ScaleModeComponent(ModeSelectorComponent):
	"""Eight-button selector for the keys page's scale (one button per mode)."""
	def __init__(self, script, *a, **k):
		super(ScaleModeComponent, self).__init__(*a, **k)
		self._script = script
		self._set_protected_mode_index(0)
	def set_mode_buttons(self, buttons):
		"""Attach the eight scale buttons and light the one for the active mode."""
		for button in self._modes_buttons:
			button.remove_value_listener(self._mode_value)
		self._modes_buttons = []
		if (buttons != None):
			for button in buttons:
				# Fixed: 'isinstance(x, A or B)' evaluates to isinstance(x, A)
				# and never tested MonoButtonElement; check both types.
				assert isinstance(button, (ButtonElement, MonoButtonElement))
				identify_sender = True
				button.add_value_listener(self._mode_value, identify_sender)
				self._modes_buttons.append(button)
			for index in range(len(self._modes_buttons)):
				if (index == self._mode_index):
					self._modes_buttons[index].turn_on()
				else:
					self._modes_buttons[index].turn_off()
	def set_mode_toggle(self, button):
		"""Attach (or with None, detach) a single toggle button."""
		# Fixed isinstance idiom here as well
		assert ((button == None) or isinstance(button, (ButtonElement, MonoButtonElement)))
		if (self._mode_toggle != None):
			self._mode_toggle.remove_value_listener(self._toggle_value)
		self._mode_toggle = button
		if (self._mode_toggle != None):
			self._mode_toggle.add_value_listener(self._toggle_value)
	def number_of_modes(self):
		return 8
	def update(self):
		"""Re-key the keyboard rows of the grid and light the active scale button."""
		if(self.is_enabled() is True):
			for column in range(8):
				for row in range(3):
					self._script._grid[column][row + 4].set_identifier(int(PAGE1_KEYS_MAP[column][row]) + int(PAGE1_MODES_MAP[self._script._scale_mode._mode_index][column]) + int(self._script._octave_mode._mode_index * 12))
			for index in range(len(self._modes_buttons)):
				if (index == self._mode_index):
					self._modes_buttons[index].turn_on()
				else:
					self._modes_buttons[index].turn_off()
class OhmModesMonoClient(MonoClient):
    """Mod client for the OhmModes script.

    Caches a mod patch's state for a 4x4 grid, 32 keys, 12 wheels (ring
    dials) and 24 knobs, and relays values between the patch and whichever
    host component is currently displaying it.  When raw mode is enabled
    (`set_raw_enabled`), control values are additionally echoed as raw
    MIDI for the physical controls listed in `_control_defs`.
    """

    def __init__(self, *a, **k):
        super(OhmModesMonoClient, self).__init__(*a, **k)
        self._raw = False  # echo raw MIDI for the physical controls when True

    def _banner(self):
        """Suppress the base-class banner output."""
        pass

    def disconnect_client(self, *a, **k):
        """Disconnect, releasing the mod dial's parameter if we still own it.

        BUGFIX: the original called ``super(CntrlrMonoClient, self)`` — a
        copy/paste remnant from the Cntrlr script that raises a TypeError at
        runtime, since this class does not derive from CntrlrMonoClient.
        """
        super(OhmModesMonoClient, self).disconnect_client(*a, **k)
        if not self._mod_dial == None:
            if self._mod_dial._parameter is self._mod_dial_parameter:
                self._mod_dial.release_parameter()

    # --- initiation methods ---

    def _create_grid(self):
        """Zero the 4x4 grid value cache."""
        self._grid = [[0 for row in range(4)] for column in range(4)]

    def _create_keys(self):
        """Zero the 32-key value cache."""
        self._key = [0 for index in range(32)]

    def _create_wheels(self):
        """Build the 4x3 wheel cache; each wheel is a dict of ring/display state."""
        self._wheel = [[] for index in range(4)]
        for column in range(4):
            self._wheel[column] = [[] for index in range(3)]
            for row in range(3):
                self._wheel[column][row] = {'log': 0, 'value': 0, 'mode':0, 'white': 0, 'green': 0, 'custom':'00000000', 'pn':' ', 'pv': '0'}

    def _create_knobs(self):
        """Zero the 24-knob value cache."""
        self._knob = [0 for index in range(24)]

    # --- outgoing values (patch side), with optional raw MIDI echo ---

    def _send_knob(self, index, value):
        self._send('knob', index, value)

    def _send_key(self, index, value):
        self._send('key', index, value)
        if self._raw is True:
            control = self._host._host._keys[index]
            if control != None:
                self._send('raw', control._msg_type + control._original_channel, control._original_identifier, value)

    def _send_grid(self, column, row, value):
        self._send('grid', column, row, value)
        if self._raw is True:
            control = self._host._host._grid.get_button(column, row)
            if control != None:
                self._send('raw', control._msg_type + control._original_channel, control._original_identifier, value)

    def _send_dial(self, column, row, value):
        self._send('dial', column, row, value)
        if self._raw is True:
            control = self._host._host._dial_matrix.get_dial(column, row)
            if control != None:
                self._send('raw', control._msg_type + control._original_channel, control._original_identifier, value)

    def _send_dial_button(self, column, row, value):
        # Row 0 has no dial button; rows are shifted down by one on send.
        if row > 0:
            self._send('dial_button', column, row-1, value)
            if self._raw is True:
                control = self._host._host._dial_button_matrix.get_button(column, row)
                if control != None:
                    self._send('raw', control._msg_type + control._original_channel, control._original_identifier, value)

    def receive_wheel(self, number, parameter, value):
        """Store one wheel field and, if active, forward it to the hosts.

        *number* is a flat 0-11 index (column = number % 4, row = number // 4);
        *parameter* is one of the wheel dict keys ('value', 'mode', 'pn', ...).
        """
        column = number % 4
        row = int(number / 4)
        self._wheel[column][row][parameter] = value
        if self.is_active():
            if parameter == 'pn' or parameter == 'pv':
                # Parameter name/value strings go to the LCD display path.
                for host in self._active_host:
                    host._send_to_lcd(column, row, self._wheel[column][row])
            if parameter != 'white':
                for host in self._active_host:
                    host._send_wheel(column, row, self._wheel[column][row])
            elif row > 0:
                # 'white' (button LED) changes only matter below row 0.
                for host in self._active_host:
                    host._send_wheel(column, row, self._wheel[column][row])

    # --- raw data integration ---

    def set_raw_enabled(self, value):
        """Enable/disable raw MIDI echo; rebuild lookup tables when enabling."""
        self._raw = value > 0
        if (self._raw is True):
            self._update_controls_dictionary()

    def receive_raw(self, Type, Identifier, value):
        """Dispatch an incoming raw message via the prebuilt handler tables."""
        if self._controls[Type]:
            if Identifier in self._controls[Type]:
                self._controls[Type][Identifier](value)

    def _update_controls_dictionary(self):
        """Rebuild the raw-MIDI handler tables from _control_defs.

        self._controls is a pair of dicts keyed by original MIDI identifier:
        index 0 for note-type controls (grid, keys, dial buttons), index 1
        for CC-type controls (dials).
        """
        if self._host._host != None:
            self._controls = [{}, {}]
            if self._control_defs['grid'] != None:
                for column in range(self._control_defs['grid'].width()):
                    for row in range(self._control_defs['grid'].height()):
                        button = self._control_defs['grid'].get_button(column, row)
                        if button != None:
                            self._controls[0][button._original_identifier] = self._make_grid_call(column, row)
            if self._control_defs['keys'] != None:
                for index in range(len(self._control_defs['keys'])):
                    key = self._control_defs['keys'][index]
                    if key != None:
                        self._controls[0][key._original_identifier] = self._make_key_call(index)
            if self._control_defs['dials'] != None:
                for index in range(12):
                    column = index % 4
                    row = int(index / 4)
                    dial = self._control_defs['dials'].get_dial(column, row)
                    if dial != None:
                        self._controls[1][dial._original_identifier] = self._make_dial_call(index)
            if self._control_defs['buttons'] != None:
                for index in range(8):
                    column = index % 4
                    row = int(index / 4) + 1
                    button = self._control_defs['buttons'].get_button(column, row)
                    if button != None:
                        self._controls[0][button._original_identifier] = self._make_dial_button_call(index + 4)

    def _make_grid_call(self, column, row):
        """Closure routing a raw grid value to receive_grid(column, row)."""
        def receive_grid(value):
            self.receive_grid(column, row, value)
        return receive_grid

    def _make_key_call(self, number):
        """Closure routing a raw key value to receive_key(number)."""
        def receive_key(value):
            self.receive_key(number, value)
        return receive_key

    def _make_dial_call(self, number):
        """Closure routing a raw dial value to the wheel's 'value' field."""
        def receive_wheel(value):
            self.receive_wheel(number, 'value', value)
        return receive_wheel

    def _make_dial_button_call(self, number):
        """Closure routing a raw dial-button value to the wheel's 'white' field."""
        def receive_wheel(value):
            self.receive_wheel(number, 'white', value)
        return receive_wheel
class OhmModesMonomodComponent(MonomodComponent):
    __module__ = __name__
    __doc__ = ' Component that encompasses and controls 4 Monomod clients '

    def __init__(self, script, *a, **k):
        super(OhmModesMonomodComponent, self).__init__(script, *a, **k)
        # NOTE(review): 'Cntrlr' looks like a copy/paste remnant from the
        # Cntrlr script — the surrounding script names itself 'Ohm'.
        # Confirm which host name mod patches expect before changing it.
        self._host_name = 'Cntrlr'

    def disconnect(self):
        """Detach all buttons/matrices and drop client/script references."""
        #self._script.log_message('monomod disconnect')
        self.set_allow_update(False) ###added
        self._active_client = None
        self._set_shift_button(None)
        self._set_lock_button(None)
        self._set_nav_buttons(None)
        self._set_key_buttons(None)
        # self._set_dial_matrix(None, None)
        self._set_button_matrix(None)
        self._client = []
        self._script = []
        return None

    def connect_to_clients(self, monomod):
        """Adopt the client list from *monomod* (the main script) and select client 0."""
        self._client = monomod._client
        self._select_client(0)
        #self._active_client._is_active = True
        #self._script.log_message('connected to clients')

    def _select_client(self, number):
        """Make client *number* active: swap color map and grid offsets,
        re-register this host with the client, and push the client's ring
        and absolute-mode settings into the script."""
        self._active_client = self._client[number]
        self._colors = self._color_maps[number]
        # Detach this host from every client before attaching to the new one.
        for client in self._client:
            if self in client._active_host:
                client._active_host.remove(self)
        self._active_client._active_host.append(self)
        # Top-left corner of the 4x4 grid window shown for this client.
        self._x = self._offsets[number][0]
        self._y = self._offsets[number][1]
        self._script.set_local_ring_control(self._active_client._local_ring_control)
        self._script.schedule_message(5, self._script.set_absolute_mode, self._active_client._absolute_mode)
        self._active_client._device_component.set_enabled(self._active_client._device_component._type != None)
        #self._active_client.set_channel()
        self.update()

    def _set_button_matrix(self, grid):
        """Attach (or detach, when *grid* is None) the 4x4 mod button matrix."""
        assert isinstance(grid, (ButtonMatrixElement, type(None)))
        if grid != self._grid:
            if self._grid != None:
                self._grid.remove_value_listener(self._matrix_value)
            self._grid = grid
            if self._grid != None:
                self._grid.add_value_listener(self._matrix_value)
            # Clients cache raw-MIDI lookups keyed by control identifiers,
            # so they must be rebuilt whenever the matrix changes.
            for client in self._client:
                client._update_controls_dictionary()
            self.update()
        return None

    def _matrix_value(self, value, x, y, is_momentary):
        """Forward a grid press to the active client, offset into its window."""
        assert (self._grid != None)
        assert (value in range(128))
        assert isinstance(is_momentary, type(False))
        if (self.is_enabled()):
            self._active_client._send_grid(x + self._x, y + self._y, value)

    def _send_grid(self, column, row, value):
        """Light a grid LED for the client, if it falls inside our 4x4 window."""
        if self.is_enabled() and self._grid != None:
            if column in range(self._x, self._x + 4):
                if row in range(self._y, self._y + 4):
                    self._grid.get_button(column - self._x, row - self._y).send_value(int(self._colors[value]))

    def _alt_value(self, value):
        """Track the alt button and forward its state (ignored while shift held)."""
        if self._shift_pressed == 0:
            self._alt_pressed = value != 0
            self._active_client._send('alt', int(self._alt_pressed))
            self.update()

    def _update_alt_button(self):
        """LED feedback for the alt button."""
        if self._alt_button != None:
            if self._alt_pressed != 0:
                self._alt_button.turn_on()
            else:
                self._alt_button.turn_off()

    def _set_key_buttons(self, buttons, *a, **k):
        """Attach (or detach, when *buttons* is None) the 32 key buttons."""
        assert (buttons == None) or (isinstance(buttons, tuple))
        for key in self._keys:
            if key.value_has_listener(self._key_value):
                key.remove_value_listener(self._key_value)
        self._keys = []
        if buttons != None:
            assert len(buttons) == 32
            for button in buttons:
                #assert isinstance(button, MonoButtonElement)
                self._keys.append(button)
                button.add_value_listener(self._key_value, True)
        for client in self._client:
            client._update_controls_dictionary()

    def _key_value(self, value, sender):
        """Forward a key press (as 0/1) to the active client."""
        if self.is_enabled():
            self._active_client._send_key(self._keys.index(sender), int(value != 0))

    def _update_keys(self):
        """Refresh all 32 key LEDs from the active client's cache."""
        for index in range(32):
            self._send_key(index, self._active_client._key[index])

    def _send_key(self, index, value):
        """Light one key LED through the client's color map."""
        if self.is_enabled():
            #if (self._shift_pressed > 0) or (self._locked > 0):
            #    self._grid.get_button(index, 7).send_value(int(self._colors[value]))
            if self._keys != None and len(self._keys) > index:
                self._keys[index].send_value(int(self._colors[value]))

    def _set_knobs(self, knobs):
        """Attach (or detach, when *knobs* is None) the 24 knob encoders."""
        assert (knobs == None) or (isinstance(knobs, tuple))
        for knob in self._knobs:
            knob.remove_value_listener(self._knob_value)
        self._knobs = []
        if knobs != None:
            assert len(knobs) == 24
            for knob in knobs:
                assert isinstance(knob, EncoderElement)
                self._knobs.append(knob)
                knob.add_value_listener(self._knob_value, True)

    def _knob_value(self, value, sender):
        """Forward a knob turn to the active client."""
        if self.is_enabled():
            self._active_client._send_knob(self._knobs.index(sender), value)

    def on_enabled_changed(self):
        """Reset scroll repeat timers and sync device/ring/absolute modes
        with the enabled state."""
        self._scroll_up_ticks_delay = -1
        self._scroll_down_ticks_delay = -1
        self._scroll_right_ticks_delay = -1
        self._scroll_left_ticks_delay = -1
        if self.is_enabled():
            self._active_client._device_component.set_enabled(self._active_client._device_component._type != None)
            self._script.set_absolute_mode(self._active_client._absolute_mode)
            self._script.set_local_ring_control(self._active_client._local_ring_control)
        else:
            self._active_client._device_component.set_enabled(False)
            self._script.set_absolute_mode(1)
            self._script.set_local_ring_control(1)
        self.update()

    def _set_dial_matrix(self, dial_matrix, button_matrix):
        """Attach (or detach) the 3x4 dial matrix and its button matrix."""
        assert isinstance(dial_matrix, (EncoderMatrixElement, type(None)))
        if dial_matrix != self._dial_matrix:
            if self._dial_matrix != None:
                self._dial_matrix.remove_value_listener(self._dial_matrix_value)
            self._dial_matrix = dial_matrix
            if self._dial_matrix != None:
                self._dial_matrix.add_value_listener(self._dial_matrix_value)
        assert isinstance(button_matrix, (ButtonMatrixElement, type(None)))
        if button_matrix != self._dial_button_matrix:
            if self._dial_button_matrix != None:
                self._dial_button_matrix.remove_value_listener(self._dial_button_matrix_value)
            self._dial_button_matrix = button_matrix
            if self._dial_button_matrix != None:
                self._dial_button_matrix.add_value_listener(self._dial_button_matrix_value)
            for client in self._client:
                client._update_controls_dictionary()
        self.update()
        return None

    def _dial_matrix_value(self, value, x, y):
        """Forward a dial move; in relative mode, translate to inc/dec codes."""
        if self.is_enabled() and self._active_client != None:
            if self._script._absolute_mode == 0:
                value = RELATIVE[int(value == 1)]
            self._active_client._send_dial(x, y, value)

    def _reset_encoder(self, coord):
        self._dial_matrix.get_dial(coord[0], coord[1])._reset_to_center()

    def _dial_button_matrix_value(self, value, x, y, force):
        """Forward a dial-button press to the active client."""
        if (self.is_enabled()) and (self._active_client != None):
            self._active_client._send_dial_button(x, y, value)

    def _send_wheel(self, column, row, wheel):
        """Push one wheel dict from the client into the physical dial's ring
        state (*wheel* keys: value/mode/green/log/custom/white)."""
        if self.is_enabled() and wheel != None:
            if column < 4 and row < 3:
                dial = self._dial_matrix.get_dial(column, row)
                dial._ring_value = int(wheel['value'])
                dial._ring_mode = int(wheel['mode'])
                dial._ring_green = int(wheel['green'] != 0)
                dial._ring_log = int(wheel['log'])
                # recomputed on every send; flagged as wasteful by the author
                dial._ring_custom = dial._calculate_custom(str(wheel['custom']))
                self._dial_button_matrix.send_value(column, row, wheel['white'])
                if (self._script._absolute_mode > 0) and (not self._active_client._device_component.is_enabled()):
                    dial.send_value(wheel['log'], True)

    def _send_to_lcd(self, column, row, wheel):
        """Show a wheel's parameter name/value on the bridge display
        (suppressed while the client's device component owns the dials)."""
        if self.is_enabled() and not self._active_client._device_component.is_enabled():
            self._script.notification_to_bridge(str(wheel['pn']), str(wheel['pv']), self._dial_matrix.get_dial(column, row))

    def _update_wheel(self):
        """Refresh all 12 wheels (and the LCD, when applicable) from the cache."""
        if self._dial_button_matrix != None:
            for column in range(4):
                for row in range(3):
                    self._send_wheel(column, row, self._active_client._wheel[column][row])
                    if not self._active_client._device_component.is_enabled():
                        self._send_to_lcd(column, row, self._active_client._wheel[column][row])
class OhmModes(ControlSurface):
__module__ = __name__
__doc__ = ' OhmModes controller script '
def __init__(self, c_instance):
    """Build the OhmModes control surface.

    *c_instance* is the bridge object Live hands to every control surface.
    All component construction happens inside component_guard(), as the
    ControlSurface framework requires.
    """
    super(OhmModes, self).__init__(c_instance)
    self._version_check = 'b994'  # version tag written to the log
    self._host_name = 'Ohm'
    self._color_type = 'OhmRGB'
    self._hosts = []
    self.hosts = []
    self._client = [ None for index in range(6) ]  # six mod client slots
    self._active_client = None
    self._rgb = 0  # index into the *_COLOR tables; may change after the model query
    self._timer = 0  # incremented once per update_display tick (mod 256)
    self._touched = 0
    self.flash_status = 1  # global enable for button flashing
    self._backlight = 127
    self._backlight_type = 'static'  # 'static' | 'pulse' | 'up' | 'down' (see strobe)
    self._ohm = 127
    self._ohm_type = 'static'
    self._pad_translations = PAD_TRANSLATION
    self._device_selection_follows_track_selection = FOLLOW
    self._keys_octave = 5
    self._keys_scale = 0
    self._tempo_buttons = None
    with self.component_guard():
        self._setup_monobridge()
        self._setup_controls()
        self._setup_m4l_interface()
        self._setup_transport_control()
        self._setup_mixer_control()
        self._setup_session_control()
        self._setup_device_control()
        self._setup_crossfader()
        self._setup_ohmmod()
        self._setup_switchboard()
        self._setup_modes()
        self._assign_page_constants()
        self._last_device = None
        self.song().view.add_selected_track_listener(self._update_selected_device)
        self.show_message('OhmModes Control Surface Loaded')
        self._send_midi(tuple(switchxfader))
        if FORCE_TYPE is True:
            # Color type is pinned by configuration; skip the hardware query.
            self._rgb = FORCE_COLOR_TYPE
        else:
            # Ask the hardware which model it is; the reply presumably
            # determines self._rgb (handler not visible in this chunk).
            self.schedule_message(10, self.query_ohm, None)
    self.log_message('<<<<<<<<<<<<<<<<<<<<<<<<< OhmModes ' + str(self._version_check) + ' log opened >>>>>>>>>>>>>>>>>>>>>>>>>')
def query_ohm(self):
    """Send the `check_model` sysex query to the hardware (scheduled from
    __init__ when the color type is not forced)."""
    self._send_midi(tuple(check_model))
def update_display(self):
    """Periodic tick from Live: advance the timer, then drive button
    flashing and the backlight/Ohm strobe effects."""
    ControlSurface.update_display(self)
    self._timer = (self._timer + 1) % 256
    self.flash()
    self.strobe()
def _setup_monobridge(self):
    """Create the MonoBridge element used for display/bridge notifications."""
    self._monobridge = MonoBridgeElement(self)
    self._monobridge.name = 'MonoBridge'
def get_device_bank(self):
    """Return the device component's current bank index."""
    return self._device._bank_index
def _setup_controls(self):
    """Instantiate every physical control element and group them into matrices.

    Creates 8 faders, 8 track buttons, 16 dials, 6 menu buttons, the
    crossfader, livid/shift buttons and the 8x8 grid, then derives the
    session (7x5), monomod (8x8), mod (4x4), dial (3x4), dial-button (4x3)
    and key (1x16) matrices from them.
    """
    is_momentary = True
    self._fader = [ None for index in range(8) ]
    self._dial = [ None for index in range(16) ]
    self._button = [ None for index in range(8) ]
    self._menu = [ None for index in range(6) ]
    for index in range(8):
        self._fader[index] = MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, OHM_FADERS[index], Live.MidiMap.MapMode.absolute, 'Fader_' + str(index), index, self)
    for index in range(8):
        self._button[index] = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, OHM_BUTTONS[index], 'Button_' + str(index), self)
    for index in range(16):
        self._dial[index] = CodecEncoderElement(MIDI_CC_TYPE, CHANNEL, OHM_DIALS[index], Live.MidiMap.MapMode.absolute, 'Encoder_' + str(index), index, self)
    # The first 12 dials double as the mod "knobs".
    self._knobs = []
    for index in range(12):
        self._knobs.append(self._dial[index])
    for index in range(6):
        self._menu[index] = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, OHM_MENU[index], 'Menu_' + str(index), self)
    self._crossfader = EncoderElement(MIDI_CC_TYPE, CHANNEL, CROSSFADER, Live.MidiMap.MapMode.absolute)
    self._crossfader.name = 'Crossfader'
    self._livid = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, LIVID, 'Livid_Button', self)
    self._shift_l = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, SHIFT_L, 'Page_Button_Left', self)
    self._shift_r = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, SHIFT_R, 'Page_Button_Right', self)
    self._matrix = ButtonMatrixElement()
    self._matrix.name = 'Matrix'
    self._grid = [ None for index in range(8) ]
    self._monomod = ButtonMatrixElement()
    self._monomod.name = 'Monomod'
    # 8x8 grid; note identifiers run column-major (column * 8 + row).
    for column in range(8):
        self._grid[column] = [ None for index in range(8) ]
        for row in range(8):
            self._grid[column][row] = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, column * 8 + row, 'Grid_' + str(column) + '_' + str(row), self)
    # Session matrix: the top-left 7x5 of the grid.
    for row in range(5):
        button_row = []
        for column in range(7):
            button_row.append(self._grid[column][row])
        self._matrix.add_row(tuple(button_row))
    # Monomod matrix mirrors the full 8x8 grid.
    for row in range(8):
        button_row = []
        for column in range(8):
            button_row.append(self._grid[column][row])
        self._monomod.add_row(tuple(button_row))
    self._mod_matrix = ButtonMatrixElement()
    self._mod_matrix.name = 'Matrix'
    self._dial_matrix = EncoderMatrixElement(self)
    self._dial_matrix.name = 'Dial_Matrix'
    self._dial_button_matrix = ButtonMatrixElement()
    self._dial_button_matrix.name = 'Dial_Button_Matrix'
    # Mod matrix: the right-hand 4x4 of the grid.
    for row in range(4):
        button_row = []
        for column in range(4):
            button_row.append(self._grid[column + 4][row])
        self._mod_matrix.add_row(tuple(button_row))
    # Dial matrix: the first 12 dials as 3 rows of 4.
    for row in range(3):
        dial_row = []
        for column in range(4):
            dial_row.append(self._dial[row * 4 + column])
        self._dial_matrix.add_row(tuple(dial_row))
    # Dial-button matrix: the left-hand 4x3 of the grid.
    for row in range(3):
        dial_button_row = []
        for column in range(4):
            dial_button_row.append(self._grid[column][row])
        self._dial_button_matrix.add_row(tuple(dial_button_row))
    # 32 playable keys mapped out of the grid via the KEYS coordinate table.
    self._key = [ self._grid[KEYS[index][1]][KEYS[index][0]] for index in range(32) ]
    self._encoder = [ self._dial[index] for index in range(12) ]
    # Key matrix exposes the second half of the keys as one 16-wide row.
    self._key_matrix = ButtonMatrixElement()
    button_row = []
    for column in range(16):
        button_row.append(self._key[16 + column])
    self._key_matrix.add_row(tuple(button_row))
def _setup_m4l_interface(self):
    """Expose the control elements to Max-for-Live via M4LInterfaceComponent."""
    self._m4l_interface = M4LInterfaceComponent(controls=self.controls, component_guard=self.component_guard)
    # Re-export the interface's accessors on the script itself.
    self.get_control_names = self._m4l_interface.get_control_names
    self.get_control = self._m4l_interface.get_control
    self.grab_control = self._m4l_interface.grab_control
    self.release_control = self._m4l_interface.release_control
def _setup_ohmmod(self):
    """Create the Monomod host component and its six mod clients."""
    self._host = OhmModesMonomodComponent(self)
    self._host.name = 'Monomod_Host'
    self.hosts = [self._host]
    self._hosts = [self._host]
    for index in range(6):
        self._client[index] = OhmModesMonoClient(self, index)
        self._client[index].name = 'Client_' + str(index)
        self._client[index]._device_component = MonoDeviceComponent(self._client[index], MOD_BANK_DICT, MOD_TYPES)
        #self._client[index]._device_component.set_parameter_controls(tuple([ self._dial[num] for num in range(12) ]))
        # Physical controls each client may mirror when raw mode is on.
        self._client[index]._control_defs = {'dials': self._dial_matrix,
         'buttons': self._dial_button_matrix,
         'grid': self._mod_matrix,
         'keys': self._key,
         'knobs': [ self._dial[num + 12] for num in range(4) ]}
    self._host._set_parameter_controls(self._dial)
    # Client 0 starts out active.
    self._host._active_client = self._client[0]
    self._host._active_client._is_active = True
    self._host.connect_to_clients(self)
def _setup_switchboard(self):
    """Create the switchboard element that routes between the mod clients."""
    self._switchboard = SwitchboardElement(self, self._client)
    self._switchboard.name = 'Switchboard'
def _setup_modes(self):
    """Create the shift/scale/octave/mod-number mode selectors and wire
    their buttons."""
    self._shift_mode = ShiftModeComponent(self)
    self._shift_mode.name = 'Shift_Mode'
    self._shift_mode.set_mode_toggle(self._shift_l, self._shift_r, self._livid)
    self._scale_mode = ScaleModeComponent(self)
    self._scale_mode.name = 'Scale_Mode'
    self._octave_mode = OctaveModeComponent(self)
    self._octave_mode.name = 'Octave_Mode'
    self._modNum = ModNumModeComponent(self, self.modNum_update)
    self._modNum.name = 'Mod_Number'
    # Monkey-patch set_mode so mode changes are ignored while the
    # component is disabled (see _modNum_set_mode).
    self._modNum.set_mode = self._modNum_set_mode(self._modNum)
    self._modNum.set_mode_buttons([ self._menu[index] for index in range(6) ])
def _modNum_set_mode(self, modNum):
def set_mode(mode):
if modNum._is_enabled == True:
assert isinstance(mode, int)
assert (mode in range(modNum.number_of_modes()))
if (modNum._mode_index != mode):
modNum._mode_index = mode
modNum.update()
return set_mode
def _setup_transport_control(self):
    """Create the transport component (buttons bound elsewhere)."""
    self._transport = TransportComponent()
    self._transport.name = 'Transport'
def _setup_mixer_control(self):
    """Create the 7-strip SpecialMixerComponent (kept in module-global
    `mixer` as well as self._mixer) and bind the volume faders."""
    global mixer
    is_momentary = True  # NOTE(review): unused in this method
    self._num_tracks = 7
    # args: (num_tracks, num_returns, with_eqs, with_filters)
    mixer = SpecialMixerComponent(7, 0, True, False)
    mixer.name = 'Mixer'
    self._mixer = mixer
    for index in range(7):
        mixer.channel_strip(index).set_volume_control(self._fader[index])
    for index in range(7):
        mixer.channel_strip(index).name = 'Mixer_ChannelStrip_' + str(index)
        mixer.track_eq(index).name = 'Mixer_EQ_' + str(index)
        mixer.channel_strip(index)._invert_mute_feedback = True
    # Start with the first strip's track selected in Live.
    self.song().view.selected_track = mixer.channel_strip(0)._track
def _setup_session_control(self):
    """Create the 7-track x 5-scene session component (module-global
    `session`), its named scenes/clip slots, and the zoom component."""
    global session
    is_momentary = True  # NOTE(review): unused in this method
    num_tracks = 7
    num_scenes = 5
    session = SessionComponent(num_tracks, num_scenes)
    session.name = 'Session'
    self._session = session
    session.set_offsets(0, 0)
    self._scene = [ None for index in range(6) ]
    for row in range(num_scenes):
        self._scene[row] = session.scene(row)
        self._scene[row].name = 'Scene_' + str(row)
        for column in range(num_tracks):
            clip_slot = self._scene[row].clip_slot(column)
            clip_slot.name = str(column) + '_Clip_Slot_' + str(row)
    session.set_mixer(self._mixer)
    session.set_show_highlight(True)
    self._session_zoom = SessionZoomingComponent(session)
    self._session_zoom.name = 'Session_Overview'
    self.set_highlighting_session_component(self._session)
def _assign_session_colors(self):
    """Push the current palette (indexed by self._rgb) into the session
    clip slots and the zoom component, then force a grid refresh."""
    self.log_message('assign session colors')
    num_tracks = 7
    num_scenes = 5
    self._session.set_stop_clip_value(STOP_CLIP_COLOR[self._rgb])
    for row in range(num_scenes):
        for column in range(num_tracks):
            self._scene[row].clip_slot(column).set_triggered_to_play_value(CLIP_TRIGD_TO_PLAY_COLOR[self._rgb])
            self._scene[row].clip_slot(column).set_triggered_to_record_value(CLIP_TRIGD_TO_RECORD_COLOR[self._rgb])
            self._scene[row].clip_slot(column).set_stopped_value(CLIP_STOPPED_COLOR[self._rgb])
            self._scene[row].clip_slot(column).set_started_value(CLIP_STARTED_COLOR[self._rgb])
            self._scene[row].clip_slot(column).set_recording_value(CLIP_RECORDING_COLOR[self._rgb])
    self._session_zoom.set_stopped_value(ZOOM_STOPPED_COLOR[self._rgb])
    self._session_zoom.set_playing_value(ZOOM_PLAYING_COLOR[self._rgb])
    self._session_zoom.set_selected_value(ZOOM_SELECTED_COLOR[self._rgb])
    # Force every grid LED to resend so the new palette actually shows.
    for row in range(8):
        for column in range(8):
            self._grid[column][row].set_force_next_value()
    self._session.on_scene_list_changed()
    self._shift_mode.update()
def _setup_device_control(self):
    """Create the device component and its view navigator."""
    self._device = DeviceComponent()
    self._device.name = 'Device_Component'
    self.set_device_component(self._device)
    self._device_navigator = DetailViewControllerComponent()
    self._device_navigator.name = 'Device_Navigator'
    self._device_selection_follows_track_selection = FOLLOW
def device_follows_track(self, val):
    """Enable/disable device-follows-track selection.

    *val* of 1 enables following; anything else disables it.  Returns
    self so the call can be chained.
    """
    follows = (val == 1)
    self._device_selection_follows_track_selection = follows
    return self
def _setup_crossfader(self):
    """Bind the physical crossfader to the mixer."""
    self._mixer.set_crossfader_control(self._crossfader)
def disconnect(self):
    """Clean things up on disconnect: drop the track listener, close the
    log with a timestamp, and rebuild the monomodular sys registry."""
    self.song().view.remove_selected_track_listener(self._update_selected_device)
    self.log_message(time.strftime('%d.%m.%Y %H:%M:%S', time.localtime()) + '--------------= OhmModes log closed =--------------')
    super(OhmModes, self).disconnect()
    rebuild_sys()
def _get_num_tracks(self):
    # NOTE(review): returns self.num_tracks, but setup only ever assigns
    # self._num_tracks — confirm the base ControlSurface provides a
    # `num_tracks` attribute, otherwise this raises AttributeError.
    return self.num_tracks
def flash(self):
    """Advance the flash animation of every MonoButtonElement, driven by
    the shared timer; does nothing while flash_status is off."""
    if self.flash_status <= 0:
        return
    for control in self.controls:
        if isinstance(control, MonoButtonElement):
            control.flash(self._timer)
def strobe(self):
    """Animate the keyboard backlight (CC 27) and Ohm LED brightness
    (CCs 63/31) according to the current animation type.

    Types: 'static' (no message sent), 'pulse' (triangle wave),
    'up' / 'down' (rising / falling ramps), all derived from self._timer.

    BUGFIX: the type strings were compared with `is`, which relies on
    CPython small-string interning and emits a SyntaxWarning on modern
    Pythons; `==` is the correct comparison.
    """
    if self._backlight_type != 'static':
        if self._backlight_type == 'pulse':
            self._backlight = int(math.fabs(self._timer * 16 % 64 - 32) + 32)
        if self._backlight_type == 'up':
            self._backlight = int(self._timer * 8 % 64 + 16)
        if self._backlight_type == 'down':
            self._backlight = int(math.fabs(int(self._timer * 8 % 64 - 64)) + 16)
        self._send_midi(tuple([176, 27, int(self._backlight)]))
    if self._ohm_type != 'static':
        if self._ohm_type == 'pulse':
            self._ohm = int(math.fabs(self._timer * 16 % 64 - 32) + 32)
        if self._ohm_type == 'up':
            self._ohm = int(self._timer * 8 % 64 + 16)
        if self._ohm_type == 'down':
            self._ohm = int(math.fabs(int(self._timer * 8 % 64 - 64)) + 16)
        self._send_midi(tuple([176, 63, int(self._ohm)]))
        self._send_midi(tuple([176, 31, int(self._ohm)]))
def deassign_matrix(self):
    """Release every page-specific control binding and reset the grid.

    Called before each assign_page_* / assign_mod so the new page starts
    from a clean slate; finishes by requesting a MIDI map rebuild.
    (A dead triple-quoted string that duplicated the grid-reset loop in
    the original has been removed.)
    """
    with self.component_guard():
        self._host._set_knobs(None)
        self._host._set_button_matrix(None)
        self._host.set_enabled(False)
        self._modNum.set_enabled(False)
        self.assign_alternate_mappings(0)
        self._scale_mode.set_mode_buttons(None)
        self._scale_mode.set_enabled(False)
        self._octave_mode.set_mode_buttons(None)
        self._octave_mode.set_enabled(False)
        self._session_zoom.set_enabled(False)
        self._session_zoom.set_nav_buttons(None, None, None, None)
        self._session.set_track_bank_buttons(None, None)
        self._session.set_scene_bank_buttons(None, None)
        self._transport.set_enabled(False)
        for column in range(4):
            self._mixer.track_eq(column)._gain_controls = None
            self._mixer.track_eq(column).set_enabled(False)
        for column in range(7):
            self._mixer.channel_strip(column).set_crossfade_toggle(None)
            self._mixer.channel_strip(column).set_mute_button(None)
            self._mixer.channel_strip(column).set_solo_button(None)
            self._mixer.channel_strip(column).set_arm_button(None)
            self._mixer.channel_strip(column).set_send_controls(None)
            self._mixer.channel_strip(column).set_pan_control(None)
            self._mixer.track_eq(column).set_enabled(False)
            for row in range(5):
                self._scene[row].clip_slot(column).set_launch_button(None)
        # Restore every grid button to its default message and dark state.
        for column in range(8):
            self._button[column]._on_value = SELECT_COLOR[self._rgb]
            for row in range(8):
                #self._grid[column][row].set_channel(0)
                self._grid[column][row].release_parameter()
                self._grid[column][row].use_default_message()
                self._grid[column][row].set_enabled(True)
                self._grid[column][row].send_value(0, True)
                self._grid[column][row]._on_value = 127
                self._grid[column][row]._off_value = 0
                self._grid[column][row].force_next_send()
        for index in range(6):
            self._menu[index]._on_value = 127
            self._menu[index]._off_value = 0
        for index in range(16):
            self._dial[index].use_default_message()
            self._dial[index].release_parameter()
        self._device.set_parameter_controls(None)
        self._device.set_enabled(False)
        self._device_navigator.set_enabled(False)
        self._mixer.update()
        self._matrix.reset()
        self.request_rebuild_midi_map()
def _assign_page_constants(self):
    """Bindings shared by every page: session zoom, track select/volume,
    prehear, transport play/stop, and device navigation."""
    with self.component_guard():
        self._session_zoom.set_zoom_button(self._grid[7][7])
        self._session_zoom.set_button_matrix(self._matrix)
        for column in range(7):
            self._mixer.channel_strip(column).set_select_button(self._button[column])
            self._mixer.channel_strip(column).set_volume_control(self._fader[column])
        # Rightmost fader/button belong to the master strip.
        self._mixer.master_strip().set_volume_control(self._fader[7])
        self._mixer.master_strip().set_select_button(self._button[7])
        self._mixer.set_prehear_volume_control(self._dial[15])
        self._transport.set_play_button(self._menu[0])
        self._menu[0].send_value(PLAY_COLOR[self._rgb], True)
        self._menu[0]._on_value = PLAY_COLOR[self._rgb]
        self._transport.set_stop_button(self._menu[1])
        self._menu[1]._off_value = STOP_COLOR[self._rgb]
        self._menu[1]._on_value = STOP_COLOR[self._rgb]
        self._menu[1].send_value(STOP_COLOR[self._rgb], True)
        self._device_navigator.set_device_nav_buttons(self._menu[3], self._menu[4])
def assign_page_0(self):
    """Page 0: clip-launch / mixer layout (mutes, solos, arms, pans, sends,
    scene launch on the rightmost grid column)."""
    with self.component_guard():
        self._backlight_type = 'static'
        self._session_zoom.set_enabled(True)
        for column in range(7):
            # rows 5/6/7 of each track column: mute / solo / arm
            self._grid[column][5]._on_value = MUTE_COLOR[self._rgb]
            self._mixer.channel_strip(column).set_mute_button(self._grid[column][5])
            self._grid[column][6]._on_value = SOLO_COLOR[self._rgb]
            self._mixer.channel_strip(column).set_solo_button(self._grid[column][6])
            self._grid[column][7]._on_value = ARM_COLOR[self._rgb]
            self._mixer.channel_strip(column).set_arm_button(self._grid[column][7])
            self._mixer.channel_strip(column).set_pan_control(self._dial[column + 8])
            for row in range(5):
                self._scene[row].clip_slot(column).set_launch_button(self._grid[column][row])
        # Only the first four strips get send controls (pairs of dials).
        for column in range(4):
            self._mixer.channel_strip(column).set_send_controls(tuple([self._dial[column], self._dial[column + 4]]))
        # The rightmost grid column launches scenes.
        for index in range(5):
            self._grid[7][index]._off_value = SCENE_LAUNCH_COLOR[self._rgb]
            self._scene[index].set_launch_button(self._grid[7][index])
            self._grid[7][index].set_force_next_value()
            self._grid[7][index].turn_off()
        for index in range(4):
            self._menu[2 + index]._on_value = NAV_BUTTON_COLOR[self._rgb]
        self._session.set_track_bank_buttons(self._menu[4], self._menu[3])
        self._session.set_scene_bank_buttons(self._menu[5], self._menu[2])
        self._menu[0]._on_value = PLAY_COLOR[self._rgb]
        self._menu[1]._off_value = STOP_COLOR[self._rgb]
        self._menu[1]._on_value = STOP_COLOR[self._rgb]
        self._transport.set_enabled(True)
        #self._mixer.update_all()
        self.request_rebuild_midi_map()
    #self.log_message('assign_page_0')
def assign_page_1(self):
    """Page 1: instrument layout — drum pads, bass pads, scale keys and
    device parameter dials."""
    with self.component_guard():
        self._backlight_type = 'pulse'
        self._session_zoom.set_enabled(False)
        # Top four rows: left 4x4 = drum pads, right 4x4 = bass notes.
        # set_enabled(False) makes the buttons pass their MIDI straight
        # to Live on the page's note channel/identifier.
        for column in range(4):
            for row in range(4):
                self._grid[column][row].send_value(DRUM_COLOR[self._rgb], True)
                self._grid[column + 4][row].send_value(BASS_COLOR[self._rgb], True)
                self._grid[column][row].set_enabled(False)
                self._grid[column][row]._msg_channel = PAGE1_DRUM_CHANNEL
                self._grid[column][row].set_identifier(PAGE1_DRUM_MAP[column][row])
                self._grid[column + 4][row].set_enabled(False)
                self._grid[column + 4][row]._msg_channel = PAGE1_BASS_CHANNEL
                self._grid[column + 4][row].set_identifier(PAGE1_BASS_MAP[column][row])
        scale_mode_buttons = []
        # Rows 4-6: playable keys, transposed by scale mode and octave.
        for column in range(8):
            for row in range(3):
                self._grid[column][row + 4].set_enabled(False)
                self._grid[column][row + 4].send_value(KEYS_COLOR[self._rgb], True)
                self._grid[column][row + 4]._msg_channel = PAGE1_KEYS_CHANNEL
                self._grid[column][row + 4].set_identifier(int(PAGE1_KEYS_MAP[column][row]) + int(PAGE1_MODES_MAP[self._scale_mode._mode_index][column]) + int(self._octave_mode._mode_index * 12))
            # Bottom row selects the scale mode (range(1): one iteration).
            for row in range(1):
                scale_mode_buttons.append(self._grid[column][7])
        self._scale_mode.set_mode_buttons(tuple(scale_mode_buttons))
        self._scale_mode.set_enabled(True)
        self._octave_mode.set_mode_buttons(tuple([self._menu[5], self._menu[2]]))
        self._octave_mode.set_enabled(True)
        for column in range(7):
            self._mixer.channel_strip(column).set_send_controls(tuple([self._dial[column + 8]]))
            self._mixer.channel_strip(column).set_arm_button(self._button[column])
        self._device.set_enabled(True)
        # First eight dials control the selected device's parameters.
        device_param_controls = []
        for index in range(8):
            device_param_controls.append(self._dial[index])
        self._device.set_parameter_controls(tuple(device_param_controls))
        self._menu[0]._on_value = PLAY_COLOR[self._rgb]
        for index in range(4):
            self._menu[2 + index]._on_value = DEVICE_NAV_COLOR[self._rgb]
        self._device_navigator.set_enabled(True)
        self._menu[0]._on_value = PLAY_COLOR[self._rgb]
        self._menu[1]._off_value = STOP_COLOR[self._rgb]
        self._menu[1]._on_value = STOP_COLOR[self._rgb]
        self._transport.set_enabled(True)
        self.request_rebuild_midi_map()
def assign_page_2(self):
    """Assign controls for mode page 2: clip-launch grid plus mixer duties.

    Columns 0-6 carry mutes (row 5), crossfade-assign toggles (row 6) and
    raw MIDI pads (row 7); column 7 launches scenes.  Dials become EQ gains
    and pans, menu buttons become session navigation and transport.
    """
    with self.component_guard():
        self._backlight_type = 'up'
        self._session_zoom.set_enabled(True)
        for column in range(7):
            # Row 5: track mute, row 6: crossfade assign.
            self._grid[column][5]._on_value = MUTE_COLOR[self._rgb]
            self._mixer.channel_strip(column).set_mute_button(self._grid[column][5])
            self._grid[column][6]._on_value = CROSSFADE_ASSIGN_COLOR[self._rgb]
            self._mixer.channel_strip(column).set_crossfade_toggle(self._grid[column][6])
            # Row 7: detached pads sending raw notes on MIDI channel 2.
            self._grid[column][7]._msg_channel = 2
            self._grid[column][7].set_identifier(column)
            self._grid[column][7].reset()
            self._grid[column][7].set_enabled(False)
            self._grid[column][7].send_value(4, True)
            # Rows 0-4: clip-slot launch buttons for this track.
            for row in range(5):
                self._scene[row].clip_slot(column).set_launch_button(self._grid[column][row])
        for row in range(5):
            # Rightmost column launches whole scenes.
            self._grid[7][row]._off_value = SCENE_LAUNCH_COLOR[self._rgb]
            self._scene[row].set_launch_button(self._grid[7][row])
            self._grid[7][row].set_force_next_value()
            self._grid[7][row].turn_off()
        for column in range(4):
            # Dial rows map to low/mid/high EQ gains for the first 4 tracks.
            self._mixer.track_eq(column).set_gain_controls(tuple([self._dial[column + 8], self._dial[column + 4], self._dial[column]]))
            self._mixer.track_eq(column).set_enabled(True)
        for column in range(3):
            self._mixer.channel_strip(column + 4).set_pan_control(self._dial[column + 12])
        for index in range(4):
            self._menu[2 + index]._on_value = NAV_BUTTON_COLOR[self._rgb]
        self._session.set_track_bank_buttons(self._menu[4], self._menu[3])
        self._session.set_scene_bank_buttons(self._menu[5], self._menu[2])
        self._set_tempo_buttons([self._grid[7][5], self._grid[7][6]])
        self._menu[0]._on_value = PLAY_COLOR[self._rgb]
        self._menu[1]._off_value = STOP_COLOR[self._rgb]
        self._menu[1]._on_value = STOP_COLOR[self._rgb]
        self._transport.set_enabled(True)
        #self._mixer.update()
        self.request_rebuild_midi_map()
def assign_mod(self):
    """Hand the button matrix, dial matrix and key row to the active mod client."""
    with self.component_guard():
        self.deassign_matrix()
        self._host.set_enabled(True)
        self._modNum.set_enabled(True)
        self._host._set_dial_matrix(self._dial_matrix, self._dial_button_matrix)
        self._host._set_button_matrix(self._mod_matrix)
        self._host._set_key_buttons(tuple(self._key))
        if not self._host._active_client.is_connected():
            # No mod client attached: fall back to plain channel-shifted mappings.
            self.assign_alternate_mappings(self._modNum._mode_index + 1)
def modNum_update(self):
    """Refresh the selected mod client and its selection-button LEDs.

    The button matching the current mode index is lit solid (value 1); all
    other buttons show their client's configured mod color.
    """
    if self._modNum._is_enabled:  # was `== True`; rely on truthiness
        self.assign_alternate_mappings(0)
        self._host._select_client(int(self._modNum._mode_index))
        self._host.display_active_client()
        if not self._host._active_client.is_connected():
            # Client not attached: fall back to plain channel-shifted mappings.
            self.assign_alternate_mappings(self._modNum._mode_index + 1)
        # enumerate avoids calling .index() twice per button (O(n^2) before).
        for index, button in enumerate(self._modNum._modes_buttons):
            if self._modNum._mode_index == index:
                button.send_value(1)
            else:
                button.send_value(self._client[index]._mod_color)
def assign_alternate_mappings(self, chan):
    """Rebroadcast the 8x8 grid and all encoders on MIDI channel `chan`.

    Channel 0 returns the encoders to script control; any other channel
    leaves them disabled so an external client receives the raw MIDI.
    """
    for column in range(8):
        for row in range(8):
            self._grid[column][row].set_channel(chan)
    for knob in self._encoder:
        knob.set_channel(chan)
        # was `chan is 0`: identity on ints only works via CPython interning.
        knob.set_enabled(chan == 0)
    self.request_rebuild_midi_map()
def display_mod_colors(self):
    """No-op hook; variants of this script repaint per-mod colors here."""
    pass
def _update_selected_device(self):
    """Follow track selection: appoint the selected track's device.

    Falls back to the track's first device when none is selected, then
    rebuilds the MIDI map so the device controls point at the new target.
    """
    if self._device_selection_follows_track_selection:  # was `is True`
        track = self.song().view.selected_track
        device_to_select = track.view.selected_device
        if device_to_select is None and len(track.devices) > 0:  # was `== None`
            device_to_select = track.devices[0]
        if device_to_select is not None:  # was `!= None`
            self.song().view.select_device(device_to_select)
        self.set_appointed_device(device_to_select)
    self.request_rebuild_midi_map()
def handle_sysex(self, midi_bytes):
    """Detect the connected hardware model from its device-inquiry reply.

    Byte 11 of the universal device-inquiry response distinguishes the RGB
    model (7) from the monochrome model (2); the color maps of all buttons
    are switched accordingly.
    """
    #self.log_message('sysex: ' + str(midi_bytes))
    if len(midi_bytes) > 10:
        # Universal device-inquiry reply header; final byte selects the model.
        if midi_bytes[:11] == tuple([240, 126, 0, 6, 2, 0, 1, 97, 1, 0, 7]):
            self.log_message(str('>>>color detected'))
            self._rgb = 0  # index 0 selects the RGB entries of the *_COLOR tables
            for button in self._button:
                button._color_map = COLOR_MAP
            for column in self._grid:
                for button in column:
                    button._color_map = COLOR_MAP
        elif midi_bytes[:11] == tuple([240, 126, 0, 6, 2, 0, 1, 97, 1, 0, 2]):
            self.log_message(str('>>>mono detected'))
            self._rgb = 1  # monochrome: every color collapses to full brightness
            for button in self._button:
                button._color_map = [127 for index in range(0, 7)]
            for column in self._grid:
                for button in column:
                    button._color_map = [127 for index in range(0, 7)]
        self._assign_session_colors()
def to_encoder(self, num, val):
    """Push a normalised value (0.0-1.0) onto device parameter control `num`.

    Updates both the on-screen control (7-bit MIDI) and the mapped Live
    parameter (rescaled into its own min/max range).
    """
    rv = int(val * 127)  # scale to 7-bit MIDI
    self._device._parameter_controls[num].receive_value(rv)
    p = self._device._parameter_controls[num]._parameter_to_map_to
    newval = val * (p.max - p.min) + p.min  # rescale into the parameter's range
    p.value = newval
def set_local_ring_control(self, val = 1):
    """Enable (non-zero) or disable local control of the encoder LED rings."""
    self._local_ring_control = val != 0
def set_absolute_mode(self, val = 1):
    """Enable (non-zero) or disable absolute (vs. relative) encoder mode."""
    self._absolute_mode = val != 0
def send_ring_leds(self):
    """No-op hook; hardware variants push encoder ring LED state here."""
    pass
def _set_tempo_buttons(self, buttons):
    """Attach (or detach with None) the pair of tempo-nudge buttons.

    buttons : (MonoButtonElement, MonoButtonElement) or None
        buttons[0] increments the song tempo by 1 BPM, buttons[1]
        decrements it (see `_tempo_value`).
    """
    if self._tempo_buttons is not None:  # was `!= None`
        self._tempo_buttons[0].remove_value_listener(self._tempo_value)
        self._tempo_buttons[1].remove_value_listener(self._tempo_value)
    self._tempo_buttons = buttons
    if buttons is not None:  # was `!= None`
        for button in buttons:
            assert isinstance(button, MonoButtonElement)
        self._tempo_buttons[0].set_on_off_values(4, 0)
        self._tempo_buttons[0].add_value_listener(self._tempo_value, True)
        self._tempo_buttons[1].set_on_off_values(4, 0)
        self._tempo_buttons[1].add_value_listener(self._tempo_value, True)
        self._tempo_buttons[0].turn_on()
        self._tempo_buttons[1].turn_on()
def _tempo_value(self, value, sender):
    """Button callback: nudge song tempo by 1 BPM, clamped to Live's 20-999."""
    if value > 0:
        which = self._tempo_buttons.index(sender)
        if which == 0:
            self.song().tempo = round(min(self.song().tempo + 1, 999))
        elif which == 1:
            self.song().tempo = round(max(self.song().tempo - 1, 20))
def generate_strip_string(self, display_string):
    """Fit `display_string` into a 12-character LCD strip segment.

    Over-long strings first lose a trailing 'dB' suffix (when a decimal
    point is present), then spaces and vowels from the right; short strings
    are centred.  Non-ASCII characters are replaced with spaces and a
    trailing space separates adjacent segments.
    """
    NUM_CHARS_PER_DISPLAY_STRIP = 12
    if not display_string:
        return ' ' * NUM_CHARS_PER_DISPLAY_STRIP
    width = NUM_CHARS_PER_DISPLAY_STRIP - 1
    is_db_value = (display_string.endswith('dB')
                   and display_string.find('.') != -1)
    if len(display_string.strip()) > width and is_db_value:
        display_string = display_string[:-2]
    if len(display_string) > width:
        # Squeeze from the right: drop spaces first, then vowels.
        for removable in [' ', 'i', 'o', 'u', 'e', 'a']:
            while (len(display_string) > width
                   and display_string.rfind(removable, 1) != -1):
                cut = display_string.rfind(removable, 1)
                display_string = display_string[:cut] + display_string[cut + 1:]
    else:
        display_string = display_string.center(width)
    ret = u''
    for char in display_string[:width]:
        ret += char if 0 <= ord(char) <= 127 else ' '
    ret += ' '
    return ret
def notification_to_bridge(self, name, value, sender):
    """Forward a control's name/value notification to the MonoBridge LCD."""
    # A plain tuple literal replaces the needless `tuple([...])` construction.
    if isinstance(sender, (MonoButtonElement, CodecEncoderElement)):
        self._monobridge._send(sender.name, 'lcd_name', str(self.generate_strip_string(name)))
        self._monobridge._send(sender.name, 'lcd_value', str(self.generate_strip_string(value)))
def touched(self):
    """Register a control touch; the first touch lights the indicator
    and starts the decay poll (`check_touch`)."""
    if self._touched == 0:  # was `is 0`: use equality, not identity, for ints
        self._monobridge._send('touch', 'on')
        self.schedule_message(2, self.check_touch)
    self._touched += 1
def check_touch(self):
    """Scheduled poll that decays the touch counter and turns the
    indicator off once no touches remain."""
    if self._touched > 5:
        self._touched = 5  # clamp so a burst of touches still decays quickly
    elif self._touched > 0:
        self._touched -= 1
    if self._touched == 0:  # was `is 0`: use equality, not identity, for ints
        self._monobridge._send('touch', 'off')
    else:
        self.schedule_message(2, self.check_touch)
def get_clip_names(self):
    """Return the clip slots of every occupied slot in the session ring.

    Bug fix: the original returned `clip_slot._clip_slot` from inside the
    loop on the first occupied slot, so the accumulated list (and the
    plural name of this method) was never honoured.
    """
    clip_names = []
    for scene in self._session._scenes:
        for clip_slot in scene._clip_slots:
            if clip_slot.has_clip():  # was `is True`
                clip_names.append(clip_slot._clip_slot)
    return clip_names
def shift_update(self):
    """No-op hook; variants refresh shift-mode assignments here."""
    pass
| {
"repo_name": "LividInstruments/LiveRemoteScripts",
"path": "Livid_OhmModes/OhmModes.py",
"copies": "1",
"size": "53531",
"license": "mit",
"hash": -677872570868913900,
"line_mean": 32.6038920276,
"line_max": 209,
"alpha_frac": 0.675440399,
"autogenerated": false,
"ratio": 2.8990522610343894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40744926600343895,
"avg_score": null,
"num_lines": null
} |
# amounra 0513 : http://www.aumhaa.com
from __future__ import with_statement
import Live
import math
""" _Framework files """
from _Framework.ButtonElement import ButtonElement # Class representing a button a the controller
from _Framework.ButtonMatrixElement import ButtonMatrixElement # Class representing a 2-dimensional set of buttons
from _Framework.ChannelStripComponent import ChannelStripComponent # Class attaching to the mixer of a given track
from _Framework.ClipSlotComponent import ClipSlotComponent # Class representing a ClipSlot within Live
from _Framework.CompoundComponent import CompoundComponent # Base class for classes encompasing other components to form complex components
from _Framework.ControlElement import ControlElement # Base class for all classes representing control elements on a controller
from _Framework.ControlSurface import ControlSurface # Central base class for scripts based on the new Framework
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent # Base class for all classes encapsulating functions in Live
from _Framework.DeviceComponent import DeviceComponent # Class representing a device in Live
from _Framework.EncoderElement import EncoderElement # Class representing a continuous control on the controller
from _Framework.InputControlElement import * # Base class for all classes representing control elements on a controller
from _Framework.MixerComponent import MixerComponent # Class encompassing several channel strips to form a mixer
from _Framework.ModeSelectorComponent import ModeSelectorComponent # Class for switching between modes, handle several functions with few controls
from _Framework.NotifyingControlElement import NotifyingControlElement # Class representing control elements that can send values
from _Framework.SceneComponent import SceneComponent # Class representing a scene in Live
from _Framework.SessionComponent import SessionComponent # Class encompassing several scene to cover a defined section of Live's session
from _Framework.SessionZoomingComponent import DeprecatedSessionZoomingComponent as SessionZoomingComponent # Class using a matrix of buttons to choose blocks of clips in the session
from _Framework.SliderElement import SliderElement # Class representing a slider on the controller
from VCM600.TrackEQComponent import TrackEQComponent # Class representing a track's EQ, it attaches to the last EQ device in the track
from VCM600.TrackFilterComponent import TrackFilterComponent # Class representing a track's filter, attaches to the last filter in the track
from _Framework.TransportComponent import TransportComponent # Class encapsulating all functions in Live's transport section
from _Framework.M4LInterfaceComponent import M4LInterfaceComponent
"""Custom files, overrides, and files from other scripts"""
from _Mono_Framework.MonoButtonElement import MonoButtonElement
from _Mono_Framework.MonoEncoderElement import MonoEncoderElement
from _Mono_Framework.MonoBridgeElement import MonoBridgeElement
from _Mono_Framework.Debug import *
from Map import *
""" Here we define some global variables """
# Sysex messages for the Livid hardware.  The original leading-zero literals
# (e.g. 01, 07) are Python-2 octal syntax and a SyntaxError on Python 3;
# every digit was < 8, so rewriting them as plain ints leaves all values
# unchanged on both interpreters.
switchxfader = (240, 0, 1, 97, 2, 15, 1, 247)
switchxfaderrgb = (240, 0, 1, 97, 7, 15, 1, 247)
assigncolors = (240, 0, 1, 97, 7, 34, 0, 7, 3, 6, 5, 1, 2, 4, 247)
assign_default_colors = (240, 0, 1, 97, 7, 34, 0, 7, 6, 5, 1, 4, 3, 2, 247)
check_model = (240, 126, 127, 6, 1, 247)  # universal device-inquiry request
""" Here we add an override to the MixerComponent to include return channels in our mixer """
class AliasMixerComponent(MixerComponent):
    """MixerComponent override whose track list also includes return tracks."""
    def tracks_to_use(self):
        """Return the visible tracks followed by the return tracks."""
        return tuple(self.song().visible_tracks) + tuple(self.song().return_tracks)
class Alias(ControlSurface):
    # Explicit module/doc attributes, as conventional in Live _Framework scripts.
    __module__ = __name__
    __doc__ = " Alias 8 controller script "

    def __init__(self, c_instance):
        """Build all control elements and components inside the component guard."""
        super(Alias, self).__init__(c_instance)
        with self.component_guard():
            self._host_name = 'Alias'
            self._color_type = 'OhmRGB'
            self.log_message("--------------= Alias log opened =--------------")
            self._rgb = 0
            self._timer = 0  # 0-255 counter driving LED flashing
            self.flash_status = 1
            self._clutch_device_selection = False
            self._touched = 0  # touch-decay counter, see touched()/check_touch()
            self._update_linked_device_selection = None
            self._setup_monobridge()
            self._setup_controls()
            self._setup_m4l_interface()
            self._setup_mixer_control()
            self._setup_session_control()
            self._setup_mixer_nav()

    """script initialization methods"""

    def _setup_monobridge(self):
        """Create the element that relays LCD/touch data to the m4l bridge."""
        self._monobridge = MonoBridgeElement(self)
        self._monobridge.name = 'MonoBridge'

    def _setup_controls(self):
        """Instantiate faders, buttons, dials and the master encoder."""
        is_momentary = True
        self._fader = [MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, ALIAS_FADERS[index], Live.MidiMap.MapMode.absolute, 'Fader_' + str(index), index, self) for index in range(9)]
        self._button = [MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, ALIAS_BUTTONS[index], 'Button_' + str(index), self) for index in range(16)]
        self._dial = [MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, ALIAS_DIALS[index], Live.MidiMap.MapMode.absolute, 'Dial_' + str(index), index + 8, self) for index in range(16)]
        self._encoder = MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, ALIAS_ENCODER, Live.MidiMap.MapMode.absolute, 'Encoder', 0, self)

    def _setup_m4l_interface(self):
        """Expose control grab/release hooks for Max for Live devices."""
        self._m4l_interface = M4LInterfaceComponent(controls=self.controls, component_guard=self.component_guard)
        self.get_control_names = self._m4l_interface.get_control_names
        self.get_control = self._m4l_interface.get_control
        self.grab_control = self._m4l_interface.grab_control
        self.release_control = self._m4l_interface.release_control

    def _setup_mixer_control(self):
        """Wire faders, sends, mute and arm buttons to an 8-strip mixer."""
        is_momentary = True
        self._num_tracks = (8) #A mixer is one-dimensional;
        self._mixer = AliasMixerComponent(8, 0, False, False)
        self._mixer.name = 'Mixer'
        self._mixer.set_track_offset(0) #Sets start point for mixer strip (offset from left)
        for index in range(8):
            self._mixer.channel_strip(index).set_volume_control(self._fader[index])
            self._mixer.channel_strip(index).set_send_controls(tuple([self._dial[index], self._dial[index+8]]))
            self._mixer.channel_strip(index).set_mute_button(self._button[index])
            self._button[index].set_on_off_values(MUTE_TOG, 0)
            self._mixer.channel_strip(index)._invert_mute_feedback = True
            self._mixer.channel_strip(index).set_arm_button(self._button[index+8])
            self._button[index+8].set_on_off_values(REC_TOG, 0)
            self._mixer.channel_strip(index).name = 'Mixer_ChannelStrip_' + str(index)
        self._mixer.master_strip().set_volume_control(self._fader[8])
        self.song().view.selected_track = self._mixer.channel_strip(0)._track

    def _setup_session_control(self):
        """Create an 8x1 session ring and attach it to the mixer."""
        self._session = SessionComponent(8, 1)
        self._session.set_mixer(self._mixer)
        self.set_highlighting_session_component(self._session)

    def _setup_mixer_nav(self):
        """Listen to the master encoder to scroll the session track offset."""
        if not self._encoder.value_has_listener(self._nav_change):
            self._encoder.add_value_listener(self._nav_change)

    """shift/zoom methods"""

    def _nav_change(self, value):
        """Map the encoder's 0-127 value onto the available track offsets."""
        self._session.set_offsets(int((float(value)/float(127))*max(8, len(self._mixer.tracks_to_use())-8)), self._session._scene_offset)

    """called on timer"""

    def update_display(self):
        """Periodic UI tick: advance the flash timer and flash the buttons."""
        ControlSurface.update_display(self)
        self._timer = (self._timer + 1) % 256
        self.flash()

    def flash(self):
        """Flash every button element in step with the shared timer."""
        if(self.flash_status > 0):
            for control in self.controls:
                if isinstance(control, MonoButtonElement):
                    control.flash(self._timer)

    """m4l bridge"""

    def generate_strip_string(self, display_string):
        """Fit a name/value into a 12-character, underscore-padded LCD segment."""
        NUM_CHARS_PER_DISPLAY_STRIP = 12
        if (not display_string):
            return (' ' * NUM_CHARS_PER_DISPLAY_STRIP)
        if ((len(display_string.strip()) > (NUM_CHARS_PER_DISPLAY_STRIP - 1)) and (display_string.endswith('dB') and (display_string.find('.') != -1))):
            display_string = display_string[:-2]
        if (len(display_string) > (NUM_CHARS_PER_DISPLAY_STRIP - 1)):
            # Too long: squeeze by dropping spaces, then vowels, from the right.
            for um in [' ', 'i', 'o', 'u', 'e', 'a']:
                while ((len(display_string) > (NUM_CHARS_PER_DISPLAY_STRIP - 1)) and (display_string.rfind(um, 1) != -1)):
                    um_pos = display_string.rfind(um, 1)
                    display_string = (display_string[:um_pos] + display_string[(um_pos + 1):])
        else:
            display_string = display_string.center((NUM_CHARS_PER_DISPLAY_STRIP - 1))
        ret = u''
        for i in range((NUM_CHARS_PER_DISPLAY_STRIP - 1)):
            # Replace non-ASCII characters, which the LCD cannot show.
            if ((ord(display_string[i]) > 127) or (ord(display_string[i]) < 0)):
                ret += ' '
            else:
                ret += display_string[i]
        ret += ' '
        ret = ret.replace(' ', '_')
        assert (len(ret) == NUM_CHARS_PER_DISPLAY_STRIP)
        return ret

    def notification_to_bridge(self, name, value, sender):
        """Forward an encoder's name/value notification to the bridge LCD."""
        if isinstance(sender, MonoEncoderElement):
            self._monobridge._send(sender.name, 'lcd_name', str(self.generate_strip_string(name)))
            self._monobridge._send(sender.name, 'lcd_value', str(self.generate_strip_string(value)))

    def touched(self):
        """Register a control touch; the first touch starts the decay poll."""
        if self._touched == 0:  # was `is 0`: use equality, not identity, for ints
            self._monobridge._send('touch', 'on')
            self.schedule_message(2, self.check_touch)
        self._touched += 1

    def check_touch(self):
        """Scheduled poll that decays the touch counter toward zero."""
        if self._touched > 5:
            self._touched = 5  # clamp so a burst of touches still decays quickly
        elif self._touched > 0:
            self._touched -= 1
        if self._touched == 0:  # was `is 0`
            self._monobridge._send('touch', 'off')
        else:
            self.schedule_message(2, self.check_touch)

    """general functionality"""

    def allow_updates(self, allow_updates):
        """Enable or disable updates on every component."""
        for component in self.components:
            component.set_allow_update(int(allow_updates!=0))

    def disconnect(self):
        """Detach listeners and shut the script down cleanly."""
        if self._encoder.value_has_listener(self._nav_change):
            self._encoder.remove_value_listener(self._nav_change)
        self.log_message("--------------= Alias log closed =--------------")
        super(Alias, self).disconnect()
        rebuild_sys()

    def handle_sysex(self, midi_bytes):
        """Incoming sysex is ignored by this script."""
        pass

    def device_follows_track(self, val):
        """Toggle device-follows-track selection; returns self for chaining."""
        self._device_selection_follows_track_selection = (val == 1)
        return self

    def assign_alternate_mappings(self):
        """No alternate mappings exist on this hardware."""
        pass

    def _get_num_tracks(self):
        """Return the configured track count."""
        return self.num_tracks

    def _on_device_changed(self, device):
        """Relay device-selection changes to a linked script, if one is set."""
        #self.log_message('new device ' + str(type(device)))
        if self._update_linked_device_selection is not None:  # was `!= None`
            self._update_linked_device_selection(device)

    def _on_session_offset_changes(self):
        # NOTE(review): _r_function_mode, _mem and _session2 are never created
        # in __init__ -- this looks copied from another script and would raise
        # AttributeError if ever called.  Confirm before relying on it.
        if self._r_function_mode._mode_index in range(0,3):
            self._mem[int(self._r_function_mode._mode_index)] = self._session2.track_offset()

    def connect_script_instances(self, instanciated_scripts):
        """No cross-script linking is performed."""
        pass
# a | {
"repo_name": "LividInstruments/LiveRemoteScripts",
"path": "Livid_Alias8/Alias.py",
"copies": "1",
"size": "10355",
"license": "mit",
"hash": 2902908666247174700,
"line_mean": 38.6781609195,
"line_max": 182,
"alpha_frac": 0.7262192178,
"autogenerated": false,
"ratio": 3.2748260594560406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45010452772560405,
"avg_score": null,
"num_lines": null
} |
""" A moveable circle shape. """
from traits.api import Float
from enable.primitives.shape import Shape
class Circle(Shape):
    """ A moveable circle shape. """

    # The radius of the circle.
    radius = Float

    # 'CoordinateBox' interface.
    #---------------------------

    def _bounds_changed(self):
        """ Static trait change handler. """
        width, height = self.bounds
        # Keep the circle inscribed in the bounding box.
        self.radius = min(width, height) / 2.0

    # 'Component' interface.
    #-----------------------

    def is_in(self, x, y):
        """ Return True if a point is considered to be 'in' the component. """
        distance = self._distance_between(self.center, (x, y))
        return distance <= self.radius

    # Protected 'Component' interface.
    #---------------------------------

    def _draw_mainlayer(self, gc, view_bounds=None, mode='default'):
        """ Draw the component. """
        with gc:
            gc.set_fill_color(self._get_fill_color(self.event_state))
            x, y = self.position
            cx = x + self.radius
            cy = y + self.radius
            gc.arc(cx, cy, self.radius, 0, 2 * 3.14159, False)
            gc.fill_path()
            # Draw the shape's text.
            self._draw_text(gc)
        return

    # 'Circle' interface.
    #--------------------

    def _radius_changed(self):
        """ Static trait change handler. """
        diameter = self.radius * 2
        # Bounds are square: the circle's bounding box.
        self.bounds = [diameter, diameter]
| {
"repo_name": "tommy-u/enable",
"path": "examples/enable/shapes/circle.py",
"copies": "1",
"size": "1398",
"license": "bsd-3-clause",
"hash": -8108550993690971000,
"line_mean": 26.4117647059,
"line_max": 78,
"alpha_frac": 0.517167382,
"autogenerated": false,
"ratio": 3.9491525423728815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9953683811331089,
"avg_score": 0.002527222608358511,
"num_lines": 51
} |
"""AMPAL objects that represent ligands."""
from ampal.base_ampal import Polymer, Monomer
class LigandGroup(Polymer):
    """A container for `Ligand` `Monomers`.

    Parameters
    ----------
    monomers : Monomer or [Monomer], optional
        Monomer or list containing Monomer objects to form the Polymer().
    polymer_id : str, optional
        An ID that the user can use to identify the `Polymer`. This is
        used when generating a pdb file using `Polymer().pdb`.
    ampal_parent : ampal.Assembly, optional
        Reference to `Assembly` containing the `Polymer`.
    sl : int, optional
        The default smoothing level used when calculating the
        backbone primitive.
    """

    def __init__(self, monomers=None, polymer_id=' ', ampal_parent=None, sl=2):
        super().__init__(
            monomers=monomers, polymer_id=polymer_id, molecule_type='ligands',
            ampal_parent=ampal_parent, sl=sl)

    def __repr__(self):
        ligand_count = len(self._monomers)
        noun = 'Ligand' if ligand_count == 1 else 'Ligands'
        return '<Ligands chain containing {} {}>'.format(ligand_count, noun)

    @property
    def categories(self):
        """Returns the categories of `Ligands` in `LigandGroup`."""
        by_category = {}
        for ligand in self:
            by_category.setdefault(ligand.category, []).append(ligand)
        return by_category

    @property
    def category_count(self):
        """Returns the number of categories in `categories`."""
        return {category: len(ligands)
                for category, ligands in self.categories.items()}
class Ligand(Monomer):
    """`Monomer` that represents a `Ligand`.

    Notes
    -----
    Any `Monomer` without a dedicated class is represented as a `Ligand`.

    Parameters
    ----------
    mol_code : str
        PDB molecule code that represents the monomer.
    atoms : OrderedDict, optional
        OrderedDict containing Atoms for the Monomer; ordering preserves
        insertion order.
    monomer_id : str, optional
        String used to identify the residue.
    insertion_code : str, optional
        Insertion code of monomer, used if reading from pdb.
    is_hetero : bool, optional
        True if is a hetero atom in pdb. Helps with PDB formatting.

    Attributes
    ----------
    atoms : OrderedDict
        OrderedDict containing Atoms for the Monomer.
    mol_code : str
        PDB molecule code that represents the `Ligand`.
    insertion_code : str
        Insertion code of `Ligand`, used if reading from pdb.
    is_hetero : bool
        True if is a hetero atom in pdb. Helps with PDB formatting.
    states : dict
        Per-state `OrderedDict`s of atom information for the `Ligand`.
    id : str
        String used to identify the residue.
    ampal_parent : Polymer or None
        A reference to the `LigandGroup` containing this `Ligand`.
    tags : dict
        Free-form metadata dictionary, used both by AMPAL as a cache and
        by users to attach their own information.
    """

    def __init__(self, mol_code, atoms=None, monomer_id=' ', insertion_code=' ',
                 is_hetero=False, ampal_parent=None):
        super(Ligand, self).__init__(
            atoms, monomer_id, ampal_parent=ampal_parent)
        # PDB-specific bookkeeping beyond the generic Monomer state.
        self.mol_code = mol_code
        self.insertion_code = insertion_code
        self.is_hetero = is_hetero

    def __repr__(self):
        atom_count = len(self.atoms)
        noun = 'Atom' if atom_count == 1 else 'Atoms'
        return '<Ligand containing {} {}. Ligand code: {}>'.format(
            atom_count, noun, self.mol_code)
# Module authorship metadata.
__author__ = "Christopher W. Wood, Kieran L. Hudson"
| {
"repo_name": "woolfson-group/isambard",
"path": "isambard/ampal/ligands.py",
"copies": "1",
"size": "4151",
"license": "mit",
"hash": -4224857303645106000,
"line_mean": 34.7844827586,
"line_max": 80,
"alpha_frac": 0.6270778126,
"autogenerated": false,
"ratio": 3.995187680461983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00010642826734780757,
"num_lines": 116
} |
"""AMPAL objects that represent protein."""
from collections import OrderedDict
import warnings
import numpy
from ampal.base_ampal import Polymer, Monomer, Atom
from ampal.ligands import Ligand, LigandGroup
from ampal.assembly import Assembly
from ampal.pseudo_atoms import Primitive
from ampal.analyse_protein import (
make_primitive_extrapolate_ends, measure_torsion_angles, residues_per_turn,
polymer_to_reference_axis_distances, crick_angles, alpha_angles,
sequence_molecular_weight, sequence_molar_extinction_280,
sequence_isoelectric_point, measure_sidechain_torsion_angles)
from ampal.interactions import (
generate_covalent_bond_graph, generate_bond_subgraphs_from_break,
find_covalent_bonds)
from external_programs.dssp import (
extract_all_ss_dssp, run_dssp, extract_solvent_accessibility_dssp)
from external_programs.naccess import run_naccess, extract_residue_accessibility
from external_programs.scwrl import pack_sidechains
from settings import global_settings
from tools.amino_acids import (
get_aa_code, get_aa_letter, ideal_backbone_bond_lengths,
ideal_backbone_bond_angles)
from tools.geometry import (
Quaternion, unit_vector, dihedral, find_transformations, distance,
angle_between_vectors)
from tools.isambard_warnings import MalformedPDBWarning
def find_ss_regions_polymer(polymer, ss):
    """Returns an `Assembly` of regions tagged as secondary structure.

    Parameters
    ----------
    polymer : Polypeptide
        `Polymer` object to be searched secondary structure regions.
    ss : list
        List of secondary structure tags to be separate i.e. ['H']
        would return helices, ['H', 'E'] would return helices
        and strands.

    Returns
    -------
    fragments : Assembly
        `Assembly` containing a `Polymer` for each region of specified
        secondary structure.
    """
    if isinstance(ss, str):
        ss = [ss[:]]
    tag_key = 'secondary_structure'
    # Only residues that have been tagged (e.g. via tag_secondary_structure).
    monomers = [x for x in polymer if tag_key in x.tags.keys()]
    if len(monomers) == 0:
        return Assembly()
    # Whole chain is one uniform region of the requested type: return it whole.
    if (len(ss) == 1) and (all([m.tags[tag_key] == ss[0] for m in monomers])):
        return Assembly(polymer)
    previous_monomer = None
    fragment = Polypeptide(ampal_parent=polymer)
    fragments = Assembly()
    poly_id = 0
    for monomer in monomers:
        current_monomer = monomer.tags[tag_key]
        if (current_monomer == previous_monomer) or (not previous_monomer):
            # Same region continues (or first residue): extend the fragment.
            fragment.append(monomer)
        else:
            # Region boundary: keep the finished fragment if it is a wanted type.
            if previous_monomer in ss:
                # NOTE(review): the tag is taken from the *new* monomer, not the
                # fragment's own residues -- confirm this is intentional.
                fragment.tags[tag_key] = monomer.tags[tag_key]
                fragment.id = chr(poly_id + 65)  # label fragments 'A', 'B', ...
                fragments.append(fragment)
                poly_id += 1
            fragment = Polypeptide(ampal_parent=polymer)
            fragment.append(monomer)
        previous_monomer = monomer.tags[tag_key]
    return fragments
def flat_list_to_polymer(atom_list, atom_group_s=4):
    """Takes a flat list of atomic coordinates and converts it to a `Polymer`.

    Parameters
    ----------
    atom_list : [Atom]
        Flat list of coordinates.
    atom_group_s : int, optional
        Size of atom groups: 4 (N, CA, C, O) or 5 (adds CB).

    Returns
    -------
    polymer : Polypeptide
        `Polymer` object containing atom coords converted `Monomers`.

    Raises
    ------
    ValueError
        Raised if `atom_group_s` != 4 or 5
    """
    atom_labels = ['N', 'CA', 'C', 'O', 'CB']
    atom_elements = ['N', 'C', 'C', 'O', 'C']
    # Chop the flat list into per-residue groups of atom_group_s coordinates.
    grouped_coords = [atom_list[i:i + atom_group_s]
                      for i in range(0, len(atom_list), atom_group_s)]
    grouped_atoms = [[Atom(coord, element)
                      for coord, element in zip(group, atom_elements)]
                     for group in grouped_coords]
    # 5 atoms include a CB, so ALA is used; 4 atoms are backbone-only (GLY).
    if atom_group_s == 5:
        mol_code = 'ALA'
    elif atom_group_s == 4:
        mol_code = 'GLY'
    else:
        raise ValueError(
            'Parameter atom_group_s must be 4 or 5 so atoms can be labeled correctly.')
    monomers = [Residue(OrderedDict(zip(atom_labels, group)), mol_code)
                for group in grouped_atoms]
    return Polypeptide(monomers=monomers)
def flat_list_to_dummy_chain(atom_list, atom_group_s=1):
    """Converts flat list of coordinates into dummy C-alpha carbons

    Parameters
    ----------
    atom_list : [Atom]
        Flat list of co-ordinates.
    atom_group_s : int, optional
        Size of atom groups.

    Returns
    -------
    polymer : Polypeptide
        `Polymer` object containing atom coord converted `Monomers`
        with 'DUM' atom name.
    """
    atom_labels = ['CA']
    atom_elements = ['C']
    # Chop the flat list into per-residue groups of atom_group_s coordinates.
    grouped_coords = [atom_list[i:i + atom_group_s]
                      for i in range(0, len(atom_list), atom_group_s)]
    grouped_atoms = [[Atom(coord, element)
                      for coord, element in zip(group, atom_elements)]
                     for group in grouped_coords]
    monomers = [Residue(OrderedDict(zip(atom_labels, group)), 'DUM')
                for group in grouped_atoms]
    return Polypeptide(monomers=monomers)
def align(target, mobile, target_i=0, mobile_i=0):
    """Aligns one Polypeptide (mobile) to another (target).

    Notes
    -----
    This function directly modifies atoms of the mobile Polypeptide!
    It does not return a new object.

    Parameters
    ----------
    target : Polypeptide
        Polypeptide to be aligned to.
    mobile : Polypeptide
        Polypeptide to be moved during alignment.
    target_i : int, optional
        Index of `Residue` in target to align to.
    mobile_i : int, optional
        Index of `Residue` in mobile to be aligned.
    """
    # First, align N->CA vectors.
    s1, e1, s2, e2 = [x._vector
                      for x in [mobile[mobile_i]['N'], mobile[mobile_i]['CA'],
                                target[target_i]['N'], target[target_i]['CA']]]
    translation, angle, axis, point = find_transformations(
        s1, e1, s2, e2, radians=False)
    # Rotation first, Then translation.
    mobile.rotate(angle=angle, axis=axis, point=point, radians=False)
    mobile.translate(vector=translation)
    # Second, rotate about N->CA axis to align CA->C vectors.
    # The dihedral over (mobile C, mobile N, mobile CA, target C) gives the
    # residual twist about the now-shared N->CA axis.
    angle = dihedral(mobile[mobile_i]['C'], mobile[mobile_i]
                     ['N'], mobile[mobile_i]['CA'], target[target_i]['C'])
    axis = target[target_i]['CA'] - target[target_i]['N']
    point = target[target_i]['N']._vector
    mobile.rotate(angle=angle, axis=axis, point=point)
    return
class Polypeptide(Polymer):
"""Container for `Residues`, inherits from `Polymer`.
Parameters
----------
monomers : Residue or [Residue], optional
`Residue` or list containing `Residue` objects to form the
`Polypeptide`.
polymer_id : str, optional
An ID that the user can use to identify the `Polypeptide`. This is
used when generating a pdb file using `Polypeptide().pdb`.
ampal_parent : ampal.Assembly, optional
Reference to `Assembly` containing the `Polymer`.
sl : int, optional
The default smoothing level used when calculating the
backbone primitive.
Attributes
----------
id : str
`Polypeptide` ID
ampal_parent : ampal.Assembly or None
Reference to `Assembly` containing the `Polypeptide`
molecule_type : str
A description of the type of `Polymer` i.e. Protein, DNA etc.
ligands : ampal.LigandGroup
A `LigandGroup` containing all the `Ligands` associated with this
`Polypeptide` chain.
tags : dict
A dictionary containing information about this AMPAL object.
The tags dictionary is used by AMPAL to cache information
about this object, but is also intended to be used by users
to store any relevant information they have.
sl : int
The default smoothing level used when calculating the
backbone primitive.
Raises
------
TypeError
`Polymer` type objects can only be initialised empty or using
a `Monomer`.
"""
def __init__(self, monomers=None, polymer_id=' ', ampal_parent=None, sl=2):
    """Initialise as a Polymer of molecule_type 'protein'; see class docstring."""
    super().__init__(
        monomers=monomers, polymer_id=polymer_id, molecule_type='protein',
        ampal_parent=ampal_parent, sl=sl)
def __add__(self, other):
    """Concatenate with another `Polymer`, keeping this chain's id."""
    if not isinstance(other, Polymer):
        raise TypeError(
            'Only Polymer objects may be merged with a Polymer.')
    merged_monomers = self._monomers + other._monomers
    return Polypeptide(monomers=merged_monomers, polymer_id=self.id)
def __getitem__(self, item):
    """Index by residue-id string, integer position, or slice."""
    if isinstance(item, str):
        # String keys look residues up by their (stringified) id.
        return {str(m.id): m for m in self._monomers}[item]
    if isinstance(item, int):
        return self._monomers[item]
    # Anything else (a slice) yields a new chain with the same id.
    return Polypeptide(self._monomers[item], polymer_id=self.id)
def __repr__(self):
    """Short summary with residue count and (possibly truncated) sequence."""
    full_sequence = self.sequence
    if len(full_sequence) > 15:
        shown = full_sequence[:12] + '...'
    else:
        shown = full_sequence
    residue_count = len(self._monomers)
    noun = 'Residue' if residue_count == 1 else 'Residues'
    return '<Polypeptide containing {} {}. Sequence: {}>'.format(
        residue_count, noun, shown)
def get_slice_from_res_id(self, start, end):
    """Returns a new `Polypeptide` containing the `Residues` in start/end range.

    Parameters
    ----------
    start : str
        string representing start residue id (PDB numbering)
    end : str
        string representing end residue id (PDB numbering)

    Returns
    -------
    slice_polymer : Polymer
        Polymer containing the residue range specified by start-end
    """
    residues_by_id = {str(m.id): m for m in self._monomers}
    selected = [residues_by_id[str(res_id)]
                for res_id in range(int(start), int(end) + 1)]
    return Polypeptide(selected, self.id)
@property
def backbone(self):
    """Returns a new `Polymer` containing only the backbone atoms.

    Notes
    -----
    Metadata is not currently preserved from the parent object.
    Sequence data is retained, but only the main chain atoms are retained.

    Returns
    -------
    bb_poly : Polypeptide
        Polymer containing only the backbone atoms of the original
        Polymer.
    """
    backbone_residues = [residue.backbone for residue in self._monomers]
    return Polypeptide(backbone_residues, self.id)
@property
def primitive(self):
    """Primitive of the backbone.

    Notes
    -----
    This is the average of the positions of all the CAs in frames
    of `sl` `Residues`.
    """
    cas = self.get_reference_coords()
    # Chain termini lack a full smoothing window, so the ends of the
    # primitive are extrapolated rather than averaged.
    primitive_coords = make_primitive_extrapolate_ends(
        cas, smoothing_level=self.sl)
    primitive = Primitive.from_coordinates(primitive_coords)
    # Mirror this Polypeptide's labelling so primitive points map
    # one-to-one onto Residues.
    primitive.relabel_monomers([x.id for x in self])
    primitive.id = self.id
    primitive.ampal_parent = self
    return primitive
@property
def helices(self):
    """Returns a new `Assembly` containing only the alpha-helices.

    Notes
    -----
    Metadata is not currently preserved from the parent object.

    Returns
    -------
    hel_assembly : Assembly
        `Assembly` containing only the alpha-helices of the
        original `Polymer`.
    """
    # Ensure DSSP secondary-structure tags exist before filtering;
    # no-op if the Residues are already tagged.
    self.tag_secondary_structure()
    # 'H' is the DSSP code for alpha-helix.
    hel_assembly = find_ss_regions_polymer(self, 'H')
    return hel_assembly
@property
def strands(self):
    """Returns a new `Assembly` containing only the beta-strand atoms.

    Notes
    -----
    Metadata is not currently preserved from the parent object.

    Returns
    -------
    strand_assembly : Assembly
        `Assembly` containing only the beta-strand atoms of
        the original `Polymer`.
    """
    # Ensure DSSP secondary-structure tags exist before filtering;
    # no-op if the Residues are already tagged.
    self.tag_secondary_structure()
    # 'E' is the DSSP code for extended strand (beta).
    strand_assembly = find_ss_regions_polymer(self, 'E')
    return strand_assembly
@property
def fasta(self):
    """Generates sequence data for the protein in FASTA format."""
    max_line_length = 79
    header = '>{0}:{1}|PDBID|CHAIN|SEQUENCE\n'.format(
        self.ampal_parent.id.upper(), self.id)
    seq = self.sequence
    # Wrap the sequence body at the FASTA line width.
    body = ''.join('{0}\n'.format(seq[i: i + max_line_length])
                   for i in range(0, len(seq), max_line_length))
    return header + body
def pack_new_sequence(self, sequence):
    """Packs a new sequence onto the polymer using Scwrl4.

    Notes
    -----
    If Scwrl4 fails (returns `None`), this method returns silently
    without modifying the Polypeptide.

    Parameters
    ----------
    sequence : str
        String containing the amino acid sequence. This must
        be the same length as the Polymer.

    Raises
    ------
    ValueError
        Raised if the sequence length does not match the
        number of monomers in the Polymer.
    """
    # This import is here to prevent a circular import.
    from ampal.pdb_parser import convert_pdb_to_ampal
    polymer_bb = self.backbone
    if len(sequence) != len(polymer_bb):
        raise ValueError(
            'Sequence length ({}) does not match Polymer length ({}).'.format(
                len(sequence), len(polymer_bb)))
    # Scwrl4 rebuilds side chains onto the backbone-only PDB text.
    scwrl_out = pack_sidechains(self.backbone.pdb, sequence)
    if scwrl_out is None:
        return
    else:
        packed_structure, scwrl_score = scwrl_out
    new_assembly = convert_pdb_to_ampal(packed_structure, path=False)
    # Replace this chain's monomers with the freshly-packed ones.
    self._monomers = new_assembly[0]._monomers[:]
    self.tags['scwrl_score'] = scwrl_score
    self.assign_force_field(global_settings['buff']['force_field'])
    return
def repack(self):
    """Repacks current side chain sequence using Scwrl4."""
    # Re-running Scwrl4 with the existing sequence rebuilds all
    # side-chain conformations from the current backbone.
    self.pack_new_sequence(self.sequence)
    return
@property
def sequence(self):
    """Returns the sequence of the `Polymer` as a string.

    Returns
    -------
    sequence : str
        String of the `Residue` sequence of the `Polypeptide`.
    """
    return ''.join(monomer.mol_letter for monomer in self._monomers)
@property
def molecular_weight(self):
    """Returns the molecular weight of the `Polypeptide` in Daltons."""
    return sequence_molecular_weight(self.sequence)
@property
def molar_extinction_280(self):
    """Returns the molar extinction coefficient of the `Polypeptide` at 280 nm."""
    return sequence_molar_extinction_280(self.sequence)
@property
def isoelectric_point(self):
    """Returns the isoelectric point of the `Polypeptide`."""
    return sequence_isoelectric_point(self.sequence)
@property
def backbone_bond_lengths(self):
    """Dictionary containing backbone bond lengths as lists of floats.

    Returns
    -------
    bond_lengths : dict
        Keys are `n_ca`, `ca_c`, `c_o` and `c_n`, referring to the
        N-CA, CA-C, C=O and C-N bonds respectively. Values are
        lists of floats : the bond lengths in Angstroms.
        The lists of n_ca, ca_c and c_o are of length k for
        a Polypeptide containing k Residues. The list of c_n bonds
        is of length k-1 for a Polypeptide containing k Residues
        (C-N formed between successive `Residue` pairs).
    """
    residues = list(self.get_monomers(ligands=False))
    # Peptide-bond partners: each Residue paired with its successor.
    successive_pairs = [(self[i], self[i + 1])
                        for i in range(len(self) - 1)]
    return dict(
        n_ca=[distance(r['N'], r['CA']) for r in residues],
        ca_c=[distance(r['CA'], r['C']) for r in residues],
        c_o=[distance(r['C'], r['O']) for r in residues],
        c_n=[distance(a['C'], b['N']) for a, b in successive_pairs],
    )
@property
def backbone_bond_angles(self):
    """Dictionary containing backbone bond angles as lists of floats.

    Returns
    -------
    bond_angles : dict
        Keys are `n_ca_c`, `ca_c_o`, `ca_c_n` and `c_n_ca`, referring
        to the N-CA-C, CA-C=O, CA-C-N and C-N-CA angles respectively.
        Values are lists of floats : the bond angles in degrees.
        The lists of n_ca_c, ca_c_o are of length k for a `Polypeptide`
        containing k `Residues`. The list of ca_c_n and c_n_ca are of
        length k-1 for a `Polypeptide` containing k `Residues` (These
        angles are across the peptide bond, and are therefore formed
        between successive `Residue` pairs).
    """
    residues = list(self.get_monomers(ligands=False))
    # Angles across the peptide bond span successive Residue pairs.
    successive_pairs = [(self[i], self[i + 1])
                        for i in range(len(self) - 1)]
    return dict(
        n_ca_c=[angle_between_vectors(r['N'] - r['CA'], r['C'] - r['CA'])
                for r in residues],
        ca_c_o=[angle_between_vectors(r['CA'] - r['C'], r['O'] - r['C'])
                for r in residues],
        ca_c_n=[angle_between_vectors(a['CA'] - a['C'], b['N'] - a['C'])
                for a, b in successive_pairs],
        c_n_ca=[angle_between_vectors(a['C'] - b['N'], b['CA'] - b['N'])
                for a, b in successive_pairs],
    )
def c_join(self, other, psi=-40.76, omega=-178.25, phi=-65.07,
           o_c_n_angle=None, c_n_ca_angle=None, c_n_length=None,
           relabel=True):
    """Joins other to self at the C-terminus via a peptide bond.

    Notes
    -----
    This function directly modifies self. It does not return a new object.

    Parameters
    ----------
    other: Residue or Polypeptide
    psi: float, optional
        Psi torsion angle (degrees) between final `Residue` of self
        and first `Residue` of other.
    omega: float, optional
        Omega torsion angle (degrees) between final `Residue` of
        self and first `Residue` of other.
    phi: float, optional
        Phi torsion angle (degrees) between final `Residue` of self
        and first `Residue` of other.
    o_c_n_angle: float or None, optional
        Desired angle between O, C (final `Residue` of self) and N
        (first `Residue` of other) atoms. If `None`, default value is
        taken from `ideal_backbone_bond_angles`.
    c_n_ca_angle: float or None, optional
        Desired angle between C (final `Residue` of self) and N, CA
        (first `Residue` of other) atoms. If `None`, default value is
        taken from `ideal_backbone_bond_angles`.
    c_n_length: float or None, optional
        Desired peptide bond length between final `Residue` of self
        and first `Residue` of other. If `None`, default value is taken
        from `ideal_backbone_bond_lengths`.
    relabel: bool, optional
        If `True`, `relabel_all` is run on self before returning.

    Raises
    ------
    TypeError:
        If other is not a `Residue` or a Polypeptide.
    """
    # Promote a lone Residue to a one-Residue Polypeptide.
    if isinstance(other, Residue):
        other = Polypeptide([other])
    if not isinstance(other, Polypeptide):
        raise TypeError(
            'Only Polypeptide or Residue objects can be joined to a Polypeptide')
    # Ideal peptide-bond geometry differs between cis and trans;
    # |omega| >= 90 degrees is treated as trans.
    if abs(omega) >= 90:
        peptide_conformation = 'trans'
    else:
        peptide_conformation = 'cis'
    if o_c_n_angle is None:
        o_c_n_angle = ideal_backbone_bond_angles[peptide_conformation]['o_c_n']
    if c_n_ca_angle is None:
        c_n_ca_angle = ideal_backbone_bond_angles[peptide_conformation]['c_n_ca']
    if c_n_length is None:
        c_n_length = ideal_backbone_bond_lengths['c_n']
    r1 = self[-1]
    r1_ca = r1['CA']._vector
    r1_c = r1['C']._vector
    r1_o = r1['O']._vector
    # p1 is point that will be used to position the N atom of r2.
    p1 = r1_o[:]
    # rotate p1 by o_c_n_angle, about axis perpendicular to the
    # r1_ca, r1_c, r1_o plane, passing through r1_c.
    axis = numpy.cross((r1_ca - r1_c), (r1_o - r1_c))
    q = Quaternion.angle_and_axis(angle=o_c_n_angle, axis=axis)
    p1 = q.rotate_vector(v=p1, point=r1_c)
    # Ensure p1 is separated from r1_c by the correct distance.
    p1 = r1_c + (c_n_length * unit_vector(p1 - r1_c))
    # rotate p1 and r1['O'] by to obtain desired psi value at the join.
    measured_psi = dihedral(r1['N'], r1['CA'], r1['C'], p1)
    q = Quaternion.angle_and_axis(
        angle=(psi - measured_psi), axis=(r1_c - r1_ca))
    p1 = q.rotate_vector(v=p1, point=r1_c)
    r1['O']._vector = q.rotate_vector(v=r1_o, point=r1_c)
    # translate other so that its first N atom is at p1
    other.translate(vector=(p1 - other[0]['N']._vector))
    # rotate other so that c_n_ca angle is correct.
    v1 = r1_c - other[0]['N']._vector
    v2 = other[0]['CA']._vector - other[0]['N']._vector
    measured_c_n_ca = angle_between_vectors(v1, v2)
    axis = numpy.cross(v1, v2)
    other.rotate(angle=(c_n_ca_angle - measured_c_n_ca),
                 axis=axis, point=other[0]['N']._vector)
    # rotate other to obtain desired omega and phi values at the join
    measured_omega = dihedral(
        r1['CA'], r1['C'], other[0]['N'], other[0]['CA'])
    other.rotate(angle=(omega - measured_omega),
                 axis=(other[0]['N'] - r1['C']), point=other[0]['N']._vector)
    measured_phi = dihedral(
        r1['C'], other[0]['N'], other[0]['CA'], other[0]['C'])
    other.rotate(angle=(phi - measured_phi),
                 axis=(other[0]['CA'] - other[0]['N']), point=other[0]['CA']._vector)
    self.extend(other)
    if relabel:
        self.relabel_all()
    # The geometry has changed, so any force-field assignment is stale.
    self.tags['assigned_ff'] = False
    return
def n_join(self, other, psi=-40.76, omega=-178.25, phi=-65.07,
           o_c_n_angle=None, c_n_ca_angle=None, c_n_length=None, relabel=True):
    """Joins other to self at the N-terminus via a peptide bond.

    Notes
    -----
    This function directly modifies self. It does not return a new object.

    Parameters
    ----------
    other: Residue or Polypeptide
    psi: float
        Psi torsion angle (degrees) between final `Residue` of other
        and first `Residue` of self.
    omega: float
        Omega torsion angle (degrees) between final `Residue` of
        other and first `Residue` of self.
    phi: float
        Phi torsion angle (degrees) between final `Residue` of other
        and first `Residue` of self.
    o_c_n_angle: float or None
        Desired angle between O, C (final `Residue` of other) and N
        (first `Residue` of self) atoms. If `None`, default value is
        taken from `ideal_backbone_bond_angles`.
    c_n_ca_angle: float or None
        Desired angle between C (final `Residue` of other) and N, CA
        (first `Residue` of self) atoms. If `None`, default value is taken
        from `ideal_backbone_bond_angles`.
    c_n_length: float or None
        Desired peptide bond length between final `Residue` of other
        and first `Residue` of self. If None, default value is taken
        from ideal_backbone_bond_lengths.
    relabel: bool
        If True, relabel_all is run on self before returning.

    Raises
    ------
    TypeError:
        If other is not a `Residue` or a `Polypeptide`
    """
    # Promote a lone Residue to a one-Residue Polypeptide.
    if isinstance(other, Residue):
        other = Polypeptide([other])
    if not isinstance(other, Polypeptide):
        raise TypeError(
            'Only Polypeptide or Residue objects can be joined to a Polypeptide')
    # Ideal peptide-bond geometry differs between cis and trans;
    # |omega| >= 90 degrees is treated as trans.
    if abs(omega) >= 90:
        peptide_conformation = 'trans'
    else:
        peptide_conformation = 'cis'
    if o_c_n_angle is None:
        o_c_n_angle = ideal_backbone_bond_angles[peptide_conformation]['o_c_n']
    if c_n_ca_angle is None:
        c_n_ca_angle = ideal_backbone_bond_angles[peptide_conformation]['c_n_ca']
    if c_n_length is None:
        c_n_length = ideal_backbone_bond_lengths['c_n']
    r1 = self[0]
    r1_n = r1['N']._vector
    r1_ca = r1['CA']._vector
    r1_c = r1['C']._vector
    # p1 is point that will be used to position the C atom of r2.
    p1 = r1_ca[:]
    # rotate p1 by c_n_ca_angle, about axis perpendicular to the
    # r1_n, r1_ca, r1_c plane, passing through r1_ca.
    axis = numpy.cross((r1_ca - r1_n), (r1_c - r1_n))
    q = Quaternion.angle_and_axis(angle=c_n_ca_angle, axis=axis)
    p1 = q.rotate_vector(v=p1, point=r1_n)
    # Ensure p1 is separated from r1_n by the correct distance.
    p1 = r1_n + (c_n_length * unit_vector(p1 - r1_n))
    # translate other so that its final C atom is at p1
    other.translate(vector=(p1 - other[-1]['C']._vector))
    # Force CA-C=O-N to be in a plane, and fix O=C-N angle accordingly
    measured_dihedral = dihedral(
        other[-1]['CA'], other[-1]['C'], other[-1]['O'], r1['N'])
    desired_dihedral = 180.0
    axis = other[-1]['O'] - other[-1]['C']
    other.rotate(angle=(measured_dihedral - desired_dihedral),
                 axis=axis, point=other[-1]['C']._vector)
    axis = (numpy.cross(other[-1]['O'] - other[-1]
                        ['C'], r1['N'] - other[-1]['C']))
    measured_o_c_n = angle_between_vectors(
        other[-1]['O'] - other[-1]['C'], r1['N'] - other[-1]['C'])
    other.rotate(angle=(measured_o_c_n - o_c_n_angle),
                 axis=axis, point=other[-1]['C']._vector)
    # rotate other to obtain desired phi, omega, psi values at the join.
    measured_phi = dihedral(other[-1]['C'], r1['N'], r1['CA'], r1['C'])
    other.rotate(angle=(phi - measured_phi),
                 axis=(r1_n - r1_ca), point=r1_ca)
    measured_omega = dihedral(
        other[-1]['CA'], other[-1]['C'], r1['N'], r1['CA'])
    other.rotate(angle=(measured_omega - omega),
                 axis=(r1['N'] - other[-1]['C']), point=r1_n)
    measured_psi = dihedral(
        other[-1]['N'], other[-1]['CA'], other[-1]['C'], r1['N'])
    other.rotate(angle=-(measured_psi - psi), axis=(other[-1]['CA'] - other[-1]['C']),
                 point=other[-1]['CA']._vector)
    # Prepend other's monomers so self now starts with other.
    self._monomers = other._monomers + self._monomers
    if relabel:
        self.relabel_all()
    # The geometry has changed, so any force-field assignment is stale.
    self.tags['assigned_ff'] = False
    return
def tag_secondary_structure(self, force=False):
    """Tags each `Residue` of the `Polypeptide` with secondary structure.

    Notes
    -----
    DSSP must be available to call. Check by running
    `isambard.external_programs.dssp.test_dssp`. If DSSP is not
    available, please follow instruction here to add it:
    https://github.com/woolfson-group/isambard#external-programs
    For more information on DSSP see [1].

    References
    ----------
    .. [1] Kabsch W, Sander C (1983) "Dictionary of protein
       secondary structure: pattern recognition of hydrogen-bonded
       and geometrical features", Biopolymers, 22, 2577-637.

    Parameters
    ----------
    force : bool, optional
        If `True` the tag will be run even if `Residues` are
        already tagged.
    """
    already_tagged = all(
        'secondary_structure' in monomer.tags for monomer in self._monomers)
    if already_tagged and not force:
        return
    dssp_out = run_dssp(self.pdb, path=False)
    # DSSP may be unavailable or fail; leave tags untouched in that case.
    if dssp_out is None:
        return
    dssp_ss_list = extract_all_ss_dssp(dssp_out, path=False)
    for monomer, dssp_ss in zip(self._monomers, dssp_ss_list):
        monomer.tags['secondary_structure'] = dssp_ss[1]
    return
def tag_residue_solvent_accessibility(self, tag_type=False, tag_total=False,
                                      force=False, include_hetatms=False):
    """Tags `Residues` with relative residue solvent accessibility.

    Notes
    -----
    THIS FUNCTIONALITY REQUIRES NACCESS.
    This function tags the Monomer with the *relative* RSA of
    the *whole side chain*, i.e. column 2 of the .rsa file that
    NACCESS writes.

    References
    ----------
    .. [1] Hubbard,S.J. & Thornton, J.M. (1993), 'NACCESS',
       Computer Program, Department of Biochemistry and Molecular
       Biology, University College London.

    Parameters
    ----------
    force : bool, optional
        If `True`, the tag will be run even if `Residues` are
        already tagged.
    tag_type : str, optional
        Specifies the name of the tag. Defaults to
        'residue_solvent_accessibility'. Useful for specifying more
        than one tag, e.g. if the Polymer is part of an Assembly.
    tag_total : bool, optional
        If True then the total rsa of the Polymer will be tagged
        in the 'total accessibility' tag.
    include_hetatms : bool, optional
        If true then NACCESS will run with the -h flag and will
        include heteroatom solvent accessibility where it can.
        Helpful if your file has MSE residues that you don't
        convert to MET, but best check if they are there
        before using the flag.
    """
    # Any falsy tag_type (the default False, '', None) selects the
    # standard tag name. (Replaces a redundant `tag_type = tag_type`.)
    if not tag_type:
        tag_type = 'residue_solvent_accessibility'
    tagged = [tag_type in x.tags.keys() for x in self._monomers]
    if (not all(tagged)) or force:
        naccess_rsa_list, total = extract_residue_accessibility(run_naccess(
            self.pdb, mode='rsa', path=False,
            include_hetatms=include_hetatms), path=False, get_total=tag_total)
        for monomer, naccess_rsa in zip(self._monomers, naccess_rsa_list):
            monomer.tags[tag_type] = naccess_rsa
        if tag_total:
            self.tags['total_polymer_accessibility'] = total
    return
def tag_dssp_solvent_accessibility(self, force=False):
    """Tags each `Residues` Polymer with its solvent accessibility.

    Notes
    -----
    For more about DSSP's solvent accessibilty metric, see:
    http://swift.cmbi.ru.nl/gv/dssp/HTML/descrip.html#ACC

    References
    ----------
    .. [1] Kabsch W, Sander C (1983) "Dictionary of protein
       secondary structure: pattern recognition of hydrogen-bonded
       and geometrical features", Biopolymers, 22, 2577-637.

    Parameters
    ----------
    force : bool, optional
        If `True` the tag will be run even if `Residues` are
        already tagged.
    """
    already_tagged = all(
        'dssp_acc' in monomer.tags for monomer in self._monomers)
    if already_tagged and not force:
        return
    dssp_out = run_dssp(self.pdb, path=False)
    # DSSP may be unavailable or fail; leave tags untouched in that case.
    if dssp_out is None:
        return
    dssp_acc_list = extract_solvent_accessibility_dssp(
        dssp_out, path=False)
    for monomer, dssp_acc in zip(self._monomers, dssp_acc_list):
        monomer.tags['dssp_acc'] = dssp_acc[-1]
    return
def tag_sidechain_dihedrals(self, force=False):
    """Tags each monomer with side-chain dihedral (chi) angles.

    Parameters
    ----------
    force : bool, optional
        If `True` the tag will be run even if `Residues` are
        already tagged.
    """
    tagged = ['chi_angles' in x.tags.keys() for x in self._monomers]
    if (not all(tagged)) or force:
        for monomer in self._monomers:
            chi_angles = measure_sidechain_torsion_angles(
                monomer, verbose=False)
            monomer.tags['chi_angles'] = chi_angles
    return
def tag_torsion_angles(self, force=False):
    """Tags each Monomer of the Polymer with its omega, phi and psi torsion angle.

    Parameters
    ----------
    force : bool, optional
        If `True` the tag will be run even if `Residues` are
        already tagged.
    """
    already_tagged = all('omega' in m.tags for m in self._monomers)
    if already_tagged and not force:
        return
    torsion_angles = measure_torsion_angles(self._monomers)
    for monomer, angles in zip(self._monomers, torsion_angles):
        omega, phi, psi = angles
        monomer.tags['omega'] = omega
        monomer.tags['phi'] = phi
        monomer.tags['psi'] = psi
        # Convenience tag holding all three angles together.
        monomer.tags['tas'] = (omega, phi, psi)
    return
def rise_per_residue(self):
    """List of rise per residue values along the `Polypeptide`.

    Notes
    -----
    Calculated from `Polypeptide.primitive`.
    """
    return self.primitive.rise_per_residue()
def radii_of_curvature(self):
    """List of radius of curvature values along the `Polypeptide`.

    Notes
    -----
    Calculated from `Polypeptide.primitive`.
    """
    return self.primitive.radii_of_curvature()
def tag_ca_geometry(self, force=False, reference_axis=None,
                    reference_axis_name='ref_axis'):
    """Tags each `Residue` with rise_per_residue, radius_of_curvature and residues_per_turn.

    Parameters
    ----------
    force : bool, optional
        If `True` the tag will be run even if `Residues` are already
        tagged.
    reference_axis : list(numpy.array or tuple or list), optional
        Coordinates to feed to geometry functions that depend on
        having a reference axis.
    reference_axis_name : str, optional
        Used to name the keys in tags at `Polypeptide` and `Residue` level.
    """
    tagged = ['rise_per_residue' in x.tags.keys() for x in self._monomers]
    if (not all(tagged)) or force:
        # Assign tags None if Polymer is too short to have a primitive.
        if len(self) < 7:
            rprs = [None] * len(self)
            rocs = [None] * len(self)
            rpts = [None] * len(self)
        else:
            rprs = self.rise_per_residue()
            rocs = self.radii_of_curvature()
            rpts = residues_per_turn(self)
        for monomer, rpr, roc, rpt in zip(self._monomers, rprs, rocs, rpts):
            monomer.tags['rise_per_residue'] = rpr
            monomer.tags['radius_of_curvature'] = roc
            monomer.tags['residues_per_turn'] = rpt
    # Functions that require a reference_axis.
    # Only run when the axis has one point per Residue.
    if (reference_axis is not None) and (len(reference_axis) == len(self)):
        # Set up arguments to pass to functions.
        ref_axis_args = dict(p=self,
                             reference_axis=reference_axis,
                             tag=True,
                             reference_axis_name=reference_axis_name)
        # Run the functions.
        polymer_to_reference_axis_distances(**ref_axis_args)
        crick_angles(**ref_axis_args)
        alpha_angles(**ref_axis_args)
    return
def valid_backbone_bond_lengths(self, atol=0.1):
    """True if all backbone bonds are within atol Angstroms of the expected distance.

    Notes
    -----
    Ideal bond lengths taken from [1].

    References
    ----------
    .. [1] Schulz, G. E, and R. Heiner Schirmer. Principles Of
       Protein Structure. New York: Springer-Verlag, 1979.

    Parameters
    ----------
    atol : float, optional
        Tolerance value in Angstoms for the absolute deviation
        away from ideal backbone bond lengths.
    """
    bond_lengths = self.backbone_bond_lengths
    # Intra-residue bonds occur once per Residue; the inter-residue
    # peptide bond (c_n) occurs once per successive pair.
    expected_counts = dict(n_ca=len(self), ca_c=len(self),
                           c_o=len(self), c_n=len(self) - 1)
    return all(
        numpy.allclose(bond_lengths[bond],
                       [ideal_backbone_bond_lengths[bond]] * count,
                       atol=atol)
        for bond, count in expected_counts.items())
def valid_backbone_bond_angles(self, atol=20):
    """True if all backbone bond angles are within atol degrees of their expected values.

    Notes
    -----
    Ideal bond angles taken from [1].

    References
    ----------
    .. [1] Schulz, G. E, and R. Heiner Schirmer. Principles Of
       Protein Structure. New York: Springer-Verlag, 1979.

    Parameters
    ----------
    atol : float, optional
        Tolerance value in degrees for the absolute deviation
        away from ideal backbone bond angles.
    """
    bond_angles = self.backbone_bond_angles
    # Classify each residue's peptide bond as cis or trans from its
    # omega angle; the ideal angle values differ between the two.
    omegas = [x[0] for x in measure_torsion_angles(self)]
    trans = ['trans' if (omega is None) or (
        abs(omega) >= 90) else 'cis' for omega in omegas]
    ideal_n_ca_c = [ideal_backbone_bond_angles[x]['n_ca_c'] for x in trans]
    # ca_c_o of residue i depends on the conformation of the following
    # peptide bond, hence the i + 1 offset; the C-terminal residue has
    # no following bond and defaults to the 'trans' value.
    ideal_ca_c_o = [ideal_backbone_bond_angles[trans[i + 1]]
                    ['ca_c_o'] for i in range(len(trans) - 1)]
    ideal_ca_c_o.append(ideal_backbone_bond_angles['trans']['ca_c_o'])
    # Cross-peptide-bond angles use the conformation of the residue on
    # the N-terminal side of each bond (trans[1:]).
    ideal_ca_c_n = [ideal_backbone_bond_angles[x]['ca_c_n']
                    for x in trans[1:]]
    ideal_c_n_ca = [ideal_backbone_bond_angles[x]['c_n_ca']
                    for x in trans[1:]]
    a1 = numpy.allclose(bond_angles['n_ca_c'], [ideal_n_ca_c], atol=atol)
    a2 = numpy.allclose(bond_angles['ca_c_o'], [ideal_ca_c_o], atol=atol)
    a3 = numpy.allclose(bond_angles['ca_c_n'], [ideal_ca_c_n], atol=atol)
    a4 = numpy.allclose(bond_angles['c_n_ca'], [ideal_c_n_ca], atol=atol)
    return all([a1, a2, a3, a4])
def c_cap(self, cap='acid', cap_dihedral=False):
    """Caps C-terminus of polypeptide chain.

    Notes
    -----
    Default behaviour is to add an oxygen atom to create a
    carboxylate function at the C-terminus without changing the
    psi angle of the C-terminal residue. Alternative psi angles
    can be accessed through the cap_dihedral parameter. Will not
    remove an existing cap if one is present, though altering a
    cap of the same type will overwrite the original one.

    Parameters
    ----------
    cap : str, optional
        Type of cap to be added. Options: 'acid', 'amide'.
        Any other value is silently ignored.
    cap_dihedral : bool
        Alternate psi angle to be used when added cap.
    """
    if cap == 'acid':
        # Build a template carboxylate fragment in a local frame;
        # coordinates are the fragment's reference geometry.
        acetate = Ligand(atoms=None, mol_code='UNK',
                         is_hetero=True, ampal_parent=Polypeptide)
        atoms = OrderedDict()
        atoms['CA'] = Atom([-1.4210, 0.4120, 0.0000], 'C',
                           res_label='CA', ampal_parent=Ligand)
        atoms['C'] = Atom([0.0120, -0.0560, 0.0020], 'C',
                          res_label='C', ampal_parent=Ligand)
        atoms['O'] = Atom([0.2610, -1.2380, 0.0000], 'O',
                          res_label='O', ampal_parent=Ligand)
        atoms['OXT'] = Atom([1.0110, 0.8400, 0.0000],
                            'O', res_label='OXT', ampal_parent=Ligand)
        acetate.atoms = atoms
        # Superimpose the template's CA->C vector onto the terminal
        # residue's CA->C vector.
        s1, e1, s2, e2 = [
            x._vector for x in [acetate['CA'],
                                acetate['C'],
                                self._monomers[-1]['CA'],
                                self._monomers[-1]['C']]]
        translation, angle, axis, point = find_transformations(
            s1, e1, s2, e2, radians=False)
        acetate.rotate(angle=angle, axis=axis, point=point, radians=False)
        acetate.translate(vector=translation)
        # Align the cap's carbonyl with the terminal residue's psi
        # (optionally offset by cap_dihedral).
        start_angle = dihedral(
            self._monomers[-1]['N'], self._monomers[-1]['CA'],
            self._monomers[-1]['C'], acetate['O'])
        ref_angle = dihedral(
            self._monomers[-1]['N'], self._monomers[-1]['CA'],
            self._monomers[-1]['C'], self._monomers[-1]['O'])
        if cap_dihedral is not False:
            acetate.rotate(
                ref_angle - start_angle + cap_dihedral,
                axis=acetate['C']._vector - acetate['CA']._vector,
                point=acetate['C']._vector)
        else:
            acetate.rotate(
                ref_angle - start_angle,
                axis=acetate['C']._vector - acetate['CA']._vector,
                point=acetate['C']._vector)
        # Graft the OXT atom onto the terminal residue and snap its O
        # onto the template's O position.
        acetate['OXT'].ampal_parent = self._monomers[-1]
        self._monomers[-1].atoms['OXT'] = acetate['OXT']
        diff = acetate['O']._vector - self._monomers[-1]['O']._vector
        self._monomers[-1]['O']._vector += diff
    elif cap == 'amide':
        # Build a template acetamide fragment in a local frame.
        acetamide = Ligand(atoms=None, mol_code='UNK', is_hetero=True)
        atoms = OrderedDict()
        atoms['CA'] = Atom([-0.4040, 0.0000, 1.4030], 'C', res_label='CA')
        atoms['C'] = Atom([0.0580, 0.0000, -0.0300], 'C', res_label='C')
        atoms['O'] = Atom([1.2440, 0.0000, -0.2840], 'O', res_label='O')
        atoms['NH2'] = Atom([-0.8450, 0.0000, -1.0300],
                            'N', res_label='NH2')
        acetamide.atoms = atoms
        # Superimpose the template's CA->C vector onto the terminal
        # residue's CA->C vector.
        s1, e1, s2, e2 = [
            x._vector for x in [acetamide['CA'],
                                acetamide['C'],
                                self._monomers[-1]['CA'],
                                self._monomers[-1]['C']]]
        translation, angle, axis, point = find_transformations(
            s1, e1, s2, e2, radians=False)
        acetamide.rotate(angle=angle, axis=axis,
                         point=point, radians=False)
        acetamide.translate(vector=translation)
        start_angle = dihedral(
            self._monomers[-1]['N'], self._monomers[-1]['CA'],
            self._monomers[-1]['C'], acetamide['O'])
        ref_angle = dihedral(
            self._monomers[-1]['N'], self._monomers[-1]['CA'],
            self._monomers[-1]['C'], self._monomers[-1]['O'])
        if cap_dihedral is not False:
            acetamide.rotate(
                ref_angle - start_angle + cap_dihedral,
                axis=acetamide['C']._vector - acetamide['CA']._vector,
                point=acetamide['C']._vector)
        else:
            acetamide.rotate(
                ref_angle - start_angle,
                axis=acetamide['C']._vector - acetamide['CA']._vector,
                point=acetamide['C']._vector)
        # Store only the NH2 atom as a Ligand on this chain.
        if self.ligands is None:
            self.ligands = LigandGroup(ampal_parent=self)
        amide = Ligand(mol_code='NH2', ampal_parent=self.ligands)
        amide_atoms = OrderedDict([('NH2', acetamide['NH2'])])
        amide_atoms['NH2'].ampal_parent = amide
        amide.atoms = amide_atoms
        self.ligands.append(amide)
    else:
        pass
    # The geometry has changed, so any force-field assignment is stale.
    self.tags['assigned_ff'] = False
    return
def n_cap(self, n_cap='acetyl', cap_dihedral=None):
    """Adds an N-terminal acetamide cap.

    Notes
    -----
    Default behaviour is to duplicate the dihedral angle of the
    succeeding residues such that the orientation of the carbonyl
    of the acetyl will resemble that of the first residue. This
    can be adjusted by supplying a cap_dihedral value. Currently
    only acetyl cap is supported, but this structure should work
    for other caps.

    Parameters
    ----------
    n_cap : str, optional
        Type of cap to be added. Options: 'acetyl'.
        Any other value is silently ignored.
    cap_dihedral : bool
        Alternate psi angle to be used when added cap.
    """
    if n_cap == 'acetyl':
        # Build a template N-methylacetamide fragment in a local frame.
        methylacetamide = Ligand(
            atoms=None, mol_code='UNK', is_hetero=True)
        atoms = OrderedDict()
        atoms['C'] = Atom([0.9500, -0.2290, 0.5090], 'C', res_label='C')
        atoms['CA'] = Atom([0.7450, -0.9430, 1.8040], 'C', res_label='CA')
        atoms['O'] = Atom([0.1660, -2.0230, 1.8130], 'O', res_label='O')
        atoms['N'] = Atom([1.2540, -0.2750, 2.9010], 'N', res_label='N')
        atoms['CME'] = Atom([1.1630, -0.7870, 4.2500],
                            'C', res_label='CME')
        # these coordinates seem ok, but could review
        # and use a different fragment if necessary
        methylacetamide.atoms = atoms
        # Superimpose the template's N->CME vector onto the first
        # residue's N->CA vector.
        s1, e1, s2, e2 = [
            x._vector for x in [methylacetamide['N'],
                                methylacetamide['CME'],
                                self._monomers[0]['N'],
                                self._monomers[0]['CA']]]
        translation, angle, axis, point = find_transformations(
            s1, e1, s2, e2, radians=False)
        methylacetamide.rotate(
            angle=angle, axis=axis, point=point, radians=False)
        methylacetamide.translate(vector=translation)
        # The reference dihedral is measured on the second residue,
        # so this requires at least two residues in the chain.
        start_angle = dihedral(
            methylacetamide['C'], self._monomers[0]['N'],
            self._monomers[0]['CA'], self._monomers[0]['C'])
        ref_angle = dihedral(
            self._monomers[0]['C'], self._monomers[1]['N'],
            self._monomers[1]['CA'], self._monomers[1]['C'])
        if cap_dihedral is not None:
            methylacetamide.rotate(ref_angle - start_angle + cap_dihedral,
                                   axis=methylacetamide['N']._vector -
                                   self._monomers[0]['CA']._vector,
                                   point=methylacetamide['N']._vector)
        else:
            methylacetamide.rotate(ref_angle - start_angle,
                                   axis=methylacetamide['N']._vector -
                                   self._monomers[0]['CA']._vector,
                                   point=methylacetamide['N']._vector)
        # Store only the acetyl portion (C, CA, O) as a Ligand; the
        # N and CME atoms were only needed for positioning.
        if self.ligands is None:
            self.ligands = LigandGroup(ampal_parent=self)
        acetamide = Ligand(mol_code='ACM', ampal_parent=self.ligands)
        acetamide_atoms = OrderedDict()
        acetamide_atoms['C'] = atoms['C']
        acetamide_atoms['CA'] = atoms['CA']
        acetamide_atoms['O'] = atoms['O']
        for atom in acetamide_atoms.values():
            atom.ampal_parent = acetamide
        acetamide.atoms = acetamide_atoms
        self.ligands.append(acetamide)
    else:
        pass  # just in case we want to build different caps in later
    # The geometry has changed, so any force-field assignment is stale.
    self.tags['assigned_ff'] = False
    return
class Residue(Monomer):
    """Represents an amino acid `Residue`.

    Parameters
    ----------
    atoms : OrderedDict, optional
        OrderedDict containing Atoms for the Monomer. OrderedDict
        is used to maintain the order items were added to the
        dictionary.
    mol_code : str, optional
        One or three letter code that represents the monomer.
    monomer_id : str, optional
        String used to identify the residue.
    insertion_code : str, optional
        Insertion code of monomer, used if reading from pdb.
    is_hetero : bool, optional
        True if is a hetero atom in pdb. Helps with PDB formatting.
    ampal_parent : ampal.Polypeptide, optional
        Reference to `Polypeptide` containing the `Residue`.

    Attributes
    ----------
    mol_code : str
        PDB molecule code that represents the `Residue`.
    insertion_code : str
        Insertion code of `Residue`, used if reading from pdb.
    is_hetero : bool
        True if is a hetero atom in pdb. Helps with PDB formatting.
    states : dict
        Contains an `OrderedDicts` containing atom information for each
        state available for the `Residue`.
    id : str
        String used to identify the residue.
    reference_atom : str
        The key that corresponds to the reference atom. This is used
        by various functions, for example backbone primitives are
        calculated using the atom defined using this key.
    ampal_parent : Polypeptide or None
        A reference to the `Polypeptide` containing this `Residue`.
    tags : dict
        A dictionary containing information about this AMPAL object.
        The tags dictionary is used by AMPAL to cache information
        about this object, but is also intended to be used by users
        to store any relevant information they have.

    Raises
    ------
    ValueError
        Raised if `mol_code` is not length 1 or 3.
    """

    def __init__(self, atoms=None, mol_code='UNK', monomer_id=' ',
                 insertion_code=' ', is_hetero=False, ampal_parent=None):
        # Zero-argument super() for consistency with the rest of the
        # module (Python 3 style; was super(Residue, self)).
        super().__init__(atoms, monomer_id, ampal_parent=ampal_parent)
        # Accept either a 3-letter or a 1-letter amino acid code and
        # derive the other representation.
        if len(mol_code) == 3:
            self.mol_code = mol_code
            self.mol_letter = get_aa_letter(mol_code)
        elif len(mol_code) == 1:
            self.mol_code = get_aa_code(mol_code)
            self.mol_letter = mol_code
        else:
            raise ValueError(
                'Monomer requires either a 1-letter or a 3-letter '
                'amino acid code ({})'.format(mol_code))
        self.insertion_code = insertion_code
        self.is_hetero = is_hetero
        # CA is the reference atom used e.g. for backbone primitives.
        self.reference_atom = 'CA'
def __repr__(self):
    """Short human-readable summary of the Residue."""
    atom_count = len(self.atoms)
    noun = 'Atom' if atom_count == 1 else 'Atoms'
    return '<Residue containing {} {}. Residue code: {}>'.format(
        atom_count, noun, self.mol_code)
@property
def backbone(self):
    """Returns a new `Residue` containing only the backbone atoms.

    Returns
    -------
    bb_monomer : Residue
        `Residue` containing only the backbone atoms of the original
        `Monomer`.

    Raises
    ------
    KeyError
        Raised if the `atoms` dict does not contain all of the
        backbone atoms (N, CA, C, O).
    """
    try:
        backbone = OrderedDict([('N', self.atoms['N']),
                                ('CA', self.atoms['CA']),
                                ('C', self.atoms['C']),
                                ('O', self.atoms['O'])])
    except KeyError:
        # Report exactly which backbone atoms are absent.
        missing_atoms = filter(lambda x: x not in self.atoms.keys(),
                               ('N', 'CA', 'C', 'O')
                               )
        # NOTE(review): assumes ampal_parent is set; a parentless
        # Residue would raise AttributeError here instead — confirm.
        raise KeyError('Error in residue {} {} {}, missing ({}) atoms. '
                       '`atoms` must be an `OrderedDict` with coordinates '
                       'defined for the backbone (N, CA, C, O) atoms.'
                       .format(self.ampal_parent.id, self.mol_code,
                               self.id, ', '.join(missing_atoms)))
    bb_monomer = Residue(backbone, self.mol_code, monomer_id=self.id,
                         insertion_code=self.insertion_code,
                         is_hetero=self.is_hetero)
    return bb_monomer
@property
def unique_id(self):
    """Generates a tuple that uniquely identifies a `Monomer` in an `Assembly`.

    Notes
    -----
    The unique_id will uniquely identify each monomer within a polymer.
    If each polymer in an assembly has a distinct id, it will uniquely
    identify each monomer within the assembly.
    The hetero-flag is defined as in Biopython as a string that is
    either a single whitespace in the case of a non-hetero atom,
    or 'H_' plus the name of the hetero-residue (e.g. 'H_GLC' in
    the case of a glucose molecule), or 'W' in the case of a water
    molecule.
    For more information, see the Biopython documentation or this
    Biopython wiki page:
    http://biopython.org/wiki/The_Biopython_Structural_Bioinformatics_FAQ

    Returns
    -------
    unique_id : tuple
        unique_id[0] is the polymer_id unique_id[1] is a triple
        of the hetero-flag, the monomer id (residue number) and the
        insertion code.
    """
    if not self.is_hetero:
        hetero_flag = ' '
    elif self.mol_code == 'HOH':
        hetero_flag = 'W'
    else:
        hetero_flag = 'H_{0}'.format(self.mol_code)
    return self.ampal_parent.id, (hetero_flag, self.id, self.insertion_code)
@property
def side_chain(self):
"""List of the side-chain atoms (R-group).
Notes
-----
Returns empty list for glycine.
Returns
-------
side_chain_atoms: list(`Atoms`)
"""
side_chain_atoms = []
if self.mol_code != 'GLY':
covalent_bond_graph = generate_covalent_bond_graph(
find_covalent_bonds(self))
try:
subgraphs = generate_bond_subgraphs_from_break(
covalent_bond_graph, self['CA'], self['CB'])
if len(subgraphs) == 1:
subgraphs = generate_bond_subgraphs_from_break(
subgraphs[0], self['CD'], self['N'])
if len(subgraphs) == 2:
for g in subgraphs:
if self['CB'] in g:
side_chain_atoms = g.nodes()
break
except:
warning_message = "Malformed PDB for Residue {0}: {1}.".format(
self.id, self)
if 'CB' in self.atoms.keys():
side_chain_atoms.append(self['CB'])
warning_message += " Side-chain is just the CB atom."
else:
warning_message += " Empty side-chain."
warnings.warn(warning_message, MalformedPDBWarning)
return side_chain_atoms
# TODO fix behaviour to allow option not to include residue itself
def side_chain_environment(self, cutoff=4, include_neighbours=True,
inter_chain=True, include_ligands=False, include_solvent=False):
"""Finds `Residues` with any atom within the cutoff distance of side-chain.
Notes
-----
Includes the parent residue in the list.
Parameters
----------
cutoff : float, optional
Maximum inter-atom distance for residue to be included.
Defaults to 4.
include_neighbours : bool, optional
If `false`, does not return `Residue` at i-1, i+1 positions
in same chain as `Residue`.
inter_chain : bool, optional
If `false`, only includes nearby `Residue` in the same chain
as the `Residue`.
include_ligands : bool, optional
If `true`, `Residue` classed as ligands but not identified as
solvent will be included in the environment.
include_solvent : bool, optional
If `true`, Monomers classed as categorised as solvent
will be included in the environment.
Returns
-------
sc_environment : list
List of monomers within cutoff distance of side-chain.
"""
if self.mol_code == 'GLY':
return [self]
side_chain_dict = {x: {y: self.states[x][y]
for y in self.states[x] if self.states[x][y] in
self.side_chain} for x in self.states}
side_chain_monomer = Monomer(
atoms=side_chain_dict, monomer_id=self.id,
ampal_parent=self.ampal_parent)
sc_environment = side_chain_monomer.environment(
cutoff=cutoff, include_ligands=include_ligands,
include_neighbours=include_neighbours,
include_solvent=include_solvent, inter_chain=inter_chain)
return sc_environment
# Module authorship metadata.
__author__ = ('Jack W. Heal, Christopher W. Wood, Gail J. Bartlett, '
              'Andrew R. Thomson, Kieran L. Hudson')
| {
"repo_name": "woolfson-group/isambard",
"path": "isambard/ampal/protein.py",
"copies": "1",
"size": "56780",
"license": "mit",
"hash": 7422373198088210000,
"line_mean": 40.3245997089,
"line_max": 96,
"alpha_frac": 0.5544029588,
"autogenerated": false,
"ratio": 3.697095976038547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9749957797919779,
"avg_score": 0.0003082273837536026,
"num_lines": 1374
} |
# amplicon.py - classes for fitting amplicon distributions from PSDs
#
# v 1.1.1
# rev 2017-11-29 (MS: minor bug fix)
# Notes:
import scipy.optimize
import scipy.special
import scipy.stats
import numpy as np
import pandas as pd
from . import extra_tools
def func_logis(x, inter, asym, xmid, scal):
    """Evaluate a four-parameter logistic curve at the given point(s).

    `inter` is the lower intercept, `asym` the rise above it, `xmid`
    the midpoint and `scal` the steepness of the transition.
    """
    z = (x - xmid) / scal
    return inter + asym / (1 + np.exp(-z))
class PSDLogis(object):
    """Fit a logistic curve to a power spectral density (PSD) and derive
    amplicon-size statistics from the fit.

    Parameters
    ----------
    freq : np.array
        Frequencies (1/BP) at which the PSD was evaluated.
    psd : np.array
        Power spectral density values.
    """

    def __init__(self, freq, psd):
        self.freq = {'raw': freq}
        self.psd = {'raw': psd}
        # fit_logistic() flips this to True after a successful fit;
        # previously the attribute did not exist until a fit was run,
        # so amplicon_range()/logistic_dist() raised AttributeError.
        self.success = False

    def fit_logistic(self, bulk="db/bulk_1x.spec"):
        """Fit a logistic curve with intercept to the PSD.

        The logistic is fit to the "flipped" psd where the x-axis is in
        BP not 1/BP.

        Parameters
        ----------
        bulk : str
            Path to a tab-separated bulk reference spectrum used to
            normalize the PSD.
        """
        psd = self.psd['raw']
        freq = self.freq['raw']
        # Load bulk spectrum.
        # pd.Series.from_csv() and .as_matrix() were removed in pandas
        # 1.0; read_csv + to_numpy is the supported equivalent.
        psd_bulk = pd.read_csv(
            bulk, index_col=0, header=None, sep="\t").iloc[:, 0].to_numpy()
        # Normalize psd by the bulk spectrum (dB scale) and keep only
        # the low-frequency (long-period) portion.
        psd_norm = 10 * np.log10(psd / psd_bulk)
        psd_cut = psd_norm[freq < 1e-3]
        freq_cut = freq[freq < 1e-3]
        freq_scale = -np.log10(freq_cut)
        # Fit the curve.
        try:
            popt, pcov = scipy.optimize.curve_fit(func_logis, freq_scale,
                                                  psd_cut)
            # The scale parameter must be positive; it may come out
            # negative for bulk samples.
            self.success = bool(popt[-1] > 0)
        except RuntimeError:
            # Curve fitting sometimes fails for bulk samples.
            popt = [np.nan, np.nan, np.nan, np.nan]
            self.success = False
        self.psd['logis'] = psd_cut
        self.freq['logis'] = freq_scale
        self.popt = popt
        self.inter, self.asym, self.xmid, self.scal = popt

    def predict_vals(self, point=None):
        """Return predictions for values of the logistic curve.

        `point` may be None (use the stored fit abscissa), the name of
        an attribute on self, or a scalar/array of x values. The former
        `if point:` truthiness test silently dropped point=0 and raised
        ValueError for multi-element arrays; an explicit None check is
        used instead.
        """
        if point is None:
            vals = self.freq['logis']
        elif isinstance(point, str):
            vals = getattr(self, point)
        else:
            vals = point
        return func_logis(vals, *self.popt)

    def amplicon_range(self):
        """Calculate median, mean and 95% bounds on amplicon sizes.

        Samples the fitted logistic distribution (in log10 space) and
        converts back to BP. Returns [median, mean, lower_95, upper_95];
        all zeros if no successful fit is available.
        """
        if self.success:
            print(self.xmid, self.scal)
            x = scipy.stats.logistic.rvs(loc=self.xmid, scale=self.scal,
                                         size=100000)
            x_scale = 10**x
            self.mean = np.mean(x_scale)
            self.median = np.median(x_scale)
            self.lower_95 = np.percentile(x_scale, 5)
            self.upper_95 = np.percentile(x_scale, 95)
        else:
            print('pass')
            self.mean = 0
            self.median = 0
            self.lower_95 = 0
            self.upper_95 = 0
        return [self.median, self.mean, self.lower_95, self.upper_95]

    def logistic_dist(self, point=None):
        """Return the logistic pdf of the amplicon sizes
        (in log10 coordinates).

        Returns zeros of matching length when no successful fit exists.
        """
        if point is None:
            freq = -np.log10(self.freq['raw'][self.freq['raw'] < 3e-3])
        else:
            freq = point
        if self.success:
            pdf = scipy.stats.logistic.pdf(freq, loc=self.xmid,
                                           scale=self.scal)
        else:
            pdf = np.zeros(len(freq))
        self.freq['dist'] = freq
        return pdf
class AmplDist(object):
    """Fit parametric curves (erf / logistic / gamma) to a power
    spectral density and derive amplicon-size statistics from the fit.

    Parameters
    ----------
    freq : np.array
        Frequencies (1/BP) at which the PSD was evaluated.
    psd : np.array
        Power spectral density values.
    """

    def __init__(self, freq, psd):
        self.freq = {'raw': freq}
        self.psd = {'raw': psd}
        self.popt = {}
        # fit_curve() flips this to True after a successful fit;
        # previously the attribute did not exist until a fit was run.
        self.success = False

    @staticmethod
    def func_logis(x, inter, asym, xmid, scal):
        """Return logistic curve values for the given point(s)."""
        return inter + asym / (1 + np.exp(-(x - xmid) / scal))

    @staticmethod
    def func_erf(x, inter, asym, mu, sigma):
        """Return erf curve values for the given point(s).

        The erf function is fit s.t. mu and sigma have a Gaussian
        interpretation.
        """
        return inter + asym * scipy.special.erf((x-mu) / (np.sqrt(2) * sigma))

    @staticmethod
    def func_gamma(x, inter, asym, alpha, beta):
        """Return incomplete-gamma curve values for the given point(s)."""
        return inter + asym * scipy.special.gammainc(alpha, beta*x)

    def _frozen_dist(self, popt, method):
        """Return the frozen scipy distribution implied by fitted params.

        Raises
        ------
        ValueError
            For an unknown `method` (previously this surfaced as an
            UnboundLocalError deep inside the caller).
        """
        if method == 'erf':
            # erf fit parameters are Gaussian loc/scale.
            return scipy.stats.norm(loc=popt[2], scale=popt[3])
        elif method == 'logis':
            return scipy.stats.logistic(loc=popt[2], scale=popt[3])
        elif method == 'gamma':
            return scipy.stats.gamma(a=popt[2], scale=1/popt[3])
        raise ValueError("Unknown curve-fit method: {}".format(method))

    def fit_curve(self, method='erf', bulk="bulk_1x.smooth3.spec", shift=0):
        """Fit a curve of the specified type to the power spectral density.

        The curve is fit to the "flipped" psd where the x-axis is in BP
        not 1/BP.

        Parameters
        ----------
        method : str
            One of 'erf', 'logis', 'gamma'.
        bulk : str
            Name of the bulk reference spectrum to normalize by.
        shift : float
            Offset subtracted from the log10 period axis; stored as the
            last element of the saved parameter vector.
        """
        psd = self.psd['raw']
        freq = self.freq['raw']
        f_fit = getattr(self, "func_{}".format(method))
        # Normalize psd by the bulk spectrum (dB) and keep only the
        # low-frequency (long-period) portion.
        psd_bulk = extra_tools.load_bulk_psd(bulk)
        psd_norm = 10 * np.log10(psd / psd_bulk)
        psd_cut = psd_norm[freq < 1e-3]
        freq_cut = freq[freq < 1e-3]
        period = -np.log10(freq_cut) - shift
        # Fit the curve.
        try:
            popt, pcov = scipy.optimize.curve_fit(f_fit, period, psd_cut,
                                                  method='trf')
            # Make sure the scale parameter is positive.
            if popt[-1] < 0:
                popt[-1] = np.abs(popt[-1])
            self.success = True
        except RuntimeError:
            # Curve fitting sometimes fails for bulk samples.
            popt = [np.nan, np.nan, np.nan, np.nan]
            self.success = False
        self.psd[method] = psd_cut
        self.freq[method] = period
        # Store the shift alongside the four curve parameters.
        self.popt[method] = np.append(popt, shift)

    def predict_vals(self, method='erf', point=None):
        """Return predictions for values of the fitted curve.

        `point` may be None (use the stored fit abscissa), the name of
        an attribute on self, or a scalar/array of x values. The former
        `if point:` truthiness test silently dropped point=0 and raised
        ValueError for multi-element arrays; an explicit None check is
        used instead.
        """
        fit_fnc = getattr(self, "func_{}".format(method))
        if point is None:
            vals = self.freq[method]
        elif isinstance(point, str):
            vals = getattr(self, point)
        else:
            vals = point
        # The stored shift (last element) is not a curve parameter.
        return fit_fnc(vals, *self.popt[method][:-1])

    def amplicon_range(self, popt=None, method='erf'):
        """Calculate median, mean and 95% bounds on amplicon sizes.

        Input:
            popt: params of fitted curve (optional. If not supplied,
                  uses class popt)
            method: curve-fitting method
        Returns [median, mean, lower_95, upper_95] in BP, or all zeros
        when no usable fit is available.
        """
        if popt is None or np.isnan(popt[0]):
            popt = self.popt[method] if self.success else None
        if popt is None:
            print('pass')
            return [0, 0, 0, 0]
        shift = popt[-1]
        dist = self._frozen_dist(popt, method)
        # Sample the fitted distribution in log10 space, undo the shift,
        # and convert to BP.
        x_scale = 10**(dist.rvs(size=100000) + shift)
        if method == 'erf':
            # Gaussian fit in log10 space: lognormal mean/median have
            # closed forms.
            mean = np.exp(np.log(10)*popt[2] + 0.5 * np.log(10)**2 * popt[3]**2)
            median = np.exp(np.log(10)*popt[2])
        else:
            mean = np.mean(x_scale)
            median = np.median(x_scale)
        lower_95 = np.percentile(x_scale, 5)
        upper_95 = np.percentile(x_scale, 95)
        return [median, mean, lower_95, upper_95]

    def amplicon_dist(self, popt=None, method='erf'):
        """Calculate the distribution of amplicon sizes from a curve fit.

        Input:
            popt: params of fitted curve (optional. If not supplied,
                  uses class popt)
            method: curve-fitting method
        When popt is not supplied the evaluation points are stored in
        self.freq['dist'] and only the pdf is returned; otherwise the
        pair (pdf, sizes) is returned.
        """
        store = popt is None
        if store and self.success:
            popt = self.popt[method]
        if popt is not None:
            dist = self._frozen_dist(popt, method)
            # Evaluate the pdf over the central 99.8% of the fit.
            vals = np.linspace(dist.ppf(0.001), dist.ppf(0.999), 100)
            pdf = dist.pdf(vals)
        else:
            pdf = np.zeros(100)
            vals = np.linspace(2, 6, 100)
        if store:
            self.freq['dist'] = 10**vals
            return pdf
        return pdf, 10**vals

    @staticmethod
    def param_names(method):
        """Return parameter names for a curve fit method."""
        d = {'erf': ['intercept', 'scale', 'mu', 'sigma', 'shift'],
             'logis': ['intercept', 'asymptote', 'xmid', 'scale', 'shift'],
             'gamma': ['intercept', 'scale', 'alpha', 'beta', 'shift'],
             }
        return d[method]
| {
"repo_name": "parklab/PaSDqc",
"path": "PaSDqc/amplicon.py",
"copies": "1",
"size": "11243",
"license": "mit",
"hash": -1579974794750582000,
"line_mean": 31.1228571429,
"line_max": 95,
"alpha_frac": 0.5198790358,
"autogenerated": false,
"ratio": 3.503583670925522,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9500245540994174,
"avg_score": 0.004643433146269565,
"num_lines": 350
} |
# amplitude[i] variables take the amplitude of the pressure wave or the amplitude of the stress ratio.
# period[i] variables take the time-period of the wave
# cr = color of the line
# pressure[i] = mean pressure
# deformation[i] = polar deformation
# trailno = the trail values
# t = thickness
# cnc = concentration
# start = starting time of the second sinewave
## Importing Modules
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
def stressratio(amplitude1, amplitude2, amplitude3, period1, period2, period3, cnc):
    """Plot three half-sine stress-ratio curves (5/6/7 kg masses) vs time.

    Each (amplitude, period) pair defines one curve; `cnc` is the
    concentration (%) shown in the plot title. Always returns 0.
    """
    ## Cycle duration
    dur = 0.5
    curves = (
        (amplitude1, period1, 'blue', '5kg mass'),
        (amplitude2, period2, 'red', '6kg mass'),
        (amplitude3, period3, 'green', '7kg mass'),
    )
    ## Plotting
    plt.ylim(0.0, amplitude3 + 1)
    for amplitude, period, colour, label in curves:
        # 10000 samples per period for a smooth curve.
        t = np.arange(0, period, period / 10000)
        plt.plot(t, amplitude * np.sin(2 * np.pi * dur * t / period),
                 color=colour, label=label)
    plt.xlabel('Time (ms)')
    plt.ylabel('Stress Ratio')
    plt.title('Stress Ratio vs Time (' + str(cnc) + '% Concentration)')
    plt.legend(loc="best")
    plt.show()
    return 0
def pressurerigid(amplitude, period, trailno, cr):
    """Plot a half-sine pressure pulse for a rigid die end and print its mean.

    amplitude -- peak pressure (MPa)
    period    -- pulse duration (ms)
    trailno   -- trial number used in the legend label
    cr        -- matplotlib colour for the line
    Always returns 0.
    """
    ## Cycle duration
    dur = 0.5
    ## For Curve Smoothing
    timerange = period/10000
    ## Sine Curve
    x = np.arange(0, period, timerange)
    y = np.sin(2 * np.pi * dur * x / period)
    y = amplitude * y
    ## Print mean pressure
    z = np.mean(y)
    # `print z` was Python-2-only syntax; print() works on both 2 and 3.
    print(z)
    ##Plotting
    plt.ylim(0.0, amplitude + 10)
    plt.plot(x, y, color=cr, label = 'Trial' + str(trailno))
    plt.xlabel('Time (ms)')
    # Unit label fixed from 'Mpa' to 'MPa' for consistency with
    # pressureflex().
    plt.ylabel('Pressure (MPa)')
    plt.title('Pressure vs Time (Rigid End)')
    plt.legend(loc="best")
    return 0
def pressureflex(amplitude, period1, period2, start, trailno, cr):
    """Plot a pressure pulse for a flexible die end and print its mean.

    The pulse is assembled from the rising half of a sine arch with
    period `period1` followed by the falling half of a sine arch with
    period `period2`, which starts at time `start`.
    Always returns 0.
    """
    ## Cycle duration
    dur1 = 0.5
    ## For Curve Smoothing
    timerange1 = period1/1000
    timerange2 = period2/1000
    ## Sine Curve
    x1 = np.arange(0, period1, timerange1)
    y1 = np.sin(2 * np.pi * dur1 * x1 / period1)
    # Keep only the first (rising) half of the first arch.
    k,l = np.split(y1,2)
    y1 = k
    dur2 = 0.5
    x2 = np.arange(0, period2, timerange2)
    y2 = np.sin(2 * np.pi * dur2 * x2 / period2)
    # Keep only the second (falling) half of the second arch.
    a,b = np.split(y2,2)
    y2 = b
    y = np.concatenate((y1, y2), axis=0)
    y = amplitude * y
    g,h = np.split(x1,2)
    x1 = g
    # Offset the second arch's time axis so it begins at `start`.
    x2 = start + x2
    c,d = np.split(x2,2)
    x2 = d
    x = np.concatenate((x1, x2), axis=0)
    ## Print mean pressure
    z = np.mean(y)
    # `print z` was Python-2-only syntax; print() works on both 2 and 3.
    print(z)
    ##Plotting
    plt.plot(x, y, color=cr, label= 'Trial'+str(trailno))
    plt.xlabel('Time (ms)')
    plt.ylabel('Pressure (MPa)')
    plt.title('Pressure vs Time (Flexible End)')
    plt.legend(loc="best")
    return 0
def pressuredeform1(pressure1, pressure2, deformation1, deformation2, t, cr):
    """Plot polar deformation against mean pressure for two data points.

    `t` is the thickness (mm) shown in the legend; `cr` the line colour.
    """
    xs = np.array([pressure1, pressure2])
    ys = np.array([deformation1, deformation2])
    plt.plot(xs, ys, marker='o', color=cr, label=str(t) + 'mm')
    plt.xlabel('Mean Pressure (MPa)')
    plt.ylabel('Deformation (mm)')
    plt.title('Deformation vs Pressure')
    plt.legend(loc="best")
def pressuredeform(pressure1, pressure2, pressure3, deformation1, deformation2, deformation3, t, cr):
    """Plot polar deformation against mean pressure for three data points.

    `t` is the thickness (mm) shown in the legend; `cr` the line colour.
    """
    xs = np.array([pressure1, pressure2, pressure3])
    ys = np.array([deformation1, deformation2, deformation3])
    plt.plot(xs, ys, marker='o', color=cr, label=str(t) + 'mm')
    plt.xlabel('Mean Pressure (MPa)')
    plt.ylabel('Deformation (mm)')
    plt.title('Deformation vs Pressure')
    plt.legend(loc="best")
| {
"repo_name": "kredy/Liquid-Impact-Forming",
"path": "graph.py",
"copies": "1",
"size": "3768",
"license": "bsd-3-clause",
"hash": -2316760653832739000,
"line_mean": 25.9142857143,
"line_max": 102,
"alpha_frac": 0.6764861996,
"autogenerated": false,
"ratio": 2.5685071574642127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37449933570642124,
"avg_score": null,
"num_lines": null
} |
"""Amplitude Spetrum produced in the paper.
DVS Benchmark Datasets for Object Tracking, Action Recognition and Object
Recognition
Please set your environment variable SPIKEFUEL_DATA:
export SPIKEFUEL_DATA=/path/to/data
and then place all HDF5 format data in this folder, then
create a folder `sf_data` in `/path/to/data`, and place stats file in there.
stats file can be found at:
https://github.com/duguyue100/spikefuel/tree/master/data
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
from __future__ import print_function
import os
import h5py
import cPickle as pickle
import numpy as np
import matplotlib
import matplotlib.pylab as plt
# NOTE(review): this script is Python-2 only (file(), xrange, cPickle in
# the imports above); running under Python 3 requires porting those calls.
matplotlib.rcParams.update({'font.size': 100})
# options
# VOT dataset amplitude spectrum : vot-as
# TrackingDataset amplitude spectrum: tracking-as
# UCF-50 amplitude spectrum : ucf50-as
# Caltech-256 amplitude spectrum : caltech256-as
option = "caltech256-as"
data_path = os.environ["SPIKEFUEL_DATA"]
stats_path = os.path.join(data_path, "sf_data")
if option == "vot-as":
    vot_fn = "INI_VOT_30fps_20160424.hdf5"
    vot_path = os.path.join(data_path, vot_fn)
    vot_db = h5py.File(vot_path, mode="r")
    vot_stats_path = os.path.join(stats_path, "vot_stats.pkl")
    vot_save_path = os.path.join(data_path, "vot_as.eps")
    # load vot stats
    f = file(vot_stats_path, mode="r")
    vot_stats = pickle.load(f)
    f.close()
    vot_list = vot_stats['vot_list']
    num_frames = vot_stats['num_frames']
    vidseq = vot_list[9]
    timestamps = vot_db[vidseq]["timestamps"][()]
    print("[MESSAGE] DATA IS LOADED.")
    # Bin the event timestamps into a spike-count signal, one bin per
    # timestamp tick.
    tend = timestamps[-1]
    vv = np.zeros(tend+1)
    for i in xrange(timestamps.shape[0]):
        vv[timestamps[i]] += 1
    # Single-sided amplitude spectrum of the mean-removed signal.
    # fs = 1e6 implies microsecond-resolution timestamps -- TODO confirm.
    fs = 1e6
    L = vv.shape[0]
    vv = vv - np.sum(vv)/L
    NFFT = int(2**np.ceil(np.log2(L)))
    ff = np.fft.fft(vv, NFFT)/L
    f = fs/2*(np.arange(NFFT/2)/float(NFFT/2))
    f_draw = f
    ff_draw = 2*np.abs(ff[:NFFT/2])
    # Plot and save the spectrum (0-100 Hz window).
    plt.figure(figsize=(50, 45))
    plt.xlim([0, 100])
    plt.plot(f_draw, ff_draw, 'b', linewidth=10)
    plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    plt.xticks(np.arange(0, 100+1, 20))
    plt.yticks(np.arange(0, 1.8e-1+0.3e-1, 0.3e-1))
    plt.xlabel("Frequency [Hz]", fontsize=150)
    plt.savefig(vot_save_path, format="eps", dpi=1200,
                bbox_inches='tight', pad_inches=0.5)
    print("[MESSAGE] Amplitude Spectrum is saved at %s" % (vot_save_path))
elif option == "tracking-as":
    tracking_fn = "INI_TrackingDataset_30fps_20160424.hdf5"
    tracking_path = os.path.join(data_path, tracking_fn)
    tracking_db = h5py.File(tracking_path, mode="r")
    tracking_stats_path = os.path.join(stats_path, "tracking_stats.pkl")
    tracking_save_path = os.path.join(data_path, "tracking_as.eps")
    f = file(tracking_stats_path, mode="r")
    tracking_stats = pickle.load(f)
    f.close()
    pl = tracking_stats["primary_list"]
    sl = tracking_stats["secondary_list"]
    pc = pl[0]
    sc = sl[pc][1]
    timestamps = tracking_db[pc][sc]["timestamps"][()]
    print("[MESSAGE] DATA IS LOADED.")
    # Same spectrum computation as the vot-as branch, different yticks.
    tend = timestamps[-1]
    vv = np.zeros(tend+1)
    for i in xrange(timestamps.shape[0]):
        vv[timestamps[i]] += 1
    fs = 1e6
    L = vv.shape[0]
    vv = vv - np.sum(vv)/L
    NFFT = int(2**np.ceil(np.log2(L)))
    ff = np.fft.fft(vv, NFFT)/L
    f = fs/2*(np.arange(NFFT/2)/float(NFFT/2))
    f_draw = f
    ff_draw = 2*np.abs(ff[:NFFT/2])
    plt.figure(figsize=(50, 45))
    plt.xlim([0, 100])
    plt.plot(f_draw, ff_draw, 'b', linewidth=10)
    plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    plt.xticks(np.arange(0, 100+1, 20))
    plt.yticks(np.arange(0, 7e-2+2e-2, 1.5e-2))
    plt.xlabel("Frequency [Hz]", fontsize=150)
    plt.savefig(tracking_save_path, format="eps", dpi=1200,
                bbox_inches='tight', pad_inches=0.5)
    print("[MESSAGE] Amplitude Spectrum is saved at %s" % (tracking_save_path))
elif option == "ucf50-as":
    ucf50_fn = "INI_UCF50_30fps_20160424.hdf5"
    ucf50_path = os.path.join(data_path, ucf50_fn)
    ucf50_db = h5py.File(ucf50_path, mode="r")
    ucf50_stats_path = os.path.join(stats_path, "ucf50_stats.pkl")
    vid_num = 11
    ucf50_save_path = os.path.join(data_path, "ucf50_as.eps")
    f = file(ucf50_stats_path, mode="r")
    ucf50_stats = pickle.load(f)
    f.close()
    ucf50_list = ucf50_stats["ucf50_list"]
    cn = ucf50_list[0]
    vid_name = ucf50_stats[cn][vid_num-1]
    vid_n, vid_ex = os.path.splitext(vid_name)
    timestamps = ucf50_db[cn][vid_n]["timestamps"][()]
    print("[MESSAGE] DATA IS LOADED.")
    # Same spectrum computation as the vot-as branch, different yticks.
    tend = timestamps[-1]
    vv = np.zeros(tend+1)
    for i in xrange(timestamps.shape[0]):
        vv[timestamps[i]] += 1
    fs = 1e6
    L = vv.shape[0]
    vv = vv - np.sum(vv)/L
    NFFT = int(2**np.ceil(np.log2(L)))
    ff = np.fft.fft(vv, NFFT)/L
    f = fs/2*(np.arange(NFFT/2)/float(NFFT/2))
    f_draw = f
    ff_draw = 2*np.abs(ff[:NFFT/2])
    plt.figure(figsize=(50, 45))
    plt.xlim([0, 100])
    plt.plot(f_draw, ff_draw, 'b', linewidth=10)
    plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    plt.xticks(np.arange(0, 100+1, 20))
    plt.yticks(np.arange(0, 2.5e-1, 0.4e-1))
    plt.xlabel("Frequency [Hz]", fontsize=150)
    plt.savefig(ucf50_save_path, format="eps", dpi=1200,
                bbox_inches='tight', pad_inches=0.5)
    print("[MESSAGE] Amplitude Spectrum is saved at %s" % (ucf50_save_path))
elif option == "caltech256-as":
    caltech_fn = "INI_Caltech256_10fps_20160424.hdf5"
    caltech_path = os.path.join(data_path, caltech_fn)
    caltech_db = h5py.File(caltech_path, mode="r")
    caltech_stats_path = os.path.join(stats_path, "caltech256_stats.pkl")
    caltech_save_path = os.path.join(data_path, "caltech256_as.eps")
    img_num = 60
    f = file(caltech_stats_path, mode="r")
    caltech_stats = pickle.load(f)
    f.close()
    caltech_list = caltech_stats["caltech256_list"]
    cn = caltech_list[0]
    img_name = caltech_stats[cn][img_num-1]
    img_n, img_ex = os.path.splitext(img_name)
    timestamps = caltech_db[cn][img_n]["timestamps"][()]
    print("[MESSAGE] DATA IS LOADED.")
    # Same spectrum computation as the vot-as branch, different yticks.
    tend = timestamps[-1]
    vv = np.zeros(tend+1)
    for i in xrange(timestamps.shape[0]):
        vv[timestamps[i]] += 1
    fs = 1e6
    L = vv.shape[0]
    vv = vv - np.sum(vv)/L
    NFFT = int(2**np.ceil(np.log2(L)))
    ff = np.fft.fft(vv, NFFT)/L
    f = fs/2*(np.arange(NFFT/2)/float(NFFT/2))
    f_draw = f
    ff_draw = 2*np.abs(ff[:NFFT/2])
    plt.figure(figsize=(50, 45))
    plt.xlim([0, 100])
    plt.plot(f_draw, ff_draw, 'b', linewidth=10)
    plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    plt.xticks(np.arange(0, 100+1, 20))
    plt.yticks(np.arange(0, 2.0e-1, 0.3e-1))
    plt.xlabel("Frequency [Hz]", fontsize=150)
    plt.savefig(caltech_save_path, format="eps", dpi=1200,
                bbox_inches='tight', pad_inches=0.5)
    print("[MESSAGE] Amplitude Spectrum is saved at %s" % (caltech_save_path))
| {
"repo_name": "duguyue100/spikefuel",
"path": "scripts/create_amplitude_spectrum.py",
"copies": "1",
"size": "7021",
"license": "mit",
"hash": 6963519470952872000,
"line_mean": 31.2064220183,
"line_max": 79,
"alpha_frac": 0.6248397664,
"autogenerated": false,
"ratio": 2.645440844009043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8770280610409042,
"avg_score": 0,
"num_lines": 218
} |
"""a.m. / p.m.
---
layout: post
source: Garner's Modern American Usage
source_url: http://bit.ly/1T4alrY
title: a.m. & p.m.
date: 2014-06-10 12:31:19
categories: writing
---
"""
from proselint.tools import existence_check, memoize
@memoize
def check_lowercase_periods(text):
    """Flag times such as '7 pm' written without periods."""
    err = "dates_times.am_pm.lowercase_periods"
    msg = "With lowercase letters, the periods are standard."
    patterns = [r"\d{1,2} ?[ap]m"]
    return existence_check(text, patterns, err, msg)
@memoize
def check_spacing(text):
    """Flag times such as '7p.m.' missing the space before a.m./p.m."""
    err = "dates_times.am_pm.spacing"
    msg = "It's standard to put a space before 'a.m.' or 'p.m.'."
    patterns = [r"\d{1,2}[ap]\.?m\.?"]
    return existence_check(text, patterns, err, msg)
@memoize
def check_midnight_noon(text):
    """Flag '12 a.m.' and '12 p.m.', which are ambiguous."""
    err = "dates_times.am_pm.midnight_noon"
    msg = ("12 a.m. and 12 p.m. are wrong and confusing."
           " Use 'midnight' or 'noon'.")
    patterns = [r"12 ?[ap]\.?m\.?"]
    return existence_check(text, patterns, err, msg)
@memoize
def check_redundancy(text):
    """Flag redundant wording such as '10 a.m. in the morning'."""
    # The error code previously duplicated check_midnight_noon's
    # "dates_times.am_pm.midnight_noon" (copy-paste slip), making the
    # two checks indistinguishable in output; give this check its own
    # code.
    err = "dates_times.am_pm.redundancy"
    msg = ("'a.m.' is always morning; 'p.m.' is always night.")
    # Renamed from `list`, which shadowed the builtin.
    patterns = [
        r"\d{1,2} ?a\.?m\.? in the morning",
        r"\d{1,2} ?p\.?m\.? in the evening",
        r"\d{1,2} ?p\.?m\.? at night",
        r"\d{1,2} ?p\.?m\.? in the afternoon",
    ]
    return existence_check(text, patterns, err, msg, join=True)
| {
"repo_name": "amperser/proselint",
"path": "proselint/checks/dates_times/am_pm.py",
"copies": "1",
"size": "1441",
"license": "bsd-3-clause",
"hash": -1123359962359209200,
"line_mean": 24.2807017544,
"line_max": 67,
"alpha_frac": 0.5801526718,
"autogenerated": false,
"ratio": 2.703564727954972,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8783717399754971,
"avg_score": 0,
"num_lines": 57
} |
"""AMQP benchmarking tool."""
from __future__ import print_function
import glob
import json
import logging
import os
import random
import signal
import statistics
import sys
import time
from functools import partial, wraps
from operator import itemgetter
import asgi_rabbitmq
from channels.test.liveserver import (
ChannelLiveServerTestCase,
DaphneProcess,
WorkerProcess,
)
from daphne.access import AccessLogGenerator
from daphne.server import Server
from pika.spec import Basic
from tabulate import tabulate
from twisted.python.log import PythonLoggingObserver
# Latency samples / call counts for low-level AMQP frame operations,
# keyed by method name (filled in by DebugChannel and wrap()).
amqp_stats = {}
# Latency samples / call counts for channel-layer methods, keyed by
# method name (filled in by bench()).
layer_stats = {}
# Start times of in-flight basic_consume calls, keyed by consumer tag.
consumers = {}
# Feature switches read from the environment ('True' enables them).
BENCHMARK = os.environ.get('BENCHMARK', 'False') == 'True'
DEBUGLOG = os.environ.get('DEBUGLOG', 'False') == 'True'
PIKALOG = os.environ.get('PIKALOG', 'False') == 'True'
def maybe_monkeypatch(todir):
    """Install the benchmark instrumentation, but only when BENCHMARK is set."""
    if not BENCHMARK:
        return
    monkeypatch_all(todir)
def maybe_print_stats(fromdir):
    """Print benchmark statistics, but only when BENCHMARK is set."""
    if not BENCHMARK:
        return
    print_stats(fromdir)
def monkeypatch_all(todir):
    """Install all benchmark instrumentation: the AMQP channel class,
    the channel-layer methods, and the live-server test case processes
    (which dump their stats into `todir`)."""
    monkeypatch_connection()
    monkeypatch_layer()
    monkeypatch_test_case(todir)
def monkeypatch_connection():
    """Substitute the AMQP channel class with the measuring DebugChannel."""
    asgi_rabbitmq.core.LayerConnection.Channel = DebugChannel
def monkeypatch_layer():
    """Wrap the channel-layer methods with latency/count instrumentation."""
    layer = asgi_rabbitmq.core.RabbitmqChannelLayer
    for name in ('send', 'receive', 'new_channel', 'group_add',
                 'group_discard'):
        setattr(layer, name, bench(getattr(layer, name)))
    # send_group is only counted, not timed.
    layer.send_group = bench(layer.send_group, count=True)
def monkeypatch_test_case(todir):
    """
    Setup live server test case with benchmark measurement processes.

    `todir` is bound into the process factories so the subprocesses know
    where to dump their statistics.
    """
    case = ChannelLiveServerTestCase
    case.ProtocolServerProcess = partial(DebugDaphneProcess, todir)
    case.WorkerProcess = partial(DebugWorkerProcess, todir)
    # Decorate test teardown method.
    case._post_teardown = signal_first(case._post_teardown)
def percentile(values, fraction):
    """
    Returns a percentile value (e.g. fraction = 0.95 -> 95th percentile)
    """
    ordered = sorted(values)
    index = int(len(ordered) * fraction)
    if index == len(ordered):
        # fraction == 1.0 would otherwise index one past the end.
        index -= 1
    return ordered[index]
def print_stats(fromdir):
    """Print collected statistics.

    Merges the *.dump files written by subprocesses in `fromdir` into
    the module-level amqp_stats/layer_stats, then prints one table per
    collection: list entries are latency samples (mean/median/stdev and
    95th/99th percentiles), int entries are plain call counts.
    """
    # Include statistics from subprocesses.
    for statfile in glob.glob('%s/*.dump' % fromdir):
        with open(statfile) as f:
            statblob = f.read()
        statdata = json.loads(statblob)
        # statdata is the [amqp_stats, layer_stats] pair dumped by
        # save_stats(); extend lists, add counts.
        for num, stat in enumerate([amqp_stats, layer_stats]):
            for k, v in statdata[num].items():
                if isinstance(v, list):
                    stat.setdefault(k, [])
                    stat[k].extend(v)
                else:
                    stat.setdefault(k, 0)
                    stat[k] += v
    headers = ['method', 'calls', 'mean', 'median', 'stdev', '95%', '99%']
    for num, stats in enumerate([amqp_stats, layer_stats], start=1):
        if stats:
            # Print statistic table.
            data = []
            for method, latencies in stats.items():
                if isinstance(latencies, list):
                    data.append([
                        method,
                        len(latencies),
                        statistics.mean(latencies),
                        statistics.median(latencies),
                        # stdev needs at least two samples.
                        statistics.stdev(latencies)
                        if len(latencies) > 1 else None,
                        percentile(latencies, 0.95),
                        percentile(latencies, 0.99),
                    ])
                elif isinstance(latencies, int):
                    # Count-only entry (e.g. basic_publish): no timings.
                    data.append(
                        [method, latencies, None, None, None, None, None],
                    )
                else:
                    raise Exception(
                        'Stat(%d) was currupted at method %s' % (num, method),
                    )
            # Sort by number of calls, busiest method first.
            data = sorted(data, key=itemgetter(1), reverse=True)
            print()
            print(tabulate(data, headers))
        else:
            print("%d) No statistic available" % num)
def save_stats(todir):
    """
    Dump the collected statistics to a json file. Used from the live
    server test case subprocesses; print_stats() later merges the dumps.
    """
    dump = json.dumps([amqp_stats, layer_stats])
    filename = '%d.dump' % random.randint(0, 100)
    with open(os.path.join(todir, filename), 'w') as f:
        f.write(dump)
def bench(f, count=False):
    """Instrument `f`, recording either call latencies (default) or,
    with count=True, just the number of calls, into layer_stats."""
    if not count:
        # Record the exact duration of every call.
        @wraps(f)
        def wrapper(*args, **kwargs):
            started = time.time()
            result = f(*args, **kwargs)
            layer_stats.setdefault(f.__name__, [])
            layer_stats[f.__name__] += [time.time() - started]
            return result
    else:
        # Only count how many times the function was called.
        @wraps(f)
        def wrapper(*args, **kwargs):
            layer_stats.setdefault(f.__name__, 0)
            layer_stats[f.__name__] += 1
            return f(*args, **kwargs)
    return wrapper
def wrap(method, callback):
    """
    Measure the latency between request start and response callback.
    Used to measure low-level AMQP frame operations. Returns None when
    there is no callback to time.
    """
    if callback is None:
        return
    started = time.time()
    def timed_callback(*args):
        amqp_stats.setdefault(method, [])
        amqp_stats[method] += [time.time() - started]
        if callback:
            callback(*args)
    return timed_callback
class DebugChannel(asgi_rabbitmq.core.LayerConnection.Channel):
    """Collect statistics about RabbitMQ methods usage on channel."""
    # Fire-and-forget methods (no confirmation callback) are only
    # counted; callback-based methods are timed via wrap().
    def basic_ack(self, *args, **kwargs):
        amqp_stats.setdefault('basic_ack', 0)
        amqp_stats['basic_ack'] += 1
        return super(DebugChannel, self).basic_ack(*args, **kwargs)
    def basic_cancel(self, callback=None, *args, **kwargs):
        return super(DebugChannel, self).basic_cancel(
            wrap('basic_cancel', callback), *args, **kwargs)
    def basic_consume(self, *args, **kwargs):
        # Record the start time keyed by consumer tag; the latency is
        # resolved in _on_eventok when the matching ConsumeOk arrives.
        start = time.time()
        consumer_tag = super(DebugChannel, self).basic_consume(*args, **kwargs)
        consumers[consumer_tag] = start
        return consumer_tag
    def _on_eventok(self, method_frame):
        end = time.time()
        if isinstance(method_frame.method, Basic.ConsumeOk):
            # Pair the ConsumeOk frame with its basic_consume start time.
            start = consumers.pop(method_frame.method.consumer_tag)
            latency = end - start
            amqp_stats.setdefault('basic_consume', [])
            amqp_stats['basic_consume'] += [latency]
            return
        return super(DebugChannel, self)._on_eventok(method_frame)
    def basic_get(self, callback=None, *args, **kwargs):
        # TODO: Measure latency for Get-Empty responses.
        return super(DebugChannel, self).basic_get(
            wrap('basic_get', callback), *args, **kwargs)
    def basic_publish(self, *args, **kwargs):
        amqp_stats.setdefault('basic_publish', 0)
        amqp_stats['basic_publish'] += 1
        return super(DebugChannel, self).basic_publish(*args, **kwargs)
    def exchange_bind(self, callback=None, *args, **kwargs):
        return super(DebugChannel, self).exchange_bind(
            wrap('exchange_bind', callback), *args, **kwargs)
    def exchange_declare(self, callback=None, *args, **kwargs):
        return super(DebugChannel, self).exchange_declare(
            wrap('exchange_declare', callback), *args, **kwargs)
    def exchange_delete(self, callback=None, *args, **kwargs):
        return super(DebugChannel, self).exchange_delete(
            wrap('exchange_delete', callback), *args, **kwargs)
    def exchange_unbind(self, callback=None, *args, **kwargs):
        return super(DebugChannel, self).exchange_unbind(
            wrap('exchange_unbind', callback), *args, **kwargs)
    def queue_bind(self, callback, *args, **kwargs):
        return super(DebugChannel, self).queue_bind(
            wrap('queue_bind', callback), *args, **kwargs)
    def queue_declare(self, callback, *args, **kwargs):
        return super(DebugChannel, self).queue_declare(
            wrap('queue_declare', callback), *args, **kwargs)
class DebugDaphneProcess(DaphneProcess):
    """Daphne live-server subprocess instrumented for benchmarking.

    Dumps the collected benchmark statistics to a json file in
    ``todir`` when the subprocess is torn down (via SIGCHLD).
    """

    def __init__(self, todir, *args):
        # Remember the output directory before the base class
        # initialises the process machinery.
        self.todir = todir
        super(DebugDaphneProcess, self).__init__(*args)

    def run(self):
        # Executed in the child process: install logging, the
        # monkey patches and the stats-dumping signal handler there,
        # then hand over to the real server loop.
        setup_logger('Daphne')
        monkeypatch_all(self.todir)
        dump_handler = partial(at_exit, self.todir)
        signal.signal(signal.SIGCHLD, dump_handler)
        super(DebugDaphneProcess, self).run()
class DebugWorkerProcess(WorkerProcess):
    """Worker live-server subprocess instrumented for benchmarking.

    Dumps the collected benchmark statistics to a json file in
    ``todir`` when the subprocess is torn down (via SIGCHLD).
    """

    def __init__(self, todir, *args):
        # Remember the output directory before the base class
        # initialises the process machinery.
        self.todir = todir
        super(DebugWorkerProcess, self).__init__(*args)

    def run(self):
        # Executed in the child process: install logging, the
        # monkey patches and the stats-dumping signal handler there,
        # then hand over to the real worker loop.
        setup_logger('Worker')
        monkeypatch_all(self.todir)
        dump_handler = partial(at_exit, self.todir)
        signal.signal(signal.SIGCHLD, dump_handler)
        super(DebugWorkerProcess, self).run()
def signal_first(method):
    """Decorate a test method so subprocess teardown runs first.

    Sends ``SIGCHLD`` to both live-server subprocesses (which triggers
    their ``at_exit`` stats dump), gives them a moment to write the
    files, then invokes the wrapped test method.
    """
    def decorated_method(self):
        for process in (self._server_process, self._worker_process):
            os.kill(process.pid, signal.SIGCHLD)
        # Give the subprocess handlers a moment to persist their stats.
        time.sleep(0.1)
        method(self)
    return decorated_method
def at_exit(todir, signum, frame):
    """Signal handler: persist collected statistics into *todir*.

    No-op unless benchmarking is enabled via the BENCHMARK flag.
    """
    if not BENCHMARK:
        return
    save_stats(todir)
def setup_logger(name):
    """Enable debug logging."""
    if DEBUGLOG:
        logging.basicConfig(
            level=logging.DEBUG,
            format=name + ' %(asctime)-15s %(levelname)-8s %(message)s',
        )
        # pika is extremely chatty at DEBUG level; silence it unless
        # explicitly requested via the PIKALOG flag.
        disabled_loggers = []
        if not PIKALOG:
            disabled_loggers.append('pika')
        for logger in disabled_loggers:
            logging.getLogger(logger).setLevel(logging.WARNING)
        new_defaults = list(Server.__init__.__defaults__)
        # NOTE: Patch `action_logger` argument default value.
        # NOTE(review): assumes `action_logger` is the 7th defaulted
        # parameter of daphne's Server.__init__ — positional patching
        # of __defaults__ is fragile; confirm against the installed
        # daphne version.
        new_defaults[6] = AccessLogGenerator(sys.stdout)
        Server.__init__.__defaults__ = tuple(new_defaults)
        # Route twisted's log events into the stdlib logging tree.
        observer = PythonLoggingObserver(loggerName='twisted')
        observer.start()
| {
"repo_name": "proofit404/asgi_rabbitmq",
"path": "libs/amqpstat.py",
"copies": "1",
"size": "10679",
"license": "bsd-3-clause",
"hash": 2627948430723636000,
"line_mean": 28.6638888889,
"line_max": 79,
"alpha_frac": 0.6068920311,
"autogenerated": false,
"ratio": 3.961053412462908,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 360
} |
"""AMQP Channels."""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
from __future__ import absolute_import, unicode_literals
import logging
import socket
from collections import defaultdict
from warnings import warn
from vine import ensure_promise
from . import spec
from .abstract_channel import AbstractChannel
from .exceptions import (ChannelError, ConsumerCancelled,
RecoverableChannelError, RecoverableConnectionError,
error_for_code, MessageNacked)
from .five import Queue
from .protocol import queue_declare_ok_t
__all__ = ['Channel']
AMQP_LOGGER = logging.getLogger('amqp')
EXCHANGE_AUTODELETE_DEPRECATED = """\
The auto_delete flag for exchanges has been deprecated and will be removed
from py-amqp v1.5.0.\
"""
REJECTED_MESSAGE_WITHOUT_CALLBACK = """\
Rejecting message with delivery tag %r for reason of having no callbacks.
consumer_tag=%r exchange=%r routing_key=%r.\
"""
class VDeprecationWarning(DeprecationWarning):
    """Warning category for features scheduled for removal from py-amqp."""
    pass
class Channel(AbstractChannel):
"""AMQP Channel.
The channel class provides methods for a client to establish a
virtual connection - a channel - to a server and for both peers to
operate the virtual connection thereafter.
GRAMMAR::
channel = open-channel *use-channel close-channel
open-channel = C:OPEN S:OPEN-OK
use-channel = C:FLOW S:FLOW-OK
/ S:FLOW C:FLOW-OK
/ functional-class
close-channel = C:CLOSE S:CLOSE-OK
/ S:CLOSE C:CLOSE-OK
Create a channel bound to a connection and using the specified
numeric channel_id, and open on the server.
The 'auto_decode' parameter (defaults to True), indicates
whether the library should attempt to decode the body
of Messages to a Unicode string if there's a 'content_encoding'
property for the message. If there's no 'content_encoding'
property, or the decode raises an Exception, the message body
is left as plain bytes.
"""
_METHODS = {
spec.method(spec.Channel.Close, 'BsBB'),
spec.method(spec.Channel.CloseOk),
spec.method(spec.Channel.Flow, 'b'),
spec.method(spec.Channel.FlowOk, 'b'),
spec.method(spec.Channel.OpenOk),
spec.method(spec.Exchange.DeclareOk),
spec.method(spec.Exchange.DeleteOk),
spec.method(spec.Exchange.BindOk),
spec.method(spec.Exchange.UnbindOk),
spec.method(spec.Queue.BindOk),
spec.method(spec.Queue.UnbindOk),
spec.method(spec.Queue.DeclareOk, 'sll'),
spec.method(spec.Queue.DeleteOk, 'l'),
spec.method(spec.Queue.PurgeOk, 'l'),
spec.method(spec.Basic.Cancel, 's'),
spec.method(spec.Basic.CancelOk, 's'),
spec.method(spec.Basic.ConsumeOk, 's'),
spec.method(spec.Basic.Deliver, 'sLbss', content=True),
spec.method(spec.Basic.GetEmpty, 's'),
spec.method(spec.Basic.GetOk, 'Lbssl', content=True),
spec.method(spec.Basic.QosOk),
spec.method(spec.Basic.RecoverOk),
spec.method(spec.Basic.Return, 'Bsss', content=True),
spec.method(spec.Tx.CommitOk),
spec.method(spec.Tx.RollbackOk),
spec.method(spec.Tx.SelectOk),
spec.method(spec.Confirm.SelectOk),
spec.method(spec.Basic.Ack, 'Lb'),
spec.method(spec.Basic.Nack, 'Lb'),
}
_METHODS = {m.method_sig: m for m in _METHODS}
    def __init__(self, connection,
                 channel_id=None, auto_decode=True, on_open=None):
        # An explicit channel_id is claimed on the connection so no
        # other channel can reuse it; otherwise ask for a free one.
        if channel_id:
            connection._claim_channel_id(channel_id)
        else:
            channel_id = connection._get_free_channel_id()
        AMQP_LOGGER.debug('using channel_id: %s', channel_id)
        super(Channel, self).__init__(connection, channel_id)
        self.is_open = False
        self.active = True  # Flow control
        # Messages returned by the broker (Basic.Return) queue up here.
        self.returned_messages = Queue()
        # consumer_tag -> callback, used to dispatch Basic.Deliver.
        self.callbacks = {}
        # consumer_tag -> callback invoked on broker-side cancel.
        self.cancel_callbacks = {}
        self.auto_decode = auto_decode
        self.events = defaultdict(set)
        # Tags consumed with no_ack=True (no acknowledgement needed).
        self.no_ack_consumers = set()
        # Promise resolved once Channel.Open-Ok arrives.
        self.on_open = ensure_promise(on_open)

        # set first time basic_publish_confirm is called
        # and publisher confirms are enabled for this channel.
        self._confirm_selected = False
        if self.connection.confirm_publish:
            # Publisher confirms requested: route publishes through
            # the confirm-enabled variant defined on the class.
            self.basic_publish = self.basic_publish_confirm
def then(self, on_success, on_error=None):
return self.on_open.then(on_success, on_error)
    def _setup_listeners(self):
        """Register handlers for broker-initiated channel-level methods."""
        self._callbacks.update({
            spec.Channel.Close: self._on_close,
            spec.Channel.CloseOk: self._on_close_ok,
            spec.Channel.Flow: self._on_flow,
            spec.Channel.OpenOk: self._on_open_ok,
            spec.Basic.Cancel: self._on_basic_cancel,
            spec.Basic.CancelOk: self._on_basic_cancel_ok,
            spec.Basic.Deliver: self._on_basic_deliver,
            spec.Basic.Return: self._on_basic_return,
            spec.Basic.Ack: self._on_basic_ack,
            spec.Basic.Nack: self._on_basic_nack,
        })
    def collect(self):
        """Tear down this object.

        Best called after we've agreed to close with the server.
        """
        AMQP_LOGGER.debug('Closed channel #%s', self.channel_id)
        self.is_open = False
        # Detach id and connection first so the channel is unlinked
        # even if releasing resources below fails part-way.
        channel_id, self.channel_id = self.channel_id, None
        connection, self.connection = self.connection, None
        if connection:
            connection.channels.pop(channel_id, None)
            # Return the numeric id to the pool for reuse.
            connection._avail_channel_ids.append(channel_id)
        self.callbacks.clear()
        self.cancel_callbacks.clear()
        self.events.clear()
        self.no_ack_consumers.clear()
    def _do_revive(self):
        """Reopen the channel after a recoverable, broker-initiated close."""
        self.is_open = False
        self.open()
    def close(self, reply_code=0, reply_text='', method_sig=(0, 0),
              argsig='BsBB'):
        """Request a channel close.

        This method indicates that the sender wants to close the
        channel. This may be due to internal conditions (e.g. a forced
        shut-down) or due to an error handling a specific method, i.e.
        an exception. When a close is due to an exception, the sender
        provides the class and method id of the method which caused
        the exception.

        RULE:

            After sending this method any received method except
            Channel.Close-OK MUST be discarded.

        RULE:

            The peer sending this method MAY use a counter or timeout
            to detect failure of the other peer to respond correctly
            with Channel.Close-OK..

        PARAMETERS:
            reply_code: short

                The reply code. The AMQ reply codes are defined in AMQ
                RFC 011.

            reply_text: shortstr

                The localised reply text. This text can be logged as an
                aid to resolving issues.

            class_id: short

                failing method class

                When the close is provoked by a method exception, this
                is the class of the method.

            method_id: short

                failing method ID

                When the close is provoked by a method exception, this
                is the ID of the method.
        """
        try:
            # Nothing to do when the channel is already closed or the
            # connection has gone away underneath us.
            is_closed = (
                not self.is_open or
                self.connection is None or
                self.connection.channels is None
            )
            if is_closed:
                return
            self.is_closing = True
            return self.send_method(
                spec.Channel.Close, argsig,
                (reply_code, reply_text, method_sig[0], method_sig[1]),
                wait=spec.Channel.CloseOk,
            )
        finally:
            # Always drop the closing flag and detach the connection —
            # even on the early return or if send_method raised.
            self.is_closing = False
            self.connection = None
    def _on_close(self, reply_code, reply_text, class_id, method_id):
        """Request a channel close.

        This method indicates that the sender wants to close the
        channel. This may be due to internal conditions (e.g. a forced
        shut-down) or due to an error handling a specific method, i.e.
        an exception. When a close is due to an exception, the sender
        provides the class and method id of the method which caused
        the exception.

        RULE:

            After sending this method any received method except
            Channel.Close-OK MUST be discarded.

        RULE:

            The peer sending this method MAY use a counter or timeout
            to detect failure of the other peer to respond correctly
            with Channel.Close-OK..

        PARAMETERS:
            reply_code: short

                The reply code. The AMQ reply codes are defined in AMQ
                RFC 011.

            reply_text: shortstr

                The localised reply text. This text can be logged as an
                aid to resolving issues.

            class_id: short

                failing method class

                When the close is provoked by a method exception, this
                is the class of the method.

            method_id: short

                failing method ID

                When the close is provoked by a method exception, this
                is the ID of the method.
        """
        # Acknowledge the broker's close request first.
        self.send_method(spec.Channel.CloseOk)
        if not self.connection.is_closing:
            # Transparently reopen the channel so this object stays
            # usable after a broker-initiated close.
            self._do_revive()
        # Surface the close reason to the caller as a ChannelError
        # subclass chosen by reply_code.
        raise error_for_code(
            reply_code, reply_text, (class_id, method_id), ChannelError,
        )
    def _on_close_ok(self):
        """Confirm a channel close.

        This method confirms a Channel.Close method and tells the
        recipient that it is safe to release resources for the channel
        and close the socket.

        RULE:

            A peer that detects a socket closure without having
            received a Channel.Close-Ok handshake method SHOULD log
            the error.
        """
        # Broker confirmed our close request: release everything.
        self.collect()
def flow(self, active):
"""Enable/disable flow from peer.
This method asks the peer to pause or restart the flow of
content data. This is a simple flow-control mechanism that a
peer can use to avoid oveflowing its queues or otherwise
finding itself receiving more messages than it can process.
Note that this method is not intended for window control. The
peer that receives a request to stop sending content should
finish sending the current content, if any, and then wait
until it receives a Flow restart method.
RULE:
When a new channel is opened, it is active. Some
applications assume that channels are inactive until
started. To emulate this behaviour a client MAY open the
channel, then pause it.
RULE:
When sending content data in multiple frames, a peer
SHOULD monitor the channel for incoming methods and
respond to a Channel.Flow as rapidly as possible.
RULE:
A peer MAY use the Channel.Flow method to throttle
incoming content data for internal reasons, for example,
when exchangeing data over a slower connection.
RULE:
The peer that requests a Channel.Flow method MAY
disconnect and/or ban a peer that does not respect the
request.
PARAMETERS:
active: boolean
start/stop content frames
If True, the peer starts sending content frames. If
False, the peer stops sending content frames.
"""
return self.send_method(
spec.Channel.Flow, 'b', (active,), wait=spec.Channel.FlowOk,
)
def _on_flow(self, active):
"""Enable/disable flow from peer.
This method asks the peer to pause or restart the flow of
content data. This is a simple flow-control mechanism that a
peer can use to avoid oveflowing its queues or otherwise
finding itself receiving more messages than it can process.
Note that this method is not intended for window control. The
peer that receives a request to stop sending content should
finish sending the current content, if any, and then wait
until it receives a Flow restart method.
RULE:
When a new channel is opened, it is active. Some
applications assume that channels are inactive until
started. To emulate this behaviour a client MAY open the
channel, then pause it.
RULE:
When sending content data in multiple frames, a peer
SHOULD monitor the channel for incoming methods and
respond to a Channel.Flow as rapidly as possible.
RULE:
A peer MAY use the Channel.Flow method to throttle
incoming content data for internal reasons, for example,
when exchangeing data over a slower connection.
RULE:
The peer that requests a Channel.Flow method MAY
disconnect and/or ban a peer that does not respect the
request.
PARAMETERS:
active: boolean
start/stop content frames
If True, the peer starts sending content frames. If
False, the peer stops sending content frames.
"""
self.active = active
self._x_flow_ok(self.active)
def _x_flow_ok(self, active):
"""Confirm a flow method.
Confirms to the peer that a flow command was received and
processed.
PARAMETERS:
active: boolean
current flow setting
Confirms the setting of the processed flow method:
True means the peer will start sending or continue
to send content frames; False means it will not.
"""
return self.send_method(spec.Channel.FlowOk, 'b', (active,))
def open(self):
"""Open a channel for use.
This method opens a virtual connection (a channel).
RULE:
This method MUST NOT be called when the channel is already
open.
PARAMETERS:
out_of_band: shortstr (DEPRECATED)
out-of-band settings
Configures out-of-band transfers on this channel. The
syntax and meaning of this field will be formally
defined at a later date.
"""
if self.is_open:
return
return self.send_method(
spec.Channel.Open, 's', ('',), wait=spec.Channel.OpenOk,
)
    def _on_open_ok(self):
        """Signal that the channel is ready.

        This method signals to the client that the channel is ready
        for use.
        """
        # Mark open before resolving the promise so callbacks see a
        # usable channel.
        self.is_open = True
        self.on_open(self)
        AMQP_LOGGER.debug('Channel open')
#############
#
# Exchange
#
#
# work with exchanges
#
# Exchanges match and distribute messages across queues.
# Exchanges can be configured in the server or created at runtime.
#
# GRAMMAR::
#
# exchange = C:DECLARE S:DECLARE-OK
# / C:DELETE S:DELETE-OK
#
# RULE:
#
# The server MUST implement the direct and fanout exchange
# types, and predeclare the corresponding exchanges named
# amq.direct and amq.fanout in each virtual host. The server
# MUST also predeclare a direct exchange to act as the default
# exchange for content Publish methods and for default queue
# bindings.
#
# RULE:
#
# The server SHOULD implement the topic exchange type, and
# predeclare the corresponding exchange named amq.topic in
# each virtual host.
#
# RULE:
#
# The server MAY implement the system exchange type, and
# predeclare the corresponding exchanges named amq.system in
# each virtual host. If the client attempts to bind a queue to
# the system exchange, the server MUST raise a connection
# exception with reply code 507 (not allowed).
#
def exchange_declare(self, exchange, type, passive=False, durable=False,
auto_delete=True, nowait=False, arguments=None,
argsig='BssbbbbbF'):
"""Declare exchange, create if needed.
This method creates an exchange if it does not already exist,
and if the exchange exists, verifies that it is of the correct
and expected class.
RULE:
The server SHOULD support a minimum of 16 exchanges per
virtual host and ideally, impose no limit except as
defined by available resources.
PARAMETERS:
exchange: shortstr
RULE:
Exchange names starting with "amq." are reserved
for predeclared and standardised exchanges. If
the client attempts to create an exchange starting
with "amq.", the server MUST raise a channel
exception with reply code 403 (access refused).
type: shortstr
exchange type
Each exchange belongs to one of a set of exchange
types implemented by the server. The exchange types
define the functionality of the exchange - i.e. how
messages are routed through it. It is not valid or
meaningful to attempt to change the type of an
existing exchange.
RULE:
If the exchange already exists with a different
type, the server MUST raise a connection exception
with a reply code 507 (not allowed).
RULE:
If the server does not support the requested
exchange type it MUST raise a connection exception
with a reply code 503 (command invalid).
passive: boolean
do not create exchange
If set, the server will not create the exchange. The
client can use this to check whether an exchange
exists without modifying the server state.
RULE:
If set, and the exchange does not already exist,
the server MUST raise a channel exception with
reply code 404 (not found).
durable: boolean
request a durable exchange
If set when creating a new exchange, the exchange will
be marked as durable. Durable exchanges remain active
when a server restarts. Non-durable exchanges
(transient exchanges) are purged if/when a server
restarts.
RULE:
The server MUST support both durable and transient
exchanges.
RULE:
The server MUST ignore the durable field if the
exchange already exists.
auto_delete: boolean
auto-delete when unused
If set, the exchange is deleted when all queues have
finished using it.
RULE:
The server SHOULD allow for a reasonable delay
between the point when it determines that an
exchange is not being used (or no longer used),
and the point when it deletes the exchange. At
the least it must allow a client to create an
exchange and then bind a queue to it, with a small
but non-zero delay between these two actions.
RULE:
The server MUST ignore the auto-delete field if
the exchange already exists.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
arguments: table
arguments for declaration
A set of arguments for the declaration. The syntax and
semantics of these arguments depends on the server
implementation. This field is ignored if passive is
True.
"""
if auto_delete:
warn(VDeprecationWarning(EXCHANGE_AUTODELETE_DEPRECATED))
self.send_method(
spec.Exchange.Declare, argsig,
(0, exchange, type, passive, durable, auto_delete,
False, nowait, arguments),
wait=None if nowait else spec.Exchange.DeclareOk,
)
def exchange_delete(self, exchange, if_unused=False, nowait=False,
argsig='Bsbb'):
"""Delete an exchange.
This method deletes an exchange. When an exchange is deleted
all queue bindings on the exchange are cancelled.
PARAMETERS:
exchange: shortstr
RULE:
The exchange MUST exist. Attempting to delete a
non-existing exchange causes a channel exception.
if_unused: boolean
delete only if unused
If set, the server will only delete the exchange if it
has no queue bindings. If the exchange has queue
bindings the server does not delete it but raises a
channel exception instead.
RULE:
If set, the server SHOULD delete the exchange but
only if it has no queue bindings.
RULE:
If set, the server SHOULD raise a channel
exception if the exchange is in use.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
"""
return self.send_method(
spec.Exchange.Delete, argsig, (0, exchange, if_unused, nowait),
wait=None if nowait else spec.Exchange.DeleteOk,
)
def exchange_bind(self, destination, source='', routing_key='',
nowait=False, arguments=None, argsig='BsssbF'):
"""Bind an exchange to an exchange.
RULE:
A server MUST allow and ignore duplicate bindings - that
is, two or more bind methods for a specific exchanges,
with identical arguments - without treating these as an
error.
RULE:
A server MUST allow cycles of exchange bindings to be
created including allowing an exchange to be bound to
itself.
RULE:
A server MUST not deliver the same message more than once
to a destination exchange, even if the topology of
exchanges and bindings results in multiple (even infinite)
routes to that exchange.
PARAMETERS:
reserved-1: short
destination: shortstr
Specifies the name of the destination exchange to
bind.
RULE:
A client MUST NOT be allowed to bind a non-
existent destination exchange.
RULE:
The server MUST accept a blank exchange name to
mean the default exchange.
source: shortstr
Specifies the name of the source exchange to bind.
RULE:
A client MUST NOT be allowed to bind a non-
existent source exchange.
RULE:
The server MUST accept a blank exchange name to
mean the default exchange.
routing-key: shortstr
Specifies the routing key for the binding. The routing
key is used for routing messages depending on the
exchange configuration. Not all exchanges use a
routing key - refer to the specific exchange
documentation.
no-wait: bit
arguments: table
A set of arguments for the binding. The syntax and
semantics of these arguments depends on the exchange
class.
"""
return self.send_method(
spec.Exchange.Bind, argsig,
(0, destination, source, routing_key, nowait, arguments),
wait=None if nowait else spec.Exchange.BindOk,
)
def exchange_unbind(self, destination, source='', routing_key='',
nowait=False, arguments=None, argsig='BsssbF'):
"""Unbind an exchange from an exchange.
RULE:
If a unbind fails, the server MUST raise a connection
exception.
PARAMETERS:
reserved-1: short
destination: shortstr
Specifies the name of the destination exchange to
unbind.
RULE:
The client MUST NOT attempt to unbind an exchange
that does not exist from an exchange.
RULE:
The server MUST accept a blank exchange name to
mean the default exchange.
source: shortstr
Specifies the name of the source exchange to unbind.
RULE:
The client MUST NOT attempt to unbind an exchange
from an exchange that does not exist.
RULE:
The server MUST accept a blank exchange name to
mean the default exchange.
routing-key: shortstr
Specifies the routing key of the binding to unbind.
no-wait: bit
arguments: table
Specifies the arguments of the binding to unbind.
"""
return self.send_method(
spec.Exchange.Unbind, argsig,
(0, destination, source, routing_key, nowait, arguments),
wait=None if nowait else spec.Exchange.UnbindOk,
)
#############
#
# Queue
#
#
# work with queues
#
# Queues store and forward messages. Queues can be configured in
# the server or created at runtime. Queues must be attached to at
# least one exchange in order to receive messages from publishers.
#
# GRAMMAR::
#
# queue = C:DECLARE S:DECLARE-OK
# / C:BIND S:BIND-OK
# / C:PURGE S:PURGE-OK
# / C:DELETE S:DELETE-OK
#
# RULE:
#
# A server MUST allow any content class to be sent to any
# queue, in any mix, and queue and delivery these content
# classes independently. Note that all methods that fetch
# content off queues are specific to a given content class.
#
def queue_bind(self, queue, exchange='', routing_key='',
nowait=False, arguments=None, argsig='BsssbF'):
"""Bind queue to an exchange.
This method binds a queue to an exchange. Until a queue is
bound it will not receive any messages. In a classic
messaging model, store-and-forward queues are bound to a dest
exchange and subscription queues are bound to a dest_wild
exchange.
RULE:
A server MUST allow ignore duplicate bindings - that is,
two or more bind methods for a specific queue, with
identical arguments - without treating these as an error.
RULE:
If a bind fails, the server MUST raise a connection
exception.
RULE:
The server MUST NOT allow a durable queue to bind to a
transient exchange. If the client attempts this the server
MUST raise a channel exception.
RULE:
Bindings for durable queues are automatically durable and
the server SHOULD restore such bindings after a server
restart.
RULE:
The server SHOULD support at least 4 bindings per queue,
and ideally, impose no limit except as defined by
available resources.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to bind. If the queue
name is empty, refers to the current queue for the
channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
RULE:
If the queue does not exist the server MUST raise
a channel exception with reply code 404 (not
found).
exchange: shortstr
The name of the exchange to bind to.
RULE:
If the exchange does not exist the server MUST
raise a channel exception with reply code 404 (not
found).
routing_key: shortstr
message routing key
Specifies the routing key for the binding. The
routing key is used for routing messages depending on
the exchange configuration. Not all exchanges use a
routing key - refer to the specific exchange
documentation. If the routing key is empty and the
queue name is empty, the routing key will be the
current queue for the channel, which is the last
declared queue.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
arguments: table
arguments for binding
A set of arguments for the binding. The syntax and
semantics of these arguments depends on the exchange
class.
"""
return self.send_method(
spec.Queue.Bind, argsig,
(0, queue, exchange, routing_key, nowait, arguments),
wait=None if nowait else spec.Queue.BindOk,
)
def queue_unbind(self, queue, exchange, routing_key='',
nowait=False, arguments=None, argsig='BsssF'):
"""Unbind a queue from an exchange.
This method unbinds a queue from an exchange.
RULE:
If a unbind fails, the server MUST raise a connection exception.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to unbind.
RULE:
The client MUST either specify a queue name or have
previously declared a queue on the same channel
RULE:
The client MUST NOT attempt to unbind a queue that
does not exist.
exchange: shortstr
The name of the exchange to unbind from.
RULE:
The client MUST NOT attempt to unbind a queue from an
exchange that does not exist.
RULE:
The server MUST accept a blank exchange name to mean
the default exchange.
routing_key: shortstr
routing key of binding
Specifies the routing key of the binding to unbind.
arguments: table
arguments of binding
Specifies the arguments of the binding to unbind.
"""
return self.send_method(
spec.Queue.Unbind, argsig,
(0, queue, exchange, routing_key, arguments),
wait=None if nowait else spec.Queue.UnbindOk,
)
def queue_declare(self, queue='', passive=False, durable=False,
exclusive=False, auto_delete=True, nowait=False,
arguments=None, argsig='BsbbbbbF'):
"""Declare queue, create if needed.
This method creates or checks a queue. When creating a new
queue the client can specify various properties that control
the durability of the queue and its contents, and the level of
sharing for the queue.
RULE:
The server MUST create a default binding for a newly-
created queue to the default exchange, which is an
exchange of type 'direct'.
RULE:
The server SHOULD support a minimum of 256 queues per
virtual host and ideally, impose no limit except as
defined by available resources.
PARAMETERS:
queue: shortstr
RULE:
The queue name MAY be empty, in which case the
server MUST create a new queue with a unique
generated name and return this to the client in
the Declare-Ok method.
RULE:
Queue names starting with "amq." are reserved for
predeclared and standardised server queues. If
the queue name starts with "amq." and the passive
option is False, the server MUST raise a connection
exception with reply code 403 (access refused).
passive: boolean
do not create queue
If set, the server will not create the queue. The
client can use this to check whether a queue exists
without modifying the server state.
RULE:
If set, and the queue does not already exist, the
server MUST respond with a reply code 404 (not
found) and raise a channel exception.
durable: boolean
request a durable queue
If set when creating a new queue, the queue will be
marked as durable. Durable queues remain active when
a server restarts. Non-durable queues (transient
queues) are purged if/when a server restarts. Note
that durable queues do not necessarily hold persistent
messages, although it does not make sense to send
persistent messages to a transient queue.
RULE:
The server MUST recreate the durable queue after a
restart.
RULE:
The server MUST support both durable and transient
queues.
RULE:
The server MUST ignore the durable field if the
queue already exists.
exclusive: boolean
request an exclusive queue
Exclusive queues may only be consumed from by the
current connection. Setting the 'exclusive' flag
always implies 'auto-delete'.
RULE:
The server MUST support both exclusive (private)
and non-exclusive (shared) queues.
RULE:
The server MUST raise a channel exception if
'exclusive' is specified and the queue already
exists and is owned by a different connection.
auto_delete: boolean
auto-delete queue when unused
If set, the queue is deleted when all consumers have
finished using it. Last consumer can be cancelled
either explicitly or because its channel is closed. If
there was no consumer ever on the queue, it won't be
deleted.
RULE:
The server SHOULD allow for a reasonable delay
between the point when it determines that a queue
is not being used (or no longer used), and the
point when it deletes the queue. At the least it
must allow a client to create a queue and then
create a consumer to read from it, with a small
but non-zero delay between these two actions. The
server should equally allow for clients that may
be disconnected prematurely, and wish to re-
consume from the same queue without losing
messages. We would recommend a configurable
timeout, with a suitable default value being one
minute.
RULE:
The server MUST ignore the auto-delete field if
the queue already exists.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
arguments: table
arguments for declaration
A set of arguments for the declaration. The syntax and
semantics of these arguments depends on the server
implementation. This field is ignored if passive is
True.
Returns a tuple containing 3 items:
the name of the queue (essential for automatically-named queues),
message count and
consumer count
"""
self.send_method(
spec.Queue.Declare, argsig,
(0, queue, passive, durable, exclusive, auto_delete,
nowait, arguments),
)
if not nowait:
return queue_declare_ok_t(*self.wait(
spec.Queue.DeclareOk, returns_tuple=True,
))
def queue_delete(self, queue='',
if_unused=False, if_empty=False, nowait=False,
argsig='Bsbbb'):
"""Delete a queue.
This method deletes a queue. When a queue is deleted any
pending messages are sent to a dead-letter queue if this is
defined in the server configuration, and all consumers on the
queue are cancelled.
RULE:
The server SHOULD use a dead-letter queue to hold messages
that were pending on a deleted queue, and MAY provide
facilities for a system administrator to move these
messages back to an active queue.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to delete. If the
queue name is empty, refers to the current queue for
the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
RULE:
The queue must exist. Attempting to delete a non-
existing queue causes a channel exception.
if_unused: boolean
delete only if unused
If set, the server will only delete the queue if it
has no consumers. If the queue has consumers the
server does does not delete it but raises a channel
exception instead.
RULE:
The server MUST respect the if-unused flag when
deleting a queue.
if_empty: boolean
delete only if empty
If set, the server will only delete the queue if it
has no messages. If the queue is not empty the server
raises a channel exception.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
If nowait is False, returns the number of deleted messages.
"""
return self.send_method(
spec.Queue.Delete, argsig,
(0, queue, if_unused, if_empty, nowait),
wait=None if nowait else spec.Queue.DeleteOk,
)
def queue_purge(self, queue='', nowait=False, argsig='Bsb'):
"""Purge a queue.
This method removes all messages from a queue. It does not
cancel consumers. Purged messages are deleted without any
formal "undo" mechanism.
RULE:
A call to purge MUST result in an empty queue.
RULE:
On transacted channels the server MUST not purge messages
that have already been sent to a client but not yet
acknowledged.
RULE:
The server MAY implement a purge queue or log that allows
system administrators to recover accidentally-purged
messages. The server SHOULD NOT keep purged messages in
the same storage spaces as the live messages since the
volumes of purged messages may get very large.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to purge. If the
queue name is empty, refers to the current queue for
the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
RULE:
The queue must exist. Attempting to purge a non-
existing queue causes a channel exception.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
If nowait is False, returns a number of purged messages.
"""
return self.send_method(
spec.Queue.Purge, argsig, (0, queue, nowait),
wait=None if nowait else spec.Queue.PurgeOk,
)
#############
#
# Basic
#
#
# work with basic content
#
# The Basic class provides methods that support an industry-
# standard messaging model.
#
# GRAMMAR::
#
# basic = C:QOS S:QOS-OK
# / C:CONSUME S:CONSUME-OK
# / C:CANCEL S:CANCEL-OK
# / C:PUBLISH content
# / S:RETURN content
# / S:DELIVER content
# / C:GET ( S:GET-OK content / S:GET-EMPTY )
# / C:ACK
# / C:REJECT
#
# RULE:
#
# The server SHOULD respect the persistent property of basic
# messages and SHOULD make a best-effort to hold persistent
# basic messages on a reliable storage mechanism.
#
# RULE:
#
# The server MUST NOT discard a persistent basic message in
# case of a queue overflow. The server MAY use the
# Channel.Flow method to slow or stop a basic message
# publisher when necessary.
#
# RULE:
#
# The server MAY overflow non-persistent basic messages to
# persistent storage and MAY discard or dead-letter non-
# persistent basic messages on a priority basis if the queue
# size exceeds some configured limit.
#
# RULE:
#
# The server MUST implement at least 2 priority levels for
# basic messages, where priorities 0-4 and 5-9 are treated as
# two distinct levels. The server MAY implement up to 10
# priority levels.
#
# RULE:
#
# The server MUST deliver messages of the same priority in
# order irrespective of their individual persistence.
#
# RULE:
#
# The server MUST support both automatic and explicit
# acknowledgments on Basic content.
#
def basic_ack(self, delivery_tag, multiple=False, argsig='Lb'):
"""Acknowledge one or more messages.
This method acknowledges one or more messages delivered via
the Deliver or Get-Ok methods. The client can ask to confirm
a single message or a set of messages up to and including a
specific message.
PARAMETERS:
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
multiple: boolean
acknowledge multiple messages
If set to True, the delivery tag is treated as "up to
and including", so that the client can acknowledge
multiple messages with a single method. If set to
False, the delivery tag refers to a single message.
If the multiple field is True, and the delivery tag
is zero, tells the server to acknowledge all
outstanding mesages.
RULE:
The server MUST validate that a non-zero delivery-
tag refers to an delivered message, and raise a
channel exception if this is not the case.
"""
return self.send_method(
spec.Basic.Ack, argsig, (delivery_tag, multiple),
)
def basic_cancel(self, consumer_tag, nowait=False, argsig='sb'):
"""End a queue consumer.
This method cancels a consumer. This does not affect already
delivered messages, but it does mean the server will not send
any more messages for that consumer. The client may receive
an abitrary number of messages in between sending the cancel
method and receiving the cancel-ok reply.
RULE:
If the queue no longer exists when the client sends a
cancel command, or the consumer has been cancelled for
other reasons, this command has no effect.
PARAMETERS:
consumer_tag: shortstr
consumer tag
Identifier for the consumer, valid within the current
connection.
RULE:
The consumer tag is valid only within the channel
from which the consumer was created. I.e. a client
MUST NOT create a consumer in one channel and then
use it in another.
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
"""
if self.connection is not None:
self.no_ack_consumers.discard(consumer_tag)
return self.send_method(
spec.Basic.Cancel, argsig, (consumer_tag, nowait),
wait=None if nowait else spec.Basic.CancelOk,
)
def _on_basic_cancel(self, consumer_tag):
"""Consumer cancelled by server.
Most likely the queue was deleted.
"""
callback = self._remove_tag(consumer_tag)
if callback:
callback(consumer_tag)
else:
raise ConsumerCancelled(consumer_tag, spec.Basic.Cancel)
def _on_basic_cancel_ok(self, consumer_tag):
self._remove_tag(consumer_tag)
def _remove_tag(self, consumer_tag):
self.callbacks.pop(consumer_tag, None)
return self.cancel_callbacks.pop(consumer_tag, None)
def basic_consume(self, queue='', consumer_tag='', no_local=False,
no_ack=False, exclusive=False, nowait=False,
callback=None, arguments=None, on_cancel=None,
argsig='BssbbbbF'):
"""Start a queue consumer.
This method asks the server to start a "consumer", which is a
transient request for messages from a specific queue.
Consumers last as long as the channel they were created on, or
until the client cancels them.
RULE:
The server SHOULD support at least 16 consumers per queue,
unless the queue was declared as private, and ideally,
impose no limit except as defined by available resources.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to consume from. If
the queue name is null, refers to the current queue
for the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
consumer_tag: shortstr
Specifies the identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
RULE:
The tag MUST NOT refer to an existing consumer. If
the client attempts to create two consumers with
the same non-empty tag the server MUST raise a
connection exception with reply code 530 (not
allowed).
no_local: boolean
do not deliver own messages
If the no-local field is set the server will not send
messages to the client that published them.
no_ack: boolean
no acknowledgment needed
If this field is set the server does not expect
acknowledgments for messages. That is, when a message
is delivered to the client the server automatically and
silently acknowledges it on behalf of the client. This
functionality increases performance but at the cost of
reliability. Messages can get lost if a client dies
before it can deliver them to the application.
exclusive: boolean
request exclusive access
Request exclusive consumer access, meaning only this
consumer can access the queue.
RULE:
If the server cannot grant exclusive access to the
queue when asked, - because there are other
consumers active - it MUST raise a channel
exception with return code 403 (access refused).
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
callback: Python callable
function/method called with each delivered message
For each message delivered by the broker, the
callable will be called with a Message object
as the single argument. If no callable is specified,
messages are quietly discarded, no_ack should probably
be set to True in that case.
"""
p = self.send_method(
spec.Basic.Consume, argsig,
(
0, queue, consumer_tag, no_local, no_ack, exclusive,
nowait, arguments
),
wait=None if nowait else spec.Basic.ConsumeOk,
returns_tuple=True
)
if not nowait:
# send_method() returns (consumer_tag,) tuple.
# consumer_tag is returned by broker using following rules:
# * consumer_tag is not specified by client, random one
# is generated by Broker
# * consumer_tag is provided by client, the same one
# is returned by broker
consumer_tag = p[0]
elif nowait and not consumer_tag:
raise ValueError(
'Consumer tag must be specified when nowait is True'
)
self.callbacks[consumer_tag] = callback
if on_cancel:
self.cancel_callbacks[consumer_tag] = on_cancel
if no_ack:
self.no_ack_consumers.add(consumer_tag)
if not nowait:
return consumer_tag
else:
return p
def _on_basic_deliver(self, consumer_tag, delivery_tag, redelivered,
exchange, routing_key, msg):
msg.channel = self
msg.delivery_info = {
'consumer_tag': consumer_tag,
'delivery_tag': delivery_tag,
'redelivered': redelivered,
'exchange': exchange,
'routing_key': routing_key,
}
try:
fun = self.callbacks[consumer_tag]
except KeyError:
AMQP_LOGGER.warning(
REJECTED_MESSAGE_WITHOUT_CALLBACK,
delivery_tag, consumer_tag, exchange, routing_key,
)
self.basic_reject(delivery_tag, requeue=True)
else:
fun(msg)
def basic_get(self, queue='', no_ack=False, argsig='Bsb'):
"""Direct access to a queue.
This method provides a direct access to the messages in a
queue using a synchronous dialogue that is designed for
specific types of application where synchronous functionality
is more important than performance.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to consume from. If
the queue name is null, refers to the current queue
for the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
no_ack: boolean
no acknowledgment needed
If this field is set the server does not expect
acknowledgments for messages. That is, when a message
is delivered to the client the server automatically and
silently acknowledges it on behalf of the client. This
functionality increases performance but at the cost of
reliability. Messages can get lost if a client dies
before it can deliver them to the application.
Non-blocking, returns a amqp.basic_message.Message object,
or None if queue is empty.
"""
ret = self.send_method(
spec.Basic.Get, argsig, (0, queue, no_ack),
wait=[spec.Basic.GetOk, spec.Basic.GetEmpty], returns_tuple=True,
)
if not ret or len(ret) < 2:
return self._on_get_empty(*ret)
return self._on_get_ok(*ret)
def _on_get_empty(self, cluster_id=None):
pass
def _on_get_ok(self, delivery_tag, redelivered, exchange, routing_key,
message_count, msg):
msg.channel = self
msg.delivery_info = {
'delivery_tag': delivery_tag,
'redelivered': redelivered,
'exchange': exchange,
'routing_key': routing_key,
'message_count': message_count
}
return msg
def _basic_publish(self, msg, exchange='', routing_key='',
mandatory=False, immediate=False, timeout=None,
argsig='Bssbb'):
"""Publish a message.
This method publishes a message to a specific exchange. The
message will be routed to queues as defined by the exchange
configuration and distributed to any active consumers when the
transaction, if any, is committed.
When channel is in confirm mode (when Connection parameter
confirm_publish is set to True), each message is confirmed. When
broker rejects published message (e.g. due internal broker
constrains), MessageNacked exception is raised.
PARAMETERS:
exchange: shortstr
Specifies the name of the exchange to publish to. The
exchange name can be empty, meaning the default
exchange. If the exchange name is specified, and that
exchange does not exist, the server will raise a
channel exception.
RULE:
The server MUST accept a blank exchange name to
mean the default exchange.
RULE:
The exchange MAY refuse basic content in which
case it MUST raise a channel exception with reply
code 540 (not implemented).
routing_key: shortstr
Message routing key
Specifies the routing key for the message. The
routing key is used for routing messages depending on
the exchange configuration.
mandatory: boolean
indicate mandatory routing
This flag tells the server how to react if the message
cannot be routed to a queue. If this flag is True, the
server will return an unroutable message with a Return
method. If this flag is False, the server silently
drops the message.
RULE:
The server SHOULD implement the mandatory flag.
immediate: boolean
request immediate delivery
This flag tells the server how to react if the message
cannot be routed to a queue consumer immediately. If
this flag is set, the server will return an
undeliverable message with a Return method. If this
flag is zero, the server will queue the message, but
with no guarantee that it will ever be consumed.
RULE:
The server SHOULD implement the immediate flag.
"""
if not self.connection:
raise RecoverableConnectionError(
'basic_publish: connection closed')
capabilities = self.connection.\
client_properties.get('capabilities', {})
if capabilities.get('connection.blocked', False):
try:
# Check if an event was sent, such as the out of memory message
self.connection.drain_events(timeout=0)
except socket.timeout:
pass
try:
with self.connection.transport.having_timeout(timeout):
return self.send_method(
spec.Basic.Publish, argsig,
(0, exchange, routing_key, mandatory, immediate), msg
)
except socket.timeout:
raise RecoverableChannelError('basic_publish: timed out')
basic_publish = _basic_publish
def basic_publish_confirm(self, *args, **kwargs):
def confirm_handler(method, *args):
# When RMQ nacks message we are raising MessageNacked exception
if method == spec.Basic.Nack:
raise MessageNacked()
if not self._confirm_selected:
self._confirm_selected = True
self.confirm_select()
ret = self._basic_publish(*args, **kwargs)
# Waiting for confirmation of message.
self.wait([spec.Basic.Ack, spec.Basic.Nack], callback=confirm_handler)
return ret
def basic_qos(self, prefetch_size, prefetch_count, a_global,
argsig='lBb'):
"""Specify quality of service.
This method requests a specific quality of service. The QoS
can be specified for the current channel or for all channels
on the connection. The particular properties and semantics of
a qos method always depend on the content class semantics.
Though the qos method could in principle apply to both peers,
it is currently meaningful only for the server.
PARAMETERS:
prefetch_size: long
prefetch window in octets
The client can request that messages be sent in
advance so that when the client finishes processing a
message, the following message is already held
locally, rather than needing to be sent down the
channel. Prefetching gives a performance improvement.
This field specifies the prefetch window size in
octets. The server will send a message in advance if
it is equal to or smaller in size than the available
prefetch size (and also falls into other prefetch
limits). May be set to zero, meaning "no specific
limit", although other prefetch limits may still
apply. The prefetch-size is ignored if the no-ack
option is set.
RULE:
The server MUST ignore this setting when the
client is not processing any messages - i.e. the
prefetch size does not limit the transfer of
single messages to a client, only the sending in
advance of more messages while the client still
has one or more unacknowledged messages.
prefetch_count: short
prefetch window in messages
Specifies a prefetch window in terms of whole
messages. This field may be used in combination with
the prefetch-size field; a message will only be sent
in advance if both prefetch windows (and those at the
channel and connection level) allow it. The prefetch-
count is ignored if the no-ack option is set.
RULE:
The server MAY send less data in advance than
allowed by the client's specified prefetch windows
but it MUST NOT send more.
a_global: boolean
apply to entire connection
By default the QoS settings apply to the current
channel only. If this field is set, they are applied
to the entire connection.
"""
return self.send_method(
spec.Basic.Qos, argsig, (prefetch_size, prefetch_count, a_global),
wait=spec.Basic.QosOk,
)
def basic_recover(self, requeue=False):
"""Redeliver unacknowledged messages.
This method asks the broker to redeliver all unacknowledged
messages on a specified channel. Zero or more messages may be
redelivered. This method is only allowed on non-transacted
channels.
RULE:
The server MUST set the redelivered flag on all messages
that are resent.
RULE:
The server MUST raise a channel exception if this is
called on a transacted channel.
PARAMETERS:
requeue: boolean
requeue the message
If this field is False, the message will be redelivered
to the original recipient. If this field is True, the
server will attempt to requeue the message,
potentially then delivering it to an alternative
subscriber.
"""
return self.send_method(spec.Basic.Recover, 'b', (requeue,))
def basic_recover_async(self, requeue=False):
return self.send_method(spec.Basic.RecoverAsync, 'b', (requeue,))
def basic_reject(self, delivery_tag, requeue, argsig='Lb'):
"""Reject an incoming message.
This method allows a client to reject a message. It can be
used to interrupt and cancel large incoming messages, or
return untreatable messages to their original queue.
RULE:
The server SHOULD be capable of accepting and process the
Reject method while sending message content with a Deliver
or Get-Ok method. I.e. the server should read and process
incoming methods while sending output frames. To cancel a
partially-send content, the server sends a content body
frame of size 1 (i.e. with no data except the frame-end
octet).
RULE:
The server SHOULD interpret this method as meaning that
the client is unable to process the message at this time.
RULE:
A client MUST NOT use this method as a means of selecting
messages to process. A rejected message MAY be discarded
or dead-lettered, not necessarily passed to another
client.
PARAMETERS:
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
requeue: boolean
requeue the message
If this field is False, the message will be discarded.
If this field is True, the server will attempt to
requeue the message.
RULE:
The server MUST NOT deliver the message to the
same client within the context of the current
channel. The recommended strategy is to attempt
to deliver the message to an alternative consumer,
and if that is not possible, to move the message
to a dead-letter queue. The server MAY use more
sophisticated tracking to hold the message on the
queue and redeliver it to the same client at a
later stage.
"""
return self.send_method(
spec.Basic.Reject, argsig, (delivery_tag, requeue),
)
def _on_basic_return(self, reply_code, reply_text,
exchange, routing_key, message):
"""Return a failed message.
This method returns an undeliverable message that was
published with the "immediate" flag set, or an unroutable
message published with the "mandatory" flag set. The reply
code and text provide information about the reason that the
message was undeliverable.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
exchange: shortstr
Specifies the name of the exchange that the message
was originally published to.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the
message was published.
"""
exc = error_for_code(
reply_code, reply_text, spec.Basic.Return, ChannelError,
)
handlers = self.events.get('basic_return')
if not handlers:
raise exc
for callback in handlers:
callback(exc, exchange, routing_key, message)
#############
#
# Tx
#
#
# work with standard transactions
#
# Standard transactions provide so-called "1.5 phase commit". We
# can ensure that work is never lost, but there is a chance of
# confirmations being lost, so that messages may be resent.
# Applications that use standard transactions must be able to
# detect and ignore duplicate messages.
#
# GRAMMAR::
#
# tx = C:SELECT S:SELECT-OK
# / C:COMMIT S:COMMIT-OK
# / C:ROLLBACK S:ROLLBACK-OK
#
# RULE:
#
# A client using standard transactions SHOULD be able to
# track all messages received within a reasonable period, and
# thus detect and reject duplicates of the same message. It
# SHOULD NOT pass these to the application layer.
#
#
def tx_commit(self):
"""Commit the current transaction.
This method commits all messages published and acknowledged in
the current transaction. A new transaction starts immediately
after a commit.
"""
return self.send_method(spec.Tx.Commit, wait=spec.Tx.CommitOk)
def tx_rollback(self):
"""Abandon the current transaction.
This method abandons all messages published and acknowledged
in the current transaction. A new transaction starts
immediately after a rollback.
"""
return self.send_method(spec.Tx.Rollback, wait=spec.Tx.RollbackOk)
def tx_select(self):
"""Select standard transaction mode.
This method sets the channel to use standard transactions.
The client must use this method at least once on a channel
before using the Commit or Rollback methods.
"""
return self.send_method(spec.Tx.Select, wait=spec.Tx.SelectOk)
def confirm_select(self, nowait=False):
"""Enable publisher confirms for this channel.
Note: This is an RabbitMQ extension.
Can now be used if the channel is in transactional mode.
:param nowait:
If set, the server will not respond to the method.
The client should not wait for a reply method. If the
server could not complete the method it will raise a channel
or connection exception.
"""
return self.send_method(
spec.Confirm.Select, 'b', (nowait,),
wait=None if nowait else spec.Confirm.SelectOk,
)
def _on_basic_ack(self, delivery_tag, multiple):
for callback in self.events['basic_ack']:
callback(delivery_tag, multiple)
def _on_basic_nack(self, delivery_tag, multiple):
for callback in self.events['basic_nack']:
callback(delivery_tag, multiple)
| {
"repo_name": "kawamon/hue",
"path": "desktop/core/ext-py/amqp-2.4.1/amqp/channel.py",
"copies": "2",
"size": "73292",
"license": "apache-2.0",
"hash": -4194805293553487000,
"line_mean": 34.0847295357,
"line_max": 79,
"alpha_frac": 0.5727500955,
"autogenerated": false,
"ratio": 5.060204363435515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 2089
} |
"""AMQP Channels
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import logging
import six
if six.PY2:
from Queue import Queue
else:
from queue import Queue
from .proto import Method
from .concurrency import synchronized_connection
from .abstract_channel import AbstractChannel
from .exceptions import ChannelError, ConsumerCancelled, error_for_code
from .spec import basic_return_t, queue_declare_ok_t, method_t
from .serialization import AMQPWriter
from . import spec
__all__ = ['Channel']
log = logging.getLogger('amqpy')
class Channel(AbstractChannel):
"""
The channel class provides methods for a client to establish and operate an AMQP channel. All
public members are fully thread-safe.
"""
### constants
#: Default channel mode
CH_MODE_NONE = 0
#: Transaction mode
CH_MODE_TX = 1
#: Publisher confirm mode (RabbitMQ extension)
CH_MODE_CONFIRM = 2
    def __init__(self, connection, channel_id=None, auto_decode=True):
        """Create a channel bound to a connection and using the specified numeric channel_id, and
        open on the server

        If `auto_decode` is enabled (default), incoming Message bodies will be automatically decoded
        to `str` if possible.

        :param connection: the channel's associated Connection
        :param channel_id: the channel's assigned channel ID; if None or 0, a free
            id is allocated from the connection
        :param auto_decode: enable auto decoding of message bodies
        :type connection: amqpy.connection.Connection
        :type channel_id: int or None
        :type auto_decode: bool
        """
        if channel_id:
            # Reserve the requested id on the connection so no other channel can
            # use it (presumably raises if already taken — see
            # Connection._claim_channel_id; TODO confirm).
            # noinspection PyProtectedMember
            connection._claim_channel_id(channel_id)
        else:
            # noinspection PyProtectedMember
            channel_id = connection._get_free_channel_id()
        super(Channel, self).__init__(connection, channel_id)
        # auto decode received messages
        self.auto_decode = auto_decode
        ### channel state variables:
        #: Current channel open/closed state
        #:
        #: :type: bool
        self.is_open = False
        #: Current channel active state (flow control)
        #:
        #: :type: bool
        self.active = True
        #: Channel mode state (default, transactional, publisher confirm)
        #:
        #: :type: int
        self.mode = 0  # CH_MODE_NONE
        #: Returned messages that the server was unable to deliver
        #:
        #: :type: queue.Queue
        self.returned_messages = Queue()
        # consumer callbacks dict[consumer_tag str: callable]
        self.callbacks = {}
        # consumer cancel callbacks dict[consumer_tag str: callable]
        self.cancel_callbacks = {}
        # set of consumers that have opted for `no_ack` delivery (server will not expect an ack
        # for delivered messages)
        self.no_ack_consumers = set()
        # open the channel
        self._open()
def _close(self):
"""Tear down this object, after we've agreed to close with the server
"""
log.debug('Channel close #{}'.format(self.channel_id))
self.is_open = False
channel_id, self.channel_id = self.channel_id, None
connection, self.connection = self.connection, None
if connection:
connection.channels.pop(channel_id, None)
# noinspection PyProtectedMember
connection._avail_channel_ids.append(channel_id)
self.callbacks.clear()
self.cancel_callbacks.clear()
self.no_ack_consumers.clear()
def _open(self):
"""Open the channel
"""
if self.is_open:
return
self._send_open()
def _revive(self):
self.is_open = False
self.mode = self.CH_MODE_NONE
self._send_open()
    @synchronized_connection()
    def close(self, reply_code=0, reply_text='', method_type=method_t(0, 0)):
        """Request a channel close

        This method indicates that the sender wants to close the channel. This may be due to
        internal conditions (e.g. a forced shut-down) or due to an error handling a specific method,
        i.e. an exception. When a close is due to an exception, the sender provides the class and
        method id of the method which caused the exception.

        :param reply_code: the reply code
        :param reply_text: localized reply text
        :param method_type: if close is triggered by a failing method, this is the method that
            caused it
        :type reply_code: int
        :type reply_text: str
        :type method_type: amqpy.spec.method_t

        .. note:: the ``method_type=method_t(0, 0)`` default is evaluated once at
           definition time; this is safe only as long as ``method_t`` instances
           are immutable (they appear to be — TODO confirm against amqpy.spec).
        """
        try:
            # No-op when the channel never opened or the connection is gone.
            if not self.is_open or self.connection is None:
                return
            args = AMQPWriter()
            args.write_short(reply_code)
            args.write_shortstr(reply_text)
            args.write_short(method_type.class_id)
            args.write_short(method_type.method_id)
            self._send_method(Method(spec.Channel.Close, args))
            # Wait for either our CloseOk, or a Close the server sent concurrently.
            return self.wait_any([spec.Channel.Close, spec.Channel.CloseOk])
        finally:
            # The channel is considered unusable after a close request,
            # even if sending or waiting raised.
            self.connection = None
def _cb_close(self, method):
"""Respond to a channel close sent by the server
This method indicates that the sender (server) wants to close the channel. This may be due
to internal conditions (e.g. a forced shut-down) or due to an error handling a specific
method, i.e. an exception. When a close is due to an exception, the sender provides the
class and method id of the method which caused the exception.
This method sends a "close-ok" to the server, then re-opens the channel.
"""
args = method.args
reply_code = args.read_short()
reply_text = args.read_shortstr()
class_id = args.read_short()
method_id = args.read_short()
self._send_method(Method(spec.Channel.CloseOk))
self.is_open = False
# re-open the channel
self._revive()
# get information about the method which caused the server to close the channel
method_type = method_t(class_id, method_id)
raise error_for_code(reply_code, reply_text, method_type, ChannelError, self.channel_id)
def _cb_close_ok(self, method):
"""Confirm a channel close
This method confirms a Channel.Close method and tells the recipient that it is safe to
release resources for the channel and close the socket.
"""
assert method
self._close()
    @synchronized_connection()
    def flow(self, active):
        """Enable/disable flow from peer

        This method asks the peer to pause or restart the flow of content data. This is a simple
        flow-control mechanism that a peer can use to avoid overflowing its queues or otherwise
        finding itself receiving more messages than it can process. Note that this method is not
        intended for window control. The peer that receives a request to stop sending content
        should finish sending the current content, if any, and then wait until it receives a Flow
        restart method.

        :param active: True: peer starts sending content frames; False: peer stops sending content
            frames
        :type active: bool
        """
        args = AMQPWriter()
        args.write_bit(active)
        self._send_method(Method(spec.Channel.Flow, args))
        # NOTE(review): this wait list mixes a spec method type with the bound
        # callback ``self._cb_flow_ok``, unlike sibling calls (e.g. close())
        # which pass only method types to wait_any(). Verify that wait_any()
        # accepts a callable here — this looks like it may be a bug.
        return self.wait_any([spec.Channel.FlowOk, self._cb_flow_ok])
def _cb_flow(self, method):
"""Enable/disable flow from peer
This method asks the peer to pause or restart the flow of content data. This is a simple
flow-control mechanism that a peer can use to avoid overflowing its queues or otherwise
finding itself receiving more messages than it can process. Note that this method is not
intended for window control The peer that receives a request to stop sending content
should finish sending the current content, if any, and then wait until it receives a Flow
restart method.
"""
args = method.args
self.active = args.read_bit()
self._send_flow_ok(self.active)
def _send_flow_ok(self, active):
    """Send a Channel.FlowOk confirming a peer's flow request

    :param bool active: flow state being confirmed; True means content frames may flow
    """
    writer = AMQPWriter()
    writer.write_bit(active)
    self._send_method(Method(spec.Channel.FlowOk, writer))
def _cb_flow_ok(self, method):
"""Confirm a flow method
Confirms to the peer that a flow command was received and processed.
"""
args = method.args
return args.read_bit()
def _send_open(self):
    """Send a Channel.Open and wait for the server's open-ok"""
    writer = AMQPWriter()
    writer.write_shortstr('')  # reserved field, must be empty
    self._send_method(Method(spec.Channel.Open, writer))
    return self.wait(spec.Channel.OpenOk)
def _cb_open_ok(self, method):
    """Handle a Channel.OpenOk: the server signals this channel is ready for use"""
    assert method
    self.is_open = True
    log.debug('Channel open')
@synchronized_connection()
def exchange_declare(self, exchange, exch_type, passive=False, durable=False, auto_delete=True,
                     nowait=False, arguments=None):
    """Declare an exchange, creating it if it does not already exist

    If the exchange already exists, the server verifies it is of the expected type — an
    existing exchange must not be redeclared with a different type. The server ignores
    `durable` and `auto_delete` for an exchange that already exists, and `arguments` is
    ignored when `passive` is set. With `nowait` set, a failure surfaces as a channel or
    connection exception rather than as a reply.

    :param str exchange: exchange name
    :param str exch_type: exchange type (direct, fanout, etc.)
    :param bool passive: do not create; only check whether the exchange exists
    :param bool durable: mark exchange as durable (remain active after server restarts)
    :param bool auto_delete: auto-delete exchange when all queues have finished using it
    :param bool nowait: if set, the server will not respond to the method and the client
        should not wait for a reply
    :param dict arguments: exchange declare arguments
    :raise AccessRefused: if declaring an exchange with a reserved name (amq.*)
    :raise NotFound: if `passive` is enabled and the exchange does not exist
    :return: None
    """
    writer = AMQPWriter()
    writer.write_short(0)            # reserved-1
    writer.write_shortstr(exchange)  # exchange name
    writer.write_shortstr(exch_type)  # exchange type
    writer.write_bit(passive)
    writer.write_bit(durable)
    writer.write_bit(auto_delete)
    writer.write_bit(False)          # internal flag: never exposed by this client
    writer.write_bit(nowait)
    writer.write_table(arguments or {})
    self._send_method(Method(spec.Exchange.Declare, writer))
    if not nowait:
        return self.wait(spec.Exchange.DeclareOk)
def _cb_exchange_declare_ok(self, method):
"""Confirms an exchange declaration
The server sends this method to confirm a Declare method and confirms the name of the
exchange, essential for automatically-named exchanges.
"""
pass
@synchronized_connection()
def exchange_delete(self, exchange, if_unused=False, nowait=False):
    """Delete an exchange

    Deleting an exchange cancels all queue bindings on it. The server raises a channel
    exception if the exchange does not exist, or if `if_unused` is set and the exchange
    still has queue bindings.

    :param str exchange: exchange name
    :param bool if_unused: delete only if unused (has no queue bindings)
    :param bool nowait: if set, the server will not respond to the method and the client
        should not wait for a reply
    :raise NotFound: if exchange with `exchange` does not exist
    :raise PreconditionFailed: if `if_unused` is set and the exchange has bindings
    :return: None
    """
    writer = AMQPWriter()
    writer.write_short(0)  # reserved-1
    writer.write_shortstr(exchange)
    writer.write_bit(if_unused)
    writer.write_bit(nowait)
    self._send_method(Method(spec.Exchange.Delete, writer))
    if not nowait:
        return self.wait(spec.Exchange.DeleteOk)
def _cb_exchange_delete_ok(self, method):
"""Confirm deletion of an exchange
The server sends this method to confirm that the deletion of an exchange was successful.
"""
pass
@synchronized_connection()
def exchange_bind(self, dest_exch, source_exch='', routing_key='', nowait=False,
                  arguments=None):
    """Bind an exchange to an exchange

    Both exchanges must already exist; blank exchange names mean the default exchange.
    The server allows and ignores duplicate bindings, permits binding cycles (including an
    exchange bound to itself), and never delivers the same message more than once to a
    destination exchange regardless of the binding topology.

    :param str dest_exch: name of destination exchange to bind
    :param str source_exch: name of source exchange to bind
    :param str routing_key: routing key for the binding (note: not all exchanges use a
        routing key)
    :param bool nowait: if set, the server will not respond to the method and the client
        should not wait for a reply
    :param dict arguments: binding arguments, specific to the exchange class
    """
    writer = AMQPWriter()
    writer.write_short(0)  # reserved-1
    writer.write_shortstr(dest_exch)
    writer.write_shortstr(source_exch)
    writer.write_shortstr(routing_key)
    writer.write_bit(nowait)
    writer.write_table({} if arguments is None else arguments)
    self._send_method(Method(spec.Exchange.Bind, writer))
    if not nowait:
        return self.wait(spec.Exchange.BindOk)
@synchronized_connection()
def exchange_unbind(self, dest_exch, source_exch='', routing_key='', nowait=False,
                    arguments=None):
    """Unbind an exchange from an exchange

    Blank exchange names mean the default exchange. If the unbind fails, the server
    raises a connection exception; the server must not attempt to unbind an exchange that
    does not exist from an exchange.

    :param str dest_exch: destination exchange name
    :param str source_exch: source exchange name
    :param str routing_key: routing key to unbind
    :param bool nowait: if set, the server will not respond to the method and the client
        should not wait for a reply
    :param dict arguments: binding arguments, specific to the exchange class
    """
    writer = AMQPWriter()
    writer.write_short(0)  # reserved-1
    writer.write_shortstr(dest_exch)
    writer.write_shortstr(source_exch)
    writer.write_shortstr(routing_key)
    writer.write_bit(nowait)
    writer.write_table({} if arguments is None else arguments)
    self._send_method(Method(spec.Exchange.Unbind, writer))
    if not nowait:
        return self.wait(spec.Exchange.UnbindOk)
def _cb_exchange_bind_ok(self, method):
"""Confirm bind successful
The server sends this method to confirm that the bind was successful.
"""
pass
def _cb_exchange_unbind_ok(self, method):
"""Confirm unbind successful
The server sends this method to confirm that the unbind was successful.
"""
pass
@synchronized_connection()
def queue_bind(self, queue, exchange='', routing_key='', nowait=False, arguments=None):
    """Bind a queue to an exchange

    Until a queue is bound it will not receive any messages. In a classic messaging
    model, store-and-forward queues are bound to a dest exchange and subscription queues
    are bound to a dest_wild exchange.

    * The server allows and ignores duplicate bindings; a failed bind raises a connection
      exception.
    * Binding a durable queue to a transient exchange raises a channel exception.
    * The server should support at least 4 bindings per queue and ideally impose no limit
      beyond available resources.
    * If `queue` is empty and no queue was previously declared on this channel, the
      server raises a 530 NOT ALLOWED connection exception.
    * A non-existent `queue` or `exchange` raises a 404 NOT FOUND channel exception.

    :param str queue: name of queue to bind; blank refers to the last declared queue for
        this channel
    :param str exchange: name of exchange to bind to
    :param str routing_key: routing key for the binding
    :param bool nowait: if set, the server will not respond to the method and the client
        should not wait for a reply
    :param dict arguments: binding arguments, specific to the exchange class
    """
    writer = AMQPWriter()
    writer.write_short(0)  # reserved-1
    writer.write_shortstr(queue)
    writer.write_shortstr(exchange)
    writer.write_shortstr(routing_key)
    writer.write_bit(nowait)
    writer.write_table({} if arguments is None else arguments)
    self._send_method(Method(spec.Queue.Bind, writer))
    if not nowait:
        return self.wait(spec.Queue.BindOk)
def _cb_queue_bind_ok(self, method):
"""Confirm bind successful
The server sends this method to confirm that the bind was successful.
"""
pass
@synchronized_connection()
def queue_unbind(self, queue, exchange, routing_key='', nowait=False, arguments=None):
    """Unbind a queue from an exchange

    This method unbinds a queue from an exchange.

    * If a unbind fails, the server MUST raise a connection exception.
    * The client must not attempt to unbind a queue that does not exist.
    * The client must not attempt to unbind a queue from an exchange that does not exist.

    :param str queue: name of queue to unbind, leave blank to refer to the last declared
        queue on this channel
    :param str exchange: name of exchange to unbind, leave blank to refer to default exchange
    :param str routing_key: routing key of binding
    :param bool nowait: if set, skip waiting for the server's unbind-ok reply; note that
        no "no-wait" flag is sent on the wire (see the commented-out write below), so this
        only controls the local wait
    :param dict arguments: binding arguments, specific to the exchange class
    """
    arguments = {} if arguments is None else arguments
    args = AMQPWriter()
    args.write_short(0)
    args.write_shortstr(queue)
    args.write_shortstr(exchange)
    args.write_shortstr(routing_key)
    # NOTE: unlike queue.bind, the queue.unbind method frame carries no "no-wait" bit,
    # so `nowait` is deliberately not written to the frame — TODO confirm against the
    # protocol spec version in use:
    # args.write_bit(nowait)
    args.write_table(arguments)
    self._send_method(Method(spec.Queue.Unbind, args))
    if not nowait:
        return self.wait(spec.Queue.UnbindOk)
def _cb_queue_unbind_ok(self, method):
"""Confirm unbind successful
This method confirms that the unbind was successful.
"""
pass
@synchronized_connection()
def queue_declare(self, queue='', passive=False, durable=False, exclusive=False,
                  auto_delete=True, nowait=False,
                  arguments=None):
    """Declare a queue, creating it if needed

    Creates or checks a queue. When creating, the caller controls the durability of the
    queue and its contents and the level of sharing. The reply carries the queue name,
    message count and consumer count, which is essential for automatically-named queues.

    * With `passive` set, the server state is not modified: the server only checks that
      the queue exists and returns its properties, raising a 404 NOT FOUND channel
      exception otherwise.
    * A newly created queue gets a default binding to the default exchange (type
      'direct').
    * Queue names starting with 'amq.' are reserved for the server; declaring one without
      `passive` raises a 403 ACCESS REFUSED connection exception.
    * Accessing a queue declared exclusive by another open connection raises a 405
      RESOURCE LOCKED channel exception.
    * The server ignores `auto_delete` if the queue already exists.

    RabbitMQ supports useful additional arguments, e.g. x-max-length (int): the maximum
    queue size, measured in ready messages (unacknowledged messages and message size are
    ignored); once the limit is reached, messages are dropped or dead-lettered from the
    front of the queue to make room for new ones.

    :param str queue: queue name; leave blank to let the server generate one
    :param bool passive: do not create queue; only check whether it exists
    :param bool durable: mark as durable (remain active after server restarts)
    :param bool exclusive: mark as exclusive (only this connection may consume); implies
        `auto_delete`
    :param bool auto_delete: auto-delete queue when all consumers have finished using it
    :param bool nowait: if set, the server will not respond to the method and the client
        should not wait for a reply
    :param dict arguments: exchange declare arguments
    :raise NotFound: if `passive` is enabled and the queue does not exist
    :raise AccessRefused: if declaring a queue with a reserved name
    :raise ResourceLocked: if accessing an exclusive queue declared by another open
        connection
    :return: queue_declare_ok_t(queue, message_count, consumer_count), or None if `nowait`
    :rtype: queue_declare_ok_t or None
    """
    writer = AMQPWriter()
    writer.write_short(0)  # reserved-1
    writer.write_shortstr(queue)
    writer.write_bit(passive)
    writer.write_bit(durable)
    writer.write_bit(exclusive)
    writer.write_bit(auto_delete)
    writer.write_bit(nowait)
    writer.write_table(arguments or {})
    self._send_method(Method(spec.Queue.Declare, writer))
    if not nowait:
        return self.wait(spec.Queue.DeclareOk)
def _cb_queue_declare_ok(self, method):
    """Parse the server's reply to a `queue.declare`

    :return: queue_declare_ok_t(queue, message_count, consumer_count)
    :rtype: queue_declare_ok_t
    """
    reader = method.args
    name = reader.read_shortstr()
    message_count = reader.read_long()
    consumer_count = reader.read_long()
    return queue_declare_ok_t(name, message_count, consumer_count)
@synchronized_connection()
def queue_delete(self, queue='', if_unused=False, if_empty=False, nowait=False):
    """Delete a queue

    When a queue is deleted, any pending messages are sent to a dead-letter queue if one
    is defined in the server configuration, and all consumers on the queue are cancelled.

    :param str queue: name of queue to delete; empty string refers to the last declared
        queue on this channel
    :param bool if_unused: delete only if unused (has no consumers); raise a channel
        exception otherwise
    :param bool if_empty: delete only if empty; raise a channel exception otherwise
    :param bool nowait: if set, the server will not respond to the method and the client
        should not wait for a reply
    :raise NotFound: if `queue` does not exist
    :raise PreconditionFailed: if `if_unused` or `if_empty` conditions are not met
    :return: number of messages deleted
    :rtype: int
    """
    writer = AMQPWriter()
    writer.write_short(0)  # reserved-1
    writer.write_shortstr(queue)
    writer.write_bit(if_unused)
    writer.write_bit(if_empty)
    writer.write_bit(nowait)
    self._send_method(Method(spec.Queue.Delete, writer))
    if not nowait:
        return self.wait(spec.Queue.DeleteOk)
def _cb_queue_delete_ok(self, method):
"""Confirm deletion of a queue
This method confirms the deletion of a queue.
PARAMETERS:
message_count: long
number of messages purged
Reports the number of messages purged.
"""
args = method.args
return args.read_long()
@synchronized_connection()
def queue_purge(self, queue='', nowait=False):
    """Purge a queue

    Removes all messages from a queue without cancelling consumers; purged messages are
    deleted without any formal "undo" mechanism. On transacted channels the server must
    not purge messages that have already been sent to a client but not yet acknowledged.

    :param str queue: queue name to purge; leave blank to refer to the last declared
        queue for this channel
    :param bool nowait: if set, the server will not respond to the method and the client
        should not wait for a reply
    :return: message count (if nowait is False)
    :rtype: int or None
    """
    writer = AMQPWriter()
    writer.write_short(0)  # reserved-1
    writer.write_shortstr(queue)
    writer.write_bit(nowait)
    self._send_method(Method(spec.Queue.Purge, writer))
    if not nowait:
        return self.wait(spec.Queue.PurgeOk)
def _cb_queue_purge_ok(self, method):
"""Confirms a queue purge
This method confirms the purge of a queue.
PARAMETERS:
message_count: long
number of messages purged
Reports the number of messages purged.
"""
args = method.args
return args.read_long()
@synchronized_connection()
def basic_ack(self, delivery_tag, multiple=False):
    """Acknowledge one or more messages

    Acknowledges messages delivered via the Deliver or Get-Ok methods, either a single
    message or all messages up to and including a specific one. Delivery tags are valid
    only within the channel the message was received on; an invalid tag makes the server
    raise a channel exception. Pass delivery_tag=0 with multiple=True to acknowledge all
    outstanding messages.

    :param int delivery_tag: server-assigned delivery tag; 0 means "all messages received
        so far"
    :param bool multiple: if set, the `delivery_tag` is treated as "all messages up to
        and including"
    """
    writer = AMQPWriter()
    writer.write_longlong(delivery_tag)
    writer.write_bit(multiple)
    self._send_method(Method(spec.Basic.Ack, writer))
@synchronized_connection()
def basic_cancel(self, consumer_tag, nowait=False):
    """End a queue consumer

    Cancels a consumer. Already-delivered messages are unaffected, but the server will
    not send any more messages for that consumer. The client may still receive an
    arbitrary number of in-flight messages between sending the cancel and receiving the
    cancel-ok reply.

    * If the queue no longer exists when the client sends a cancel command, or the
      consumer has been cancelled for other reasons, this command has no effect.

    :param str consumer_tag: consumer tag, valid only within the current connection and
        channel
    :param bool nowait: if set, the server will not respond to the method and the client
        should not wait for a reply
    """
    if self.connection is not None:
        self.no_ack_consumers.discard(consumer_tag)
    args = AMQPWriter()
    args.write_shortstr(consumer_tag)
    args.write_bit(nowait)
    self._send_method(Method(spec.Basic.Cancel, args))
    # Bug fix: when `nowait` is set the server sends no cancel-ok, so unconditionally
    # waiting for one would block forever. Only wait when a reply is expected, matching
    # the other nowait-capable methods in this class.
    if not nowait:
        return self.wait(spec.Basic.CancelOk)
def _cb_basic_cancel_notify(self, method):
"""Consumer cancelled by server.
Most likely the queue was deleted.
"""
args = method.args
consumer_tag = args.read_shortstr()
callback = self._on_cancel(consumer_tag)
if callback:
callback(consumer_tag)
else:
raise ConsumerCancelled(consumer_tag, spec.Basic.Cancel)
def _cb_basic_cancel_ok(self, method):
"""Confirm a cancelled consumer
This method confirms that the cancellation was completed.
PARAMETERS: consumer_tag: shortstr
consumer tag
Identifier for the consumer, valid within the current connection.
RULE:
The consumer tag is valid only within the channel from which the consumer was
created. I.e. a client
MUST NOT create a consumer in one channel and then use it in another.
"""
args = method.args
consumer_tag = args.read_shortstr()
self._on_cancel(consumer_tag)
def _on_cancel(self, consumer_tag):
"""
:param consumer_tag:
:return: callback, if any
:rtype: callable or None
"""
self.callbacks.pop(consumer_tag, None)
return self.cancel_callbacks.pop(consumer_tag, None)
@synchronized_connection()
def basic_consume(self, queue='', consumer_tag='', no_local=False, no_ack=False,
                  exclusive=False, nowait=False, callback=None, arguments=None, on_cancel=None):
    """Start a queue consumer

    Asks the server to start a "consumer": a transient request for messages from a
    specific queue. Consumers last as long as the channel they were created on, or until
    the client cancels them.

    * Consumer tags are local to a connection, so two clients can use the same tags; on
      one connection a duplicate tag makes the server raise a 530 NOT ALLOWED connection
      exception.
    * With `no_ack` set, the server automatically acknowledges each message on behalf of
      the client.
    * With `exclusive` set, the client asks for sole access to the queue; if other
      consumers are active the server raises a 403 ACCESS REFUSED channel exception.
    * `callback` must be a `Callable(message)` invoked for each message delivered by the
      broker. Without one, messages are quietly discarded (`no_ack` should probably be
      set to True in that case).

    :param str queue: name of queue; if None, refers to last declared queue for this
        channel
    :param str consumer_tag: consumer tag, local to the connection
    :param bool no_local: if True: do not deliver own messages
    :param bool no_ack: server will not expect an ack for each message
    :param bool exclusive: request exclusive access
    :param bool nowait: if set, the server will not respond to the method and the client
        should not wait for a reply
    :param Callable callback: a callback callable(message) for each delivered message
    :param dict arguments: AMQP method arguments
    :param Callable on_cancel: a callback callable
    :return: consumer tag
    :rtype: str
    """
    writer = AMQPWriter()
    writer.write_short(0)  # reserved-1
    writer.write_shortstr(queue)
    writer.write_shortstr(consumer_tag)
    writer.write_bit(no_local)
    writer.write_bit(no_ack)
    writer.write_bit(exclusive)
    writer.write_bit(nowait)
    writer.write_table(arguments or {})
    self._send_method(Method(spec.Basic.Consume, writer))
    if not nowait:
        # the server may assign the tag (e.g. when the client passed a blank one)
        consumer_tag = self.wait(spec.Basic.ConsumeOk)

    self.callbacks[consumer_tag] = callback
    if on_cancel:
        self.cancel_callbacks[consumer_tag] = on_cancel
    if no_ack:
        self.no_ack_consumers.add(consumer_tag)
    return consumer_tag
def _cb_basic_consume_ok(self, method):
"""Confirm a new consumer
The server provides the client with a consumer tag, which is used by the client for methods
called on the consumer at a later stage.
PARAMETERS:
consumer_tag: shortstr
Holds the consumer tag specified by the client or provided by the server.
"""
args = method.args
return args.read_shortstr()
def _cb_basic_deliver(self, method):
"""Notify the client of a consumer message
This method delivers a message to the client, via a consumer. In the asynchronous message
delivery model, the client starts a consumer using the Consume method, then the server
responds with Deliver methods as and when messages arrive for that consumer.
This method can be called in a "classmethod" style static-context and is done so by
:meth:`~amqpy.connection.Connection.drain_events()`.
RULE:
The server SHOULD track the number of times a message has been delivered to clients and
when a message is redelivered a certain number of times - e.g. 5 times - without being
acknowledged, the server SHOULD consider the message to be unprocessable (possibly
causing client applications to abort), and move the message to a dead letter queue.
PARAMETERS:
consumer_tag: shortstr
consumer tag
Identifier for the consumer, valid within the current connection.
RULE:
The consumer tag is valid only within the channel from which the consumer was
created. I.e. a client
MUST NOT create a consumer in one channel and then use it in another.
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel from which the message was
received I.e. a
client MUST NOT receive a message on one channel and then acknowledge it on
another.
RULE:
The server MUST NOT use a zero value for delivery tags Zero is reserved for
client use, meaning
"all messages so far received".
redelivered: boolean
message is being redelivered
This indicates that the message has been previously delivered to this or another
client.
exchange: shortstr
Specifies the name of the exchange that the message was originally published to.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the message was published.
"""
args = method.args
msg = method.content
consumer_tag = args.read_shortstr()
delivery_tag = args.read_longlong()
redelivered = args.read_bit()
exchange = args.read_shortstr()
routing_key = args.read_shortstr()
msg.channel = self
msg.delivery_info = {
'consumer_tag': consumer_tag,
'delivery_tag': delivery_tag,
'redelivered': redelivered,
'exchange': exchange,
'routing_key': routing_key,
}
callback = self.callbacks.get(consumer_tag)
if callback:
callback(msg)
else:
raise Exception('No callback available for consumer tag: {}'.format(consumer_tag))
@synchronized_connection()
def basic_get(self, queue='', no_ack=False):
    """Directly get a message from the `queue`

    Non-blocking: if no messages are available on the queue, `None` is returned.

    :param str queue: queue name; leave blank to refer to last declared queue for the
        channel
    :param bool no_ack: if enabled, the server automatically acknowledges the message
    :return: message, or None if no messages are available on the queue
    :rtype: amqpy.message.Message or None
    """
    writer = AMQPWriter()
    writer.write_short(0)  # reserved-1
    writer.write_shortstr(queue)
    writer.write_bit(no_ack)
    self._send_method(Method(spec.Basic.Get, writer))
    return self.wait_any([spec.Basic.GetOk, spec.Basic.GetEmpty])
def _cb_basic_get_empty(self, method):
"""Indicate no messages available
This method tells the client that the queue has no messages
available for the client.
"""
args = method.args
args.read_shortstr()
def _cb_basic_get_ok(self, method):
"""Provide client with a message
This method delivers a message to the client following a get method. A message delivered
by 'get-ok' must be acknowledged unless the no-ack option was set in the get method.
PARAMETERS:
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel from which the message was
received I.e. a
client MUST NOT receive a message on one channel and then acknowledge it on
another.
RULE:
The server MUST NOT use a zero value for delivery tags Zero is reserved for
client use, meaning
"all messages so far received".
redelivered: boolean
message is being redelivered
This indicates that the message has been previously delivered to this or another
client.
exchange: shortstr
Specifies the name of the exchange that the message was originally published to.
If empty, the message
was published to the default exchange.
routing_key: shortstr
Message routing key
Specifies the routing key name specified when the message was published.
message_count: long
number of messages pending
This field reports the number of messages pending on the queue, excluding the
message being delivered.
Note that this figure is indicative, not reliable, and can change arbitrarily as
messages are added to
the queue and removed by other clients.
"""
args = method.args
msg = method.content
delivery_tag = args.read_longlong()
redelivered = args.read_bit()
exchange = args.read_shortstr()
routing_key = args.read_shortstr()
message_count = args.read_long()
msg.channel = self
msg.delivery_info = {
'delivery_tag': delivery_tag,
'redelivered': redelivered,
'exchange': exchange,
'routing_key': routing_key,
'message_count': message_count
}
return msg
def _basic_publish(self, msg, exchange='', routing_key='', mandatory=False, immediate=False):
    """Send a Basic.Publish method frame carrying `msg` as content"""
    writer = AMQPWriter()
    writer.write_short(0)  # reserved-1
    writer.write_shortstr(exchange)
    writer.write_shortstr(routing_key)
    writer.write_bit(mandatory)
    writer.write_bit(immediate)
    self._send_method(Method(spec.Basic.Publish, writer, msg))
@synchronized_connection()
def basic_publish(self, msg, exchange='', routing_key='', mandatory=False, immediate=False):
    """Publish a message

    Publishes a message to a specific exchange. The message is routed to queues as
    defined by the exchange configuration and distributed to any active consumers when
    the transaction, if any, is committed. If publisher confirms are enabled (confirm
    mode), this method automatically waits to receive an "ack" from the server.

    .. note::

        Returned messages are sent back from the server and loaded into the
        `returned_messages` queue of the channel that sent them. In order to receive all
        returned messages, call `loop(0)` on the connection object before checking the
        channel's `returned_messages` queue.

    :param msg: message
    :param str exchange: exchange name, empty string means default exchange
    :param str routing_key: routing key
    :param bool mandatory: True: deliver to at least one queue, or return it; False: drop
        the unroutable message
    :param bool immediate: request immediate delivery
    :type msg: amqpy.Message
    """
    self._basic_publish(msg, exchange, routing_key, mandatory, immediate)
    if self.mode == self.CH_MODE_CONFIRM:
        # publisher confirms: block until the broker acknowledges the publish
        self.wait(spec.Basic.Ack)
@synchronized_connection()
def basic_qos(self, prefetch_size=0, prefetch_count=0, a_global=False):
    """Specify quality of service

    Requests a specific quality of service for the current channel or for all channels on
    the connection. Prefetching lets the server send messages in advance, so the next
    message is already held locally when the client finishes processing the current one —
    a performance improvement. Though the qos method could in principle apply to both
    peers, it is currently meaningful only for the server.

    * `prefetch_size` is a window in octets: the server sends a message in advance if it
      is equal to or smaller than the available prefetch size (and also falls within
      other prefetch limits). Zero means "no specific limit". Ignored when the no-ack
      option is set; it also never limits the transfer of single messages when the client
      has no unacknowledged messages.
    * `prefetch_count` is a window in whole messages; it may be combined with
      `prefetch_size`, in which case a message is only sent in advance when both windows
      (and those at the channel and connection level) allow it. Ignored when the no-ack
      option is set.
    * The server may send less data in advance than the client's windows allow, but it
      must not send more.

    :param int prefetch_size: prefetch window in octets
    :param int prefetch_count: prefetch window in messages
    :param bool a_global: apply to entire connection (default is for current channel only)
    """
    writer = AMQPWriter()
    writer.write_long(prefetch_size)
    writer.write_short(prefetch_count)
    writer.write_bit(a_global)
    self._send_method(Method(spec.Basic.Qos, writer))
    return self.wait(spec.Basic.QosOk)
def _cb_basic_qos_ok(self, method):
"""Confirm the requested qos
This method tells the client that the requested QoS levels could be handled by the server.
The requested QoS applies to all active consumers until a new QoS is defined.
"""
pass
@synchronized_connection()
def basic_recover(self, requeue=False):
    """Redeliver unacknowledged messages

    Asks the broker to redeliver all unacknowledged messages on this channel; zero or
    more messages may be redelivered. Only allowed on non-transacted channels — the
    server raises a channel exception otherwise — and the server sets the redelivered
    flag on every resent message.

    :param bool requeue: if set, the server will attempt to requeue the message,
        potentially then delivering it to a different subscriber
    """
    writer = AMQPWriter()
    writer.write_bit(requeue)
    self._send_method(Method(spec.Basic.Recover, writer))
@synchronized_connection()
def basic_recover_async(self, requeue=False):
    """Ask the broker to redeliver all unacknowledged messages (asynchronous).

    Zero or more messages may be redelivered. Only allowed on non-transacted
    channels; the server sets the redelivered flag on every resent message
    and raises a channel exception if the channel is transacted.

    :param bool requeue: if set, the server will attempt to requeue the
        message, potentially delivering it to a different subscriber
    """
    writer = AMQPWriter()
    writer.write_bit(requeue)
    self._send_method(Method(spec.Basic.RecoverAsync, writer))
def _cb_basic_recover_ok(self, method):
"""In 0-9-1 the deprecated recover solicits a response
"""
pass
@synchronized_connection()
def basic_reject(self, delivery_tag, requeue):
    """Reject an incoming message.

    Allows a client to interrupt and cancel large incoming messages, or to
    return untreatable messages to their original queue. The server will not
    redeliver the message to the same client on this channel; a rejected
    message may be discarded, dead-lettered, or routed to another consumer.

    :param int delivery_tag: server-assigned channel-specific delivery tag
    :param bool requeue: True to requeue the message; False to discard it
    """
    writer = AMQPWriter()
    writer.write_longlong(delivery_tag)
    writer.write_bit(requeue)
    self._send_method(Method(spec.Basic.Reject, writer))
def _cb_basic_return(self, method):
    """Handle a returned (undeliverable) message.

    The server returns messages published with the `immediate` flag that
    could not be delivered, or `mandatory` messages that could not be
    routed. The reply code/text explain why; the message is queued on
    ``self.returned_messages`` for the application to collect.
    """
    reader = method.args
    reply_code = reader.read_short()
    reply_text = reader.read_shortstr()
    exchange = reader.read_shortstr()
    routing_key = reader.read_shortstr()
    self.returned_messages.put(
        basic_return_t(reply_code, reply_text, exchange, routing_key, method.content))
@synchronized_connection()
def tx_commit(self):
    """Commit the current transaction.

    Commits all messages published and acknowledged in the current
    transaction; a new transaction starts immediately after the commit.

    :return: the server's `Tx.CommitOk` reply
    """
    self._send_method(Method(spec.Tx.Commit))
    return self.wait(spec.Tx.CommitOk)
def _cb_tx_commit_ok(self, method):
"""Confirm a successful commit
This method confirms to the client that the commit succeeded. Note that if a commit fails,
the server raises a channel exception.
"""
pass
@synchronized_connection()
def tx_rollback(self):
    """Abandon the current transaction.

    Abandons all messages published and acknowledged in the current
    transaction; a new transaction starts immediately after the rollback.

    :return: the server's `Tx.RollbackOk` reply
    """
    self._send_method(Method(spec.Tx.Rollback))
    return self.wait(spec.Tx.RollbackOk)
def _cb_tx_rollback_ok(self, method):
"""Confirm a successful rollback
This method confirms to the client that the rollback succeeded. Note that if an rollback
fails, the server raises a channel exception.
"""
pass
@synchronized_connection()
def tx_select(self):
    """Select standard transaction mode.

    Sets the channel to use standard transactions. The client must use this
    method at least once on a channel before using Commit or Rollback.

    The channel must not be in publish-acknowledge mode; if it is, the
    server raises :exc:`PreconditionFailed` and closes the channel (amqpy
    reopens it automatically, after which this can be retried).

    :raise PreconditionFailed: if the channel is in publish acknowledge mode
    """
    self._send_method(Method(spec.Tx.Select))
    # removed a commented-out duplicate of the wait call that was left in
    self.wait(spec.Tx.SelectOk)
    self.mode = self.CH_MODE_TX
def _cb_tx_select_ok(self, method):
"""Confirm transaction mode
This method confirms to the client that the channel was successfully set to use standard
transactions.
"""
pass
@synchronized_connection()
def confirm_select(self, nowait=False):
    """Enable publisher confirms on this channel (RabbitMQ extension).

    The channel must not be in transactional mode; if it is, the server
    raises :exc:`PreconditionFailed` and closes the channel (amqpy reopens
    it automatically, after which this can be retried).

    :param bool nowait: if set, the server sends no reply and the client
        does not wait for one
    :raise PreconditionFailed: if the channel is in transactional mode
    """
    writer = AMQPWriter()
    writer.write_bit(nowait)
    self._send_method(Method(spec.Confirm.Select, writer))
    if not nowait:
        self.wait(spec.Confirm.SelectOk)
    self.mode = self.CH_MODE_CONFIRM
def _cb_confirm_select_ok(self, method):
"""With this method, the broker confirms to the client that the channel is now using
publisher confirms
"""
pass
def _cb_basic_ack_recv(self, method):
"""Callback for receiving a `spec.Basic.Ack`
This will be called when the server acknowledges a published message (RabbitMQ extension).
"""
# args = method.args
# delivery_tag = args.read_longlong()
# multiple = args.read_bit()
# Dispatch table mapping an incoming AMQP method type to the unbound handler
# method that processes it. Presumably consulted by the frame-dispatch logic
# in the base channel class — TODO confirm against AbstractChannel.
METHOD_MAP = {
    spec.Channel.OpenOk: _cb_open_ok,
    spec.Channel.Flow: _cb_flow,
    spec.Channel.FlowOk: _cb_flow_ok,
    spec.Channel.Close: _cb_close,
    spec.Channel.CloseOk: _cb_close_ok,
    spec.Exchange.DeclareOk: _cb_exchange_declare_ok,
    spec.Exchange.DeleteOk: _cb_exchange_delete_ok,
    spec.Exchange.BindOk: _cb_exchange_bind_ok,
    spec.Exchange.UnbindOk: _cb_exchange_unbind_ok,
    spec.Queue.DeclareOk: _cb_queue_declare_ok,
    spec.Queue.BindOk: _cb_queue_bind_ok,
    spec.Queue.PurgeOk: _cb_queue_purge_ok,
    spec.Queue.DeleteOk: _cb_queue_delete_ok,
    spec.Queue.UnbindOk: _cb_queue_unbind_ok,
    spec.Basic.QosOk: _cb_basic_qos_ok,
    spec.Basic.ConsumeOk: _cb_basic_consume_ok,
    spec.Basic.Cancel: _cb_basic_cancel_notify,
    spec.Basic.CancelOk: _cb_basic_cancel_ok,
    spec.Basic.Return: _cb_basic_return,
    spec.Basic.Deliver: _cb_basic_deliver,
    spec.Basic.GetOk: _cb_basic_get_ok,
    spec.Basic.GetEmpty: _cb_basic_get_empty,
    spec.Basic.Ack: _cb_basic_ack_recv,
    spec.Basic.RecoverOk: _cb_basic_recover_ok,
    spec.Confirm.SelectOk: _cb_confirm_select_ok,
    spec.Tx.SelectOk: _cb_tx_select_ok,
    spec.Tx.CommitOk: _cb_tx_commit_ok,
    spec.Tx.RollbackOk: _cb_tx_rollback_ok,
}
| {
"repo_name": "veegee/amqpy",
"path": "amqpy/channel.py",
"copies": "1",
"size": "55638",
"license": "mit",
"hash": -8414000878105390000,
"line_mean": 40.2438843588,
"line_max": 100,
"alpha_frac": 0.644793127,
"autogenerated": false,
"ratio": 4.472508038585209,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5617301165585209,
"avg_score": null,
"num_lines": null
} |
"""AMQP Channels
"""
import logging
from queue import Queue
from .proto import Method
from .concurrency import synchronized
from .abstract_channel import AbstractChannel
from .exceptions import ChannelError, ConsumerCancelled, error_for_code
from .spec import basic_return_t, queue_declare_ok_t, method_t
from .serialization import AMQPWriter
from . import spec
__all__ = ['Channel']
log = logging.getLogger('amqpy')
class Channel(AbstractChannel):
"""
The channel class provides methods for a client to establish and operate an AMQP channel. All
public members are fully thread-safe.
"""
### constants
#: Default channel mode
CH_MODE_NONE = 0
#: Transaction mode
CH_MODE_TX = 1
#: Publisher confirm mode (RabbitMQ extension)
CH_MODE_CONFIRM = 2
def __init__(self, connection, channel_id=None, auto_decode=True):
    """Create a channel bound to a connection, and open it on the server.

    If `auto_decode` is enabled (default), incoming Message bodies will be
    automatically decoded to `str` if possible.

    :param connection: the channel's associated Connection
    :param channel_id: the channel's assigned channel ID; None to auto-assign
    :param auto_decode: enable auto decoding of message bodies
    :type connection: amqpy.connection.Connection
    :type channel_id: int or None
    :type auto_decode: bool
    """
    if channel_id:
        # noinspection PyProtectedMember
        connection._claim_channel_id(channel_id)
    else:
        # noinspection PyProtectedMember
        channel_id = connection._get_free_channel_id()
    super().__init__(connection, channel_id)
    # auto decode received messages
    self.auto_decode = auto_decode
    ### channel state variables:
    #: Current channel open/closed state
    #:
    #: :type: bool
    self.is_open = False
    #: Current channel active state (flow control)
    #:
    #: :type: bool
    self.active = True
    #: Channel mode state (default, transactional, publisher confirm)
    #:
    #: :type: int
    # consistency fix: use the named constant instead of a bare 0, matching
    # how _revive() resets the mode
    self.mode = self.CH_MODE_NONE
    #: Returned messages that the server was unable to deliver
    #:
    #: :type: queue.Queue
    self.returned_messages = Queue()
    # consumer callbacks dict[consumer_tag str: callable]
    self.callbacks = {}
    # consumer cancel callbacks dict[consumer_tag str: callable]
    self.cancel_callbacks = {}
    # set of consumers that have opted for `no_ack` delivery (server will not
    # expect an ack for delivered messages)
    self.no_ack_consumers = set()
    # open the channel
    self._open()
def _close(self):
    """Tear down this object after the close handshake with the server.

    Marks the channel closed, releases the channel id back to the
    connection, and clears all consumer callback state.
    """
    log.debug('Channel close #{}'.format(self.channel_id))
    self.is_open = False
    released_id, self.channel_id = self.channel_id, None
    conn, self.connection = self.connection, None
    if conn:
        conn.channels.pop(released_id, None)
        # noinspection PyProtectedMember
        conn._avail_channel_ids.append(released_id)
    for registry in (self.callbacks, self.cancel_callbacks):
        registry.clear()
    self.no_ack_consumers.clear()
def _open(self):
    """Open the channel on the server, unless it is already open."""
    if not self.is_open:
        self._send_open()
def _revive(self):
    """Reset channel state and re-open it on the server."""
    self.is_open = False
    self.mode = self.CH_MODE_NONE
    self._send_open()
@synchronized('lock')
def close(self, reply_code=0, reply_text='', method_type=method_t(0, 0)):
    """Request a channel close.

    Indicates that the sender wants to close the channel, due either to
    internal conditions (e.g. forced shutdown) or to an error handling a
    specific method. When the close is caused by an exception, the class
    and method id of the offending method are reported.

    :param reply_code: the reply code
    :param reply_text: localized reply text
    :param method_type: the method that triggered the close, if any
    :type reply_code: int
    :type reply_text: str
    :type method_type: amqpy.spec.method_t
    """
    try:
        if not self.is_open or self.connection is None:
            return
        writer = AMQPWriter()
        writer.write_short(reply_code)
        writer.write_shortstr(reply_text)
        writer.write_short(method_type.class_id)
        writer.write_short(method_type.method_id)
        self._send_method(Method(spec.Channel.Close, writer))
        return self.wait_any([spec.Channel.Close, spec.Channel.CloseOk])
    finally:
        # the channel is unusable after a close request either way
        self.connection = None
def _cb_close(self, method):
    """Respond to a channel close initiated by the server.

    Replies with "close-ok", re-opens the channel, then raises the error
    corresponding to the server-supplied reply code so the caller learns
    why the server closed the channel.

    :raise ChannelError: always, built from the server's reply code/text
    """
    reader = method.args
    code = reader.read_short()
    text = reader.read_shortstr()
    offending_class = reader.read_short()
    offending_method = reader.read_short()
    self._send_method(Method(spec.Channel.CloseOk))
    self.is_open = False
    # re-open the channel so it remains usable after the exception is handled
    self._revive()
    # report which method caused the server to close the channel
    offender = method_t(offending_class, offending_method)
    raise error_for_code(code, text, offender, ChannelError, self.channel_id)
def _cb_close_ok(self, method):
    """Confirm a channel close.

    The server confirms our Channel.Close; it is now safe to release the
    channel's resources locally.
    """
    assert method
    self._close()
@synchronized('lock')
def flow(self, active):
    """Enable/disable flow of content from the peer.

    Asks the peer to pause or restart the flow of content data — a simple
    flow-control mechanism to avoid overflowing queues (not intended for
    window control). A peer asked to stop should finish sending the current
    content, if any, then wait for a Flow restart.

    :param active: True: peer starts sending content frames; False: peer
        stops sending content frames
    :type active: bool
    :return: the peer's confirmed active state (from `_cb_flow_ok`)
    """
    args = AMQPWriter()
    args.write_bit(active)
    self._send_method(Method(spec.Channel.Flow, args))
    # BUG FIX: wait_any() expects a list of method specs, but the original
    # passed the bound callback `self._cb_flow_ok` as a second "spec"
    # (compare close(), which passes specs only). Wait on FlowOk directly.
    return self.wait(spec.Channel.FlowOk)
def _cb_flow(self, method):
    """Handle a flow-control request from the peer.

    Records the requested active state and confirms it back to the peer
    with a flow-ok.
    """
    self.active = method.args.read_bit()
    self._send_flow_ok(self.active)
def _send_flow_ok(self, active):
    """Confirm a flow method to the peer.

    :param active: True: peer starts sending content frames; False: peer
        stops sending content frames
    :type active: bool
    """
    writer = AMQPWriter()
    writer.write_bit(active)
    self._send_method(Method(spec.Channel.FlowOk, writer))
def _cb_flow_ok(self, method):
"""Confirm a flow method
Confirms to the peer that a flow command was received and processed.
"""
args = method.args
return args.read_bit()
def _send_open(self):
    """Send a Channel.Open and wait for the server's open-ok."""
    writer = AMQPWriter()
    writer.write_shortstr('')  # reserved field
    self._send_method(Method(spec.Channel.Open, writer))
    return self.wait(spec.Channel.OpenOk)
def _cb_open_ok(self, method):
    """Handle a received "open-ok".

    The server signals that this channel is ready for use; mark it open.
    """
    assert method
    self.is_open = True
    log.debug('Channel open')
@synchronized('lock')
def exchange_declare(self, exchange, exch_type, passive=False, durable=False, auto_delete=True,
                     nowait=False, arguments=None):
    """Declare an exchange, creating it if needed.

    If the exchange already exists the server verifies its type (exchanges
    cannot be redeclared with a different type) and ignores the `durable`
    and `auto_delete` fields. `arguments` is ignored when `passive` is True.

    :param str exchange: exchange name
    :param str exch_type: exchange type (direct, fanout, etc.)
    :param bool passive: do not create; only check that the exchange exists
    :param bool durable: mark exchange as durable (survives server restarts)
    :param bool auto_delete: delete once all queues have finished using it
    :param bool nowait: if set, the server sends no reply and the client
        does not wait for one
    :param dict arguments: exchange declare arguments
    :raise AccessRefused: declaring an exchange with a reserved name (amq.*)
    :raise NotFound: `passive` is set and the exchange does not exist
    :return: None
    """
    writer = AMQPWriter()
    writer.write_short(0)           # reserved-1
    writer.write_shortstr(exchange)
    writer.write_shortstr(exch_type)
    writer.write_bit(passive)
    writer.write_bit(durable)
    writer.write_bit(auto_delete)
    writer.write_bit(False)         # "internal" flag, never set here
    writer.write_bit(nowait)
    writer.write_table(arguments or {})
    self._send_method(Method(spec.Exchange.Declare, writer))
    if not nowait:
        return self.wait(spec.Exchange.DeclareOk)
def _cb_exchange_declare_ok(self, method):
"""Confirms an exchange declaration
The server sends this method to confirm a Declare method and confirms the name of the
exchange, essential for automatically-named exchanges.
"""
pass
@synchronized('lock')
def exchange_delete(self, exchange, if_unused=False, nowait=False):
    """Delete an exchange.

    All queue bindings on the exchange are cancelled when it is deleted.

    :param str exchange: exchange name
    :param bool if_unused: delete only if unused (no queue bindings)
    :param bool nowait: if set, the server sends no reply and the client
        does not wait for one
    :raise NotFound: the exchange does not exist
    :raise PreconditionFailed: `if_unused` is set and the exchange has
        queue bindings
    :return: None
    """
    writer = AMQPWriter()
    writer.write_short(0)
    writer.write_shortstr(exchange)
    writer.write_bit(if_unused)
    writer.write_bit(nowait)
    self._send_method(Method(spec.Exchange.Delete, writer))
    if not nowait:
        return self.wait(spec.Exchange.DeleteOk)
def _cb_exchange_delete_ok(self, method):
"""Confirm deletion of an exchange
The server sends this method to confirm that the deletion of an exchange was successful.
"""
pass
@synchronized('lock')
def exchange_bind(self, dest_exch, source_exch='', routing_key='', nowait=False,
                  arguments=None):
    """Bind an exchange to an exchange.

    Both exchanges must already exist; a blank name refers to the default
    exchange. Duplicate bindings and binding cycles are allowed, and a
    message is never delivered more than once to a destination exchange.

    :param str dest_exch: name of destination exchange to bind
    :param str source_exch: name of source exchange to bind
    :param str routing_key: routing key for the binding (not all exchange
        types use one)
    :param bool nowait: if set, the server sends no reply and the client
        does not wait for one
    :param dict arguments: binding arguments, specific to the exchange class
    """
    if arguments is None:
        arguments = {}
    writer = AMQPWriter()
    writer.write_short(0)
    writer.write_shortstr(dest_exch)
    writer.write_shortstr(source_exch)
    writer.write_shortstr(routing_key)
    writer.write_bit(nowait)
    writer.write_table(arguments)
    self._send_method(Method(spec.Exchange.Bind, writer))
    if not nowait:
        return self.wait(spec.Exchange.BindOk)
@synchronized('lock')
def exchange_unbind(self, dest_exch, source_exch='', routing_key='', nowait=False,
                    arguments=None):
    """Unbind an exchange from an exchange.

    Blank exchange names refer to the default exchange; unbinding a
    nonexistent exchange is a connection-level error on the server.

    :param str dest_exch: destination exchange name
    :param str source_exch: source exchange name
    :param str routing_key: routing key to unbind
    :param bool nowait: if set, the server sends no reply and the client
        does not wait for one
    :param dict arguments: binding arguments, specific to the exchange class
    """
    if arguments is None:
        arguments = {}
    writer = AMQPWriter()
    writer.write_short(0)
    writer.write_shortstr(dest_exch)
    writer.write_shortstr(source_exch)
    writer.write_shortstr(routing_key)
    writer.write_bit(nowait)
    writer.write_table(arguments)
    self._send_method(Method(spec.Exchange.Unbind, writer))
    if not nowait:
        return self.wait(spec.Exchange.UnbindOk)
def _cb_exchange_bind_ok(self, method):
"""Confirm bind successful
The server sends this method to confirm that the bind was successful.
"""
pass
def _cb_exchange_unbind_ok(self, method):
"""Confirm unbind successful
The server sends this method to confirm that the unbind was successful.
"""
pass
@synchronized('lock')
def queue_bind(self, queue, exchange='', routing_key='', nowait=False, arguments=None):
    """Bind a queue to an exchange.

    A queue receives no messages until it is bound. Duplicate bindings are
    allowed and ignored by the server; a durable queue may not be bound to
    a transient exchange.

    :param str queue: queue to bind; blank refers to the last declared
        queue on this channel
    :param str exchange: exchange to bind to
    :param str routing_key: routing key for the binding
    :param bool nowait: if set, the server sends no reply and the client
        does not wait for one
    :param dict arguments: binding arguments, specific to the exchange class
    """
    if arguments is None:
        arguments = {}
    writer = AMQPWriter()
    writer.write_short(0)
    writer.write_shortstr(queue)
    writer.write_shortstr(exchange)
    writer.write_shortstr(routing_key)
    writer.write_bit(nowait)
    writer.write_table(arguments)
    self._send_method(Method(spec.Queue.Bind, writer))
    if not nowait:
        return self.wait(spec.Queue.BindOk)
def _cb_queue_bind_ok(self, method):
"""Confirm bind successful
The server sends this method to confirm that the bind was successful.
"""
pass
@synchronized('lock')
def queue_unbind(self, queue, exchange, routing_key='', nowait=False, arguments=None):
    """Unbind a queue from an exchange.

    The queue, the exchange, and the binding must all exist.

    :param str queue: queue to unbind; blank refers to the last declared
        queue on this channel
    :param str exchange: exchange to unbind from; blank refers to the
        default exchange
    :param str routing_key: routing key of the binding
    :param bool nowait: if set, do not wait for the server's unbind-ok
        (Queue.Unbind carries no no-wait bit on the wire, so this only
        controls client-side waiting)
    :param dict arguments: binding arguments, specific to the exchange class
    """
    if arguments is None:
        arguments = {}
    writer = AMQPWriter()
    writer.write_short(0)
    writer.write_shortstr(queue)
    writer.write_shortstr(exchange)
    writer.write_shortstr(routing_key)
    # no no-wait bit in the Queue.Unbind method frame
    writer.write_table(arguments)
    self._send_method(Method(spec.Queue.Unbind, writer))
    if not nowait:
        return self.wait(spec.Queue.UnbindOk)
def _cb_queue_unbind_ok(self, method):
"""Confirm unbind successful
This method confirms that the unbind was successful.
"""
pass
@synchronized('lock')
def queue_declare(self, queue='', passive=False, durable=False, exclusive=False,
                  auto_delete=True, nowait=False,
                  arguments=None):
    """Declare a queue, creating it if needed.

    Creates or checks a queue. With `passive` set the server state is not
    modified: the server only checks that the queue exists and returns its
    properties (404 NOT FOUND if it does not). Newly created queues get a
    default binding to the default (direct) exchange. Names starting with
    'amq.' are reserved. `auto_delete` is ignored for existing queues.

    RabbitMQ also honours useful extra arguments such as ``x-max-length``
    (int): maximum queue length; excess messages are dropped or
    dead-lettered from the front of the queue.

    :param str queue: queue name; blank lets the server generate one
    :param bool passive: do not create; only check that the queue exists
    :param bool durable: mark as durable (survives server restarts)
    :param bool exclusive: only this connection may consume; implies
        `auto_delete`
    :param bool auto_delete: delete once all consumers have finished
    :param bool nowait: if set, the server sends no reply and the client
        does not wait for one
    :param dict arguments: queue declare arguments
    :raise NotFound: `passive` is set and the queue does not exist
    :raise AccessRefused: declaring a queue with a reserved name
    :raise ResourceLocked: accessing an exclusive queue declared by another
        open connection
    :return: queue_declare_ok_t(queue, message_count, consumer_count), or
        None if `nowait`
    :rtype: queue_declare_ok_t or None
    """
    writer = AMQPWriter()
    writer.write_short(0)
    writer.write_shortstr(queue)
    writer.write_bit(passive)
    writer.write_bit(durable)
    writer.write_bit(exclusive)
    writer.write_bit(auto_delete)
    writer.write_bit(nowait)
    writer.write_table(arguments or {})
    self._send_method(Method(spec.Queue.Declare, writer))
    if not nowait:
        return self.wait(spec.Queue.DeclareOk)
def _cb_queue_declare_ok(self, method):
    """Handle the server's reply to a `queue.declare`.

    :return: queue_declare_ok_t(queue, message_count, consumer_count)
    :rtype: queue_declare_ok_t
    """
    reader = method.args
    name = reader.read_shortstr()
    message_count = reader.read_long()
    consumer_count = reader.read_long()
    return queue_declare_ok_t(name, message_count, consumer_count)
@synchronized('lock')
def queue_delete(self, queue='', if_unused=False, if_empty=False, nowait=False):
    """Delete a queue.

    Pending messages go to the dead-letter queue if one is configured on
    the server, and all consumers on the queue are cancelled.

    :param str queue: queue to delete; blank refers to the last declared
        queue on this channel
    :param bool if_unused: delete only if it has no consumers
    :param bool if_empty: delete only if it is empty
    :param bool nowait: if set, the server sends no reply and the client
        does not wait for one
    :raise NotFound: the queue does not exist
    :raise PreconditionFailed: `if_unused`/`if_empty` conditions not met
    :return: number of messages deleted
    :rtype: int
    """
    writer = AMQPWriter()
    writer.write_short(0)
    writer.write_shortstr(queue)
    writer.write_bit(if_unused)
    writer.write_bit(if_empty)
    writer.write_bit(nowait)
    self._send_method(Method(spec.Queue.Delete, writer))
    if not nowait:
        return self.wait(spec.Queue.DeleteOk)
def _cb_queue_delete_ok(self, method):
"""Confirm deletion of a queue
This method confirms the deletion of a queue.
PARAMETERS:
message_count: long
number of messages purged
Reports the number of messages purged.
"""
args = method.args
return args.read_long()
@synchronized('lock')
def queue_purge(self, queue='', nowait=False):
    """Purge a queue.

    Removes all messages from a queue without cancelling consumers. Purged
    messages are deleted with no "undo". On transacted channels the server
    must not purge messages already sent but not yet acknowledged.

    :param str queue: queue to purge; blank refers to the last declared
        queue on this channel
    :param bool nowait: if set, the server sends no reply and the client
        does not wait for one
    :return: message count, or None if `nowait` is set
    :rtype: int or None
    """
    writer = AMQPWriter()
    writer.write_short(0)
    writer.write_shortstr(queue)
    writer.write_bit(nowait)
    self._send_method(Method(spec.Queue.Purge, writer))
    if not nowait:
        return self.wait(spec.Queue.PurgeOk)
def _cb_queue_purge_ok(self, method):
"""Confirms a queue purge
This method confirms the purge of a queue.
PARAMETERS:
message_count: long
number of messages purged
Reports the number of messages purged.
"""
args = method.args
return args.read_long()
@synchronized('lock')
def basic_ack(self, delivery_tag, multiple=False):
    """Acknowledge one or more delivered messages.

    Acknowledges messages delivered via Deliver or Get-Ok. Delivery tags
    are only valid on the channel they were received on. Pass
    ``delivery_tag=0, multiple=True`` to acknowledge everything outstanding;
    an invalid tag raises a channel exception on the server.

    :param int delivery_tag: server-assigned delivery tag; 0 means "all
        messages received so far"
    :param bool multiple: treat `delivery_tag` as "all messages up to and
        including"
    """
    writer = AMQPWriter()
    writer.write_longlong(delivery_tag)
    writer.write_bit(multiple)
    self._send_method(Method(spec.Basic.Ack, writer))
@synchronized('lock')
def basic_cancel(self, consumer_tag, nowait=False):
    """End a queue consumer.

    Already-delivered messages are unaffected, but the server stops
    sending new ones for this consumer.  An arbitrary number of
    messages may still arrive between sending the cancel and receiving
    cancel-ok.  If the queue no longer exists, or the consumer was
    already cancelled, this is a no-op on the server side.

    :param str consumer_tag: consumer tag, valid only within the
        current connection and channel
    :param bool nowait: if set, the server will not respond and the
        client does not wait for a reply
    """
    if self.connection is None:
        # Channel already detached from its connection; nothing to do.
        return
    self.no_ack_consumers.discard(consumer_tag)
    args = AMQPWriter()
    args.write_shortstr(consumer_tag)
    args.write_bit(nowait)
    self._send_method(Method(spec.Basic.Cancel, args))
    if not nowait:
        # BUG FIX: the wait was previously unconditional; with
        # nowait=True the server never sends CancelOk, so waiting
        # would block forever (cf. queue_purge's guard).
        return self.wait(spec.Basic.CancelOk)
def _cb_basic_cancel_notify(self, method):
    """Handle a server-initiated Basic.Cancel.

    Most likely the queue was deleted.  Invokes the registered
    cancel callback if one exists; otherwise raises
    :exc:`ConsumerCancelled`.
    """
    tag = method.args.read_shortstr()
    on_cancel = self._on_cancel(tag)
    if not on_cancel:
        raise ConsumerCancelled(tag, spec.Basic.Cancel)
    on_cancel(tag)
def _cb_basic_cancel_ok(self, method):
    """Handle Basic.CancelOk: the server confirmed a consumer cancel.

    The consumer tag in the payload is only valid within the channel
    the consumer was created on.  Any callbacks registered for that
    tag are dropped.
    """
    self._on_cancel(method.args.read_shortstr())
def _on_cancel(self, consumer_tag):
    """Forget a consumer's registered callbacks.

    :param str consumer_tag: tag of the consumer being cancelled
    :return: the cancel callback registered for the tag, if any
    :rtype: callable or None
    """
    try:
        del self.callbacks[consumer_tag]
    except KeyError:
        pass
    return self.cancel_callbacks.pop(consumer_tag, None)
@synchronized('lock')
def basic_consume(self, queue='', consumer_tag='', no_local=False, no_ack=False,
                  exclusive=False, nowait=False, callback=None, arguments=None, on_cancel=None):
    """Start a queue consumer.

    Asks the server to start a "consumer": a transient request for
    messages from a specific queue.  Consumers last as long as the
    channel they were created on, or until the client cancels them.

    * `consumer_tag` is local to the connection; reusing a tag on the
      same connection makes the server raise 530 NOT ALLOWED.
    * With `no_ack`, the server auto-acknowledges each message.
    * With `exclusive`, the server raises 403 ACCESS REFUSED if other
      consumers are already active on the queue.
    * `callback` is a ``Callable(message)`` invoked per delivered
      message; without one, messages are quietly discarded (`no_ack`
      should probably be True in that case).

    :param str queue: queue name; empty refers to the last declared
        queue for this channel
    :param str consumer_tag: consumer tag, local to the connection
    :param bool no_local: do not deliver own messages
    :param bool no_ack: server will not expect an ack per message
    :param bool exclusive: request exclusive access
    :param bool nowait: if set, the server will not respond and the
        client does not wait for a reply
    :param Callable callback: callable(message) for each delivery
    :param dict arguments: AMQP method arguments
    :param Callable on_cancel: callback invoked on consumer cancel
    :return: consumer tag
    :rtype: str
    :raise ValueError: if `nowait` is set without a `consumer_tag`
    """
    if nowait and not consumer_tag:
        # BUG FIX: with nowait the server sends no ConsumeOk carrying a
        # generated tag, so the client must supply its own; previously
        # the callbacks were silently registered under ''.
        raise ValueError('consumer_tag must be specified when nowait is set')
    args = AMQPWriter()
    args.write_short(0)
    args.write_shortstr(queue)
    args.write_shortstr(consumer_tag)
    args.write_bit(no_local)
    args.write_bit(no_ack)
    args.write_bit(exclusive)
    args.write_bit(nowait)
    args.write_table(arguments or {})
    self._send_method(Method(spec.Basic.Consume, args))
    if not nowait:
        # Server assigns (or echoes) the tag in ConsumeOk.
        consumer_tag = self.wait(spec.Basic.ConsumeOk)
    self.callbacks[consumer_tag] = callback
    if on_cancel:
        self.cancel_callbacks[consumer_tag] = on_cancel
    if no_ack:
        self.no_ack_consumers.add(consumer_tag)
    return consumer_tag
def _cb_basic_consume_ok(self, method):
    """Handle Basic.ConsumeOk: the server confirmed a new consumer.

    :return: the consumer tag specified by the client or generated by
        the server
    :rtype: str
    """
    return method.args.read_shortstr()
def _cb_basic_deliver(self, method):
    """Deliver a consumer message to its registered callback.

    The server sends Basic.Deliver for each message arriving for a
    consumer started with :meth:`basic_consume`.  This method can be
    called in a "classmethod" style static-context and is done so by
    :meth:`~amqpy.connection.Connection.drain_events()`.

    Wire payload (read strictly in this order):
        consumer_tag: shortstr -- identifies the consumer; valid only
            within the channel it was created on
        delivery_tag: longlong -- server-assigned, channel-specific;
            never zero (zero is reserved for client use, meaning "all
            messages so far received")
        redelivered: bit -- message was previously delivered to this
            or another client
        exchange: shortstr -- exchange the message was published to
        routing_key: shortstr -- routing key used at publish time

    :raise Exception: if no callback is registered for the consumer tag
    """
    args = method.args
    msg = method.content
    # NOTE: the reads below must stay in wire order.
    consumer_tag = args.read_shortstr()
    delivery_tag = args.read_longlong()
    redelivered = args.read_bit()
    exchange = args.read_shortstr()
    routing_key = args.read_shortstr()
    # Attach routing metadata to the message before dispatching it.
    msg.channel = self
    msg.delivery_info = {
        'consumer_tag': consumer_tag,
        'delivery_tag': delivery_tag,
        'redelivered': redelivered,
        'exchange': exchange,
        'routing_key': routing_key,
    }
    callback = self.callbacks.get(consumer_tag)
    if callback:
        callback(msg)
    else:
        # No registered handler: this is a programming error upstream.
        raise Exception('No callback available for consumer tag: {}'.format(consumer_tag))
@synchronized('lock')
def basic_get(self, queue='', no_ack=False):
    """Synchronously fetch a single message from `queue`.

    Non-blocking: returns None immediately when the queue is empty.

    :param str queue: queue name; empty refers to the last queue
        declared on this channel
    :param bool no_ack: server auto-acknowledges the message
    :return: the message, or None if the queue is empty
    :rtype: amqpy.message.Message or None
    """
    writer = AMQPWriter()
    writer.write_short(0)
    writer.write_shortstr(queue)
    writer.write_bit(no_ack)
    self._send_method(Method(spec.Basic.Get, writer))
    return self.wait_any([spec.Basic.GetOk, spec.Basic.GetEmpty])
def _cb_basic_get_empty(self, method):
    """Handle Basic.GetEmpty: the queue had no message for a get.

    The method's single shortstr argument is read only to consume it
    from the buffer; its value is unused.
    """
    method.args.read_shortstr()
def _cb_basic_get_ok(self, method):
    """Handle Basic.GetOk: a message fetched via :meth:`basic_get`.

    Unless `no_ack` was set on the get, the delivered message must be
    acknowledged.

    Wire payload, in order: delivery_tag (longlong, server-assigned and
    channel-specific, never zero), redelivered (bit), exchange
    (shortstr; empty means the default exchange), routing_key
    (shortstr), message_count (long; messages still pending on the
    queue -- indicative only, it may change at any time).

    :return: the message, annotated with `channel` and `delivery_info`
    """
    args = method.args
    msg = method.content
    msg.channel = self
    # Dict literals evaluate values in source order, so the wire reads
    # below happen in the required sequence.
    msg.delivery_info = {
        'delivery_tag': args.read_longlong(),
        'redelivered': args.read_bit(),
        'exchange': args.read_shortstr(),
        'routing_key': args.read_shortstr(),
        'message_count': args.read_long(),
    }
    return msg
def _basic_publish(self, msg, exchange='', routing_key='', mandatory=False, immediate=False):
    """Send a Basic.Publish frame carrying `msg` (no confirm handling)."""
    writer = AMQPWriter()
    writer.write_short(0)
    writer.write_shortstr(exchange)
    writer.write_shortstr(routing_key)
    writer.write_bit(mandatory)
    writer.write_bit(immediate)
    self._send_method(Method(spec.Basic.Publish, writer, msg))
@synchronized('lock')
def basic_publish(self, msg, exchange='', routing_key='', mandatory=False, immediate=False):
    """Publish a message to an exchange.

    The server routes the message to queues according to the exchange
    configuration and distributes it to active consumers when the
    transaction, if any, is committed.  In publisher-confirm mode this
    call additionally waits for the broker's "ack".

    .. note::
        Returned messages are sent back from the server and loaded into
        the `returned_messages` queue of the channel that sent them. In
        order to receive all returned messages, call `loop(0)` on the
        connection object before checking the channel's
        `returned_messages` queue.

    :param msg: message
    :type msg: amqpy.Message
    :param str exchange: exchange name; empty string means the default
        exchange
    :param str routing_key: routing key
    :param bool mandatory: True: deliver to at least one queue or
        return the message; False: drop unroutable messages
    :param bool immediate: request immediate delivery
    """
    self._basic_publish(msg, exchange, routing_key, mandatory, immediate)
    if self.mode == self.CH_MODE_CONFIRM:
        self.wait(spec.Basic.Ack)
@synchronized('lock')
def basic_qos(self, prefetch_size=0, prefetch_count=0, a_global=False):
    """Request a specific quality of service.

    Prefetching lets the server send messages in advance so the next
    one is already local when the client finishes processing, improving
    throughput.  `prefetch_size` limits the window in octets (0 means
    no specific limit) and `prefetch_count` limits it in whole
    messages; a message is sent in advance only if both windows (and
    any channel/connection level limits) allow it.  Both limits are
    ignored for no-ack consumers, and the server may always send less
    than allowed — never more.

    :param int prefetch_size: prefetch window in octets
    :param int prefetch_count: prefetch window in messages
    :param bool a_global: apply to the entire connection instead of
        only the current channel
    """
    writer = AMQPWriter()
    writer.write_long(prefetch_size)
    writer.write_short(prefetch_count)
    writer.write_bit(a_global)
    self._send_method(Method(spec.Basic.Qos, writer))
    return self.wait(spec.Basic.QosOk)
def _cb_basic_qos_ok(self, method):
    """Handle Basic.QosOk: the server accepted the requested QoS.

    The requested QoS applies to all active consumers until a new QoS
    is defined.  The method carries no arguments, hence no action.
    """
    pass
@synchronized('lock')
def basic_recover(self, requeue=False):
    """Ask the broker to redeliver all unacknowledged messages.

    Zero or more messages may be redelivered, each with the
    redelivered flag set.  Only allowed on non-transacted channels;
    the server raises a channel exception otherwise.

    :param bool requeue: if set, the server attempts to requeue the
        messages, potentially delivering them to other subscribers
    """
    writer = AMQPWriter()
    writer.write_bit(requeue)
    self._send_method(Method(spec.Basic.Recover, writer))
@synchronized('lock')
def basic_recover_async(self, requeue=False):
    """Async variant of :meth:`basic_recover` (no reply expected).

    Redelivers all unacknowledged messages on the channel with the
    redelivered flag set.  Only allowed on non-transacted channels;
    the server raises a channel exception otherwise.

    :param bool requeue: if set, the server attempts to requeue the
        messages, potentially delivering them to other subscribers
    """
    writer = AMQPWriter()
    writer.write_bit(requeue)
    self._send_method(Method(spec.Basic.RecoverAsync, writer))
def _cb_basic_recover_ok(self, method):
    """Handle Basic.RecoverOk.

    In AMQP 0-9-1 the (deprecated) synchronous recover solicits this
    response; nothing needs to be parsed.
    """
    pass
@synchronized('lock')
def basic_reject(self, delivery_tag, requeue):
    """Reject an incoming message.

    Lets a client interrupt/cancel a large incoming message or return
    an untreatable message to its original queue.  The server
    interprets this as "the client cannot process the message right
    now" — it MUST NOT redeliver the message to the same client on the
    same channel; typically it goes to another consumer or a
    dead-letter queue.  Clients must not use rejection to pick and
    choose messages: a rejected message may be discarded or
    dead-lettered rather than handed to another client.

    :param int delivery_tag: server-assigned channel-specific delivery
        tag
    :param bool requeue: True: requeue the message; False: discard it
    """
    writer = AMQPWriter()
    writer.write_longlong(delivery_tag)
    writer.write_bit(requeue)
    self._send_method(Method(spec.Basic.Reject, writer))
def _cb_basic_return(self, method):
    """Handle Basic.Return: an undeliverable published message.

    The server returns messages published with `immediate` that could
    not be delivered, or with `mandatory` that could not be routed.
    The parsed reply code/text describe why; the result is queued on
    `self.returned_messages`.
    """
    args = method.args
    # Argument evaluation order preserves the required wire-read order.
    returned = basic_return_t(
        args.read_short(),
        args.read_shortstr(),
        args.read_shortstr(),
        args.read_shortstr(),
        method.content,
    )
    self.returned_messages.put(returned)
@synchronized('lock')
def tx_commit(self):
    """Commit the current transaction.

    Commits all messages published and acknowledged in the current
    transaction; a new transaction starts immediately afterwards.
    """
    self._send_method(Method(spec.Tx.Commit))
    return self.wait(spec.Tx.CommitOk)
def _cb_tx_commit_ok(self, method):
    """Handle Tx.CommitOk: the commit succeeded.

    A failed commit instead surfaces as a channel exception, so there
    is nothing to do here.
    """
    pass
@synchronized('lock')
def tx_rollback(self):
    """Abandon the current transaction.

    Abandons all messages published and acknowledged in the current
    transaction; a new transaction starts immediately afterwards.
    """
    self._send_method(Method(spec.Tx.Rollback))
    return self.wait(spec.Tx.RollbackOk)
def _cb_tx_rollback_ok(self, method):
    """Handle Tx.RollbackOk: the rollback succeeded.

    A failed rollback instead surfaces as a channel exception, so
    there is nothing to do here.
    """
    pass
@synchronized('lock')
def tx_select(self):
    """Select standard transaction mode for this channel.

    Must be called at least once before using Commit or Rollback.  The
    channel must not be in publish-acknowledge mode; if it is, the
    server raises :exc:`PreconditionFailed` and closes the channel
    (amqpy automatically reopens it, after which this method can be
    retried).

    :raise PreconditionFailed: if the channel is in publish-acknowledge
        mode
    """
    self._send_method(Method(spec.Tx.Select))
    # Fix: dropped a leftover commented-out duplicate of this wait call.
    self.wait(spec.Tx.SelectOk)
    self.mode = self.CH_MODE_TX
def _cb_tx_select_ok(self, method):
    """Handle Tx.SelectOk: the channel now uses standard transactions.

    No payload to parse.
    """
    pass
@synchronized('lock')
def confirm_select(self, nowait=False):
    """Enable publisher confirms on this channel (RabbitMQ extension).

    The channel must not be in transactional mode; if it is, the
    server raises :exc:`PreconditionFailed` and closes the channel
    (amqpy automatically reopens it, after which this method can be
    retried).

    :param bool nowait: if set, the server will not respond and the
        client does not wait for a reply
    :raise PreconditionFailed: if the channel is in transactional mode
    """
    writer = AMQPWriter()
    writer.write_bit(nowait)
    self._send_method(Method(spec.Confirm.Select, writer))
    if not nowait:
        self.wait(spec.Confirm.SelectOk)
    self.mode = self.CH_MODE_CONFIRM
def _cb_confirm_select_ok(self, method):
    """Handle Confirm.SelectOk: the channel now uses publisher confirms.

    No payload to parse.
    """
    pass
def _cb_basic_ack_recv(self, method):
    """Callback for receiving a `spec.Basic.Ack`

    This will be called when the server acknowledges a published message
    (RabbitMQ extension).  The payload is currently ignored; the
    fields it would carry are kept below for reference.
    """
    # args = method.args
    # delivery_tag = args.read_longlong()
    # multiple = args.read_bit()
# Dispatch table mapping an incoming AMQP method signature to the
# (unbound) handler that parses and reacts to it.  NOTE(review): the
# consumer of this table lives outside this view — presumably the
# channel's wait/dispatch machinery; confirm before relying on it.
METHOD_MAP = {
    spec.Channel.OpenOk: _cb_open_ok,
    spec.Channel.Flow: _cb_flow,
    spec.Channel.FlowOk: _cb_flow_ok,
    spec.Channel.Close: _cb_close,
    spec.Channel.CloseOk: _cb_close_ok,
    spec.Exchange.DeclareOk: _cb_exchange_declare_ok,
    spec.Exchange.DeleteOk: _cb_exchange_delete_ok,
    spec.Exchange.BindOk: _cb_exchange_bind_ok,
    spec.Exchange.UnbindOk: _cb_exchange_unbind_ok,
    spec.Queue.DeclareOk: _cb_queue_declare_ok,
    spec.Queue.BindOk: _cb_queue_bind_ok,
    spec.Queue.PurgeOk: _cb_queue_purge_ok,
    spec.Queue.DeleteOk: _cb_queue_delete_ok,
    spec.Queue.UnbindOk: _cb_queue_unbind_ok,
    spec.Basic.QosOk: _cb_basic_qos_ok,
    spec.Basic.ConsumeOk: _cb_basic_consume_ok,
    spec.Basic.Cancel: _cb_basic_cancel_notify,
    spec.Basic.CancelOk: _cb_basic_cancel_ok,
    spec.Basic.Return: _cb_basic_return,
    spec.Basic.Deliver: _cb_basic_deliver,
    spec.Basic.GetOk: _cb_basic_get_ok,
    spec.Basic.GetEmpty: _cb_basic_get_empty,
    spec.Basic.Ack: _cb_basic_ack_recv,
    spec.Basic.RecoverOk: _cb_basic_recover_ok,
    spec.Confirm.SelectOk: _cb_confirm_select_ok,
    spec.Tx.SelectOk: _cb_tx_select_ok,
    spec.Tx.CommitOk: _cb_tx_commit_ok,
    spec.Tx.RollbackOk: _cb_tx_rollback_ok,
}
| {
"repo_name": "gst/amqpy",
"path": "amqpy/channel.py",
"copies": "1",
"size": "55345",
"license": "mit",
"hash": -499626030043929900,
"line_mean": 40.2714392245,
"line_max": 100,
"alpha_frac": 0.6433282139,
"autogenerated": false,
"ratio": 4.465827483256677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002072490954520673,
"num_lines": 1341
} |
"""AMQP Connections."""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
from __future__ import absolute_import, unicode_literals
import logging
import socket
import uuid
import warnings
from vine import ensure_promise
from . import __version__, sasl, spec
from .abstract_channel import AbstractChannel
from .channel import Channel
from .exceptions import (AMQPDeprecationWarning, ChannelError, ConnectionError,
ConnectionForced, RecoverableChannelError,
RecoverableConnectionError, ResourceError,
error_for_code)
from .five import array, items, monotonic, range, string, values
from .method_framing import frame_handler, frame_writer
from .transport import Transport
try:
    from ssl import SSLError
except ImportError:  # pragma: no cover
    # Python built without SSL support: provide a stand-in so that
    # ``except SSLError`` clauses elsewhere remain valid.
    class SSLError(Exception):  # noqa
        pass
# Deprecation warning emitted when a lazy attribute triggers an
# implicit connect (see Connection._warn_force_connect).
W_FORCE_CONNECT = """\
The .{attr} attribute on the connection was accessed before
the connection was established. This is supported for now, but will
be deprecated in amqp 2.2.0.
Since amqp 2.0 you have to explicitly call Connection.connect()
before using the connection.
"""
# Debug log format for the server's Connection.Start payload.
START_DEBUG_FMT = """
Start from server, version: %d.%d, properties: %s, mechanisms: %s, locales: %s
""".strip()
__all__ = ['Connection']
AMQP_LOGGER = logging.getLogger('amqp')
#: Default map for :attr:`Connection.library_properties`
LIBRARY_PROPERTIES = {
    'product': 'py-amqp',
    'product_version': __version__,
}
#: Default map for :attr:`Connection.negotiate_capabilities`
NEGOTIATE_CAPABILITIES = {
    'consumer_cancel_notify': True,
    'connection.blocked': True,
    'authentication_failure_close': True,
}
class Connection(AbstractChannel):
    """AMQP Connection.

    The connection class provides methods for a client to establish a
    network connection to a server, and for both peers to operate the
    connection thereafter.

    GRAMMAR::

        connection        = open-connection *use-connection close-connection
        open-connection   = C:protocol-header
                            S:START C:START-OK
                            *challenge
                            S:TUNE C:TUNE-OK
                            C:OPEN S:OPEN-OK
        challenge         = S:SECURE C:SECURE-OK
        use-connection    = *channel
        close-connection  = C:CLOSE S:CLOSE-OK
                          / S:CLOSE C:CLOSE-OK

    Create a connection to the specified host, which should be
    a 'host[:port]', such as 'localhost', or '1.2.3.4:5672'
    (defaults to 'localhost', if a port is not specified then
    5672 is used)

    Authentication can be controlled by passing one or more
    `amqp.sasl.SASL` instances as the `authentication` parameter, or
    setting the `login_method` string to one of the supported methods:
    'GSSAPI', 'EXTERNAL', 'AMQPLAIN', or 'PLAIN'.
    Otherwise authentication will be performed using any supported method
    preferred by the server. Userid and passwords apply to AMQPLAIN and
    PLAIN authentication, whereas on GSSAPI only userid will be used as the
    client name. For EXTERNAL authentication both userid and password are
    ignored.

    The 'ssl' parameter may be simply True/False, or for Python >= 2.6
    a dictionary of options to pass to ssl.wrap_socket() such as
    requiring certain certificates.

    The "socket_settings" parameter is a dictionary defining tcp
    settings which will be applied as socket options.

    When "confirm_publish" is set to True, the channel is put to
    confirm mode. In this mode, each published message is
    confirmed using the Publisher confirms RabbitMQ extension.
    """
#: Channel implementation instantiated for new channels on this
#: connection.
Channel = Channel
#: Mapping of protocol extensions to enable.
#: The server will report these in server_properties[capabilities],
#: and if a key in this map is present the client will tell the
#: server to either enable or disable the capability depending
#: on the value set in this map.
#: For example with:
#:     negotiate_capabilities = {
#:         'consumer_cancel_notify': True,
#:     }
#: The client will enable this capability if the server reports
#: support for it, but if the value is False the client will
#: disable the capability.
negotiate_capabilities = NEGOTIATE_CAPABILITIES
#: These are sent to the server to announce what features
#: we support, type of client etc.
library_properties = LIBRARY_PROPERTIES
#: Final heartbeat interval value (in float seconds) after negotiation
heartbeat = None
#: Original heartbeat interval value proposed by client.
client_heartbeat = None
#: Original heartbeat interval proposed by server.
server_heartbeat = None
#: Time of last heartbeat sent (in monotonic time, if available).
last_heartbeat_sent = 0
#: Time of last heartbeat received (in monotonic time, if available).
last_heartbeat_received = 0
#: Number of successful writes to socket.
bytes_sent = 0
#: Number of successful reads from socket.
bytes_recv = 0
#: Number of bytes sent to socket at the last heartbeat check.
prev_sent = None
#: Number of bytes received from socket at the last heartbeat check.
prev_recv = None
# Connection-class methods this endpoint understands, keyed by method
# signature (argument-type strings per spec.method).
_METHODS = {
    spec.method(spec.Connection.Start, 'ooFSS'),
    spec.method(spec.Connection.OpenOk),
    spec.method(spec.Connection.Secure, 's'),
    spec.method(spec.Connection.Tune, 'BlB'),
    spec.method(spec.Connection.Close, 'BsBB'),
    spec.method(spec.Connection.Blocked),
    spec.method(spec.Connection.Unblocked),
    spec.method(spec.Connection.CloseOk),
}
_METHODS = {m.method_sig: m for m in _METHODS}
#: Exceptions that indicate the whole connection has failed.
connection_errors = (
    ConnectionError,
    socket.error,
    IOError,
    OSError,
)
#: Exceptions that indicate a single channel has failed.
channel_errors = (ChannelError,)
#: Connection errors after which reconnecting may succeed.
recoverable_connection_errors = (
    RecoverableConnectionError,
    socket.error,
    IOError,
    OSError,
)
#: Channel errors after which the channel may be reopened.
recoverable_channel_errors = (
    RecoverableChannelError,
)
def __init__(self, host='localhost:5672', userid='guest', password='guest',
             login_method=None, login_response=None,
             authentication=(),
             virtual_host='/', locale='en_US', client_properties=None,
             ssl=False, connect_timeout=None, channel_max=None,
             frame_max=None, heartbeat=0, on_open=None, on_blocked=None,
             on_unblocked=None, confirm_publish=False,
             on_tune_ok=None, read_timeout=None, write_timeout=None,
             socket_settings=None, frame_handler=frame_handler,
             frame_writer=frame_writer, **kwargs):
    """Prepare (but do not open) the connection.

    See the class docstring for the meaning of the main parameters.
    Authentication is resolved in order: explicit `authentication`
    SASL instances, then `login_method` (optionally with a raw
    `login_response`), then a default chain of GSSAPI, EXTERNAL,
    AMQPLAIN and PLAIN built from `userid`/`password`.

    :raises ValueError: if `login_method` requires a userid/password
        that was not supplied, or is not a recognized method name
    """
    self._connection_id = uuid.uuid4().hex
    # Fall back to protocol defaults when the caller passed None/0.
    channel_max = channel_max or 65535
    frame_max = frame_max or 131072
    if authentication:
        # Accept either a single SASL instance or a sequence of them.
        if isinstance(authentication, sasl.SASL):
            authentication = (authentication,)
        self.authentication = authentication
    elif login_method is not None:
        if login_method == 'GSSAPI':
            auth = sasl.GSSAPI(userid)
        elif login_method == 'EXTERNAL':
            auth = sasl.EXTERNAL()
        elif login_method == 'AMQPLAIN':
            if userid is None or password is None:
                raise ValueError(
                    "Must supply authentication or userid/password")
            auth = sasl.AMQPLAIN(userid, password)
        elif login_method == 'PLAIN':
            if userid is None or password is None:
                raise ValueError(
                    "Must supply authentication or userid/password")
            auth = sasl.PLAIN(userid, password)
        elif login_response is not None:
            # Unknown method name but a pre-built response was given:
            # pass both through verbatim.
            auth = sasl.RAW(login_method, login_response)
        else:
            raise ValueError("Invalid login method", login_method)
        self.authentication = (auth,)
    else:
        # Default preference chain; GSSAPI is allowed to fail softly.
        self.authentication = (sasl.GSSAPI(userid, fail_soft=True),
                               sasl.EXTERNAL(),
                               sasl.AMQPLAIN(userid, password),
                               sasl.PLAIN(userid, password))
    # Library defaults, overridable by caller-supplied properties.
    self.client_properties = dict(
        self.library_properties, **client_properties or {}
    )
    self.locale = locale
    self.host = host
    self.virtual_host = virtual_host
    self.on_tune_ok = ensure_promise(on_tune_ok)
    self.frame_handler_cls = frame_handler
    self.frame_writer_cls = frame_writer
    self._handshake_complete = False
    self.channels = {}
    # The connection object itself is treated as channel 0
    super(Connection, self).__init__(self, 0)
    self._frame_writer = None
    self._on_inbound_frame = None
    self._transport = None
    # Properties set in the Tune method
    self.channel_max = channel_max
    self.frame_max = frame_max
    self.client_heartbeat = heartbeat
    self.confirm_publish = confirm_publish
    self.ssl = ssl
    self.read_timeout = read_timeout
    self.write_timeout = write_timeout
    self.socket_settings = socket_settings
    # Callbacks
    self.on_blocked = on_blocked
    self.on_unblocked = on_unblocked
    self.on_open = ensure_promise(on_open)
    # Pool of free channel ids, highest first.
    self._avail_channel_ids = array('H', range(self.channel_max, 0, -1))
    # Properties set in the Start method
    self.version_major = 0
    self.version_minor = 0
    self.server_properties = {}
    self.mechanisms = []
    self.locales = []
    self.connect_timeout = connect_timeout
def __enter__(self):
    """Open the connection on entering a ``with`` block."""
    self.connect()
    return self
def __exit__(self, *eargs):
    """Close the connection when leaving a ``with`` block."""
    self.close()
def then(self, on_success, on_error=None):
    """Attach success/error callbacks to the on_open promise."""
    promise = self.on_open
    return promise.then(on_success, on_error)
def _setup_listeners(self):
    """Register handlers for connection-class methods from the server."""
    handlers = {
        spec.Connection.Start: self._on_start,
        spec.Connection.OpenOk: self._on_open_ok,
        spec.Connection.Secure: self._on_secure,
        spec.Connection.Tune: self._on_tune,
        spec.Connection.Close: self._on_close,
        spec.Connection.Blocked: self._on_blocked,
        spec.Connection.Unblocked: self._on_unblocked,
        spec.Connection.CloseOk: self._on_close_ok,
    }
    self._callbacks.update(handlers)
def connect(self, callback=None):
    """Establish the transport and perform the AMQP handshake.

    If already connected, simply invokes *callback* (if given) and
    returns its result.  On socket/SSL errors, partially-opened
    resources are collected and the error is re-raised.

    :param callback: optional callable invoked on the
        already-connected path.  NOTE(review): it is NOT called after
        a fresh handshake completes — confirm whether callers rely on
        that.
    """
    # Let the transport.py module setup the actual
    # socket connection to the broker.
    #
    if self.connected:
        return callback() if callback else None
    try:
        self.transport = self.Transport(
            self.host, self.connect_timeout, self.ssl,
            self.read_timeout, self.write_timeout,
            socket_settings=self.socket_settings,
        )
        self.transport.connect()
        self.on_inbound_frame = self.frame_handler_cls(
            self, self.on_inbound_method)
        self.frame_writer = self.frame_writer_cls(self, self.transport)
        # Pump events until the handshake callbacks (defined outside
        # this view) flip _handshake_complete.
        while not self._handshake_complete:
            self.drain_events(timeout=self.connect_timeout)
    except (OSError, IOError, SSLError):
        # Tear down whatever was opened before propagating the error.
        self.collect()
        raise
def _warn_force_connect(self, attr):
    """Warn that *attr* was accessed before an explicit connect()."""
    message = W_FORCE_CONNECT.format(attr=attr)
    warnings.warn(AMQPDeprecationWarning(message))
@property
def transport(self):
    """Transport instance; implicitly connects (with a deprecation
    warning) if accessed before connect()."""
    if self._transport is None:
        self._warn_force_connect('transport')
        self.connect()
    return self._transport

@transport.setter
def transport(self, transport):
    self._transport = transport
@property
def on_inbound_frame(self):
    """Inbound frame handler; implicitly connects (with a deprecation
    warning) if accessed before connect()."""
    if self._on_inbound_frame is None:
        self._warn_force_connect('on_inbound_frame')
        self.connect()
    return self._on_inbound_frame

@on_inbound_frame.setter
def on_inbound_frame(self, on_inbound_frame):
    self._on_inbound_frame = on_inbound_frame
@property
def frame_writer(self):
if self._frame_writer is None:
self._warn_force_connect('frame_writer')
self.connect()
return self._frame_writer
    @frame_writer.setter
    def frame_writer(self, frame_writer):
        # Plain assignment; the getter lazily connects when this is None.
        self._frame_writer = frame_writer
def _on_start(self, version_major, version_minor, server_properties,
mechanisms, locales, argsig='FsSs'):
client_properties = self.client_properties
self.version_major = version_major
self.version_minor = version_minor
self.server_properties = server_properties
if isinstance(mechanisms, string):
mechanisms = mechanisms.encode('utf-8')
self.mechanisms = mechanisms.split(b' ')
self.locales = locales.split(' ')
AMQP_LOGGER.debug(
START_DEBUG_FMT,
self.version_major, self.version_minor,
self.server_properties, self.mechanisms, self.locales,
)
# Negotiate protocol extensions (capabilities)
scap = server_properties.get('capabilities') or {}
cap = client_properties.setdefault('capabilities', {})
cap.update({
wanted_cap: enable_cap
for wanted_cap, enable_cap in items(self.negotiate_capabilities)
if scap.get(wanted_cap)
})
if not cap:
# no capabilities, server may not react well to having
# this key present in client_properties, so we remove it.
client_properties.pop('capabilities', None)
for authentication in self.authentication:
if authentication.mechanism in self.mechanisms:
login_response = authentication.start(self)
if login_response is not NotImplemented:
break
else:
raise ConnectionError(
"Couldn't find appropriate auth mechanism "
"(can offer: {0}; available: {1})".format(
b", ".join(m.mechanism
for m in self.authentication
if m.mechanism).decode(),
b", ".join(self.mechanisms).decode()))
self.send_method(
spec.Connection.StartOk, argsig,
(client_properties, authentication.mechanism,
login_response, self.locale),
)
    def _on_secure(self, challenge):
        """Handle Connection.Secure (SASL challenge); intentionally a no-op."""
        pass
    def _on_tune(self, channel_max, frame_max, server_heartbeat, argsig='BlB'):
        """Handle Connection.Tune: settle limits/heartbeat and send Tune-Ok.

        Zero means "unlimited/disabled" on either side, so if one side sends
        zero the other side's value wins (max); otherwise the smaller of the
        two heartbeat intervals is used.
        """
        client_heartbeat = self.client_heartbeat or 0
        self.channel_max = channel_max or self.channel_max
        self.frame_max = frame_max or self.frame_max
        self.server_heartbeat = server_heartbeat or 0
        # negotiate the heartbeat interval to the smaller of the
        # specified values
        if self.server_heartbeat == 0 or client_heartbeat == 0:
            self.heartbeat = max(self.server_heartbeat, client_heartbeat)
        else:
            self.heartbeat = min(self.server_heartbeat, client_heartbeat)
        # Ignore server heartbeat if client_heartbeat is disabled
        if not self.client_heartbeat:
            self.heartbeat = 0
        self.send_method(
            spec.Connection.TuneOk, argsig,
            (self.channel_max, self.frame_max, self.heartbeat),
            callback=self._on_tune_sent,
        )
def _on_tune_sent(self, argsig='ssb'):
self.send_method(
spec.Connection.Open, argsig, (self.virtual_host, '', False),
)
    def _on_open_ok(self):
        """Handle Connection.Open-Ok: handshake finished, fire ``on_open``."""
        self._handshake_complete = True
        self.on_open(self)
    def Transport(self, host, connect_timeout,
                  ssl=False, read_timeout=None, write_timeout=None,
                  socket_settings=None, **kwargs):
        """Factory for the low-level transport used by this connection."""
        return Transport(
            host, connect_timeout=connect_timeout, ssl=ssl,
            read_timeout=read_timeout, write_timeout=write_timeout,
            socket_settings=socket_settings, **kwargs)
@property
def connected(self):
return self._transport and self._transport.connected
    def collect(self):
        """Tear down the transport and all channels, swallowing socket errors.

        Always resets ``_transport``/``connection``/``channels`` to None.
        """
        try:
            if self._transport:
                self._transport.close()
            # collect every channel except the connection itself (channel 0)
            temp_list = [x for x in values(self.channels or {})
                         if x is not self]
            for ch in temp_list:
                ch.collect()
        except socket.error:
            pass  # connection already closed on the other end
        finally:
            self._transport = self.connection = self.channels = None
    def _get_free_channel_id(self):
        """Pop and return an unused channel id.

        :raises ResourceError: when every channel id is in use.
        """
        try:
            return self._avail_channel_ids.pop()
        except IndexError:
            raise ResourceError(
                'No free channel ids, current={0}, channel_max={1}'.format(
                    len(self.channels), self.channel_max), spec.Channel.Open)
    def _claim_channel_id(self, channel_id):
        """Reserve a specific channel id.

        :raises ConnectionError: if the id was already claimed.
        """
        try:
            return self._avail_channel_ids.remove(channel_id)
        except ValueError:
            raise ConnectionError('Channel %r already open' % (channel_id,))
def channel(self, channel_id=None, callback=None):
"""Create new channel.
Fetch a Channel object identified by the numeric channel_id, or
create that object if it doesn't already exist.
"""
if self.channels is not None:
try:
return self.channels[channel_id]
except KeyError:
channel = self.Channel(self, channel_id, on_open=callback)
channel.open()
return channel
raise RecoverableConnectionError('Connection already closed.')
    def is_alive(self):
        """Not supported here; rely on AMQP heartbeats instead."""
        raise NotImplementedError('Use AMQP heartbeats')
def drain_events(self, timeout=None):
# read until message is ready
while not self.blocking_read(timeout):
pass
    def blocking_read(self, timeout=None):
        """Read one frame (honouring ``timeout``) and dispatch it."""
        with self.transport.having_timeout(timeout):
            frame = self.transport.read_frame()
        return self.on_inbound_frame(frame)
def on_inbound_method(self, channel_id, method_sig, payload, content):
return self.channels[channel_id].dispatch_method(
method_sig, payload, content,
)
    def close(self, reply_code=0, reply_text='', method_sig=(0, 0),
              argsig='BsBB'):
        """Request a connection close.
        This method indicates that the sender wants to close the
        connection. This may be due to internal conditions (e.g. a
        forced shut-down) or due to an error handling a specific
        method, i.e. an exception. When a close is due to an
        exception, the sender provides the class and method id of the
        method which caused the exception.
        RULE:
            After sending this method any received method except the
            Close-OK method MUST be discarded.
        RULE:
            The peer sending this method MAY use a counter or timeout
            to detect failure of the other peer to respond correctly
            with the Close-OK method.
        RULE:
            When a server receives the Close method from a client it
            MUST delete all server-side resources associated with the
            client's context. A client CANNOT reconnect to a context
            after sending or receiving a Close method.
        PARAMETERS:
            reply_code: short
                The reply code. The AMQ reply codes are defined in AMQ
                RFC 011.
            reply_text: shortstr
                The localised reply text. This text can be logged as an
                aid to resolving issues.
            class_id: short
                failing method class
                When the close is provoked by a method exception, this
                is the class of the method.
            method_id: short
                failing method ID
                When the close is provoked by a method exception, this
                is the ID of the method.
        """
        if self._transport is None:
            # already closed
            return
        try:
            self.is_closing = True
            # Block until the server acknowledges with Close-Ok.
            return self.send_method(
                spec.Connection.Close, argsig,
                (reply_code, reply_text, method_sig[0], method_sig[1]),
                wait=spec.Connection.CloseOk,
            )
        except (OSError, IOError, SSLError):
            self.is_closing = False
            # close connection
            self.collect()
            raise
    def _on_close(self, reply_code, reply_text, class_id, method_id):
        """Handle a server-initiated connection close.
        This method indicates that the sender wants to close the
        connection. This may be due to internal conditions (e.g. a
        forced shut-down) or due to an error handling a specific
        method, i.e. an exception. When a close is due to an
        exception, the sender provides the class and method id of the
        method which caused the exception.
        RULE:
            After sending this method any received method except the
            Close-OK method MUST be discarded.
        RULE:
            The peer sending this method MAY use a counter or timeout
            to detect failure of the other peer to respond correctly
            with the Close-OK method.
        RULE:
            When a server receives the Close method from a client it
            MUST delete all server-side resources associated with the
            client's context. A client CANNOT reconnect to a context
            after sending or receiving a Close method.
        PARAMETERS:
            reply_code: short
                The reply code. The AMQ reply codes are defined in AMQ
                RFC 011.
            reply_text: shortstr
                The localised reply text. This text can be logged as an
                aid to resolving issues.
            class_id: short
                failing method class
                When the close is provoked by a method exception, this
                is the class of the method.
            method_id: short
                failing method ID
                When the close is provoked by a method exception, this
                is the ID of the method.
        """
        # Acknowledge first, then surface the server-reported error.
        self._x_close_ok()
        raise error_for_code(reply_code, reply_text,
                             (class_id, method_id), ConnectionError)
    def _x_close_ok(self):
        """Confirm a connection close.
        This method confirms a Connection.Close method and tells the
        recipient that it is safe to release resources for the
        connection and close the socket.
        RULE:
            A peer that detects a socket closure without having
            received a Close-Ok handshake method SHOULD log the error.
        """
        # Close-Ok carries no arguments; _on_close_ok then releases state.
        self.send_method(spec.Connection.CloseOk, callback=self._on_close_ok)
    def _on_close_ok(self):
        """Confirm a connection close.
        This method confirms a Connection.Close method and tells the
        recipient that it is safe to release resources for the
        connection and close the socket.
        RULE:
            A peer that detects a socket closure without having
            received a Close-Ok handshake method SHOULD log the error.
        """
        # Final step of the close handshake: release transport and channels.
        self.collect()
def _on_blocked(self):
"""Callback called when connection blocked.
Notes:
This is an RabbitMQ Extension.
"""
reason = 'connection blocked, see broker logs'
if self.on_blocked:
return self.on_blocked(reason)
def _on_unblocked(self):
if self.on_unblocked:
return self.on_unblocked()
    def send_heartbeat(self):
        """Write a heartbeat frame (frame type 8, channel 0) to the wire."""
        self.frame_writer(8, 0, None, None, None)
    def heartbeat_tick(self, rate=2):
        """Send heartbeat packets if necessary.
        Raises:
            ~amqp.exceptions.ConnectionForced: if none have been
                received recently.
        Note:
            This should be called frequently, on the order of
            once per second.
        Keyword Arguments:
            rate (int): Previously used, but ignored now.
        """
        AMQP_LOGGER.debug('heartbeat_tick : for connection %s',
                          self._connection_id)
        if not self.heartbeat:
            return
        # treat actual data exchange in either direction as a heartbeat
        sent_now = self.bytes_sent
        recv_now = self.bytes_recv
        if self.prev_sent is None or self.prev_sent != sent_now:
            self.last_heartbeat_sent = monotonic()
        if self.prev_recv is None or self.prev_recv != recv_now:
            self.last_heartbeat_received = monotonic()
        now = monotonic()
        AMQP_LOGGER.debug(
            'heartbeat_tick : Prev sent/recv: %s/%s, '
            'now - %s/%s, monotonic - %s, '
            'last_heartbeat_sent - %s, heartbeat int. - %s '
            'for connection %s',
            self.prev_sent, self.prev_recv,
            sent_now, recv_now, now,
            self.last_heartbeat_sent,
            self.heartbeat,
            self._connection_id,
        )
        self.prev_sent, self.prev_recv = sent_now, recv_now
        # send a heartbeat if it's time to do so
        if now > self.last_heartbeat_sent + self.heartbeat:
            AMQP_LOGGER.debug(
                'heartbeat_tick: sending heartbeat for connection %s',
                self._connection_id)
            self.send_heartbeat()
            self.last_heartbeat_sent = monotonic()
        # if we've missed two intervals' heartbeats, fail; this gives the
        # server enough time to send heartbeats a little late
        if (self.last_heartbeat_received and
                self.last_heartbeat_received + 2 *
                self.heartbeat < monotonic()):
            raise ConnectionForced('Too many heartbeats missed')
    @property
    def sock(self):
        """Underlying socket of the transport (may force a connect)."""
        return self.transport.sock
@property
def server_capabilities(self):
return self.server_properties.get('capabilities') or {}
| {
"repo_name": "cloudera/hue",
"path": "desktop/core/ext-py/amqp-2.4.1/amqp/connection.py",
"copies": "3",
"size": "26305",
"license": "apache-2.0",
"hash": -3454930289542322000,
"line_mean": 34.1671122995,
"line_max": 79,
"alpha_frac": 0.600456187,
"autogenerated": false,
"ratio": 4.409151860543077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6509608047543077,
"avg_score": null,
"num_lines": null
} |
"""AMQP Connections
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import logging
import socket
from array import array
import pprint
import six
from threading import Event, Thread, Lock
from . import __version__, compat
from .proto import Method
from .method_io import MethodReader, MethodWriter
from .serialization import AMQPWriter
from .abstract_channel import AbstractChannel
from .channel import Channel
from .exceptions import ResourceError, AMQPConnectionError, Timeout, error_for_code
from .transport import create_transport
from . import spec
from .spec import method_t
from .concurrency import synchronized_connection
from .login import login_responses
__all__ = ['Connection']
# client property info that gets sent to the server on connection startup
LIBRARY_PROPERTIES = {
    'product': 'amqpy',
    'product_version': __version__,
    'version': __version__,
    'capabilities': {},
}
# module-level logger shared by all connections in this module
log = logging.getLogger('amqpy')
# NOTE(review): presumably installs py2/py3 compatibility shims at import
# time -- confirm against the compat module
compat.patch()
class Connection(AbstractChannel):
    """The connection class provides methods for a client to establish a network connection to a
    server, and for both peers to operate the connection thereafter
    """
    def __init__(self, host='localhost', port=5672, ssl=None, connect_timeout=None,
                 userid='guest', password='guest', login_method='AMQPLAIN', virtual_host='/',
                 locale='en_US',
                 channel_max=65535, frame_max=131072,
                 heartbeat=0,
                 client_properties=None,
                 on_blocked=None, on_unblocked=None):
        """Create a connection to the specified host
        If you are using SSL, make sure the correct port number is specified (usually 5671), as the
        default of 5672 is for non-SSL connections.
        :param str host: host
        :param int port: port
        :param ssl: dict of SSL options passed to :func:`ssl.wrap_socket()`, None to disable SSL
        :param float connect_timeout: connect timeout
        :param str userid: username
        :param str password: password
        :param str login_method: login method (this is server-specific); default is for RabbitMQ
        :param str virtual_host: virtual host
        :param str locale: locale
        :param int channel_max: maximum number of channels
        :param int frame_max: maximum frame payload size in bytes
        :param float heartbeat: heartbeat interval in seconds, 0 disables heartbeat
        :param client_properties: dict of client properties
        :param on_blocked: callback on connection blocked
        :param on_unblocked: callback on connection unblocked
        :type connect_timeout: float or None
        :type client_properties: dict or None
        :type ssl: dict or None
        :type on_blocked: Callable or None
        :type on_unblocked: Callable or None
        """
        log.debug('amqpy {} Connection.__init__()'.format(__version__))
        self.conn_lock = Lock()
        #: Map of `{channel_id: Channel}` for all active channels
        #:
        #: :type: dict[int, Channel]
        self.channels = {}  # dict of {channel_id int: Channel}
        # the connection object itself is treated as channel 0
        super(Connection, self).__init__(self, 0)  # also sets channels[0] = self
        # instance variables
        #: :type: amqpy.transport.Transport
        self.transport = None
        self.method_reader = None
        self.method_writer = None
        self._wait_tune_ok = None
        # properties set in the start method, after a connection is established
        self.version_major = 0
        self.version_minor = 0
        self.server_properties = {}
        self.mechanisms = []
        self.locales = []
        # properties set in the Tune method
        self.channel_max = channel_max
        self.frame_max = frame_max
        # array typecode must be bytes on py2 and str on py3
        if six.PY2:
            self._avail_channel_ids = array(b'H', range(self.channel_max, 0, -1))
        else:
            self._avail_channel_ids = array('H', range(self.channel_max, 0, -1))
        self._heartbeat_final = 0  # final heartbeat interval after negotiation
        self._heartbeat_server = None
        # save connection parameters
        self._host = host
        self._port = port
        self._connect_timeout = connect_timeout
        self._ssl = ssl
        self._userid = userid
        self._password = password
        self._login_method = login_method
        self._virtual_host = virtual_host
        self._locale = locale
        self._heartbeat_client = heartbeat  # original heartbeat interval value proposed by client
        self._client_properties = client_properties
        # callbacks
        self.on_blocked = on_blocked
        self.on_unblocked = on_unblocked
        # heartbeat
        self._close_event = Event()
        self._heartbeat_thread = None
        # the constructor connects eagerly
        self.connect()
    def connect(self):
        """Connect using saved connection parameters
        This method does not need to be called explicitly; it is called by
        the constructor during initialization.
        Note: reconnecting invalidates all declarations (channels, queues,
        consumers, delivery tags, etc.).
        """
        # start the connection; this also sends the connection protocol header
        self.connection = self  # AbstractChannel.connection
        self.transport = create_transport(self._host, self._port, self._connect_timeout,
                                          self.frame_max, self._ssl)
        # create global instances of `MethodReader` and `MethodWriter` which can be used by all
        # channels
        self.method_reader = MethodReader(self.transport)
        self.method_writer = MethodWriter(self.transport, self.frame_max)
        # wait for server to send the 'start' method
        self.wait(spec.Connection.Start)
        # create 'login response' to send to server
        login_response = login_responses[self._login_method](self._userid, self._password)
        # reply with 'start-ok' and connection parameters
        # noinspection PyArgumentList
        client_props = dict(LIBRARY_PROPERTIES, **self._client_properties or {})
        self._send_start_ok(client_props, self._login_method, login_response, self._locale)
        # tune loop: server may interleave Secure challenges before Tune
        self._wait_tune_ok = True
        while self._wait_tune_ok:
            self.wait_any([spec.Connection.Secure, spec.Connection.Tune])
        self._send_open(self._virtual_host)
        # set up automatic heartbeats, if requested for:
        if self._heartbeat_final:
            self._close_event.clear()
            log.debug('Start automatic heartbeat thread')
            thr = Thread(target=self._heartbeat_run,
                         name='amqp-HeartBeatThread-%s' % id(self))
            thr.daemon = True
            thr.start()
            self._heartbeat_thread = thr
@property
def last_heartbeat_recv(self):
return self.transport.last_heartbeat_received
@property
def last_heartbeat_sent(self):
return self.transport.last_heartbeat_sent
@property
def connected(self):
"""Check if connection is connected
:return: True if connected, else False
:rtype: bool
"""
return bool(self.transport and self.transport.connected)
@property
def sock(self):
"""Access underlying TCP socket
:return: socket
:rtype: socket.socket
"""
if self.transport and self.transport.sock:
return self.transport.sock
@property
def server_capabilities(self):
"""Get server capabilities
These properties are set only after successfully connecting.
:return: server capabilities
:rtype: dict
"""
return self.server_properties.get('capabilities') or {}
@synchronized_connection()
def channel(self, channel_id=None):
"""Create a new channel, or fetch the channel associated with `channel_id` if specified
:param channel_id: channel ID number
:type channel_id: int or None
:return: Channel
:rtype: amqpy.channel.Channel
"""
try:
return self.channels[channel_id]
except KeyError:
return Channel(self, channel_id)
    def send_heartbeat(self):
        """Send a heartbeat to the server
        """
        # heartbeat frames are written directly by the transport layer
        self.transport.send_heartbeat()
def is_alive(self):
"""Check if connection is alive
This method is the primary way to check if the connection is alive.
Side effects: This method may send a heartbeat as a last resort to check if the connection
is alive.
:return: True if connection is alive, else False
:rtype: bool
"""
if self.transport:
return self.transport.is_alive()
else:
return False
    def _wait_any(self, timeout=None):
        """Wait for any event on the connection (for any channel)
        When a method is received on the channel, it is delivered to the
        appropriate channel incoming method queue
        :param float timeout: timeout
        :return: method
        :rtype: amqpy.proto.Method
        """
        # check the method queue of each channel; drain queued methods
        # before doing a blocking socket read
        for ch_id, channel in self.channels.items():
            if channel.incoming_methods:
                return channel.incoming_methods.pop(0)
        # do a blocking read for any incoming method
        method = self.method_reader.read_method(timeout)
        return method
    def drain_events(self, timeout=None):
        """Wait for an event on all channels
        This method should be called after creating consumers in order to
        receive delivered messages and execute consumer callbacks.
        :param timeout: maximum allowed time to wait for an event
        :type timeout: float or None
        :raise amqpy.exceptions.Timeout: if the operation times out
        """
        method = self._wait_any(timeout)
        assert isinstance(method, Method)
        #: :type: amqpy.Channel
        # dispatch to the channel the method arrived on
        channel = self.channels[method.channel_id]
        return channel.handle_method(method)
def loop(self, timeout=None):
"""Call :meth:`drain_events` continuously
- Does not raise Timeout exceptions if a timeout occurs
:param timeout: maximum allowed time to wait for an event
:type timeout: float or None
"""
while True:
try:
self.drain_events(timeout)
except Timeout:
break
    def close(self, reply_code=0, reply_text='', method_type=method_t(0, 0)):
        """Close connection to the server
        This method performs a connection close handshake with the server, then closes the
        underlying connection.
        If this connection close is due to a client error, the client may provide a `reply_code`,
        `reply_text`, and `method_type` to indicate to the server the reason for closing the
        connection.
        :param int reply_code: the reply code
        :param str reply_text: localized reply text
        :param method_type: if close is triggered by a failing method, this is the method that
            caused it
        :type method_type: amqpy.spec.method_t
        """
        if not self.is_alive():
            # already closed
            log.debug('Already closed')
            return
        # signal to the heartbeat thread to stop sending heartbeats
        if self._heartbeat_final:
            self._close_event.set()
            self._heartbeat_thread.join()
            self._heartbeat_thread = None
        # serialize the Close method arguments
        args = AMQPWriter()
        args.write_short(reply_code)
        args.write_shortstr(reply_text)
        args.write_short(method_type.class_id)
        args.write_short(method_type.method_id)
        self._send_method(Method(spec.Connection.Close, args))
        # either side may initiate close; wait for whichever reply arrives
        return self.wait_any([spec.Connection.Close, spec.Connection.CloseOk])
    def _heartbeat_run(self):
        """Background loop that keeps the connection alive until close."""
        # `is_alive()` sends heartbeats if the connection is alive
        while self.is_alive():
            # `close` is set to true if the `close_event` is signalled
            # poll at 2/3 of the negotiated interval so a period is not missed
            close = self._close_event.wait(self._heartbeat_final / 1.5)
            if close:
                break
    def _close(self):
        """Close the transport and every channel, then reset local state."""
        try:
            self.transport.close()
            # close every channel except the connection itself (channel 0)
            channels = [x for x in self.channels.values() if x is not self]
            for ch in channels:
                # noinspection PyProtectedMember
                ch._close()
        except socket.error:
            pass  # connection already closed on the other end
        finally:
            self.transport = self.connection = None
            self.channels = {0: self}  # reset the channels state
    def _get_free_channel_id(self):
        """Get next free channel ID
        :return: next free channel_id
        :rtype: int
        :raise ResourceError: if no free channel ids remain
        """
        try:
            return self._avail_channel_ids.pop()
        except IndexError:
            raise ResourceError('No free channel ids, current={0}, channel_max={1}'.format(
                len(self.channels), self.channel_max), spec.Channel.Open)
    def _claim_channel_id(self, channel_id):
        """Claim channel ID
        :param channel_id: channel ID
        :type channel_id: int
        :raise AMQPConnectionError: if the channel id is already in use
        """
        try:
            return self._avail_channel_ids.remove(channel_id)
        except ValueError:
            raise AMQPConnectionError('Channel {} already open'.format(channel_id))
    def _cb_close(self, method):
        """Handle received connection close
        This method indicates that the sender (server) wants to close the connection. This may be
        due to internal conditions (e.g. a forced shut-down) or due to an error handling a specific
        method, i.e. an exception. When a close is due to an exception, the sender provides the
        class and method id of the method which caused the exception.
        :raise AMQPConnectionError: always, carrying the server-reported code
        """
        args = method.args
        reply_code = args.read_short()  # the AMQP reply code
        reply_text = args.read_shortstr()  # the localized reply text
        class_id = args.read_short()  # class_id of method
        method_id = args.read_short()  # method_id of method
        self._send_close_ok()  # send a close-ok to the server, to confirm that we've
        # acknowledged the close request
        method_type = method_t(class_id, method_id)
        raise error_for_code(reply_code, reply_text, method_type, AMQPConnectionError,
                             self.channel_id)
def _cb_blocked(self, method):
"""RabbitMQ Extension
"""
reason = method.args.read_shortstr()
if callable(self.on_blocked):
# noinspection PyCallingNonCallable
return self.on_blocked(reason)
def _cb_unblocked(self, method):
assert method
if callable(self.on_unblocked):
# noinspection PyCallingNonCallable
return self.on_unblocked()
    def _send_close_ok(self):
        """Confirm a connection close that has been requested by the server
        This method confirms a Connection.Close method and tells the recipient that it is safe to
        release resources for the connection and close the socket. RULE: A peer that detects a
        socket closure without having received a Close-Ok handshake method SHOULD log the error.
        """
        self._send_method(Method(spec.Connection.CloseOk))
        # tear down our side immediately after acknowledging
        self._close()
    def _cb_close_ok(self, method):
        """Confirm a connection close
        This method is called when the server send a close-ok in response to our close request. It
        is now safe to close the underlying connection.
        """
        assert method
        self._close()
    def _send_open(self, virtual_host, capabilities=''):
        """Open connection to virtual host
        This method opens a connection to a virtual host, which is a collection of resources, and
        acts to separate multiple application domains within a server. RULE: The client MUST open
        the context before doing any work on the connection.
        :param virtual_host: virtual host path
        :param capabilities: required capabilities
        :type virtual_host: str
        :type capabilities: str
        """
        args = AMQPWriter()
        args.write_shortstr(virtual_host)
        args.write_shortstr(capabilities)
        args.write_bit(False)  # the deprecated "insist" bit
        self._send_method(Method(spec.Connection.Open, args))
        # block until the server confirms the virtual host is open
        return self.wait(spec.Connection.OpenOk)
    def _cb_open_ok(self, method):
        """Signal that the connection is ready
        This method signals to the client that the connection is ready for use.
        """
        assert method
        log.debug('Open OK')
    def _cb_secure(self, method):
        """Security mechanism challenge
        The SASL protocol works by exchanging challenges and responses until both peers have
        received sufficient information to authenticate each other This method challenges the
        client to provide more information.
        PARAMETERS:
            challenge: longstr
                security challenge data
                Challenge information, a block of opaque binary data passed to the security
                mechanism.
        """
        # the challenge is read but not acted upon here
        challenge = method.args.read_longstr()
        assert challenge
    def _send_secure_ok(self, response):
        """Security mechanism response
        This method attempts to authenticate, passing a block of SASL data for the security
        mechanism at the server side.
        PARAMETERS:
            response: longstr
                security response data
                A block of opaque data passed to the security mechanism. The contents of this data
                are defined by the SASL security mechanism.
        """
        args = AMQPWriter()
        args.write_longstr(response)
        self._send_method(Method(spec.Connection.SecureOk, args))
    def _cb_start(self, method):
        """Start connection negotiation callback
        This method starts the connection negotiation process by telling the client the protocol
        version that the server proposes, along with a list of security mechanisms which the client
        can use for authentication.
        RULE: If the client cannot handle the protocol version suggested by the server it MUST close
        the socket connection.
        RULE: The server MUST provide a protocol version that is lower than or equal to that
        requested by the client in the protocol header. If the server cannot support the specified
        protocol it MUST NOT send this method, but MUST close the socket connection.
        PARAMETERS:
            version_major: octet
                protocol major version
                The protocol major version that the server agrees to use, which cannot be higher
                than the client's major version.
            version_minor: octet
                protocol major version
                The protocol minor version that the server agrees to use, which cannot be higher
                than the client's minor version.
            server_properties: table
                server properties
            mechanisms: longstr
                available security mechanisms
                A list of the security mechanisms that the server supports, delimited by spaces.
                Currently ASL supports these mechanisms: PLAIN.
            locales: longstr
                available message locales
                A list of the message locales that the server supports, delimited by spaces The
                locale defines the language in which the server will send reply texts.
        RULE:
            All servers MUST support at least the en_US locale.
        """
        args = method.args
        self.version_major = args.read_octet()
        self.version_minor = args.read_octet()
        self.server_properties = args.read_table()
        # both lists are space-delimited per the AMQP spec
        self.mechanisms = args.read_longstr().split(' ')
        self.locales = args.read_longstr().split(' ')
        properties = pprint.pformat(self.server_properties)
        log.debug('Start from server')
        log.debug('Version: {}.{}'.format(self.version_major, self.version_minor))
        log.debug('Server properties:\n{}'.format(properties))
        log.debug('Security mechanisms: {}'.format(self.mechanisms))
        log.debug('Locales: {}'.format(self.locales))
def _send_start_ok(self, client_properties, mechanism, response, locale):
"""Select security mechanism and locale
This method selects a SASL security mechanism. ASL uses SASL (RFC2222) to negotiate
authentication and encryption.
PARAMETERS:
client_properties: table
client properties
mechanism: shortstr
selected security mechanism
A single security mechanisms selected by the client, which must be one of those
specified by the server.
RULE:
The client SHOULD authenticate using the highest- level security profile it
can handle from the list
provided by the server.
RULE:
The mechanism field MUST contain one of the security mechanisms proposed by
the server in the Start
method. If it doesn't, the server MUST close the socket.
response: longstr
security response data
A block of opaque data passed to the security mechanism. The contents of this
data are defined by the
SASL security mechanism For the PLAIN security mechanism this is defined as a
field table holding two
fields, LOGIN and PASSWORD.
locale: shortstr
selected message locale
A single message local selected by the client, which must be one of those
specified by the server.
"""
if self.server_capabilities.get('consumer_cancel_notify'):
if 'capabilities' not in client_properties:
client_properties['capabilities'] = {}
client_properties['capabilities']['consumer_cancel_notify'] = True
if self.server_capabilities.get('connection.blocked'):
if 'capabilities' not in client_properties:
client_properties['capabilities'] = {}
client_properties['capabilities']['connection.blocked'] = True
args = AMQPWriter()
args.write_table(client_properties)
args.write_shortstr(mechanism)
args.write_longstr(response)
args.write_shortstr(locale)
self._send_method(Method(spec.Connection.StartOk, args))
    def _cb_tune(self, method):
        """Handle received "tune" method
        This method is the handler for receiving a "tune" method. `channel_max` and `frame_max`
        are set to the lower
        of the values proposed by each party.
        PARAMETERS:
            channel_max: short
                proposed maximum channels
                The maximum total number of channels that the server allows per connection. Zero
                means that the server
                does not impose a fixed limit, but the number of allowed channels may be limited
                by available server
                resources.
            frame_max: long
                proposed maximum frame size
                The largest frame size that the server proposes for the connection. The client
                can negotiate a lower
                value Zero means that the server does not impose any specific limit but may
                reject very large frames
                if it cannot allocate resources for them.
                RULE:
                    Until the frame-max has been negotiated, both peers MUST accept frames of up
                    to 4096 octets large.
                    The minimum non-zero value for the frame-max field is 4096.
            heartbeat: short
                desired heartbeat delay
                The delay, in seconds, of the connection heartbeat that the server wants Zero
                means the server does
                not want a heartbeat.
        """
        args = method.args
        client_heartbeat = self._heartbeat_client or 0
        # maximum number of channels that the server supports
        self.channel_max = min(args.read_short(), self.channel_max)
        # largest frame size the server proposes for the connection
        self.frame_max = min(args.read_long(), self.frame_max)
        # the writer must honour the negotiated frame size immediately
        self.method_writer.frame_max = self.frame_max
        # heartbeat interval proposed by server
        self._heartbeat_server = args.read_short() or 0
        # negotiate the heartbeat interval to the smaller of the specified values
        if self._heartbeat_server == 0 or client_heartbeat == 0:
            self._heartbeat_final = max(self._heartbeat_server, client_heartbeat)
        else:
            self._heartbeat_final = min(self._heartbeat_server, client_heartbeat)
        # Ignore server heartbeat if client_heartbeat is disabled
        if not self._heartbeat_client:
            self._heartbeat_final = 0
        self._send_tune_ok(self.channel_max, self.frame_max, self._heartbeat_final)
def _send_tune_ok(self, channel_max, frame_max, heartbeat):
"""Negotiate connection tuning parameters
This method sends the client's connection tuning parameters to the server. Certain fields
are negotiated, others provide capability information.
PARAMETERS:
channel_max: short
negotiated maximum channels
The maximum total number of channels that the client will use per connection.
May not be higher than
the value specified by the server.
RULE:
The server MAY ignore the channel-max value or MAY use it for tuning its
resource allocation.
frame_max: long
negotiated maximum frame size
The largest frame size that the client and server will use for the connection.
Zero means that the
client does not impose any specific limit but may reject very large frames if it
cannot allocate
resources for them. Note that the frame-max limit applies principally to content
frames, where large
contents can be broken into frames of arbitrary size.
RULE:
Until the frame-max has been negotiated, both peers must accept frames of up
to 4096 octets large.
The minimum non-zero value for the frame- max field is 4096.
heartbeat: short
desired heartbeat delay
The delay, in seconds, of the connection heartbeat that the client wants. Zero
means the client does not
want a heartbeat.
"""
args = AMQPWriter()
args.write_short(channel_max)
args.write_long(frame_max)
args.write_short(heartbeat or 0)
self._send_method(Method(spec.Connection.TuneOk, args))
self._wait_tune_ok = False
METHOD_MAP = {
spec.Connection.Start: _cb_start,
spec.Connection.Secure: _cb_secure,
spec.Connection.Tune: _cb_tune,
spec.Connection.OpenOk: _cb_open_ok,
spec.Connection.Close: _cb_close,
spec.Connection.CloseOk: _cb_close_ok,
spec.Connection.Blocked: _cb_blocked,
spec.Connection.Unblocked: _cb_unblocked,
}
| {
"repo_name": "veegee/amqpy",
"path": "amqpy/connection.py",
"copies": "1",
"size": "27811",
"license": "mit",
"hash": 6925710791181273000,
"line_mean": 38.6733238231,
"line_max": 100,
"alpha_frac": 0.6214447521,
"autogenerated": false,
"ratio": 4.6922557786401216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012329160151983228,
"num_lines": 701
} |
"""AMQP Connections
"""
import logging
import socket
from array import array
import pprint
from threading import Event, Thread
import time
from . import __version__, compat
from .proto import Method
from .method_io import MethodReader, MethodWriter
from .serialization import AMQPWriter
from .abstract_channel import AbstractChannel
from .channel import Channel
from .exceptions import ResourceError, AMQPConnectionError, Timeout, error_for_code
from .transport import create_transport
from . import spec
from .spec import method_t
from .concurrency import synchronized
# public API of this module
__all__ = ['Connection']

# client property info that gets sent to the server on connection startup
LIBRARY_PROPERTIES = {
    'product': 'amqpy',
    'product_version': __version__,
    'version': __version__,
    'capabilities': {},
}

# module-level logger shared by the amqpy package
log = logging.getLogger('amqpy')

# apply runtime compatibility patches at import time
compat.patch()
class Connection(AbstractChannel):
"""The connection class provides methods for a client to establish a network connection to a
server, and for both peers to operate the connection thereafter
"""
    def __init__(self, *, host='localhost', port=5672, ssl=None, connect_timeout=None,
                 userid='guest', password='guest', login_method='AMQPLAIN', virtual_host='/',
                 locale='en_US',
                 channel_max=65535, frame_max=131072,
                 heartbeat=0,
                 client_properties=None,
                 on_blocked=None, on_unblocked=None):
        """Create a connection to the specified host

        If you are using SSL, make sure the correct port number is specified (usually 5671), as
        the default of 5672 is for non-SSL connections.

        :param str host: host
        :param int port: port
        :param ssl: dict of SSL options passed to :func:`ssl.wrap_socket()`, None to disable SSL
        :param float connect_timeout: connect timeout
        :param str userid: username
        :param str password: password
        :param str login_method: login method (this is server-specific); default is for RabbitMQ
        :param str virtual_host: virtual host
        :param str locale: locale
        :param int channel_max: proposed maximum number of channels (negotiated down during tune)
        :param int frame_max: proposed maximum frame payload size in bytes (negotiated during tune)
        :param float heartbeat: heartbeat interval in seconds, 0 disables heartbeat
        :param client_properties: dict of client properties
        :param on_blocked: callback on connection blocked
        :param on_unblocked: callback on connection unblocked
        :type connect_timeout: float or None
        :type client_properties: dict or None
        :type ssl: dict or None
        :type on_blocked: Callable or None
        :type on_unblocked: Callable or None
        """
        log.debug('amqpy {} Connection.__init__()'.format(__version__))

        #: Map of `{channel_id: Channel}` for all active channels
        #:
        #: :type: dict[int, Channel]
        self.channels = {}  # dict of {channel_id int: Channel}

        # the connection object itself is treated as channel 0
        super().__init__(self, 0)  # also sets channels[0] = self

        # instance variables, populated later by connect()
        #: :type: amqpy.transport.Transport
        self.transport = None
        self.method_reader = None
        self.method_writer = None
        self._wait_tune_ok = None  # True while the tune handshake is in progress

        # properties set in the start method, after a connection is established
        self.version_major = 0
        self.version_minor = 0
        self.server_properties = {}
        self.mechanisms = []
        self.locales = []

        # properties set in the Tune method
        self.channel_max = channel_max
        self.frame_max = frame_max
        # pool of channel ids still available for allocation (popped highest-first)
        self._avail_channel_ids = array('H', range(self.channel_max, 0, -1))
        self._heartbeat_final = 0  # final heartbeat interval after negotiation
        self._heartbeat_server = None  # heartbeat interval proposed by the server

        # save connection parameters so connect() can be re-run to reconnect
        self._host = host
        self._port = port
        self._connect_timeout = connect_timeout
        self._ssl = ssl
        self._userid = userid
        self._password = password
        self._login_method = login_method
        self._virtual_host = virtual_host
        self._locale = locale
        self._heartbeat_client = heartbeat  # original heartbeat interval value proposed by client
        self._client_properties = client_properties

        # callbacks
        self.on_blocked = on_blocked
        self.on_unblocked = on_unblocked

        # heartbeat: the event signals the heartbeat thread to stop
        self._close_event = Event()
        self._heartbeat_thread = None

        self.connect()
    def connect(self):
        """Connect using saved connection parameters

        This method does not need to be called explicitly; it is called by the constructor
        during initialization.

        Performs the full AMQP connection handshake in order: protocol header, start/start-ok,
        (secure/)tune/tune-ok, open/open-ok, then starts the heartbeat thread if a non-zero
        heartbeat interval was negotiated.

        Note: reconnecting invalidates all declarations (channels, queues, consumers, delivery
        tags, etc.).
        """
        # start the connection; this also sends the connection protocol header
        self.connection = self  # AbstractChannel.connection
        self.transport = create_transport(self._host, self._port, self._connect_timeout,
                                          self.frame_max, self._ssl)

        # create global instances of `MethodReader` and `MethodWriter` which can be used by all
        # channels
        self.method_reader = MethodReader(self.transport)
        self.method_writer = MethodWriter(self.transport, self.frame_max)

        # wait for server to send the 'start' method
        self.wait(spec.Connection.Start)

        # create 'login response' to send to server
        login_response = AMQPWriter()
        login_response.write_table({'LOGIN': self._userid, 'PASSWORD': self._password})
        login_response = login_response.getvalue()[4:]  # skip the length

        # reply with 'start-ok' and connection parameters
        # noinspection PyArgumentList
        client_props = dict(LIBRARY_PROPERTIES, **self._client_properties or {})
        self._send_start_ok(client_props, self._login_method, login_response, self._locale)

        # handle secure/tune methods until tuning completes; _cb_tune sends tune-ok, which
        # clears _wait_tune_ok and ends this loop
        self._wait_tune_ok = True
        while self._wait_tune_ok:
            self.wait_any([spec.Connection.Secure, spec.Connection.Tune])

        self._send_open(self._virtual_host)

        # set up automatic heartbeats, if requested for:
        if self._heartbeat_final:
            self._close_event.clear()
            log.debug('Start automatic heartbeat thread')
            # daemon thread so it never blocks interpreter shutdown
            thr = Thread(target=self._heartbeat_run,
                         name='amqp-HeartBeatThread-%s' % id(self),
                         daemon=True)
            thr.start()
            self._heartbeat_thread = thr
@property
def last_heartbeat_recv(self):
return self.transport.last_heartbeat_received
@property
def last_heartbeat_sent(self):
return self.transport.last_heartbeat_sent
@property
def connected(self):
"""Check if connection is connected
:return: True if connected, else False
:rtype: bool
"""
return bool(self.transport and self.transport.connected)
@property
def sock(self):
"""Access underlying TCP socket
:return: socket
:rtype: socket.socket
"""
if self.transport and self.transport.sock:
return self.transport.sock
@property
def server_capabilities(self):
"""Get server capabilities
These properties are set only after successfully connecting.
:return: server capabilities
:rtype: dict
"""
return self.server_properties.get('capabilities') or {}
@synchronized('lock')
def channel(self, channel_id=None):
"""Create a new channel, or fetch the channel associated with `channel_id` if specified
:param channel_id: channel ID number
:type channel_id: int or None
:return: Channel
:rtype: amqpy.channel.Channel
"""
try:
return self.channels[channel_id]
except KeyError:
return Channel(self, channel_id)
def send_heartbeat(self):
"""Send a heartbeat to the server
"""
self.transport.send_heartbeat()
def is_alive(self):
"""Check if connection is alive
This method is the primary way to check if the connection is alive.
Side effects: This method may send a heartbeat as a last resort to check if the connection
is alive.
:return: True if connection is alive, else False
:rtype: bool
"""
if self.transport:
return self.transport.is_alive()
else:
return False
def _wait_any(self, timeout=None):
"""Wait for any event on the connection (for any channel)
When a method is received on the channel, it is delivered to the
appropriate channel incoming method queue
:param float timeout: timeout
:return: method
:rtype: amqpy.proto.Method
"""
# check the method queue of each channel
for ch_id, channel in self.channels.items():
if channel.incoming_methods:
return channel.incoming_methods.pop(0)
# do a blocking read for any incoming method
method = self.method_reader.read_method(timeout)
return method
def drain_events(self, timeout=None):
"""Wait for an event on all channels
This method should be called after creating consumers in order to
receive delivered messages and execute consumer callbacks.
:param timeout: maximum allowed time to wait for an event
:type timeout: float or None
:raise amqpy.exceptions.Timeout: if the operation times out
"""
method = self._wait_any(timeout)
assert isinstance(method, Method)
#: :type: amqpy.Channel
channel = self.channels[method.channel_id]
return channel.handle_method(method)
def loop(self, timeout=None):
"""Call :meth:`drain_events` continuously
- Does not raise Timeout exceptions if a timeout occurs
:param timeout: maximum allowed time to wait for an event
:type timeout: float or None
"""
while True:
try:
self.drain_events(timeout)
except Timeout:
break
    def close(self, reply_code=0, reply_text='', method_type=method_t(0, 0)):
        """Close connection to the server

        This method performs a connection close handshake with the server, then closes the
        underlying connection.

        If this connection close is due to a client error, the client may provide a `reply_code`,
        `reply_text`, and `method_type` to indicate to the server the reason for closing the
        connection.

        :param int reply_code: the reply code
        :param str reply_text: localized reply text
        :param method_type: if close is triggered by a failing method, this is the method that
            caused it
        :type method_type: amqpy.spec.method_t
        """
        if not self.is_alive():
            # already closed
            log.debug('Already closed')
            return

        # signal the heartbeat thread to stop, and wait for it to exit before tearing down the
        # connection it uses
        if self._heartbeat_final:
            self._close_event.set()
            self._heartbeat_thread.join()
            self._heartbeat_thread = None

        # send Connection.Close with the reason, then wait for the server to confirm with
        # Close-Ok (or for a concurrent Close from the server)
        args = AMQPWriter()
        args.write_short(reply_code)
        args.write_shortstr(reply_text)
        args.write_short(method_type.class_id)
        args.write_short(method_type.method_id)
        self._send_method(Method(spec.Connection.Close, args))
        return self.wait_any([spec.Connection.Close, spec.Connection.CloseOk])
    def _heartbeat_run(self):
        # Heartbeat thread main loop. `is_alive()` sends heartbeats as a side effect while the
        # connection is alive, so this loop both monitors and keeps the connection fresh.
        while self.is_alive():
            # wake at 2/3 of the negotiated interval; `close` becomes True when `close_event`
            # is signalled, telling this thread to exit
            close = self._close_event.wait(self._heartbeat_final / 1.5)
            if close:
                break

    def _close(self):
        # Tear down the transport and all dependent channels. Safe to call after the peer has
        # already closed the socket.
        try:
            self.transport.close()

            # close every channel except channel 0 (this connection itself)
            channels = [x for x in self.channels.values() if x is not self]
            for ch in channels:
                # noinspection PyProtectedMember
                ch._close()
        except socket.error:
            pass  # connection already closed on the other end
        finally:
            # drop all references so further use fails fast
            self.transport = self.connection = self.channels = None
def _get_free_channel_id(self):
"""Get next free channel ID
:return: next free channel_id
:rtype: int
"""
try:
return self._avail_channel_ids.pop()
except IndexError:
raise ResourceError('No free channel ids, current={0}, channel_max={1}'.format(
len(self.channels), self.channel_max), spec.Channel.Open)
def _claim_channel_id(self, channel_id):
"""Claim channel ID
:param channel_id: channel ID
:type channel_id: int
"""
try:
return self._avail_channel_ids.remove(channel_id)
except ValueError:
raise AMQPConnectionError('Channel {} already open'.format(channel_id))
    def _cb_close(self, method):
        """Handle received connection close

        This method indicates that the sender (server) wants to close the connection. This may
        be due to internal conditions (e.g. a forced shut-down) or due to an error handling a
        specific method, i.e. an exception. When a close is due to an exception, the sender
        provides the class and method id of the method which caused the exception.

        :param method: the received Close method
        :type method: amqpy.proto.Method
        :raise amqpy.exceptions.AMQPConnectionError: always raised (an exception class chosen
            by `error_for_code` for the server's reply code) after acknowledging the close
        """
        args = method.args
        reply_code = args.read_short()  # the AMQP reply code
        reply_text = args.read_shortstr()  # the localized reply text
        class_id = args.read_short()  # class_id of method
        method_id = args.read_short()  # method_id of method

        # send a close-ok to the server, to confirm that we've acknowledged the close request;
        # this also tears down the local transport and channels
        self._send_close_ok()

        method_type = method_t(class_id, method_id)
        raise error_for_code(reply_code, reply_text, method_type, AMQPConnectionError,
                             self.channel_id)
def _cb_blocked(self, method):
"""RabbitMQ Extension
"""
reason = method.args.read_shortstr()
if callable(self.on_blocked):
# noinspection PyCallingNonCallable
return self.on_blocked(reason)
def _cb_unblocked(self, method):
assert method
if callable(self.on_unblocked):
# noinspection PyCallingNonCallable
return self.on_unblocked()
def _send_close_ok(self):
"""Confirm a connection close that has been requested by the server
This method confirms a Connection.Close method and tells the recipient that it is safe to
release resources for the connection and close the socket. RULE: A peer that detects a
socket closure without having received a Close-Ok handshake method SHOULD log the error.
"""
self._send_method(Method(spec.Connection.CloseOk))
self._close()
def _cb_close_ok(self, method):
"""Confirm a connection close
This method is called when the server send a close-ok in response to our close request. It
is now safe to close the underlying connection.
"""
assert method
self._close()
def _send_open(self, virtual_host, capabilities=''):
"""Open connection to virtual host
This method opens a connection to a virtual host, which is a collection of resources, and
acts to separate multiple application domains within a server. RULE: The client MUST open
the context before doing any work on the connection.
:param virtual_host: virtual host path
:param capabilities: required capabilities
:type virtual_host: str
:type capabilities: str
"""
args = AMQPWriter()
args.write_shortstr(virtual_host)
args.write_shortstr(capabilities)
args.write_bit(False)
self._send_method(Method(spec.Connection.Open, args))
return self.wait(spec.Connection.OpenOk)
def _cb_open_ok(self, method):
"""Signal that the connection is ready
This method signals to the client that the connection is ready for use.
"""
assert method
log.debug('Open OK')
    def _cb_secure(self, method):
        """Security mechanism challenge

        The SASL protocol works by exchanging challenges and responses until both peers have
        received sufficient information to authenticate each other. This method challenges the
        client to provide more information.

        PARAMETERS:
            challenge: longstr
                Opaque binary challenge data passed to the security mechanism.

        NOTE(review): the challenge is read and asserted non-empty but never answered via
        :meth:`_send_secure_ok`, so multi-round SASL mechanisms appear unsupported here —
        confirm this is intentional.
        """
        challenge = method.args.read_longstr()
        assert challenge

    def _send_secure_ok(self, response):
        """Security mechanism response

        This method attempts to authenticate, passing a block of SASL data for the security
        mechanism at the server side.

        PARAMETERS:
            response: longstr
                A block of opaque data passed to the security mechanism. The contents of this
                data are defined by the SASL security mechanism.
        """
        args = AMQPWriter()
        args.write_longstr(response)
        self._send_method(Method(spec.Connection.SecureOk, args))
    def _cb_start(self, method):
        """Start connection negotiation callback

        The server opens negotiation by announcing the protocol version it proposes, its
        server properties table, and the security mechanisms and message locales it supports
        (both space-delimited lists; all servers must support at least the en_US locale). The
        values are stored on this connection for use by :meth:`_send_start_ok`.

        RULE: if the client cannot handle the protocol version suggested by the server it MUST
        close the socket connection.

        :param method: the received Start method
        :type method: amqpy.proto.Method
        """
        args = method.args
        self.version_major = args.read_octet()
        self.version_minor = args.read_octet()
        self.server_properties = args.read_table()
        self.mechanisms = args.read_longstr().split(' ')
        self.locales = args.read_longstr().split(' ')

        # log the server's announcement for debugging
        properties = pprint.pformat(self.server_properties)
        log.debug('Start from server')
        log.debug('Version: {}.{}'.format(self.version_major, self.version_minor))
        log.debug('Server properties:\n{}'.format(properties))
        log.debug('Security mechanisms: {}'.format(self.mechanisms))
        log.debug('Locales: {}'.format(self.locales))
def _send_start_ok(self, client_properties, mechanism, response, locale):
"""Select security mechanism and locale
This method selects a SASL security mechanism. ASL uses SASL (RFC2222) to negotiate
authentication and encryption.
PARAMETERS:
client_properties: table
client properties
mechanism: shortstr
selected security mechanism
A single security mechanisms selected by the client, which must be one of those
specified by the server.
RULE:
The client SHOULD authenticate using the highest- level security profile it
can handle from the list
provided by the server.
RULE:
The mechanism field MUST contain one of the security mechanisms proposed by
the server in the Start
method. If it doesn't, the server MUST close the socket.
response: longstr
security response data
A block of opaque data passed to the security mechanism. The contents of this
data are defined by the
SASL security mechanism For the PLAIN security mechanism this is defined as a
field table holding two
fields, LOGIN and PASSWORD.
locale: shortstr
selected message locale
A single message local selected by the client, which must be one of those
specified by the server.
"""
if self.server_capabilities.get('consumer_cancel_notify'):
if 'capabilities' not in client_properties:
client_properties['capabilities'] = {}
client_properties['capabilities']['consumer_cancel_notify'] = True
if self.server_capabilities.get('connection.blocked'):
if 'capabilities' not in client_properties:
client_properties['capabilities'] = {}
client_properties['capabilities']['connection.blocked'] = True
args = AMQPWriter()
args.write_table(client_properties)
args.write_shortstr(mechanism)
args.write_longstr(response)
args.write_shortstr(locale)
self._send_method(Method(spec.Connection.StartOk, args))
    def _cb_tune(self, method):
        """Handle a received connection "tune" method

        Negotiate `channel_max`, `frame_max`, and the heartbeat interval with the server: the
        connection adopts the lower of each pair of proposed values, where a proposal of zero
        means "no limit" (channels/frames) or "no heartbeat". Per the AMQP spec, the minimum
        non-zero frame-max is 4096 octets. Replies with "tune-ok" via :meth:`_send_tune_ok`.

        :param method: received Tune method; its args carry channel_max (short), frame_max
            (long), and heartbeat (short), read in exactly that order
        :type method: amqpy.proto.Method
        """
        args = method.args
        client_heartbeat = self._heartbeat_client or 0
        # maximum number of channels that the server supports
        self.channel_max = min(args.read_short(), self.channel_max)
        # largest frame size the server proposes for the connection
        self.frame_max = min(args.read_long(), self.frame_max)
        # keep the writer's framing limit in sync with the negotiated value
        self.method_writer.frame_max = self.frame_max
        # heartbeat interval proposed by server
        self._heartbeat_server = args.read_short() or 0
        # negotiate the heartbeat interval to the smaller of the specified values; zero means
        # "disabled", so a zero on either side defers to the other side's proposal
        if self._heartbeat_server == 0 or client_heartbeat == 0:
            self._heartbeat_final = max(self._heartbeat_server, client_heartbeat)
        else:
            self._heartbeat_final = min(self._heartbeat_server, client_heartbeat)
        # ignore the server's heartbeat entirely if the client disabled heartbeats
        if not self._heartbeat_client:
            self._heartbeat_final = 0
        self._send_tune_ok(self.channel_max, self.frame_max, self._heartbeat_final)
def _send_tune_ok(self, channel_max, frame_max, heartbeat):
"""Negotiate connection tuning parameters
This method sends the client's connection tuning parameters to the server. Certain fields
are negotiated, others provide capability information.
PARAMETERS:
channel_max: short
negotiated maximum channels
The maximum total number of channels that the client will use per connection.
May not be higher than
the value specified by the server.
RULE:
The server MAY ignore the channel-max value or MAY use it for tuning its
resource allocation.
frame_max: long
negotiated maximum frame size
The largest frame size that the client and server will use for the connection.
Zero means that the
client does not impose any specific limit but may reject very large frames if it
cannot allocate
resources for them. Note that the frame-max limit applies principally to content
frames, where large
contents can be broken into frames of arbitrary size.
RULE:
Until the frame-max has been negotiated, both peers must accept frames of up
to 4096 octets large.
The minimum non-zero value for the frame- max field is 4096.
heartbeat: short
desired heartbeat delay
The delay, in seconds, of the connection heartbeat that the client wants. Zero
means the client does not
want a heartbeat.
"""
args = AMQPWriter()
args.write_short(channel_max)
args.write_long(frame_max)
args.write_short(heartbeat or 0)
self._send_method(Method(spec.Connection.TuneOk, args))
self._wait_tune_ok = False
METHOD_MAP = {
spec.Connection.Start: _cb_start,
spec.Connection.Secure: _cb_secure,
spec.Connection.Tune: _cb_tune,
spec.Connection.OpenOk: _cb_open_ok,
spec.Connection.Close: _cb_close,
spec.Connection.CloseOk: _cb_close_ok,
spec.Connection.Blocked: _cb_blocked,
spec.Connection.Unblocked: _cb_unblocked,
}
| {
"repo_name": "gst/amqpy",
"path": "amqpy/connection.py",
"copies": "1",
"size": "27569",
"license": "mit",
"hash": -8458294714909558000,
"line_mean": 38.7821067821,
"line_max": 100,
"alpha_frac": 0.6209147956,
"autogenerated": false,
"ratio": 4.7078210382513666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5828735833851366,
"avg_score": null,
"num_lines": null
} |
"""AMQP Messages."""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
from __future__ import absolute_import, unicode_literals
from .serialization import GenericContent
# Intended to fix #85: ImportError: cannot import name spec
# Encountered on python 2.7.3
# "The submodules often need to refer to each other. For example, the
# surround [sic] module might use the echo module. In fact, such
# references are so common that the import statement first looks in
# the containing package before looking in the standard module search
# path."
# Source:
# http://stackoverflow.com/a/14216937/4982251
from .spec import Basic
__all__ = ['Message']
class Message(GenericContent):
    """A Message for use with the Channel.basic_* methods.

    Expected arg types

        body: string
        children: (not supported)

    Keyword properties may include:

        content_type: shortstr
            MIME content type
        content_encoding: shortstr
            MIME content encoding
        application_headers: table
            Message header field table, a dict with string keys,
            and string | int | Decimal | datetime | dict values.
        delivery_mode: octet
            Non-persistent (1) or persistent (2)
        priority: octet
            The message priority, 0 to 9
        correlation_id: shortstr
            The application correlation identifier
        reply_to: shortstr
            The destination to reply to
        expiration: shortstr
            Message expiration specification
        message_id: shortstr
            The application message identifier
        timestamp: datetime.datetime
            The message timestamp
        type: shortstr
            The message type name
        user_id: shortstr
            The creating user id
        app_id: shortstr
            The creating application id
        cluster_id: shortstr
            Intra-cluster routing identifier

    Unicode bodies are encoded according to the 'content_encoding'
    argument. If that's None, it's set to 'UTF-8' automatically.

    Example::

        msg = Message('hello world',
                      content_type='text/plain',
                      application_headers={'foo': 7})
    """

    # AMQP class id of the Basic content class this message belongs to
    CLASS_ID = Basic.CLASS_ID

    #: Instances of this class have these attributes, which
    #: are passed back and forth as message properties between
    #: client and server
    PROPERTIES = [
        ('content_type', 's'),
        ('content_encoding', 's'),
        ('application_headers', 'F'),
        ('delivery_mode', 'o'),
        ('priority', 'o'),
        ('correlation_id', 's'),
        ('reply_to', 's'),
        ('expiration', 's'),
        ('message_id', 's'),
        ('timestamp', 'L'),
        ('type', 's'),
        ('user_id', 's'),
        ('app_id', 's'),
        ('cluster_id', 's')
    ]

    def __init__(self, body='', children=None, channel=None, **properties):
        """
        :param body: message payload
        :param children: not supported
        :param channel: channel the message is associated with, if any
        :param properties: AMQP message properties (see class docstring)
        """
        super(Message, self).__init__(**properties)
        #: set by basic_consume/basic_get
        self.delivery_info = None
        self.body = body
        self.channel = channel

    @property
    def headers(self):
        """The 'application_headers' message property, or None if unset"""
        return self.properties.get('application_headers')

    @property
    def delivery_tag(self):
        """The server-assigned delivery tag

        NOTE(review): raises AttributeError when accessed before delivery
        (delivery_info is None) — confirm callers only use this on received messages.
        """
        return self.delivery_info.get('delivery_tag')
| {
"repo_name": "cloudera/hue",
"path": "desktop/core/ext-py/amqp-2.4.1/amqp/basic_message.py",
"copies": "4",
"size": "3342",
"license": "apache-2.0",
"hash": 7161767717731535000,
"line_mean": 27.3220338983,
"line_max": 75,
"alpha_frac": 0.5969479354,
"autogenerated": false,
"ratio": 4.467914438502674,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 118
} |
"""AMQP Messaging Framework for Python"""
from __future__ import absolute_import
VERSION = (2, 1, 2)
__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
__author__ = "Ask Solem"
__contact__ = "ask@celeryproject.org"
__homepage__ = "http://github.com/ask/kombu/"
__docformat__ = "restructuredtext en"
# -eof meta-
import os
import sys
# NOTE: this module targets Python 2 (`dict.iteritems` is used below)
if sys.version_info < (2, 5):  # pragma: no cover
    if sys.version_info >= (2, 4):
        raise Exception(
            "Python 2.4 is not supported by this version. "
            "Please use Kombu versions 1.x.")
    else:
        raise Exception("Kombu requires Python versions 2.5 or later.")

# Lazy loading.
# - See werkzeug/__init__.py for the rationale behind this.
from types import ModuleType

# public names exposed at package level, keyed by the submodule that owns them
all_by_module = {
    "kombu.connection": ["BrokerConnection", "Connection"],
    "kombu.entity": ["Exchange", "Queue"],
    "kombu.messaging": ["Consumer", "Producer"],
    "kombu.pools": ["connections", "producers"],
}

# reverse index: public name -> owning submodule
object_origins = {}
for module, items in all_by_module.iteritems():
    for item in items:
        object_origins[item] = module
class module(ModuleType):
    """Module subclass that lazily imports the real kombu submodules

    Attribute access for a name listed in `object_origins` triggers the import of the owning
    submodule; every public name of that submodule is then cached on this module object, so
    subsequent lookups are plain attribute reads.
    """

    def __getattr__(self, name):
        if name in object_origins:
            module = __import__(object_origins[name], None, None, [name])
            # cache every sibling name from the same submodule at once
            for extra_name in all_by_module[module.__name__]:
                setattr(self, extra_name, getattr(module, extra_name))
            return getattr(module, name)
        return ModuleType.__getattribute__(self, name)

    def __dir__(self):
        # advertise the lazily-loaded names plus standard module attributes
        result = list(new_module.__all__)
        result.extend(("__file__", "__path__", "__doc__", "__all__",
                       "__docformat__", "__name__", "__path__", "VERSION",
                       "__package__", "__version__", "__author__",
                       "__contact__", "__homepage__", "__docformat__"))
        return result
# 2.5 does not define __package__
try:
    package = __package__
except NameError:
    package = "kombu"

# keep a reference to this module so that it's not garbage collected
old_module = sys.modules[__name__]

# replace this module in sys.modules with the lazy-loading module instance
new_module = sys.modules[__name__] = module(__name__)

# carry over the metadata attributes the replacement module must expose
new_module.__dict__.update({
    "__file__": __file__,
    "__path__": __path__,
    "__doc__": __doc__,
    "__all__": tuple(object_origins),
    "__version__": __version__,
    "__author__": __author__,
    "__contact__": __contact__,
    "__homepage__": __homepage__,
    "__docformat__": __docformat__,
    "__package__": package,
    "VERSION": VERSION})

# opt-in debug logging of channel/connection activity via the environment
if os.environ.get("KOMBU_LOG_DEBUG"):
    os.environ.update(KOMBU_LOG_CHANNEL="1", KOMBU_LOG_CONNECTION="1")
    from .utils import debug
    debug.setup_logging()
| {
"repo_name": "kumar303/rockit",
"path": "vendor-local/kombu/__init__.py",
"copies": "1",
"size": "2682",
"license": "bsd-3-clause",
"hash": -9216560969761368000,
"line_mean": 30.5529411765,
"line_max": 74,
"alpha_frac": 0.571961223,
"autogenerated": false,
"ratio": 3.6639344262295084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4735895649229508,
"avg_score": null,
"num_lines": null
} |
"""AMQP Messaging Framework for Python"""
VERSION = (1, 1, 2)
__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
__author__ = "Ask Solem"
__contact__ = "ask@celeryproject.org"
__homepage__ = "http://github.com/ask/kombu/"
__docformat__ = "restructuredtext en"
import os
import sys
# KOMBU_NO_EVAL disables the sys.modules replacement below (useful for tools that cannot
# handle a lazily-loading module object)
if not os.environ.get("KOMBU_NO_EVAL", False):
    # Lazy loading.
    # - See werkzeug/__init__.py for the rationale behind this.
    from types import ModuleType

    # public names exposed at package level, keyed by the submodule that owns them
    all_by_module = {
        "kombu.connection": ["BrokerConnection", "Connection"],
        "kombu.entity": ["Exchange", "Queue"],
        "kombu.messaging": ["Consumer", "Producer"],
    }

    # reverse index: public name -> owning submodule (Python 2: `iteritems`)
    object_origins = {}
    for module, items in all_by_module.iteritems():
        for item in items:
            object_origins[item] = module

    class module(ModuleType):
        """Module subclass that lazily imports the real kombu submodules on attribute access"""

        def __getattr__(self, name):
            if name in object_origins:
                module = __import__(object_origins[name], None, None, [name])
                # cache every sibling name from the same submodule at once
                for extra_name in all_by_module[module.__name__]:
                    setattr(self, extra_name, getattr(module, extra_name))
                return getattr(module, name)
            return ModuleType.__getattribute__(self, name)

        def __dir__(self):
            # advertise the lazily-loaded names plus standard module attributes
            result = list(new_module.__all__)
            result.extend(("__file__", "__path__", "__doc__", "__all__",
                           "__docformat__", "__name__", "__path__", "VERSION",
                           "__package__", "__version__", "__author__",
                           "__contact__", "__homepage__", "__docformat__"))
            return result

    # keep a reference to this module so that it's not garbage collected
    old_module = sys.modules[__name__]
    # replace this module in sys.modules with the lazy-loading module instance
    new_module = sys.modules[__name__] = module(__name__)
    # carry over the metadata attributes the replacement module must expose
    new_module.__dict__.update({
        "__file__": __file__,
        "__path__": __path__,
        "__doc__": __doc__,
        "__all__": tuple(object_origins),
        "__version__": __version__,
        "__author__": __author__,
        "__contact__": __contact__,
        "__homepage__": __homepage__,
        "__docformat__": __docformat__,
        "VERSION": VERSION})

# opt-in debug logging of channel/connection activity via the environment
if os.environ.get("KOMBU_LOG_DEBUG"):
    os.environ.update(KOMBU_LOG_CHANNEL="1", KOMBU_LOG_CONNECTION="1")
    from kombu.utils import debug
    debug.setup_logging()
| {
"repo_name": "disqus/kombu",
"path": "kombu/__init__.py",
"copies": "1",
"size": "2364",
"license": "bsd-3-clause",
"hash": 3520256919983443000,
"line_mean": 35.9375,
"line_max": 78,
"alpha_frac": 0.5283417936,
"autogenerated": false,
"ratio": 3.9797979797979797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9993225000670707,
"avg_score": 0.0029829545454545456,
"num_lines": 64
} |
"""AMQP Messaging Framework for Python"""
VERSION = (1, 1, 3)
__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
__author__ = "Ask Solem"
__contact__ = "ask@celeryproject.org"
__homepage__ = "http://github.com/ask/kombu/"
__docformat__ = "restructuredtext en"
import os
import sys
if not os.environ.get("KOMBU_NO_EVAL", False):
# Lazy loading.
# - See werkzeug/__init__.py for the rationale behind this.
from types import ModuleType
all_by_module = {
"kombu.connection": ["BrokerConnection", "Connection"],
"kombu.entity": ["Exchange", "Queue"],
"kombu.messaging": ["Consumer", "Producer"],
}
object_origins = {}
for module, items in all_by_module.iteritems():
for item in items:
object_origins[item] = module
class module(ModuleType):
def __getattr__(self, name):
if name in object_origins:
module = __import__(object_origins[name], None, None, [name])
for extra_name in all_by_module[module.__name__]:
setattr(self, extra_name, getattr(module, extra_name))
return getattr(module, name)
return ModuleType.__getattribute__(self, name)
def __dir__(self):
result = list(new_module.__all__)
result.extend(("__file__", "__path__", "__doc__", "__all__",
"__docformat__", "__name__", "__path__", "VERSION",
"__package__", "__version__", "__author__",
"__contact__", "__homepage__", "__docformat__"))
return result
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules[__name__]
new_module = sys.modules[__name__] = module(__name__)
new_module.__dict__.update({
"__file__": __file__,
"__path__": __path__,
"__doc__": __doc__,
"__all__": tuple(object_origins),
"__version__": __version__,
"__author__": __author__,
"__contact__": __contact__,
"__homepage__": __homepage__,
"__docformat__": __docformat__,
"VERSION": VERSION})
if os.environ.get("KOMBU_LOG_DEBUG"):
os.environ.update(KOMBU_LOG_CHANNEL="1", KOMBU_LOG_CONNECTION="1")
from kombu.utils import debug
debug.setup_logging()
| {
"repo_name": "pantheon-systems/kombu",
"path": "kombu/__init__.py",
"copies": "1",
"size": "2364",
"license": "bsd-3-clause",
"hash": -5658283977309734000,
"line_mean": 35.9375,
"line_max": 78,
"alpha_frac": 0.5283417936,
"autogenerated": false,
"ratio": 3.9797979797979797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5008139773397979,
"avg_score": null,
"num_lines": null
} |
"""AMQP Messaging Framework for Python"""
VERSION = (1, 2, 1)
__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
__author__ = "Ask Solem"
__contact__ = "ask@celeryproject.org"
__homepage__ = "http://github.com/ask/kombu/"
__docformat__ = "restructuredtext en"
import os
import sys
if not os.environ.get("KOMBU_NO_EVAL", False):
# Lazy loading.
# - See werkzeug/__init__.py for the rationale behind this.
from types import ModuleType
all_by_module = {
"kombu.connection": ["BrokerConnection", "Connection"],
"kombu.entity": ["Exchange", "Queue"],
"kombu.messaging": ["Consumer", "Producer"],
}
object_origins = {}
for module, items in all_by_module.iteritems():
for item in items:
object_origins[item] = module
class module(ModuleType):
def __getattr__(self, name):
if name in object_origins:
module = __import__(object_origins[name], None, None, [name])
for extra_name in all_by_module[module.__name__]:
setattr(self, extra_name, getattr(module, extra_name))
return getattr(module, name)
return ModuleType.__getattribute__(self, name)
def __dir__(self):
result = list(new_module.__all__)
result.extend(("__file__", "__path__", "__doc__", "__all__",
"__docformat__", "__name__", "__path__", "VERSION",
"__package__", "__version__", "__author__",
"__contact__", "__homepage__", "__docformat__"))
return result
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules[__name__]
new_module = sys.modules[__name__] = module(__name__)
new_module.__dict__.update({
"__file__": __file__,
"__path__": __path__,
"__doc__": __doc__,
"__all__": tuple(object_origins),
"__version__": __version__,
"__author__": __author__,
"__contact__": __contact__,
"__homepage__": __homepage__,
"__docformat__": __docformat__,
"VERSION": VERSION})
if os.environ.get("KOMBU_LOG_DEBUG"):
os.environ.update(KOMBU_LOG_CHANNEL="1", KOMBU_LOG_CONNECTION="1")
from kombu.utils import debug
debug.setup_logging()
| {
"repo_name": "WoLpH/kombu",
"path": "kombu/__init__.py",
"copies": "1",
"size": "2364",
"license": "bsd-3-clause",
"hash": -4680261998531073000,
"line_mean": 35.9375,
"line_max": 78,
"alpha_frac": 0.5283417936,
"autogenerated": false,
"ratio": 3.9797979797979797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9993225000670707,
"avg_score": 0.0029829545454545456,
"num_lines": 64
} |
"""AMQP Messaging Framework for Python"""
VERSION = (1, 5, 1)
__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
__author__ = "Ask Solem"
__contact__ = "ask@celeryproject.org"
__homepage__ = "http://github.com/ask/kombu/"
__docformat__ = "restructuredtext en"
# -eof meta-
import os
import sys
# Lazy loading.
# - See werkzeug/__init__.py for the rationale behind this.
from types import ModuleType
all_by_module = {
"kombu.connection": ["BrokerConnection", "Connection"],
"kombu.entity": ["Exchange", "Queue"],
"kombu.messaging": ["Consumer", "Producer"],
"kombu.pools": ["connections", "producers"],
}
object_origins = {}
for module, items in all_by_module.iteritems():
for item in items:
object_origins[item] = module
class module(ModuleType):
def __getattr__(self, name):
if name in object_origins:
module = __import__(object_origins[name], None, None, [name])
for extra_name in all_by_module[module.__name__]:
setattr(self, extra_name, getattr(module, extra_name))
return getattr(module, name)
return ModuleType.__getattribute__(self, name)
def __dir__(self):
result = list(new_module.__all__)
result.extend(("__file__", "__path__", "__doc__", "__all__",
"__docformat__", "__name__", "__path__", "VERSION",
"__package__", "__version__", "__author__",
"__contact__", "__homepage__", "__docformat__"))
return result
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules[__name__]
new_module = sys.modules[__name__] = module(__name__)
new_module.__dict__.update({
"__file__": __file__,
"__path__": __path__,
"__doc__": __doc__,
"__all__": tuple(object_origins),
"__version__": __version__,
"__author__": __author__,
"__contact__": __contact__,
"__homepage__": __homepage__,
"__docformat__": __docformat__,
"VERSION": VERSION})
if os.environ.get("KOMBU_LOG_DEBUG"):
os.environ.update(KOMBU_LOG_CHANNEL="1", KOMBU_LOG_CONNECTION="1")
from kombu.utils import debug
debug.setup_logging()
| {
"repo_name": "softak/webfaction_demo",
"path": "vendor-local/lib/python/kombu/__init__.py",
"copies": "1",
"size": "2206",
"license": "bsd-3-clause",
"hash": -7674009037040722000,
"line_mean": 31.4411764706,
"line_max": 74,
"alpha_frac": 0.5679963735,
"autogenerated": false,
"ratio": 3.652317880794702,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9701640483846083,
"avg_score": 0.003734754089723664,
"num_lines": 68
} |
"""AMQP plugin for Henson."""
import asyncio
from collections import namedtuple
from enum import IntEnum
import json
import pkg_resources
import os
import aioamqp
from henson import Extension
__all__ = ('AMQP', 'Message')
# Determine the installed package version via setuptools metadata; fall
# back to the literal string 'development' when running from a source
# checkout that was never pip-installed.
try:
    _dist = pkg_resources.get_distribution(__package__)
    if not __file__.startswith(os.path.join(_dist.location, __package__)):
        # Manually raise the exception if there is a distribution but
        # it's installed from elsewhere.
        raise pkg_resources.DistributionNotFound
except pkg_resources.DistributionNotFound:
    __version__ = 'development'
else:
    __version__ = _dist.version
# TODO: replace this namedtuple with a message class that supports
# acknowledgement, (de)serialization, and other convenience functions
# Fields: body (raw payload bytes), envelope (aioamqp delivery metadata),
# properties (aioamqp message properties).
Message = namedtuple('Message', ('body', 'envelope', 'properties'))
class Consumer:
    """A consumer of an AMQP queue.
    Args:
        app (henson.base.Application): The application for which this
            consumer consumes.
    """
    def __init__(self, app):
        """Initialize the consumer."""
        # Store a reference to the app and declare some attributes that
        # will be set later by async calls.
        self.app = app
        self._message_queue = None
        self._transport = None
        self._protocol = None
        self._channel = None
        # Register the message acknowledgement and application teardown
        # callbacks with the application.
        self.app.message_acknowledgement(self._acknowledge_message)
        self.app.teardown(self._teardown)
    async def _acknowledge_message(self, app, message):
        """Acknowledge a message on the AMQP server.
        Args:
            app (henson.base.Application): The application that
                processed the message.
            message (Message): The message returned from the consumer to
                the application.
        """
        # The delivery tag from the message's envelope identifies this
        # exact delivery to the broker.
        await self._channel.basic_client_ack(message.envelope.delivery_tag) # NOQA: line length
    async def _teardown(self, app):
        """Cleanup the protocol and transport before shutting down.
        Args:
            app (henson.base.Application): The application to which this
                Consumer belongs.
        """
        # Close the protocol before the transport; either may be None if
        # the consumer never connected.
        if self._protocol is not None:
            await self._protocol.close()
        if self._transport is not None:
            self._transport.close()
    async def _enqueue_message(self, channel, body, envelope, properties):
        """Add fetched messages to the internal message queue.
        Args:
            body (bytes): The message fetched from rabbit.
            envelope (aioamqp.envelope.Envelope): An envelope of
                message metadata.
            properties (aioamqp.properties.Properties): Additional
                properties about the message content (e.g. headers,
                content_type, etc.).
        """
        message = Message(body, envelope, properties)
        # put() applies back pressure once the queue holds
        # AMQP_PREFETCH_LIMIT messages (0 means unbounded).
        await self._message_queue.put(message)
    async def _connection_error_callback(self, exception):
        """Handle aioamqp connection errors.
        Args:
            exception (Exception): The exception resulting from the
                connection being closed.
        """
        # Funnel the exception through the same queue as messages so
        # read() can re-raise it in the consuming task.
        await self._message_queue.put(exception)
    async def _begin_consuming(self):
        """Begin reading messages from the specified AMQP broker."""
        # Create a connection to the broker
        self._message_queue = asyncio.Queue(
            maxsize=self.app.settings['AMQP_PREFETCH_LIMIT'])
        self._transport, self._protocol = await aioamqp.connect(
            host=self.app.settings['AMQP_HOST'],
            port=self.app.settings['AMQP_PORT'],
            login=self.app.settings['AMQP_USERNAME'],
            password=self.app.settings['AMQP_PASSWORD'],
            virtualhost=self.app.settings['AMQP_VIRTUAL_HOST'],
            heartbeat=self.app.settings['AMQP_HEARTBEAT_INTERVAL'],
            on_error=self._connection_error_callback,
            ssl=self.app.settings['AMQP_USE_SSL'],
            login_method='PLAIN',
            **self.app.settings['AMQP_CONNECTION_KWARGS']
        )
        # Declare the queue and exchange that we expect to read from
        self._channel = await self._protocol.channel()
        await self._channel.queue_declare(
            queue_name=self.app.settings['AMQP_INBOUND_QUEUE'],
            durable=self.app.settings['AMQP_INBOUND_QUEUE_DURABLE'],
        )
        await self._channel.basic_qos(
            prefetch_count=self.app.settings['AMQP_PREFETCH_COUNT'],
            prefetch_size=self.app.settings['AMQP_PREFETCH_SIZE'],
        )
        # The exchange is only declared and bound when one is configured;
        # an empty AMQP_INBOUND_EXCHANGE means the default exchange.
        if self.app.settings['AMQP_INBOUND_EXCHANGE']:
            await self._channel.exchange_declare(
                arguments=self.app.settings['AMQP_INBOUND_EXCHANGE_KWARGS'],
                durable=self.app.settings['AMQP_INBOUND_EXCHANGE_DURABLE'],
                exchange_name=self.app.settings['AMQP_INBOUND_EXCHANGE'],
                type_name=self.app.settings['AMQP_INBOUND_EXCHANGE_TYPE'],
            )
            await self._channel.queue_bind(
                queue_name=self.app.settings['AMQP_INBOUND_QUEUE'],
                exchange_name=self.app.settings['AMQP_INBOUND_EXCHANGE'],
                routing_key=self.app.settings['AMQP_INBOUND_ROUTING_KEY'],
            )
        # Begin reading and assign the callback function to be called
        # with each message retrieved from the broker
        await self._channel.basic_consume(
            queue_name=self.app.settings['AMQP_INBOUND_QUEUE'],
            callback=self._enqueue_message,
        )
    async def read(self):
        """Read a single message from the message queue.
        If the consumer has not yet begun reading from the AMQP broker,
        that process is initiated before yielding from the queue.
        Returns:
            Message: The next available message.
        Raises:
            aioamqp.exceptions.AioamqpException: The exception raised on
                connection close.
        """
        # On the first call to read, connect to the AMQP server and
        # begin consuming messages.
        if self._message_queue is None:
            await self._begin_consuming()
        # Read the next result from the internal message queue.
        result = await self._message_queue.get()
        # If the result is an exception, the connection was closed, and
        # the consumer was unable to recover. Raise the original
        # exception.
        if isinstance(result, Exception):
            raise result
        # Finally, return the result if it is a valid message.
        return result
    async def retry(self, app, message):
        """Requeue a message to be processed again.
        This coroutine is meant for use with the
        :class:`henson.contrib.retry.Retry` extension.
        Args:
            app (henson.base.Application): The application processing
                the message.
            message (dict): A copy of the message read from the AMQP
                server.
        .. note:: This function assumes that messages are JSON
            serializeable. If they are not, a custom function may be
            used in its place.
        """
        # Republish to the same exchange/routing key the consumer reads
        # from so the message is delivered to this application again.
        await self._channel.publish(
            payload=json.dumps(message).encode('utf-8'),
            exchange_name=self.app.settings['AMQP_INBOUND_EXCHANGE'],
            routing_key=self.app.settings['AMQP_INBOUND_ROUTING_KEY'],
        )
class Producer:
    """A producer of an AMQP queue.
    Args:
        app (henson.base.Application): The application for which this
            producer produces.
    """
    def __init__(self, app):
        """Initialize the producer."""
        # Store a reference to the application for later use.
        self.app = app
        self._transport = None
        self._protocol = None
        self._channel = None
        # Register a teardown callback.
        self.app.teardown(self._teardown)
    async def _connect(self):
        """Open the AMQP connection and channel from the app settings."""
        # Called lazily from send() on first use rather than eagerly at
        # construction time.
        self._transport, self._protocol = await aioamqp.connect(
            host=self.app.settings['AMQP_HOST'],
            port=self.app.settings['AMQP_PORT'],
            login=self.app.settings['AMQP_USERNAME'],
            password=self.app.settings['AMQP_PASSWORD'],
            virtualhost=self.app.settings['AMQP_VIRTUAL_HOST'],
            heartbeat=self.app.settings['AMQP_HEARTBEAT_INTERVAL'],
            ssl=self.app.settings['AMQP_USE_SSL'],
            **self.app.settings['AMQP_CONNECTION_KWARGS']
        )
        self._channel = await self._protocol.channel()
    async def _declare_exchange(self):
        """Declare the configured AMQP exchange."""
        await self._channel.exchange_declare(
            arguments=self.app.settings['AMQP_OUTBOUND_EXCHANGE_KWARGS'],
            durable=self.app.settings['AMQP_OUTBOUND_EXCHANGE_DURABLE'],
            exchange_name=self.app.settings['AMQP_OUTBOUND_EXCHANGE'],
            type_name=self.app.settings['AMQP_OUTBOUND_EXCHANGE_TYPE'],
        )
    async def _teardown(self, app):
        """Cleanup the protocol and transport before shutting down.
        Args:
            app (henson.base.Application): The application to which this
                Consumer belongs.
        """
        # Either attribute may still be None if send() was never called.
        if self._protocol is not None:
            await self._protocol.close()
        if self._transport is not None:
            self._transport.close()
    async def send(self, message, *, exchange_name=None, routing_key=None):
        """Send a message to the configured AMQP broker and exchange.
        Args:
            message (str): The body of the message to send.
            exchange_name (str): The exchange the message should be
                published to. If set to ``None``, the
                ``AMQP_OUTBOUND_EXCHANGE`` application setting will be
                used. Defaults to ``None``.
            routing_key (str): The routing key that should be used to
                send the message. If set to ``None``, the
                ``AMQP_OUTBOUND_ROUTING_KEY`` application setting will
                be used. Defaults to ``None``.
        """
        properties = {
            'delivery_mode': self.app.settings['AMQP_DELIVERY_MODE'],
        }
        # Connect and declare the exchange lazily on the first send.
        if not self._channel:
            await self._connect()
            await self._declare_exchange()
        if exchange_name is None:
            exchange_name = self.app.settings['AMQP_OUTBOUND_EXCHANGE']
        if routing_key is None:
            routing_key = self.app.settings['AMQP_OUTBOUND_ROUTING_KEY']
        await self._channel.publish(
            payload=message,
            exchange_name=exchange_name,
            routing_key=routing_key,
            properties=properties,
        )
class DeliveryMode(IntEnum):
    """Delivery modes understood by AMQP brokers for published messages."""

    # Message is kept in memory only; it may be lost on broker restart.
    NONPERSISTENT = 1
    # Message is written to disk and survives a broker restart.
    PERSISTENT = 2
class AMQP(Extension):
    """An interface to interact with an AMQP broker."""
    # Default values for every setting the consumer and producer read.
    # Applications override them through their own settings mapping.
    DEFAULT_SETTINGS = {
        # Connection settings
        'AMQP_HOST': 'localhost',
        'AMQP_PORT': 5672,
        'AMQP_USERNAME': 'guest',
        'AMQP_PASSWORD': 'guest',
        'AMQP_VIRTUAL_HOST': '/',
        'AMQP_HEARTBEAT_INTERVAL': 60,
        'AMQP_CONNECTION_KWARGS': {},
        'AMQP_USE_SSL': False,
        # Consumer settings
        'REGISTER_CONSUMER': False,
        'AMQP_DISPATCH_METHOD': 'ROUND_ROBIN',
        'AMQP_INBOUND_EXCHANGE': '',
        'AMQP_INBOUND_EXCHANGE_DURABLE': False,
        'AMQP_INBOUND_EXCHANGE_TYPE': 'direct',
        'AMQP_INBOUND_EXCHANGE_KWARGS': {},
        'AMQP_INBOUND_QUEUE': '',
        'AMQP_INBOUND_QUEUE_DURABLE': False,
        'AMQP_INBOUND_ROUTING_KEY': '',
        'AMQP_PREFETCH_LIMIT': 0,
        'AMQP_PREFETCH_COUNT': 0,
        'AMQP_PREFETCH_SIZE': 0,
        # Producer settings
        'AMQP_OUTBOUND_EXCHANGE': '',
        'AMQP_OUTBOUND_EXCHANGE_DURABLE': False,
        'AMQP_OUTBOUND_EXCHANGE_TYPE': 'direct',
        'AMQP_OUTBOUND_EXCHANGE_KWARGS': {},
        'AMQP_OUTBOUND_ROUTING_KEY': '',
        'AMQP_DELIVERY_MODE': DeliveryMode.NONPERSISTENT,
    }
    def init_app(self, app):
        """Initialize the application.
        If the application's ``REGISTER_CONSUMER`` setting is truthy,
        create a consumer and attach it to the application.
        Args:
            app (henson.base.Application): The application instance that
                will be initialized.
        """
        super().init_app(app)
        if app.settings['REGISTER_CONSUMER']:
            app.consumer = self.consumer()
    def consumer(self):
        """Return a new AMQP consumer.
        Returns:
            Consumer: A new consumer object that can be used to read
                from the AMQP broker and queue specified the
                Application's settings.
        """
        # A fresh Consumer is returned on every call (no caching).
        return Consumer(self.app)
    def producer(self):
        """Return an AMQP producer, creating it if necessary.
        Returns:
            Producer: A new producer object that can be used to write to
                the AMQP broker and exchange specified by the
                Application's settings.
        """
        # Unlike consumer(), the producer is created once and reused.
        if not hasattr(self, '_producer'):
            self._producer = Producer(self.app)
        return self._producer
| {
"repo_name": "iheartradio/Henson-AMQP",
"path": "henson_amqp/__init__.py",
"copies": "1",
"size": "13233",
"license": "apache-2.0",
"hash": -5321941687908394000,
"line_mean": 33.6413612565,
"line_max": 96,
"alpha_frac": 0.6056827628,
"autogenerated": false,
"ratio": 4.218361491871215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 382
} |
"""AMQPStorm Base."""
import locale
import threading
from amqpstorm.compatibility import is_string
from amqpstorm.exception import AMQPChannelError
AUTH_MECHANISM = 'PLAIN'
IDLE_WAIT = 0.01
FRAME_MAX = 131072
MAX_CHANNELS = 1024
LOCALE = locale.getdefaultlocale()[0] or 'en_US'
class Stateful(object):
    """Track a simple CLOSED/CLOSING/OPENING/OPEN lifecycle.

    Also carries a threading lock and a list of recorded exceptions
    for use by subclasses.
    """

    # Lifecycle states, in order of progression.
    CLOSED = 0
    CLOSING = 1
    OPENING = 2
    OPEN = 3

    def __init__(self):
        self._lock = threading.Lock()
        self._state = Stateful.CLOSED
        self._exceptions = []

    @property
    def lock(self):
        """Threading lock.

        :return:
        """
        return self._lock

    def set_state(self, state):
        """Transition to a new state.

        :param int state:
        :return:
        """
        self._state = state

    @property
    def current_state(self):
        """Current lifecycle state.

        :rtype: int
        """
        return self._state

    @property
    def is_closed(self):
        """Is Closed?

        :rtype: bool
        """
        return self.CLOSED == self._state

    @property
    def is_closing(self):
        """Is Closing?

        :rtype: bool
        """
        return self.CLOSING == self._state

    @property
    def is_opening(self):
        """Is Opening?

        :rtype: bool
        """
        return self.OPENING == self._state

    @property
    def is_open(self):
        """Is Open?

        :rtype: bool
        """
        return self.OPEN == self._state

    @property
    def exceptions(self):
        """All exceptions recorded by this instance.

        Useful when troubleshooting, and used internally to check the
        health of the connection.

        :rtype: list
        """
        return self._exceptions
class BaseChannel(Stateful):
    """Shared channel behaviour: a channel id plus consumer-tag tracking."""

    __slots__ = [
        '_channel_id', '_consumer_tags'
    ]

    def __init__(self, channel_id):
        super(BaseChannel, self).__init__()
        self._consumer_tags = []
        self._channel_id = channel_id

    @property
    def channel_id(self):
        """Numeric id of this channel.

        :rtype: int
        """
        return self._channel_id

    @property
    def consumer_tags(self):
        """Consumer tags currently registered on this channel.

        :rtype: list
        """
        return self._consumer_tags

    def add_consumer_tag(self, tag):
        """Register a consumer tag, ignoring duplicates.

        :param str tag: Consumer tag.
        :raises AMQPChannelError: when the tag is not a string
        :return:
        """
        if not is_string(tag):
            raise AMQPChannelError('consumer tag needs to be a string')
        if tag in self._consumer_tags:
            return
        self._consumer_tags.append(tag)

    def remove_consumer_tag(self, tag=None):
        """Remove one consumer tag, or every tag when none is given.

        :param str|None tag: Consumer tag.
        :return:
        """
        if tag is None:
            self._consumer_tags = []
            return
        if tag in self._consumer_tags:
            self._consumer_tags.remove(tag)
class BaseMessage(object):
    """Base class for AMQP messages."""

    __slots__ = [
        '_body', '_channel', '_method', '_properties'
    ]

    def __init__(self, channel, **message):
        """
        :param Channel channel: AMQPStorm Channel
        :param str|unicode body: Message body
        :param dict method: Message method
        :param dict properties: Message properties
        """
        self._channel = channel
        self._body = message.get('body', None)
        self._method = message.get('method', None)
        self._properties = message.get('properties', {'headers': {}})

    def __iter__(self):
        # Yield (name-without-leading-underscore, value) pairs so the
        # message can be converted with dict().
        for name in ('_body', '_channel', '_method', '_properties'):
            yield name[1:], getattr(self, name)

    def to_dict(self):
        """Return the message as a dictionary.

        :rtype: dict
        """
        return {
            'body': self._body,
            'method': self._method,
            'properties': self._properties,
            'channel': self._channel
        }

    def to_tuple(self):
        """Return the message as a tuple.

        :rtype: tuple
        """
        return (self._body, self._channel, self._method, self._properties)
class Handler(object):
    """Base for operation handlers (e.g. Queue, Exchange) bound to a channel."""

    __slots__ = ['_channel']

    def __init__(self, channel):
        self._channel = channel
| {
"repo_name": "fake-name/ReadableWebProxy",
"path": "amqpstorm/base.py",
"copies": "1",
"size": "4408",
"license": "bsd-3-clause",
"hash": 722451481538029800,
"line_mean": 21.2626262626,
"line_max": 73,
"alpha_frac": 0.5360707804,
"autogenerated": false,
"ratio": 4.142857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 198
} |
"""AMQPStorm Base."""
import locale
import threading
from amqpstorm.compatibility import is_string
from amqpstorm.exception import AMQPChannelError
AUTH_MECHANISM = 'PLAIN'
IDLE_WAIT = 0.01
LOCALE = locale.getdefaultlocale()[0] or 'en_US'
MAX_FRAME_SIZE = 131072
MAX_CHANNELS = 65535
class Stateful(object):
    """Minimal state machine over CLOSED/CLOSING/OPENING/OPEN.

    Provides a lock and an exception log for subclasses to use.
    """

    # Lifecycle states, in order of progression.
    CLOSED = 0
    CLOSING = 1
    OPENING = 2
    OPEN = 3

    def __init__(self):
        self._lock = threading.Lock()
        self._state = Stateful.CLOSED
        self._exceptions = []

    @property
    def lock(self):
        """Threading lock.

        :return:
        """
        return self._lock

    def set_state(self, state):
        """Transition to a new state.

        :param int state:
        :return:
        """
        self._state = state

    @property
    def current_state(self):
        """Current lifecycle state.

        :rtype: int
        """
        return self._state

    @property
    def is_closed(self):
        """Is Closed?

        :rtype: bool
        """
        return self.CLOSED == self._state

    @property
    def is_closing(self):
        """Is Closing?

        :rtype: bool
        """
        return self.CLOSING == self._state

    @property
    def is_opening(self):
        """Is Opening?

        :rtype: bool
        """
        return self.OPENING == self._state

    @property
    def is_open(self):
        """Is Open?

        :rtype: bool
        """
        return self.OPEN == self._state

    @property
    def exceptions(self):
        """All exceptions recorded by this instance.

        Useful when troubleshooting, and used internally to check the
        health of the connection.

        :rtype: list
        """
        return self._exceptions
class BaseChannel(Stateful):
    """Common channel behaviour: channel id plus consumer-tag bookkeeping."""

    __slots__ = [
        '_channel_id', '_consumer_tags'
    ]

    def __init__(self, channel_id):
        super(BaseChannel, self).__init__()
        self._consumer_tags = []
        self._channel_id = channel_id

    @property
    def channel_id(self):
        """Numeric id of this channel.

        :rtype: int
        """
        return self._channel_id

    @property
    def consumer_tags(self):
        """Consumer tags currently registered on this channel.

        :rtype: list
        """
        return self._consumer_tags

    def add_consumer_tag(self, tag):
        """Register a consumer tag, ignoring duplicates.

        :param str tag: Consumer tag.
        :raises AMQPChannelError: when the tag is not a string
        :return:
        """
        if not is_string(tag):
            raise AMQPChannelError('consumer tag needs to be a string')
        if tag in self._consumer_tags:
            return
        self._consumer_tags.append(tag)

    def remove_consumer_tag(self, tag=None):
        """Remove one consumer tag, or every tag when none is given.

        :param str|None tag: Consumer tag.
        :return:
        """
        if tag is None:
            self._consumer_tags = []
            return
        if tag in self._consumer_tags:
            self._consumer_tags.remove(tag)
class BaseMessage(object):
    """Base class for AMQP messages."""

    __slots__ = [
        '_body', '_channel', '_method', '_properties'
    ]

    def __init__(self, channel, **message):
        """
        :param Channel channel: AMQPStorm Channel
        :param str|unicode body: Message body
        :param dict method: Message method
        :param dict properties: Message properties
        """
        self._channel = channel
        self._body = message.get('body', None)
        self._method = message.get('method', None)
        self._properties = message.get('properties', {'headers': {}})

    def __iter__(self):
        # Yield (name-without-leading-underscore, value) pairs so the
        # message can be converted with dict().
        for name in ('_body', '_channel', '_method', '_properties'):
            yield name[1:], getattr(self, name)

    def to_dict(self):
        """Return the message as a dictionary.

        :rtype: dict
        """
        return {
            'body': self._body,
            'method': self._method,
            'properties': self._properties,
            'channel': self._channel
        }

    def to_tuple(self):
        """Return the message as a tuple.

        :rtype: tuple
        """
        return (self._body, self._channel, self._method, self._properties)
class Handler(object):
    """Base for operation handlers (e.g. Queue, Exchange) bound to a channel."""

    __slots__ = ['_channel']

    def __init__(self, channel):
        self._channel = channel
| {
"repo_name": "eandersson/amqp-storm",
"path": "amqpstorm/base.py",
"copies": "1",
"size": "4414",
"license": "mit",
"hash": -9129020114919980000,
"line_mean": 21.2929292929,
"line_max": 73,
"alpha_frac": 0.5364748527,
"autogenerated": false,
"ratio": 4.140712945590995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5177187798290994,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.